| source | python |
|---|---|
backend.py
|
#!/usr/bin/env python3
# This script supports autocompletion with argcomplete: PYTHON_ARGCOMPLETE_OK
from pyspimosim.base_model import create_parser_from_data_class, namespace_to_dataclass
from pyspimosim import spimosim_server
from dataclasses import dataclass
import http.server
import importlib
import multiprocessing
import sys
import signal
import os
import argparse
import subprocess
_script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_script_dir)
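# Make the *_model.py modules that live next to this script importable (see load_classes below)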
class HttpRequestHandler(http.server.CGIHTTPRequestHandler):
extensions_map = {
'': 'application/octet-stream',
'.manifest': 'text/cache-manifest',
'.html': 'text/html',
'.png': 'image/png',
'.jpg': 'image/jpg',
'.svg': 'image/svg+xml',
'.css': 'text/css',
'.js':'application/x-javascript',
'.wasm': 'application/wasm',
'.json': 'application/json',
'.xml': 'application/xml',
}
def log_message(self, format_str, *args):
try:
http_status = int(args[1])
if 200 <= http_status < 400:
return
except (IndexError, ValueError, TypeError):
pass  # not a status-code entry; fall through and log it
super().log_message(format_str, *args)
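# Serve files from www_root over HTTP; the selected model name is exported via the
# "model" environment variable (presumably for CGI scripts, given the CGIHTTPRequestHandler above)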
def start_http_server(model, www_root, listen_on):
message = f"Webserver listens on http://{listen_on[0]}:{listen_on[1]}"
if listen_on[0] == "0.0.0.0":
try:
own_ip = subprocess.run(["hostname", "-I"], stdout=subprocess.PIPE).stdout.decode("UTF-8").split(" ")[0]
message = f"Webserver listens on http://{listen_on[0]}:{listen_on[1]}, try accessing via http://{own_ip}:{listen_on[1]}"
except Exception:
pass  # "hostname -I" unavailable (e.g. non-Linux); keep the generic message
print(message)
os.chdir(www_root)
os.environ["model"] = model
httpd = http.server.HTTPServer(listen_on, HttpRequestHandler)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
def start_http_server_process(model, www_root, listen_on):
process = multiprocessing.Process(target=start_http_server, args=(model, www_root, listen_on))
process.start()
return process
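# Import the given *_model module and return its Model and ModelSettings classes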
def load_classes(class_name):
module = importlib.import_module(class_name)
return getattr(module, 'Model'), getattr(module, 'ModelSettings')
def stop(process):
process.terminate()
process.join()
sys.exit(0)
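# Every *_model.py file next to this script (or in dirname) counts as one available model;
# yield the module names without the .py extension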
def get_model_names(dirname=None):
if dirname is None:
dirname = _script_dir
for path in os.listdir(dirname):
if not os.path.isdir(os.path.join(dirname, path)) and path.endswith("_model.py"):
yield path[:-len(".py")]
def get_models(dirname=None):
models = {}
for model in get_model_names(dirname):
Model, ModelSettings = load_classes(model)
models[model] = (Model, ModelSettings)
return models
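# --help action that prints the top-level help plus the help of every model subparser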
class _HelpAction(argparse._HelpAction):
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
subparsers_actions = [
action for action in parser._actions
if isinstance(action, argparse._SubParsersAction)]
for subparsers_action in subparsers_actions:
for choice, subparser in subparsers_action.choices.items():
print("\n\nHelp for model '{}'".format(choice))
print(subparser.format_help())
parser.exit()
def get_parser(models, default_root=_script_dir+"/../../../"):
parser = argparse.ArgumentParser(add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
subparsers = parser.add_subparsers(title="Available models to run", dest="model", required=True)
parser.add_argument("--www_root", "-W", default=default_root, help="Root directory for the web server")
parser.add_argument("--www_address", "-A", default="0.0.0.0", help="IP address for the web server")
parser.add_argument("--www_port", "-Q", default=8000, type=int, help="Port for the web server")
parser.add_argument('--help', "-h", "-?", action=_HelpAction, help='Help')
for model_name, (Model, ModelSettings) in models.items():
create_parser_from_data_class(ModelSettings, parser=subparsers.add_parser(model_name, formatter_class=argparse.ArgumentDefaultsHelpFormatter))
try:
import argcomplete
argcomplete.autocomplete(parser)
except Exception:
pass # not fatal: bash completion is not available if argcomplete is not installed or fails
return parser
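# Build the model settings from the parsed arguments, start the web server in a child
# process, install signal handlers that shut it down, then run the spimosim backend here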
def start(args, models):
Model, ModelSettings = models[args.model]
model_settings = namespace_to_dataclass(ModelSettings, args, ignore=["model", "www_root", "www_address", "www_port"])
Model.prepare_environment(model_settings)
process = start_http_server_process(args.model, args.www_root, (args.www_address, args.www_port))
signal.signal(signal.SIGINT, lambda sig, frame: stop(process))
signal.signal(signal.SIGTERM, lambda sig, frame: stop(process))
spimosim_server.start(Model, model_settings)
if __name__ == "__main__":
models = get_models()
start(get_parser(models).parse_args(), models)
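# Example invocation (hypothetical model name; the real choices are the *_model.py files
# found next to this script):
#   ./backend.py --www_port 8080 some_model [model-specific options]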
|
__init__.py
|
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Logging utils
"""
import os
import warnings
from threading import Thread
import pkg_resources as pkg
import torch
from torch.utils.tensorboard import SummaryWriter
from utils.general import colorstr, emojis
from utils.loggers.wandb.wandb_utils import WandbLogger
from utils.plots import plot_images, plot_results
from utils.torch_utils import de_parallel
LOGGERS = ('csv', 'tb', 'wandb') # text-file, TensorBoard, Weights & Biases
RANK = int(os.getenv('RANK', -1))
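# RANK is set by distributed launchers such as torchrun; -1 means single-process training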
try:
import wandb
assert hasattr(wandb, '__version__') # verify package import not local dir
if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in [0, -1]:
try:
wandb_login_success = wandb.login(timeout=30)
except wandb.errors.UsageError: # known non-TTY terminal issue
wandb_login_success = False
if not wandb_login_success:
wandb = None
except (ImportError, AssertionError):
wandb = None
class Loggers:
# YOLOv5 Loggers class
def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS):
self.save_dir = save_dir
self.weights = weights
self.opt = opt
self.hyp = hyp
self.logger = logger # for printing results to console
self.include = include
self.keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics
'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss
'x/lr0', 'x/lr1', 'x/lr2'] # params
for k in LOGGERS:
setattr(self, k, None)  # init empty logger attributes
self.csv = True # always log to csv
# Message
if not wandb:
prefix = colorstr('Weights & Biases: ')
s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)"
print(emojis(s))
# TensorBoard
s = self.save_dir
if 'tb' in self.include and not self.opt.evolve:
prefix = colorstr('TensorBoard: ')
self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/")
self.tb = SummaryWriter(str(s))
# W&B
if wandb and 'wandb' in self.include:
wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://')
run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None
self.opt.hyp = self.hyp # add hyperparameters
self.wandb = WandbLogger(self.opt, run_id)
else:
self.wandb = None
def on_pretrain_routine_end(self):
# Callback runs on pre-train routine end
paths = self.save_dir.glob('*labels*.jpg') # training labels
if self.wandb:
self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]})
def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, sync_bn):
# Callback runs on train batch end
if plots:
if ni == 0:
if not sync_bn: # tb.add_graph() --sync known issue https://github.com/ultralytics/yolov5/issues/3754
with warnings.catch_warnings():
warnings.simplefilter('ignore') # suppress jit trace warning
self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
if ni < 3:
f = self.save_dir / f'train_batch{ni}.jpg' # filename
Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
if self.wandb and ni == 10:
files = sorted(self.save_dir.glob('train*.jpg'))
self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})
def on_train_epoch_end(self, epoch):
# Callback runs on train epoch end
if self.wandb:
self.wandb.current_epoch = epoch + 1
def on_val_image_end(self, pred, predn, path, names, im):
# Callback runs on val image end
if self.wandb:
self.wandb.val_one_image(pred, predn, path, names, im)
def on_val_end(self):
# Callback runs on val end
if self.wandb:
files = sorted(self.save_dir.glob('val*.jpg'))
self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]})
def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
# Callback runs at the end of each fit (train+val) epoch
x = {k: v for k, v in zip(self.keys, vals)} # dict
if self.csv:
file = self.save_dir / 'results.csv'
n = len(x) + 1 # number of cols
s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header
with open(file, 'a') as f:
f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')
if self.tb:
for k, v in x.items():
self.tb.add_scalar(k, v, epoch)
if self.wandb:
self.wandb.log(x)
self.wandb.end_epoch(best_result=best_fitness == fi)
def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
# Callback runs on model save event
if self.wandb:
if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
def on_train_end(self, last, best, plots, epoch, results):
# Callback runs on training end
if plots:
plot_results(file=self.save_dir / 'results.csv') # save results.png
files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter
if self.tb:
import cv2
for f in files:
self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC')
if self.wandb:
self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]})
# Calling wandb.log. TODO: Refactor this into WandbLogger.log_model
if not self.opt.evolve:
wandb.log_artifact(str(best if best.exists() else last), type='model',
name='run_' + self.wandb.wandb_run.id + '_model',
aliases=['latest', 'best', 'stripped'])
self.wandb.finish_run()
else:
self.wandb.finish_run()
self.wandb = WandbLogger(self.opt)
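# Usage sketch (hypothetical trainer wiring, not part of this file): the training script
# constructs Loggers(...) once and invokes the callbacks at the matching points, e.g.
#   loggers = Loggers(save_dir, weights, opt, hyp, logger)
#   loggers.on_pretrain_routine_end()
#   per batch:  loggers.on_train_batch_end(ni, model, imgs, targets, paths, plots, sync_bn)
#   per epoch:  loggers.on_train_epoch_end(epoch); loggers.on_fit_epoch_end(vals, epoch, best_fitness, fi)
#   at the end: loggers.on_train_end(last, best, plots, epoch, results)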
|
test_io.py
|
import sys
import gc
import gzip
import os
import threading
import time
import warnings
import io
import re
import pytest
from pathlib import Path
from tempfile import NamedTemporaryFile
from io import BytesIO, StringIO
from datetime import datetime
import locale
from multiprocessing import Process, Value
from ctypes import c_bool
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import ConverterError, ConversionWarning
from numpy.compat import asbytes
from numpy.ma.testutils import assert_equal
from numpy.testing import (
assert_warns, assert_, assert_raises_regex, assert_raises,
assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY,
HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles, assert_no_warnings,
break_cycles
)
from numpy.testing._private.utils import requires_memory
class TextIO(BytesIO):
"""Helper IO class.
Writes encode strings to bytes if needed; reads return bytes.
This makes it easier to emulate files opened in binary mode
without needing to explicitly convert strings to bytes in
setting up the test data.
"""
def __init__(self, s=""):
BytesIO.__init__(self, asbytes(s))
def write(self, s):
BytesIO.write(self, asbytes(s))
def writelines(self, lines):
BytesIO.writelines(self, [asbytes(s) for s in lines])
IS_64BIT = sys.maxsize > 2**32
try:
import bz2
HAS_BZ2 = True
except ImportError:
HAS_BZ2 = False
try:
import lzma
HAS_LZMA = True
except ImportError:
HAS_LZMA = False
def strptime(s, fmt=None):
"""
Wrapper around time.strptime that returns a datetime (date part only);
datetime.strptime itself is only available from Python >= 2.5.
"""
if type(s) == bytes:
s = s.decode("latin1")
return datetime(*time.strptime(s, fmt)[:3])
class RoundtripTest:
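# Base class: subclasses implement roundtrip() on top of np.save / np.savez; this helper
# stores the original arrays in self.arr and the reloaded data in self.arr_reloaded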
def roundtrip(self, save_func, *args, **kwargs):
"""
save_func : callable
Function used to save arrays to file.
file_on_disk : bool
If true, store the file on disk, instead of in a
string buffer.
save_kwds : dict
Parameters passed to `save_func`.
load_kwds : dict
Parameters passed to `numpy.load`.
args : tuple of arrays
Arrays stored to file.
"""
save_kwds = kwargs.get('save_kwds', {})
load_kwds = kwargs.get('load_kwds', {"allow_pickle": True})
file_on_disk = kwargs.get('file_on_disk', False)
if file_on_disk:
target_file = NamedTemporaryFile(delete=False)
load_file = target_file.name
else:
target_file = BytesIO()
load_file = target_file
try:
arr = args
save_func(target_file, *arr, **save_kwds)
target_file.flush()
target_file.seek(0)
if sys.platform == 'win32' and not isinstance(target_file, BytesIO):
target_file.close()
arr_reloaded = np.load(load_file, **load_kwds)
self.arr = arr
self.arr_reloaded = arr_reloaded
finally:
if not isinstance(target_file, BytesIO):
target_file.close()
# holds an open file descriptor so it can't be deleted on win
if 'arr_reloaded' in locals():
if not isinstance(arr_reloaded, np.lib.npyio.NpzFile):
os.remove(target_file.name)
def check_roundtrips(self, a):
self.roundtrip(a)
self.roundtrip(a, file_on_disk=True)
self.roundtrip(np.asfortranarray(a))
self.roundtrip(np.asfortranarray(a), file_on_disk=True)
if a.shape[0] > 1:
# neither C nor Fortran contiguous for 2D arrays or more
self.roundtrip(np.asfortranarray(a)[1:])
self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True)
def test_array(self):
a = np.array([], float)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], float)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], int)
self.check_roundtrips(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
self.check_roundtrips(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
self.check_roundtrips(a)
def test_array_object(self):
a = np.array([], object)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], object)
self.check_roundtrips(a)
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
self.roundtrip(a)
@pytest.mark.skipif(sys.platform == 'win32', reason="Fails on Win32")
def test_mmap(self):
a = np.array([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
a = np.asfortranarray([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
self.check_roundtrips(a)
@pytest.mark.slow
def test_format_2_0(self):
dt = [(("%d" % i) * 100, float) for i in range(500)]
a = np.ones(1000, dtype=dt)
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', '', UserWarning)
self.check_roundtrips(a)
class TestSaveLoad(RoundtripTest):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
assert_equal(self.arr[0], self.arr_reloaded)
assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype)
assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc)
class TestSavezLoad(RoundtripTest):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
try:
for n, arr in enumerate(self.arr):
reloaded = self.arr_reloaded['arr_%d' % n]
assert_equal(arr, reloaded)
assert_equal(arr.dtype, reloaded.dtype)
assert_equal(arr.flags.fnc, reloaded.flags.fnc)
finally:
# delete tempfile, must be done here on windows
if self.arr_reloaded.fid:
self.arr_reloaded.fid.close()
os.remove(self.arr_reloaded.fid.name)
@pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
@pytest.mark.slow
def test_big_arrays(self):
L = (1 << 31) + 100000
a = np.empty(L, dtype=np.uint8)
with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp:
np.savez(tmp, a=a)
del a
npfile = np.load(tmp)
a = npfile['a'] # Should succeed
npfile.close()
del a # Avoid pyflakes unused variable warning.
def test_multiple_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
self.roundtrip(a, b)
def test_named_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
c = BytesIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
assert_equal(a, l['file_a'])
assert_equal(b, l['file_b'])
def test_BagObj(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
c = BytesIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
assert_equal(sorted(dir(l.f)), ['file_a','file_b'])
assert_equal(a, l.f.file_a)
assert_equal(b, l.f.file_b)
def test_savez_filename_clashes(self):
# Test that issue #852 is fixed
# and that savez works in a multithreaded environment
def writer(error_list):
with temppath(suffix='.npz') as tmp:
arr = np.random.randn(500, 500)
try:
np.savez(tmp, arr=arr)
except OSError as err:
error_list.append(err)
errors = []
threads = [threading.Thread(target=writer, args=(errors,))
for j in range(3)]
for t in threads:
t.start()
for t in threads:
t.join()
if errors:
raise AssertionError(errors)
def test_not_closing_opened_fid(self):
# Test that issue #2178 is fixed:
# verify that we can seek on the 'loaded' file
with temppath(suffix='.npz') as tmp:
with open(tmp, 'wb') as fp:
np.savez(fp, data='LOVELY LOAD')
with open(tmp, 'rb', 10000) as fp:
fp.seek(0)
assert_(not fp.closed)
np.load(fp)['data']
# fp must not get closed by .load
assert_(not fp.closed)
fp.seek(0)
assert_(not fp.closed)
@pytest.mark.slow_pypy
def test_closing_fid(self):
# Test that issue #1517 (too many opened files) remains closed
# It might be a "weak" test since failed to get triggered on
# e.g. Debian sid of 2012 Jul 05 but was reported to
# trigger the failure on Ubuntu 10.04:
# http://projects.scipy.org/numpy/ticket/1517#comment:2
with temppath(suffix='.npz') as tmp:
np.savez(tmp, data='LOVELY LOAD')
# We need to check if the garbage collector can properly close the
# npz file returned by np.load when its reference count
# goes to zero. Python 3 running in debug mode raises a
# ResourceWarning when file closing is left to the garbage
# collector, so we catch the warnings.
with suppress_warnings() as sup:
sup.filter(ResourceWarning) # TODO: specify exact message
for i in range(1, 1025):
try:
np.load(tmp)["data"]
except Exception as e:
msg = "Failed to load data from a file: %s" % e
raise AssertionError(msg)
finally:
if IS_PYPY:
gc.collect()
def test_closing_zipfile_after_load(self):
# Check that zipfile owns file and can close it. This needs to
# pass a file name to load for the test. On Windows, a failure will
# cause a second error to be raised when the attempt to remove
# the open file is made.
prefix = 'numpy_test_closing_zipfile_after_load_'
with temppath(suffix='.npz', prefix=prefix) as tmp:
np.savez(tmp, lab='place holder')
data = np.load(tmp)
fp = data.zip.fp
data.close()
assert_(fp.closed)
class TestSaveTxt:
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
c = BytesIO()
np.savetxt(c, a, fmt=fmt)
c.seek(0)
assert_equal(c.readlines(),
[asbytes((fmt + ' ' + fmt + '\n') % (1, 2)),
asbytes((fmt + ' ' + fmt + '\n') % (3, 4))])
a = np.array([[1, 2], [3, 4]], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
def test_0D_3D(self):
c = BytesIO()
assert_raises(ValueError, np.savetxt, c, np.array(1))
assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]]))
def test_structured(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_structured_padded(self):
# gh-13297
a = np.array([(1, 2, 3),(4, 5, 6)], dtype=[
('foo', 'i4'), ('bar', 'i4'), ('baz', 'i4')
])
c = BytesIO()
np.savetxt(c, a[['foo', 'baz']], fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 3\n', b'4 6\n'])
def test_multifield_view(self):
a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')])
v = a[['x', 'z']]
with temppath(suffix='.npy') as path:
path = Path(path)
np.save(path, v)
data = np.load(path)
assert_array_equal(data, v)
def test_delimiter(self):
a = np.array([[1., 2.], [3., 4.]])
c = BytesIO()
np.savetxt(c, a, delimiter=',', fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1,2\n', b'3,4\n'])
def test_format(self):
a = np.array([(1, 2), (3, 4)])
c = BytesIO()
# Sequence of formats
np.savetxt(c, a, fmt=['%02d', '%3.1f'])
c.seek(0)
assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])
# A single multiformat string
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Specify delimiter, should be overridden
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Bad fmt, should raise a ValueError
c = BytesIO()
assert_raises(ValueError, np.savetxt, c, a, fmt=99)
def test_header_footer(self):
# Test the functionality of the header and footer keyword argument.
c = BytesIO()
a = np.array([(1, 2), (3, 4)], dtype=int)
test_header_footer = 'Test header / footer'
# Test the header keyword argument
np.savetxt(c, a, fmt='%1d', header=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
# Test the footer keyword argument
c = BytesIO()
np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
# Test the commentstr keyword argument used on the header
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
header=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n'))
# Test the commentstr keyword argument used on the footer
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
footer=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))
def test_file_roundtrip(self):
with temppath() as name:
a = np.array([(1, 2), (3, 4)])
np.savetxt(name, a)
b = np.loadtxt(name)
assert_array_equal(a, b)
def test_complex_arrays(self):
ncols = 2
nrows = 2
a = np.zeros((ncols, nrows), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re + 1.0j * im
# One format only
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e')
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])
# One format for each real and imaginary part
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n',
b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n'])
# One format for each complex number
c = BytesIO()
np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
def test_complex_negative_exponent(self):
# Prior to 1.15, some formats generated x+-yj, gh 7895
ncols = 2
nrows = 2
a = np.zeros((ncols, nrows), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re - 1.0j * im
c = BytesIO()
np.savetxt(c, a, fmt='%.3e')
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n',
b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n'])
def test_custom_writer(self):
class CustomWriter(list):
def write(self, text):
self.extend(text.split(b'\n'))
w = CustomWriter()
a = np.array([(1, 2), (3, 4)])
np.savetxt(w, a)
b = np.loadtxt(w)
assert_array_equal(a, b)
def test_unicode(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.unicode_)
with tempdir() as tmpdir:
# set encoding as on windows it may not be unicode even on py3
np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
encoding='UTF-8')
def test_unicode_roundtrip(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.unicode_)
# our gz wrapper supports encoding
suffixes = ['', '.gz']
if HAS_BZ2:
suffixes.append('.bz2')
if HAS_LZMA:
suffixes.extend(['.xz', '.lzma'])
with tempdir() as tmpdir:
for suffix in suffixes:
np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
fmt=['%s'], encoding='UTF-16-LE')
b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
encoding='UTF-16-LE', dtype=np.unicode_)
assert_array_equal(a, b)
def test_unicode_bytestream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.unicode_)
s = BytesIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
assert_equal(s.read().decode('UTF-8'), utf8 + '\n')
def test_unicode_stringstream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
a = np.array([utf8], dtype=np.unicode_)
s = StringIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
assert_equal(s.read(), utf8 + '\n')
@pytest.mark.parametrize("fmt", [u"%f", b"%f"])
@pytest.mark.parametrize("iotype", [StringIO, BytesIO])
def test_unicode_and_bytes_fmt(self, fmt, iotype):
# string type of fmt should not matter, see also gh-4053
a = np.array([1.])
s = iotype()
np.savetxt(s, a, fmt=fmt)
s.seek(0)
if iotype is StringIO:
assert_equal(s.read(), u"%f\n" % 1.)
else:
assert_equal(s.read(), b"%f\n" % 1.)
@pytest.mark.skipif(sys.platform=='win32', reason="files>4GB may not work")
@pytest.mark.slow
@requires_memory(free_bytes=7e9)
def test_large_zip(self):
def check_large_zip(memoryerror_raised):
memoryerror_raised.value = False
try:
# The test takes at least 6GB of memory, writes a file larger
# than 4GB. This tests the ``allowZip64`` kwarg to ``zipfile``
test_data = np.asarray([np.random.rand(
np.random.randint(50,100),4)
for i in range(800000)], dtype=object)
with tempdir() as tmpdir:
np.savez(os.path.join(tmpdir, 'test.npz'),
test_data=test_data)
except MemoryError:
memoryerror_raised.value = True
raise
# run in a subprocess to ensure memory is released on PyPy, see gh-15775
# Use an object in shared memory to re-raise the MemoryError exception
# in our process if needed, see gh-16889
memoryerror_raised = Value(c_bool)
p = Process(target=check_large_zip, args=(memoryerror_raised,))
p.start()
p.join()
if memoryerror_raised.value:
raise MemoryError("Child process raised a MemoryError exception")
# -9 indicates a SIGKILL, probably an OOM.
if p.exitcode == -9:
pytest.xfail("subprocess got a SIGKILL, apparently free memory was not sufficient")
assert p.exitcode == 0
class LoadTxtBase:
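# Shared tests for the text loaders; subclasses set `loadfunc` to np.loadtxt or np.genfromtxt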
def check_compressed(self, fopen, suffixes):
# Test that we can load data from a compressed file
wanted = np.arange(6).reshape((2, 3))
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
for suffix in suffixes:
with temppath(suffix=suffix) as name:
with fopen(name, mode='wt', encoding='UTF-32-LE') as f:
f.write(data)
res = self.loadfunc(name, encoding='UTF-32-LE')
assert_array_equal(res, wanted)
with fopen(name, "rt", encoding='UTF-32-LE') as f:
res = self.loadfunc(f)
assert_array_equal(res, wanted)
def test_compressed_gzip(self):
self.check_compressed(gzip.open, ('.gz',))
@pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2")
def test_compressed_bz2(self):
self.check_compressed(bz2.open, ('.bz2',))
@pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma")
def test_compressed_lzma(self):
self.check_compressed(lzma.open, ('.xz', '.lzma'))
def test_encoding(self):
with temppath() as path:
with open(path, "wb") as f:
f.write('0.\n1.\n2.'.encode("UTF-16"))
x = self.loadfunc(path, encoding="UTF-16")
assert_array_equal(x, [0., 1., 2.])
def test_stringload(self):
# umlauts (non-ASCII test data)
nonascii = b'\xc3\xb6\xc3\xbc\xc3\xb6'.decode("UTF-8")
with temppath() as path:
with open(path, "wb") as f:
f.write(nonascii.encode("UTF-16"))
x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode_)
assert_array_equal(x, nonascii)
def test_binary_decode(self):
utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
v = self.loadfunc(BytesIO(utf16), dtype=np.unicode_, encoding='UTF-16')
assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
def test_converters_decode(self):
# test converters that decode strings
c = TextIO()
c.write(b'\xcf\x96')
c.seek(0)
x = self.loadfunc(c, dtype=np.unicode_,
converters={0: lambda x: x.decode('UTF-8')})
a = np.array([b'\xcf\x96'.decode('UTF-8')])
assert_array_equal(x, a)
def test_converters_nodecode(self):
# test native string converters enabled by setting an encoding
utf8 = b'\xcf\x96'.decode('UTF-8')
with temppath() as path:
with io.open(path, 'wt', encoding='UTF-8') as f:
f.write(utf8)
x = self.loadfunc(path, dtype=np.unicode_,
converters={0: lambda x: x + 't'},
encoding='UTF-8')
a = np.array([utf8 + 't'])
assert_array_equal(x, a)
class TestLoadTxt(LoadTxtBase):
loadfunc = staticmethod(np.loadtxt)
def setup(self):
# lower chunksize for testing
self.orig_chunk = np.lib.npyio._loadtxt_chunksize
np.lib.npyio._loadtxt_chunksize = 1
def teardown(self):
np.lib.npyio._loadtxt_chunksize = self.orig_chunk
def test_record(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_array_equal(x, a)
d = TextIO()
d.write('M 64.0 75.0\nF 25.0 60.0')
d.seek(0)
mydescriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
b = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=mydescriptor)
y = np.loadtxt(d, dtype=mydescriptor)
assert_array_equal(y, b)
def test_array(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([[1, 2], [3, 4]], int)
assert_array_equal(x, a)
c.seek(0)
x = np.loadtxt(c, dtype=float)
a = np.array([[1, 2], [3, 4]], float)
assert_array_equal(x, a)
def test_1D(self):
c = TextIO()
c.write('1\n2\n3\n4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
c = TextIO()
c.write('1,2,3,4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',')
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
def test_missing(self):
c = TextIO()
c.write('1,2,3,,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
a = np.array([1, 2, 3, -999, 5], int)
assert_array_equal(x, a)
def test_converters_with_usecols(self):
c = TextIO()
c.write('1,2,3,,5\n6,7,8,9,10\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
a = np.array([[2, -999], [7, 9]], int)
assert_array_equal(x, a)
def test_comments_unicode(self):
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments=u'#')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_comments_byte(self):
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments=b'#')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_comments_multiple(self):
c = TextIO()
c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments=['#', '@', '//'])
a = np.array([[1, 2, 3], [4, 5, 6]], int)
assert_array_equal(x, a)
def test_comments_multi_chars(self):
c = TextIO()
c.write('/* comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments='/*')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
# Check that '/*' is not transformed to ['/', '*']
c = TextIO()
c.write('*/ comment\n1,2,3,5\n')
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',',
comments='/*')
def test_skiprows(self):
c = TextIO()
c.write('comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_usecols(self):
a = np.array([[1, 2], [3, 4]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1,))
assert_array_equal(x, a[:, 1])
a = np.array([[1, 2, 3], [3, 4, 5]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1, 2))
assert_array_equal(x, a[:, 1:])
# Testing with arrays instead of tuples.
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
assert_array_equal(x, a[:, 1:])
# Testing with an integer instead of a sequence
for int_type in [int, np.int8, np.int16,
np.int32, np.int64, np.uint8, np.uint16,
np.uint32, np.uint64]:
to_read = int_type(1)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=to_read)
assert_array_equal(x, a[:, 1])
# Testing with some crazy custom integer type
class CrazyInt:
def __index__(self):
return 1
crazy_int = CrazyInt()
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=crazy_int)
assert_array_equal(x, a[:, 1])
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(crazy_int,))
assert_array_equal(x, a[:, 1])
# Checking with dtypes defined converters.
data = '''JOE 70.1 25.3
BOB 60.5 27.9
'''
c = TextIO(data)
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(arr['stid'], [b"JOE", b"BOB"])
assert_equal(arr['temp'], [25.3, 27.9])
# Testing non-ints in usecols
c.seek(0)
bogus_idx = 1.5
assert_raises_regex(
TypeError,
'^usecols must be.*%s' % type(bogus_idx),
np.loadtxt, c, usecols=bogus_idx
)
assert_raises_regex(
TypeError,
'^usecols must be.*%s' % type(bogus_idx),
np.loadtxt, c, usecols=[0, bogus_idx, 0]
)
def test_fancy_dtype(self):
c = TextIO()
c.write('1,2,3.0\n4,5,6.0\n')
c.seek(0)
dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
x = np.loadtxt(c, dtype=dt, delimiter=',')
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
assert_array_equal(x, a)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_3d_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0,
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])],
dtype=dt)
assert_array_equal(x, a)
def test_str_dtype(self):
# see gh-8033
c = ["str1", "str2"]
for dt in (str, np.bytes_):
a = np.array(["str1", "str2"], dtype=dt)
x = np.loadtxt(c, dtype=dt)
assert_array_equal(x, a)
def test_empty_file(self):
with suppress_warnings() as sup:
sup.filter(message="loadtxt: Empty input file:")
c = TextIO()
x = np.loadtxt(c)
assert_equal(x.shape, (0,))
x = np.loadtxt(c, dtype=np.int64)
assert_equal(x.shape, (0,))
assert_(x.dtype == np.int64)
def test_unused_converter(self):
c = TextIO()
c.writelines(['1 21\n', '3 42\n'])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_array_equal(data, [21, 42])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_array_equal(data, [33, 66])
def test_dtype_with_object(self):
# Test using an explicit dtype with an object
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
def test_uint64_type(self):
tgt = (9223372043271415339, 9223372043271415853)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.uint64)
assert_equal(res, tgt)
def test_int64_type(self):
tgt = (-9223372036854775807, 9223372036854775807)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.int64)
assert_equal(res, tgt)
def test_from_float_hex(self):
# IEEE doubles and floats only, otherwise the float32
# conversion may fail.
tgt = np.logspace(-10, 10, 5).astype(np.float32)
tgt = np.hstack((tgt, -tgt)).astype(float)
inp = '\n'.join(map(float.hex, tgt))
c = TextIO()
c.write(inp)
for dt in [float, np.float32]:
c.seek(0)
res = np.loadtxt(c, dtype=dt)
assert_equal(res, tgt, err_msg="%s" % dt)
def test_default_float_converter_no_default_hex_conversion(self):
"""
Ensure that fromhex is only used for values with the correct prefix and
is not called by default. Regression test related to gh-19598.
"""
c = TextIO("a b c")
with pytest.raises(
ValueError, match="could not convert string to float"
):
np.loadtxt(c)
def test_default_float_converter_exception(self):
"""
Ensure that the exception message raised during failed floating point
conversion is correct. Regression test related to gh-19598.
"""
c = TextIO("qrs tuv") # Invalid values for default float converter
with pytest.raises(
ValueError, match="could not convert string to float"
):
np.loadtxt(c)
def test_from_complex(self):
tgt = (complex(1, 1), complex(1, -1))
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=complex)
assert_equal(res, tgt)
def test_complex_misformatted(self):
# test for backward compatibility
# some complex formats used to generate x+-yj
a = np.zeros((2, 2), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re - 1.0j * im
c = BytesIO()
np.savetxt(c, a, fmt='%.16e')
c.seek(0)
txt = c.read()
c.seek(0)
# misformat the sign on the imaginary part, gh 7895
txt_bad = txt.replace(b'e+00-', b'e00+-')
assert_(txt_bad != txt)
c.write(txt_bad)
c.seek(0)
res = np.loadtxt(c, dtype=complex)
assert_equal(res, a)
def test_universal_newline(self):
with temppath() as name:
with open(name, 'w') as f:
f.write('1 21\r3 42\r')
data = np.loadtxt(name)
assert_array_equal(data, [[1, 21], [3, 42]])
def test_empty_field_after_tab(self):
c = TextIO()
c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t')
c.seek(0)
dt = {'names': ('x', 'y', 'z', 'comment'),
'formats': ('<i4', '<i4', '<f4', '|S8')}
x = np.loadtxt(c, dtype=dt, delimiter='\t')
a = np.array([b'start ', b' ', b''])
assert_array_equal(x['comment'], a)
def test_unpack_structured(self):
txt = TextIO("M 21 72\nF 35 58")
dt = {'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
assert_(a.dtype.str == '|S1')
assert_(b.dtype.str == '<i4')
assert_(c.dtype.str == '<f4')
assert_array_equal(a, np.array([b'M', b'F']))
assert_array_equal(b, np.array([21, 35]))
assert_array_equal(c, np.array([72., 58.]))
def test_ndmin_keyword(self):
c = TextIO()
c.write('1,2,3\n4,5,6')
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=3)
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=1.5)
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1)
a = np.array([[1, 2, 3], [4, 5, 6]])
assert_array_equal(x, a)
d = TextIO()
d.write('0,1,2')
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (1, 3))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
e = TextIO()
e.write('0\n1\n2')
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (3, 1))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
# Test ndmin kw with empty file.
with suppress_warnings() as sup:
sup.filter(message="loadtxt: Empty input file:")
f = TextIO()
assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,))
assert_(np.loadtxt(f, ndmin=1).shape == (0,))
def test_generator_source(self):
def count():
for i in range(10):
yield "%d" % i
res = np.loadtxt(count())
assert_array_equal(res, np.arange(10))
def test_bad_line(self):
c = TextIO()
c.write('1 2 3\n4 5 6\n2 3')
c.seek(0)
# Check for exception and that exception contains line number
assert_raises_regex(ValueError, "3", np.loadtxt, c)
def test_none_as_string(self):
# gh-5155, None should work as string when format demands it
c = TextIO()
c.write('100,foo,200\n300,None,400')
c.seek(0)
dt = np.dtype([('x', int), ('a', 'S10'), ('y', int)])
np.loadtxt(c, delimiter=',', dtype=dt, comments=None) # Should succeed
@pytest.mark.skipif(locale.getpreferredencoding() == 'ANSI_X3.4-1968',
reason="Wrong preferred encoding")
def test_binary_load(self):
butf8 = b"5,6,7,\xc3\x95scarscar\n\r15,2,3,hello\n\r"\
b"20,2,3,\xc3\x95scar\n\r"
sutf8 = butf8.decode("UTF-8").replace("\r", "").splitlines()
with temppath() as path:
with open(path, "wb") as f:
f.write(butf8)
with open(path, "rb") as f:
x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode_)
assert_array_equal(x, sutf8)
# test the broken latin1 conversion that people now rely on
with open(path, "rb") as f:
x = np.loadtxt(f, encoding="UTF-8", dtype="S")
x = [b'5,6,7,\xc3\x95scarscar', b'15,2,3,hello', b'20,2,3,\xc3\x95scar']
assert_array_equal(x, np.array(x, dtype="S"))
def test_max_rows(self):
c = TextIO()
c.write('1,2,3,5\n4,5,7,8\n2,1,4,5')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
max_rows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_max_rows_with_skiprows(self):
c = TextIO()
c.write('comments\n1,2,3,5\n4,5,7,8\n2,1,4,5')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1, max_rows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
c = TextIO()
c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1, max_rows=2)
a = np.array([[1, 2, 3, 5], [4, 5, 7, 8]], int)
assert_array_equal(x, a)
def test_max_rows_with_read_continuation(self):
c = TextIO()
c.write('1,2,3,5\n4,5,7,8\n2,1,4,5')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
max_rows=2)
a = np.array([[1, 2, 3, 5], [4, 5, 7, 8]], int)
assert_array_equal(x, a)
# test continuation
x = np.loadtxt(c, dtype=int, delimiter=',')
a = np.array([2,1,4,5], int)
assert_array_equal(x, a)
def test_max_rows_larger(self):
# test max_rows > num rows
c = TextIO()
c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1, max_rows=6)
a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int)
assert_array_equal(x, a)
class Testfromregex:
def test_record(self):
c = TextIO()
c.write('1.312 foo\n1.534 bar\n4.444 qux')
c.seek(0)
dt = [('num', np.float64), ('val', 'S3')]
x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_2(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.int32), ('val', 'S3')]
x = np.fromregex(c, r"(\d+)\s+(...)", dt)
a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_3(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.float64)]
x = np.fromregex(c, r"(\d+)\s+...", dt)
a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
assert_array_equal(x, a)
@pytest.mark.parametrize("path_type", [str, Path])
def test_record_unicode(self, path_type):
utf8 = b'\xcf\x96'
with temppath() as str_path:
path = path_type(str_path)
with open(path, 'wb') as f:
f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux')
dt = [('num', np.float64), ('val', 'U4')]
x = np.fromregex(path, r"(?u)([0-9.]+)\s+(\w+)", dt, encoding='UTF-8')
a = np.array([(1.312, 'foo' + utf8.decode('UTF-8')), (1.534, 'bar'),
(4.444, 'qux')], dtype=dt)
assert_array_equal(x, a)
regexp = re.compile(r"([0-9.]+)\s+(\w+)", re.UNICODE)
x = np.fromregex(path, regexp, dt, encoding='UTF-8')
assert_array_equal(x, a)
def test_compiled_bytes(self):
regexp = re.compile(b'(\\d)')
c = BytesIO(b'123')
dt = [('num', np.float64)]
a = np.array([1, 2, 3], dtype=dt)
x = np.fromregex(c, regexp, dt)
assert_array_equal(x, a)
def test_bad_dtype_not_structured(self):
regexp = re.compile(b'(\\d)')
c = BytesIO(b'123')
with pytest.raises(TypeError, match='structured datatype'):
np.fromregex(c, regexp, dtype=np.float64)
#####--------------------------------------------------------------------------
class TestFromTxt(LoadTxtBase):
loadfunc = staticmethod(np.genfromtxt)
def test_record(self):
# Test w/ explicit dtype
data = TextIO('1 2\n3 4')
test = np.genfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_equal(test, control)
#
data = TextIO('M 64.0 75.0\nF 25.0 60.0')
descriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
dtype=descriptor)
test = np.genfromtxt(data, dtype=descriptor)
assert_equal(test, control)
def test_array(self):
# Test outputting a standard ndarray
data = TextIO('1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
test = np.genfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data.seek(0)
control = np.array([[1, 2], [3, 4]], dtype=float)
test = np.loadtxt(data, dtype=float)
assert_array_equal(test, control)
def test_1D(self):
# Test squeezing to 1D
control = np.array([1, 2, 3, 4], int)
#
data = TextIO('1\n2\n3\n4\n')
test = np.genfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data = TextIO('1,2,3,4\n')
test = np.genfromtxt(data, dtype=int, delimiter=',')
assert_array_equal(test, control)
def test_comments(self):
# Test the stripping of comments
control = np.array([1, 2, 3, 5], int)
# Comment on its own line
data = TextIO('# comment\n1,2,3,5\n')
test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
# Comment at the end of a line
data = TextIO('1,2,3,5# comment\n')
test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
def test_skiprows(self):
# Test row skipping
control = np.array([1, 2, 3, 5], int)
kwargs = dict(dtype=int, delimiter=',')
#
data = TextIO('comment\n1,2,3,5\n')
test = np.genfromtxt(data, skip_header=1, **kwargs)
assert_equal(test, control)
#
data = TextIO('# comment\n1,2,3,5\n')
test = np.loadtxt(data, skiprows=1, **kwargs)
assert_equal(test, control)
def test_skip_footer(self):
data = ["# %i" % i for i in range(1, 6)]
data.append("A, B, C")
data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
data[-1] = "99,99"
kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
dtype=[(_, float) for _ in "ABC"])
assert_equal(test, ctrl)
def test_skip_footer_with_invalid(self):
with suppress_warnings() as sup:
sup.filter(ConversionWarning)
basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
# Footer too small to get rid of all invalid values
assert_raises(ValueError, np.genfromtxt,
TextIO(basestr), skip_footer=1)
a = np.genfromtxt(
TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
a = np.genfromtxt(TextIO(basestr), skip_footer=3)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n'
a = np.genfromtxt(
TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
a = np.genfromtxt(
TextIO(basestr), skip_footer=3, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
def test_header(self):
# Test retrieving a header
data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(data, dtype=None, names=True)
assert_(w[0].category is np.VisibleDeprecationWarning)
control = {'gender': np.array([b'M', b'F']),
'age': np.array([64.0, 25.0]),
'weight': np.array([75.0, 60.0])}
assert_equal(test['gender'], control['gender'])
assert_equal(test['age'], control['age'])
assert_equal(test['weight'], control['weight'])
def test_auto_dtype(self):
# Test the automatic definition of the output dtype
data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(data, dtype=None)
assert_(w[0].category is np.VisibleDeprecationWarning)
control = [np.array([b'A', b'BCD']),
np.array([64, 25]),
np.array([75.0, 60.0]),
np.array([3 + 4j, 5 + 6j]),
np.array([True, False]), ]
assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
for (i, ctrl) in enumerate(control):
assert_equal(test['f%i' % i], ctrl)
def test_auto_dtype_uniform(self):
# Tests whether the output dtype can be uniformized
data = TextIO('1 2 3 4\n5 6 7 8\n')
test = np.genfromtxt(data, dtype=None)
control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
assert_equal(test, control)
def test_fancy_dtype(self):
# Check that a nested dtype isn't MIA
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.genfromtxt(data, dtype=fancydtype, delimiter=',')
control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_names_overwrite(self):
# Test overwriting the names of the dtype
descriptor = {'names': ('g', 'a', 'w'),
'formats': ('S1', 'i4', 'f4')}
data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
names = ('gender', 'age', 'weight')
test = np.genfromtxt(data, dtype=descriptor, names=names)
descriptor['names'] = names
control = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=descriptor)
assert_equal(test, control)
def test_commented_header(self):
# Check that names can be retrieved even if the line is commented out.
data = TextIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
# The # is part of the first name and should be deleted automatically.
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(data, names=True, dtype=None)
assert_(w[0].category is np.VisibleDeprecationWarning)
ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
assert_equal(test, ctrl)
# Ditto, but we should get rid of the first element
data = TextIO(b"""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(data, names=True, dtype=None)
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test, ctrl)
def test_names_and_comments_none(self):
# Tests case when names is true but comments is None (gh-10780)
data = TextIO('col1 col2\n 1 2\n 3 4')
test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True)
control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)])
assert_equal(test, control)
def test_file_is_closed_on_error(self):
# gh-13200
with tempdir() as tmpdir:
fpath = os.path.join(tmpdir, "test.csv")
with open(fpath, "wb") as f:
f.write(u'\N{GREEK PI SYMBOL}'.encode('utf8'))
# ResourceWarnings are emitted from a destructor, so won't be
# detected by regular propagation to errors.
with assert_no_warnings():
with pytest.raises(UnicodeDecodeError):
np.genfromtxt(fpath, encoding="ascii")
def test_autonames_and_usecols(self):
# Tests names and usecols
data = TextIO('A B C D\n aaaa 121 45 9.1')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(data, usecols=('A', 'C', 'D'),
names=True, dtype=None)
assert_(w[0].category is np.VisibleDeprecationWarning)
control = np.array(('aaaa', 45, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_with_usecols(self):
# Test the combination user-defined converters and usecol
data = TextIO('1,2,3,,5\n6,7,8,9,10\n')
test = np.genfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
control = np.array([[2, -999], [7, 9]], int)
assert_equal(test, control)
def test_converters_with_usecols_and_names(self):
# Tests names and usecols
data = TextIO('A B C D\n aaaa 121 45 9.1')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(data, usecols=('A', 'C', 'D'), names=True,
dtype=None,
converters={'C': lambda s: 2 * int(s)})
assert_(w[0].category is np.VisibleDeprecationWarning)
control = np.array(('aaaa', 90, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_cornercases(self):
# Test the conversion to datetime.
converter = {
'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.genfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', np.object_), ('stid', float)])
assert_equal(test, control)
def test_converters_cornercases2(self):
# Test the conversion to datetime64.
converter = {
'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.genfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', 'datetime64[us]'), ('stid', float)])
assert_equal(test, control)
def test_unused_converter(self):
# Test whether unused converters are forgotten
data = TextIO("1 21\n 3 42\n")
test = np.genfromtxt(data, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_equal(test, [21, 42])
#
data.seek(0)
test = np.genfromtxt(data, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_equal(test, [33, 66])
def test_invalid_converter(self):
strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or
(b'r' not in x.lower() and x.strip() or 0.0))
strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or
(b'%' not in x.lower() and x.strip() or 0.0))
s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
"L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
"D02N03,10/10/2004,R 1,,7,145.55")
kwargs = dict(
converters={2: strip_per, 3: strip_rand}, delimiter=",",
dtype=None)
assert_raises(ConverterError, np.genfromtxt, s, **kwargs)
def test_tricky_converter_bug1666(self):
# Test some corner cases
s = TextIO('q1,2\nq3,4')
cnv = lambda s: float(s[1:])
test = np.genfromtxt(s, delimiter=',', converters={0: cnv})
control = np.array([[1., 2.], [3., 4.]])
assert_equal(test, control)
def test_dtype_with_converters(self):
dstr = "2009; 23; 46"
test = np.genfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: bytes})
control = np.array([('2009', 23., 46)],
dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
assert_equal(test, control)
test = np.genfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: float})
control = np.array([2009., 23., 46],)
assert_equal(test, control)
def test_dtype_with_converters_and_usecols(self):
dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n"
dmap = {'1:1':0, '1:n':1, 'm:1':2, 'm:n':3}
dtyp = [('e1','i4'),('e2','i4'),('e3','i2'),('n', 'i1')]
conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]}
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
names=None, converters=conv)
control = np.rec.array([(1,5,-1,0), (2,8,-1,1), (3,3,-2,3)], dtype=dtyp)
assert_equal(test, control)
dtyp = [('e1','i4'),('e2','i4'),('n', 'i1')]
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
usecols=(0,1,3), names=None, converters=conv)
control = np.rec.array([(1,5,0), (2,8,1), (3,3,3)], dtype=dtyp)
assert_equal(test, control)
def test_dtype_with_object(self):
# Test using an explicit dtype with an object
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
ndtype = [('nest', [('idx', int), ('code', object)])]
with assert_raises_regex(NotImplementedError,
'Nested fields.* not supported.*'):
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
# nested but empty fields also aren't supported
ndtype = [('idx', int), ('code', object), ('nest', [])]
with assert_raises_regex(NotImplementedError,
'Nested fields.* not supported.*'):
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
def test_dtype_with_object_no_converter(self):
# Object without a converter uses bytes:
parsed = np.genfromtxt(TextIO("1"), dtype=object)
assert parsed[()] == b"1"
parsed = np.genfromtxt(TextIO("string"), dtype=object)
assert parsed[()] == b"string"
def test_userconverters_with_explicit_dtype(self):
# Test user_converters w/ explicit (standard) dtype
data = TextIO('skip,skip,2001-01-01,1.0,skip')
test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: bytes})
control = np.array([('2001-01-01', 1.)],
dtype=[('', '|S10'), ('', float)])
assert_equal(test, control)
def test_utf8_userconverters_with_explicit_dtype(self):
utf8 = b'\xcf\x96'
with temppath() as path:
with open(path, 'wb') as f:
f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip')
test = np.genfromtxt(path, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: np.compat.unicode},
encoding='UTF-8')
control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)],
dtype=[('', '|U11'), ('', float)])
assert_equal(test, control)
def test_spacedelimiter(self):
# Test space delimiter
data = TextIO("1 2 3 4 5\n6 7 8 9 10")
test = np.genfromtxt(data)
control = np.array([[1., 2., 3., 4., 5.],
[6., 7., 8., 9., 10.]])
assert_equal(test, control)
def test_integer_delimiter(self):
# Test using an integer for delimiter
data = " 1 2 3\n 4 5 67\n890123 4"
test = np.genfromtxt(TextIO(data), delimiter=3)
control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
assert_equal(test, control)
def test_missing(self):
data = TextIO('1,2,3,,5\n')
test = np.genfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
control = np.array([1, 2, 3, -999, 5], int)
assert_equal(test, control)
def test_missing_with_tabs(self):
# Test w/ a delimiter tab
txt = "1\t2\t3\n\t2\t\n1\t\t3"
test = np.genfromtxt(TextIO(txt), delimiter="\t",
usemask=True,)
ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
assert_equal(test.data, ctrl_d)
assert_equal(test.mask, ctrl_m)
def test_usecols(self):
# Test the selection of columns
# Select 1 column
control = np.array([[1, 2], [3, 4]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.genfromtxt(data, dtype=float, usecols=(1,))
assert_equal(test, control[:, 1])
#
control = np.array([[1, 2, 3], [3, 4, 5]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.genfromtxt(data, dtype=float, usecols=(1, 2))
assert_equal(test, control[:, 1:])
# Testing with arrays instead of tuples.
data.seek(0)
test = np.genfromtxt(data, dtype=float, usecols=np.array([1, 2]))
assert_equal(test, control[:, 1:])
def test_usecols_as_css(self):
# Test giving usecols with a comma-separated string
data = "1 2 3\n4 5 6"
test = np.genfromtxt(TextIO(data),
names="a, b, c", usecols="a, c")
ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
assert_equal(test, ctrl)
def test_usecols_with_structured_dtype(self):
# Test usecols with an explicit structured dtype
data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
test = np.genfromtxt(
data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(test['stid'], [b"JOE", b"BOB"])
assert_equal(test['temp'], [25.3, 27.9])
def test_usecols_with_integer(self):
# Test usecols with an integer
test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0)
assert_equal(test, np.array([1., 4.]))
def test_usecols_with_named_columns(self):
# Test usecols with named columns
ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
data = "1 2 3\n4 5 6"
kwargs = dict(names="a, b, c")
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data),
usecols=('a', 'c'), **kwargs)
assert_equal(test, ctrl)
def test_empty_file(self):
# Test that an empty file raises the proper warning.
with suppress_warnings() as sup:
sup.filter(message="genfromtxt: Empty input file:")
data = TextIO()
test = np.genfromtxt(data)
assert_equal(test, np.array([]))
# when skip_header > 0
test = np.genfromtxt(data, skip_header=1)
assert_equal(test, np.array([]))
def test_fancy_dtype_alt(self):
# Check that a nested dtype isn't MIA
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.genfromtxt(data, dtype=fancydtype, delimiter=',', usemask=True)
control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.genfromtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_withmissing(self):
data = TextIO('A,B\n0,1\n2,N/A')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.genfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
data.seek(0)
test = np.genfromtxt(data, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', float), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_user_missing_values(self):
data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
basekwargs = dict(dtype=None, delimiter=",", names=True,)
mdtype = [('A', int), ('B', float), ('C', complex)]
#
test = np.genfromtxt(TextIO(data), missing_values="N/A",
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
dtype=mdtype)
assert_equal(test, control)
#
basekwargs['dtype'] = mdtype
test = np.genfromtxt(TextIO(data),
missing_values={0: -9, 1: -99, 2: -999j}, usemask=True, **basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
#
test = np.genfromtxt(TextIO(data),
missing_values={0: -9, 'B': -99, 'C': -999j},
usemask=True,
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
def test_user_filling_values(self):
# Test with missing and filling values
ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)])
data = "N/A, 2, 3\n4, ,???"
kwargs = dict(delimiter=",",
dtype=int,
names="a,b,c",
missing_values={0: "N/A", 'b': " ", 2: "???"},
filling_values={0: 0, 'b': 0, 2: -999})
test = np.genfromtxt(TextIO(data), **kwargs)
ctrl = np.array([(0, 2, 3), (4, 0, -999)],
dtype=[(_, int) for _ in "abc"])
assert_equal(test, ctrl)
#
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
assert_equal(test, ctrl)
data2 = "1,2,*,4\n5,*,7,8\n"
test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
missing_values="*", filling_values=0)
ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]])
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
missing_values="*", filling_values=-1)
ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]])
assert_equal(test, ctrl)
def test_withmissing_float(self):
data = TextIO('A,B\n0,1.5\n2,-999.00')
test = np.genfromtxt(data, dtype=None, delimiter=',',
missing_values='-999.0', names=True, usemask=True)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_with_masked_column_uniform(self):
# Test masked column
data = TextIO('1 2 3\n4 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
assert_equal(test, control)
def test_with_masked_column_various(self):
# Test masked column
data = TextIO('True 2 3\nFalse 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([(1, 2, 3), (0, 5, 6)],
mask=[(0, 1, 0), (0, 1, 0)],
dtype=[('f0', bool), ('f1', bool), ('f2', int)])
assert_equal(test, control)
def test_invalid_raise(self):
# Test invalid raise
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
kwargs = dict(delimiter=",", dtype=None, names=True)
def f():
return np.genfromtxt(mdata, invalid_raise=False, **kwargs)
mtest = assert_warns(ConversionWarning, f)
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
#
mdata.seek(0)
assert_raises(ValueError, np.genfromtxt, mdata,
delimiter=",", names=True)
def test_invalid_raise_with_usecols(self):
# Test invalid_raise with usecols
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
kwargs = dict(delimiter=",", dtype=None, names=True,
invalid_raise=False)
def f():
return np.genfromtxt(mdata, usecols=(0, 4), **kwargs)
mtest = assert_warns(ConversionWarning, f)
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
#
mdata.seek(0)
mtest = np.genfromtxt(mdata, usecols=(0, 1), **kwargs)
assert_equal(len(mtest), 50)
control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
control[[10 * _ for _ in range(5)]] = (2, 2)
assert_equal(mtest, control)
def test_inconsistent_dtype(self):
# Test inconsistent dtype
data = ["1, 1, 1, 1, -1.1"] * 50
mdata = TextIO("\n".join(data))
converters = {4: lambda x: "(%s)" % x.decode()}
kwargs = dict(delimiter=",", converters=converters,
dtype=[(_, int) for _ in 'abcde'],)
assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
def test_default_field_format(self):
# Test default format
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=None, defaultfmt="f%02i")
ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
dtype=[("f00", int), ("f01", int), ("f02", float)])
assert_equal(mtest, ctrl)
def test_single_dtype_wo_names(self):
# Test single dtype w/o names
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=float, defaultfmt="f%02i")
ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
assert_equal(mtest, ctrl)
def test_single_dtype_w_explicit_names(self):
# Test single dtype w explicit names
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=float, names="a, b, c")
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_single_dtype_w_implicit_names(self):
# Test single dtype w implicit names
data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=float, names=True)
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_easy_structured_dtype(self):
# Test easy structured dtype
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.genfromtxt(TextIO(data), delimiter=",",
dtype=(int, float, float), defaultfmt="f_%02i")
ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
assert_equal(mtest, ctrl)
def test_autostrip(self):
# Test autostrip
data = "01/01/2003 , 1.3, abcde"
kwargs = dict(delimiter=",", dtype=None)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
mtest = np.genfromtxt(TextIO(data), **kwargs)
assert_(w[0].category is np.VisibleDeprecationWarning)
ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
assert_equal(mtest, ctrl)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
mtest = np.genfromtxt(TextIO(data), autostrip=True, **kwargs)
assert_(w[0].category is np.VisibleDeprecationWarning)
ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
assert_equal(mtest, ctrl)
def test_replace_space(self):
# Test the 'replace_space' option
txt = "A.A, B (B), C:C\n1, 2, 3.14"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
def test_replace_space_known_dtype(self):
# Test the 'replace_space' (and related) options when dtype != None
txt = "A.A, B (B), C:C\n1, 2, 3"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=int)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)]
ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=int,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)]
ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=int,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)]
ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
assert_equal(test, ctrl)
def test_incomplete_names(self):
# Test w/ incomplete names
data = "A,,C\n0,1,2\n3,4,5"
kwargs = dict(delimiter=",", names=True)
# w/ dtype=None
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, int) for _ in ('A', 'f0', 'C')])
test = np.genfromtxt(TextIO(data), dtype=None, **kwargs)
assert_equal(test, ctrl)
# w/ default dtype
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, float) for _ in ('A', 'f0', 'C')])
        test = np.genfromtxt(TextIO(data), **kwargs)
        assert_equal(test, ctrl)
def test_names_auto_completion(self):
# Make sure that names are properly completed
data = "1 2 3\n 4 5 6"
test = np.genfromtxt(TextIO(data),
dtype=(int, float, int), names="a")
ctrl = np.array([(1, 2, 3), (4, 5, 6)],
dtype=[('a', int), ('f0', float), ('f1', int)])
assert_equal(test, ctrl)
def test_names_with_usecols_bug1636(self):
# Make sure we pick up the right names w/ usecols
data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
ctrl_names = ("A", "C", "E")
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=(0, 2, 4), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=int, delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
def test_fixed_width_names(self):
# Test fix-width w/ names
data = " A B C\n 0 1 2.3\n 45 67 9."
kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.genfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
#
kwargs = dict(delimiter=5, names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.genfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_filling_values(self):
# Test missing values
data = b"1, 2, 3\n1, , 5\n0, 6, \n"
kwargs = dict(delimiter=",", dtype=None, filling_values=-999)
ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
test = np.genfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_comments_is_none(self):
# Github issue 329 (None was previously being converted to 'None').
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test[1], b'testNonetherestofthedata')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test[1], b' testNonetherestofthedata')
def test_latin1(self):
latin1 = b'\xf6\xfc\xf6'
norm = b"norm1,norm2,norm3\n"
enc = b"test1,testNonethe" + latin1 + b",test3\n"
s = norm + enc + norm
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO(s),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test[1, 0], b"test1")
assert_equal(test[1, 1], b"testNonethe" + latin1)
assert_equal(test[1, 2], b"test3")
test = np.genfromtxt(TextIO(s),
dtype=None, comments=None, delimiter=',',
encoding='latin1')
assert_equal(test[1, 0], u"test1")
assert_equal(test[1, 1], u"testNonethe" + latin1.decode('latin1'))
assert_equal(test[1, 2], u"test3")
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
assert_equal(test['f0'], 0)
assert_equal(test['f1'], b"testNonethe" + latin1)
def test_binary_decode_autodtype(self):
utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16')
assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
def test_utf8_byte_encoding(self):
utf8 = b"\xcf\x96"
norm = b"norm1,norm2,norm3\n"
enc = b"test1,testNonethe" + utf8 + b",test3\n"
s = norm + enc + norm
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
test = np.genfromtxt(TextIO(s),
dtype=None, comments=None, delimiter=',')
assert_(w[0].category is np.VisibleDeprecationWarning)
ctl = np.array([
[b'norm1', b'norm2', b'norm3'],
[b'test1', b'testNonethe' + utf8, b'test3'],
[b'norm1', b'norm2', b'norm3']])
assert_array_equal(test, ctl)
def test_utf8_file(self):
utf8 = b"\xcf\x96"
with temppath() as path:
with open(path, "wb") as f:
f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',', encoding="UTF-8")
ctl = np.array([
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"],
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]],
dtype=np.unicode_)
assert_array_equal(test, ctl)
# test a mixed dtype
with open(path, "wb") as f:
f.write(b"0,testNonethe" + utf8)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',', encoding="UTF-8")
assert_equal(test['f0'], 0)
assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8"))
def test_utf8_file_nodtype_unicode(self):
# bytes encoding with non-latin1 -> unicode upcast
utf8 = u'\u03d6'
latin1 = u'\xf6\xfc\xf6'
# skip test if cannot encode utf8 test string with preferred
# encoding. The preferred encoding is assumed to be the default
# encoding of io.open. Will need to change this for PyTest, maybe
# using pytest.mark.xfail(raises=***).
try:
encoding = locale.getpreferredencoding()
utf8.encode(encoding)
except (UnicodeError, ImportError):
pytest.skip('Skipping test_utf8_file_nodtype_unicode, '
'unable to encode utf8 in preferred encoding')
with temppath() as path:
with io.open(path, "wt") as f:
f.write(u"norm1,norm2,norm3\n")
f.write(u"norm1," + latin1 + u",norm3\n")
f.write(u"test1,testNonethe" + utf8 + u",test3\n")
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '',
np.VisibleDeprecationWarning)
test = np.genfromtxt(path, dtype=None, comments=None,
delimiter=',')
# Check for warning when encoding not specified.
assert_(w[0].category is np.VisibleDeprecationWarning)
ctl = np.array([
["norm1", "norm2", "norm3"],
["norm1", latin1, "norm3"],
["test1", "testNonethe" + utf8, "test3"]],
dtype=np.unicode_)
assert_array_equal(test, ctl)
def test_recfromtxt(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(data, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
def test_recfromcsv(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(data, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
#
data = TextIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing_values='N/A',)
control = np.array([(0, 1), (2, 3)],
dtype=[('a', int), ('b', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,3')
dtype = [('a', int), ('b', float)]
test = np.recfromcsv(data, missing_values='N/A', dtype=dtype)
control = np.array([(0, 1), (2, 3)],
dtype=dtype)
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#gh-10394
data = TextIO('color\n"red"\n"blue"')
test = np.recfromcsv(data, converters={0: lambda x: x.strip(b'\"')})
control = np.array([('red',), ('blue',)], dtype=[('color', (bytes, 4))])
assert_equal(test.dtype, control.dtype)
assert_equal(test, control)
def test_max_rows(self):
# Test the `max_rows` keyword argument.
data = '1 2\n3 4\n5 6\n7 8\n9 10\n'
txt = TextIO(data)
a1 = np.genfromtxt(txt, max_rows=3)
a2 = np.genfromtxt(txt)
assert_equal(a1, [[1, 2], [3, 4], [5, 6]])
assert_equal(a2, [[7, 8], [9, 10]])
# max_rows must be at least 1.
assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0)
# An input with several invalid rows.
data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n'
test = np.genfromtxt(TextIO(data), max_rows=2)
control = np.array([[1., 1.], [2., 2.]])
assert_equal(test, control)
# Test keywords conflict
assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1,
max_rows=4)
# Test with invalid value
assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4)
# Test with invalid not raise
with suppress_warnings() as sup:
sup.filter(ConversionWarning)
test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False)
control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
assert_equal(test, control)
test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False)
control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
assert_equal(test, control)
# Structured array with field names.
data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n'
# Test with header, names and comments
txt = TextIO(data)
test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True)
control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)],
dtype=[('c', '<f8'), ('d', '<f8')])
assert_equal(test, control)
# To continue reading the same "file", don't use skip_header or
# names, and use the previously determined dtype.
test = np.genfromtxt(txt, max_rows=None, dtype=test.dtype)
control = np.array([(4.0, 4.0), (5.0, 5.0)],
dtype=[('c', '<f8'), ('d', '<f8')])
assert_equal(test, control)
def test_gft_using_filename(self):
# Test that we can load data from a filename as well as a file
# object
tgt = np.arange(6).reshape((2, 3))
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
with temppath() as name:
with open(name, 'w') as f:
f.write(data)
res = np.genfromtxt(name)
assert_array_equal(res, tgt)
def test_gft_from_gzip(self):
# Test that we can load data from a gzipped file
wanted = np.arange(6).reshape((2, 3))
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
s = BytesIO()
with gzip.GzipFile(fileobj=s, mode='w') as g:
g.write(asbytes(data))
with temppath(suffix='.gz2') as name:
with open(name, 'w') as f:
f.write(data)
assert_array_equal(np.genfromtxt(name), wanted)
def test_gft_using_generator(self):
# gft doesn't work with unicode.
def count():
for i in range(10):
yield asbytes("%d" % i)
res = np.genfromtxt(count())
assert_array_equal(res, np.arange(10))
def test_auto_dtype_largeint(self):
# Regression test for numpy/numpy#5635 whereby large integers could
# cause OverflowErrors.
# Test the automatic definition of the output dtype
#
# 2**66 = 73786976294838206464 => should convert to float
# 2**34 = 17179869184 => should convert to int64
# 2**10 = 1024 => should convert to int (int32 on 32-bit systems,
# int64 on 64-bit systems)
data = TextIO('73786976294838206464 17179869184 1024')
test = np.genfromtxt(data, dtype=None)
assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])
assert_(test.dtype['f0'] == float)
assert_(test.dtype['f1'] == np.int64)
assert_(test.dtype['f2'] == np.int_)
assert_allclose(test['f0'], 73786976294838206464.)
assert_equal(test['f1'], 17179869184)
assert_equal(test['f2'], 1024)
def test_unpack_structured(self):
# Regression test for gh-4341
# Unpacking should work on structured arrays
txt = TextIO("M 21 72\nF 35 58")
dt = {'names': ('a', 'b', 'c'), 'formats': ('S1', 'i4', 'f4')}
a, b, c = np.genfromtxt(txt, dtype=dt, unpack=True)
assert_equal(a.dtype, np.dtype('S1'))
assert_equal(b.dtype, np.dtype('i4'))
assert_equal(c.dtype, np.dtype('f4'))
assert_array_equal(a, np.array([b'M', b'F']))
assert_array_equal(b, np.array([21, 35]))
assert_array_equal(c, np.array([72., 58.]))
def test_unpack_auto_dtype(self):
# Regression test for gh-4341
# Unpacking should work when dtype=None
txt = TextIO("M 21 72.\nF 35 58.")
expected = (np.array(["M", "F"]), np.array([21, 35]), np.array([72., 58.]))
test = np.genfromtxt(txt, dtype=None, unpack=True, encoding="utf-8")
for arr, result in zip(expected, test):
assert_array_equal(arr, result)
assert_equal(arr.dtype, result.dtype)
def test_unpack_single_name(self):
# Regression test for gh-4341
# Unpacking should work when structured dtype has only one field
txt = TextIO("21\n35")
dt = {'names': ('a',), 'formats': ('i4',)}
expected = np.array([21, 35], dtype=np.int32)
test = np.genfromtxt(txt, dtype=dt, unpack=True)
assert_array_equal(expected, test)
assert_equal(expected.dtype, test.dtype)
def test_squeeze_scalar(self):
# Regression test for gh-4341
# Unpacking a scalar should give zero-dim output,
# even if dtype is structured
txt = TextIO("1")
dt = {'names': ('a',), 'formats': ('i4',)}
expected = np.array((1,), dtype=np.int32)
test = np.genfromtxt(txt, dtype=dt, unpack=True)
assert_array_equal(expected, test)
assert_equal((), test.shape)
assert_equal(expected.dtype, test.dtype)
class TestPathUsage:
# Test that pathlib.Path can be used
def test_loadtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
a = np.array([[1.1, 2], [3, 4]])
np.savetxt(path, a)
x = np.loadtxt(path)
assert_array_equal(x, a)
def test_save_load(self):
# Test that pathlib.Path instances can be used with save.
with temppath(suffix='.npy') as path:
path = Path(path)
a = np.array([[1, 2], [3, 4]], int)
np.save(path, a)
data = np.load(path)
assert_array_equal(data, a)
def test_save_load_memmap(self):
# Test that pathlib.Path instances can be loaded mem-mapped.
with temppath(suffix='.npy') as path:
path = Path(path)
a = np.array([[1, 2], [3, 4]], int)
np.save(path, a)
data = np.load(path, mmap_mode='r')
assert_array_equal(data, a)
# close the mem-mapped file
del data
if IS_PYPY:
break_cycles()
break_cycles()
def test_save_load_memmap_readwrite(self):
# Test that pathlib.Path instances can be written mem-mapped.
with temppath(suffix='.npy') as path:
path = Path(path)
a = np.array([[1, 2], [3, 4]], int)
np.save(path, a)
b = np.load(path, mmap_mode='r+')
a[0][0] = 5
b[0][0] = 5
del b # closes the file
if IS_PYPY:
break_cycles()
break_cycles()
data = np.load(path)
assert_array_equal(data, a)
def test_savez_load(self):
# Test that pathlib.Path instances can be used with savez.
with temppath(suffix='.npz') as path:
path = Path(path)
np.savez(path, lab='place holder')
with np.load(path) as data:
assert_array_equal(data['lab'], 'place holder')
def test_savez_compressed_load(self):
# Test that pathlib.Path instances can be used with savez.
with temppath(suffix='.npz') as path:
path = Path(path)
np.savez_compressed(path, lab='place holder')
data = np.load(path)
assert_array_equal(data['lab'], 'place holder')
data.close()
def test_genfromtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
a = np.array([(1, 2), (3, 4)])
np.savetxt(path, a)
data = np.genfromtxt(path)
assert_array_equal(a, data)
def test_recfromtxt(self):
with temppath(suffix='.txt') as path:
path = Path(path)
with path.open('w') as f:
f.write(u'A,B\n0,1\n2,3')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(path, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
def test_recfromcsv(self):
with temppath(suffix='.txt') as path:
path = Path(path)
with path.open('w') as f:
f.write(u'A,B\n0,1\n2,3')
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(path, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
def test_gzip_load():
a = np.random.random((5, 5))
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
np.save(f, a)
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.load(f), a)
# These next two classes encode the minimal API needed to save()/load() arrays.
# The `test_ducktyping` test below ensures they work correctly.
class JustWriter:
def __init__(self, base):
self.base = base
def write(self, s):
return self.base.write(s)
def flush(self):
return self.base.flush()
class JustReader:
def __init__(self, base):
self.base = base
def read(self, n):
return self.base.read(n)
def seek(self, off, whence=0):
return self.base.seek(off, whence)
def test_ducktyping():
a = np.random.random((5, 5))
s = BytesIO()
f = JustWriter(s)
np.save(f, a)
f.flush()
s.seek(0)
f = JustReader(s)
assert_array_equal(np.load(f), a)
def test_gzip_loadtxt():
    # Because of a Windows limitation, we can't use NamedTemporaryFile: a file
    # created that way cannot be reopened by another open() call while it is
    # still open. So we first build the gzipped reference data in memory, write
    # it to a securely opened temporary file, and then read it back with loadtxt.
s = BytesIO()
g = gzip.GzipFile(fileobj=s, mode='w')
g.write(b'1 2 3\n')
g.close()
s.seek(0)
with temppath(suffix='.gz') as name:
with open(name, 'wb') as f:
f.write(s.read())
res = np.loadtxt(name)
s.close()
assert_array_equal(res, [1, 2, 3])
def test_gzip_loadtxt_from_string():
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
f.write(b'1 2 3\n')
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.loadtxt(f), [1, 2, 3])
def test_npzfile_dict():
s = BytesIO()
x = np.zeros((3, 3))
y = np.zeros((3, 3))
np.savez(s, x=x, y=y)
s.seek(0)
z = np.load(s)
assert_('x' in z)
assert_('y' in z)
assert_('x' in z.keys())
assert_('y' in z.keys())
for f, a in z.items():
assert_(f in ['x', 'y'])
assert_equal(a.shape, (3, 3))
assert_(len(z.items()) == 2)
for f in z:
assert_(f in ['x', 'y'])
assert_('x' in z.keys())
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_load_refcount():
# Check that objects returned by np.load are directly freed based on
# their refcount, rather than needing the gc to collect them.
f = BytesIO()
np.savez(f, [1, 2, 3])
f.seek(0)
with assert_no_gc_cycles():
np.load(f)
f.seek(0)
dt = [("a", 'u1', 2), ("b", 'u1', 2)]
with assert_no_gc_cycles():
x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt)
assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
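# --- Editor's illustrative sketch (not part of the original test suite) ---
# A minimal, hedged example of the genfromtxt features exercised above: per-column
# converters plus missing_values/filling_values. The column names and sample data
# below are made up for illustration and are not taken from the tests.
def _example_genfromtxt_converters_and_filling():
    import numpy as np
    from io import StringIO
    raw = "id,price,qty\n1,$3.50,4\n2,,5\n3,$7.25,\n"
    strip_dollar = lambda s: float(s.lstrip("$")) if s else -1.0
    arr = np.genfromtxt(
        StringIO(raw),
        delimiter=",",
        names=True,
        dtype=None,
        encoding="utf-8",
        converters={1: strip_dollar},  # column 1: drop the leading '$'
        missing_values={2: ""},        # column 2: an empty field counts as missing ...
        filling_values={2: 0},         # ... and is filled with 0
    )
    return arr  # structured array with fields ('id', 'price', 'qty')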
|
train_script.py
|
"""
Training script contained in a single file.
The TPU address must be set first:
export XRT_TPU_CONFIG="localservice;0;localhost:51011"
"""
import torch.multiprocessing as mp
import threading
import time
import random
import sys
import argparse
import gzip
import json
import logging
import tqdm
import torch
from torch import nn
from torch.utils.data import DataLoader
import torch_xla
import torch_xla.core
import torch_xla.core.functions
import torch_xla.core.xla_model as xm
import torch_xla.distributed.xla_multiprocessing as xmp
import torch_xla.distributed.parallel_loader as pl
import os
from shutil import copyfile
from transformers import (
AdamW,
AutoModel,
AutoTokenizer,
get_linear_schedule_with_warmup,
set_seed,
)
class AutoModelForSentenceEmbedding(nn.Module):
def __init__(self, model_name, tokenizer, normalize=True):
super(AutoModelForSentenceEmbedding, self).__init__()
self.model = AutoModel.from_pretrained(model_name)
self.normalize = normalize
self.tokenizer = tokenizer
def forward(self, **kwargs):
model_output = self.model(**kwargs)
embeddings = self.mean_pooling(model_output, kwargs['attention_mask'])
if self.normalize:
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
return embeddings
def mean_pooling(self, model_output, attention_mask):
token_embeddings = model_output[0] # First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
def save_pretrained(self, output_path):
if xm.is_master_ordinal():
self.tokenizer.save_pretrained(output_path)
self.model.config.save_pretrained(output_path)
xm.save(self.model.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
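# --- Editor's illustrative sketch (not part of the original script) ---
# What mean_pooling above computes, shown on hand-made tensors: a masked average over
# the token dimension so that padding positions do not contribute to the embedding.
def _example_mean_pooling_on_dummy_tensors():
    token_embeddings = torch.tensor([[[1.0, 1.0], [3.0, 3.0], [9.0, 9.0]]])  # (batch=1, seq=3, dim=2)
    attention_mask = torch.tensor([[1, 1, 0]])  # the last position is padding
    mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    pooled = torch.sum(token_embeddings * mask, 1) / torch.clamp(mask.sum(1), min=1e-9)
    return pooled  # tensor([[2.0, 2.0]]): the padded position is ignored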
def train_function(index, args, queue):
tokenizer = AutoTokenizer.from_pretrained(args.model)
model = AutoModelForSentenceEmbedding(args.model, tokenizer)
### Train Loop
device = xm.xla_device()
model = model.to(device)
# Instantiate optimizer
optimizer = AdamW(params=model.parameters(), lr=2e-5, correct_bias=True)
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=500,
num_training_steps=args.steps,
)
# Now we train the model
cross_entropy_loss = nn.CrossEntropyLoss()
max_grad_norm = 1
model.train()
for global_step in tqdm.trange(args.steps, disable=not xm.is_master_ordinal()):
#### Get the batch data
batch = queue.get()
#print(index, "batch {}x{}".format(len(batch), ",".join([str(len(b)) for b in batch])))
if len(batch[0]) == 2: #(anchor, positive)
text1 = tokenizer([b[0] for b in batch], return_tensors="pt", max_length=args.max_length, truncation=True, padding="max_length")
text2 = tokenizer([b[1] for b in batch], return_tensors="pt", max_length=args.max_length, truncation=True, padding="max_length")
### Compute embeddings
embeddings_a = model(**text1.to(device))
embeddings_b = model(**text2.to(device))
            ### Gather all embeddings
embeddings_a = torch_xla.core.functions.all_gather(embeddings_a)
embeddings_b = torch_xla.core.functions.all_gather(embeddings_b)
### Compute similarity scores 512 x 512
scores = torch.mm(embeddings_a, embeddings_b.transpose(0, 1)) * args.scale
### Compute cross-entropy loss
labels = torch.tensor(range(len(scores)), dtype=torch.long, device=embeddings_a.device) # Example a[i] should match with b[i]
## Symmetric loss as in CLIP
loss = (cross_entropy_loss(scores, labels) + cross_entropy_loss(scores.transpose(0, 1), labels)) / 2
else: #(anchor, positive, negative)
text1 = tokenizer([b[0] for b in batch], return_tensors="pt", max_length=args.max_length, truncation=True, padding="max_length")
text2 = tokenizer([b[1] for b in batch], return_tensors="pt", max_length=args.max_length, truncation=True, padding="max_length")
text3 = tokenizer([b[2] for b in batch], return_tensors="pt", max_length=args.max_length, truncation=True, padding="max_length")
embeddings_a = model(**text1.to(device))
embeddings_b1 = model(**text2.to(device))
embeddings_b2 = model(**text3.to(device))
embeddings_a = torch_xla.core.functions.all_gather(embeddings_a)
embeddings_b1 = torch_xla.core.functions.all_gather(embeddings_b1)
embeddings_b2 = torch_xla.core.functions.all_gather(embeddings_b2)
embeddings_b = torch.cat([embeddings_b1, embeddings_b2])
### Compute similarity scores 512 x 1024
scores = torch.mm(embeddings_a, embeddings_b.transpose(0, 1)) * args.scale
### Compute cross-entropy loss
labels = torch.tensor(range(len(scores)), dtype=torch.long, device=embeddings_a.device) # Example a[i] should match with b[i]
## One-way loss
loss = cross_entropy_loss(scores, labels)
# Backward pass
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
xm.optimizer_step(optimizer, barrier=True)
lr_scheduler.step()
#Save model
if (global_step+1) % args.save_steps == 0:
output_path = os.path.join(args.output, str(global_step+1))
xm.master_print("save model: "+output_path)
model.save_pretrained(output_path)
output_path = os.path.join(args.output, "final")
xm.master_print("save model final: "+ output_path)
model.save_pretrained(output_path)
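# --- Editor's illustrative sketch (not part of the original script) ---
# The training objective above, reduced to plain tensors: scores[i, j] is the scaled
# similarity between anchor i and candidate j, and the target for row i is i itself
# (in-batch negatives). The symmetric variant averages both directions, as in CLIP.
def _example_in_batch_negatives_loss(scale=20.0):
    torch.manual_seed(0)
    embeddings_a = torch.nn.functional.normalize(torch.randn(4, 8), p=2, dim=1)
    embeddings_b = torch.nn.functional.normalize(torch.randn(4, 8), p=2, dim=1)
    scores = torch.mm(embeddings_a, embeddings_b.transpose(0, 1)) * scale
    labels = torch.arange(scores.size(0))
    ce = nn.CrossEntropyLoss()
    return (ce(scores, labels) + ce(scores.transpose(0, 1), labels)) / 2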
def produce_data(args, queue, filepaths, dataset_indices):
global_batch_size = args.batch_size*args.nprocs #Global batch size
    size_per_dataset = int(global_batch_size / args.datasets_per_batch)  #Examples each dataset contributes to the global batch
    num_same_dataset = int(size_per_dataset / args.batch_size)  #Consecutive per-device batches drawn from the same dataset
print("producer", "global_batch_size", global_batch_size)
print("producer", "size_per_dataset", size_per_dataset)
print("producer", "num_same_dataset", num_same_dataset)
datasets = []
for filepath in filepaths:
if "reddit_" in filepath: #Special dataset class for Reddit files
data_obj = RedditDataset(filepath)
else:
data_obj = Dataset(filepath)
datasets.append(iter(data_obj))
    # Store whether each dataset uses the 2-column or 3-column format
num_cols = {idx: len(next(dataset)) for idx, dataset in enumerate(datasets)}
while True:
texts_in_batch = set()
batch_format = None #2 vs 3 col format for this batch
#Add data from several sub datasets
for _ in range(args.datasets_per_batch):
valid_dataset = False #Check that datasets have the same 2/3 col format
while not valid_dataset:
data_idx = random.choice(dataset_indices)
if batch_format is None:
batch_format = num_cols[data_idx]
valid_dataset = True
else: #Check that this dataset has the same format
valid_dataset = (batch_format == num_cols[data_idx])
#Get data from this dataset
dataset = datasets[data_idx]
for _ in range(num_same_dataset):
for _ in range(args.nprocs):
batch_device = [] #A batch for one device
while len(batch_device) < args.batch_size:
sample = next(dataset)
in_batch = False
for text in sample:
if text in texts_in_batch:
in_batch = True
break
if not in_batch:
for text in sample:
texts_in_batch.add(text)
batch_device.append(sample)
queue.put(batch_device)
class RedditDataset:
"""
A class that handles the reddit data files
"""
def __init__(self, filepath):
self.filepath = filepath
def __iter__(self):
while True:
with gzip.open(self.filepath, "rt") as fIn:
for line in fIn:
data = json.loads(line)
if "response" in data and "context" in data:
yield [data["response"], data["context"]]
class Dataset:
"""
A class that handles one dataset
"""
def __init__(self, filepath):
self.filepath = filepath
def __iter__(self):
max_dataset_size = 10*1000*1000 #Cache small datasets in memory
dataset = []
data_format = None
while dataset is None or len(dataset) == 0:
with gzip.open(self.filepath, "rt") as fIn:
for line in fIn:
data = json.loads(line)
if isinstance(data, dict):
data = data['texts']
if data_format is None:
data_format = len(data)
#Ensure that all entries are of the same 2/3 col format
assert len(data) == data_format
if dataset is not None:
dataset.append(data)
if len(dataset) >= max_dataset_size:
dataset = None
yield data
# Data loaded. Now stream to the queue
# Shuffle for each epoch
while True:
random.shuffle(dataset)
for data in dataset:
yield data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model', default='nreimers/MiniLM-L6-H384-uncased')
parser.add_argument('--steps', type=int, default=2000)
parser.add_argument('--save_steps', type=int, default=10000)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--max_length', type=int, default=128)
parser.add_argument('--nprocs', type=int, default=8)
parser.add_argument('--datasets_per_batch', type=int, default=2, help="Number of datasets per batch")
parser.add_argument('--scale', type=float, default=20, help="Use 20 for cossim, and 1 when you work with unnormalized embeddings with dot product")
parser.add_argument('--data_folder', default="/data", help="Folder with your dataset files")
parser.add_argument('data_config', help="A data_config.json file")
parser.add_argument('output')
args = parser.parse_args()
    # Ensure the global batch size is divisible by datasets_per_batch
assert (args.batch_size*args.nprocs) % args.datasets_per_batch == 0
logging.info("Output: "+args.output)
if os.path.exists(args.output):
print("Output folder already exists.")
input("Continue?")
# Write train script to output path
os.makedirs(args.output, exist_ok=True)
data_config_path = os.path.join(args.output, 'data_config.json')
copyfile(args.data_config, data_config_path)
train_script_path = os.path.join(args.output, 'train_script.py')
copyfile(__file__, train_script_path)
with open(train_script_path, 'a') as fOut:
fOut.write("\n\n# Script was called via:\n#python " + " ".join(sys.argv))
#Load data config
with open(args.data_config) as fIn:
data_config = json.load(fIn)
queue = mp.Queue(maxsize=100*args.nprocs)
filepaths = []
dataset_indices = []
for idx, data in enumerate(data_config):
filepaths.append(os.path.join(os.path.expanduser(args.data_folder), data['name']))
dataset_indices.extend([idx]*data['weight'])
# Start producer
p = mp.Process(target=produce_data, args=(args, queue, filepaths, dataset_indices))
p.start()
# Run training
print("Start processes:", args.nprocs)
xmp.spawn(train_function, args=(args, queue), nprocs=args.nprocs, start_method='fork')
print("Training done")
print("It might be that not all processes exit automatically. In that case you must manually kill this process.")
print("With 'pkill python' you can kill all remaining python processes")
p.kill()
exit()
# Script was called via:
#python train_many_data_files_v2.py --steps 1000000 --batch_size 128 --model nreimers/MiniLM-L6-H384-uncased train_data_configs/all_datasets_v4.json output/all_datasets_v4_MiniLM-L6-H384-uncased-batch128
|
__init__.py
|
import contextlib
import copy
import hashlib
import itertools
import json
import tqdm
import torchvision
import pprint
import os
import pickle
import shlex
import subprocess
import threading
import time
import numpy as np
import pylab as plt
import torch
import pandas as pd
from .. import haven_img as hi
from .image_utils import *
from .file_utils import *
from .string_utils import *
from .exp_utils import *
from datetime import datetime
from PIL import Image
def get_function_from_file():
pass
def create_command(base_command, args):
"""
args is the parser
"""
run_command = base_command
arg_keys = vars(args).keys()
assert "exp_group_list" in arg_keys
assert "exp_id" in arg_keys
assert "run_jobs" in arg_keys
for a, v in vars(args).items():
if a == "exp_group_list" or a == "exp_id" or a == "run_jobs" or a == "reset":
print("argument: %s ignored..." % a)
continue
run_command += " --%s %s" % (a, v)
print("command: %s" % run_command)
return run_command
def mask_on_image(mask, image):
from skimage.color import label2rgb
from skimage.color import color_dict, colorlabel
from skimage.segmentation import mark_boundaries
default_colors = [
"red",
"blue",
"yellow",
"magenta",
"green",
"indigo",
"darkorange",
"cyan",
"pink",
"yellowgreen",
]
mask = mask.squeeze().astype(int)
image = hi.image_as_uint8(image) / 255.0
labels = [l for l in np.unique(mask) if l < len(color_dict)]
colors = default_colors + list(color_dict.keys())[len(default_colors) :]
colors = np.array(colors)[labels]
image_label_overlay = label2rgb(
mask, image=f2l(image).squeeze().clip(0, 1), colors=colors, bg_label=0, bg_color=None, kind="overlay"
)
return mark_boundaries(image_label_overlay, mask)
def get_image(img, denorm=None, size=None, points=None, radius=10, mask=None, heatmap=None):
return save_image(None, img, denorm, size, points, radius, mask, heatmap, return_image=True)
def save_image(
fname,
img,
denorm=None,
size=None,
points=None,
radius=10,
mask=None,
heatmap=None,
makedirs=True,
return_image=False,
nrow=8,
):
"""Save an image into a file.
Parameters
----------
fname : str
Name of the file
img : [type]
        Image data. #TODO: we assume it is in [0, 1]; document whether it is NumPy or PIL and the channel order (RGB?)
makedirs : bool, optional
If enabled creates the folder for saving the file, by default True
"""
if not isinstance(img, torch.Tensor):
if img.min() >= 0 and img.max() > 1:
img = img / 255.0
img = torch.as_tensor(img)
if img.ndim == 4:
img = torchvision.utils.make_grid(img, nrow=nrow)
if denorm:
img = denormalize(img, mode=denorm)
if points is not None:
if isinstance(img, np.ndarray):
img = torch.FloatTensor(img)
img = img.squeeze()
if img.ndim == 2:
img = img[None].repeat(3, 1, 1)
y_list, x_list = np.where(points.squeeze())
c_list = []
for y, x in zip(y_list, x_list):
c_list += [points.squeeze()[y, x]]
img = hi.points_on_image(y_list, x_list, img, radius=radius, c_list=c_list)
if mask is not None:
img = mask_on_image(mask, img)
if img.dtype == "uint8":
img = Image.fromarray(img)
else:
arr = f2l(t2n(img)).squeeze()
# print(arr.shape)
if size is not None:
arr = Image.fromarray(arr)
arr = arr.resize(size)
arr = np.array(arr)
img = Image.fromarray(np.uint8(arr * 255))
if return_image:
return img
if fname is not None:
dirname = os.path.dirname(fname)
if makedirs and dirname != "":
os.makedirs(dirname, exist_ok=True)
img.save(fname)
def load_txt(fname):
"""Load the content of a txt file.
Parameters
----------
fname : str
File name
Returns
-------
list
Content of the file. List containing the lines of the file
"""
with open(fname, "r", encoding="utf-8") as f:
lines = f.readlines()
return lines
def save_txt(fname, lines):
"""Load the content of a txt file.
Parameters
----------
fname : str
File name
Returns
-------
list
Content of the file. List containing the lines of the file
"""
with open(fname, "w", encoding="utf-8") as f:
for l in lines:
f.writelines(l)
def torch_load(fname, map_location=None):
"""Load the content of a torch file.
Parameters
----------
fname : str
File name
map_location : [type], optional
        Mapping for loading the model onto a specific device (i.e., CPU or GPU); this
        is needed if the model was trained on CPU and is loaded on GPU or vice versa,
        by default None
Returns
-------
[type]
Loaded torch model
"""
obj = torch.load(fname, map_location=map_location)
return obj
def torch_save(fname, obj):
"""Save data in torch format.
Parameters
----------
fname : str
File name
obj : [type]
Data to save
"""
# Create folder
os.makedirs(os.path.dirname(fname), exist_ok=True) # TODO: add makedirs parameter?
# Define names of temporal files
fname_tmp = fname + ".tmp" # TODO: Make the safe flag?
torch.save(obj, fname_tmp)
if os.path.exists(fname):
os.remove(fname)
os.rename(fname_tmp, fname)
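# --- Editor's illustrative usage sketch ---
# A hedged example of the two helpers above: torch_save writes to "<fname>.tmp" first
# and renames it into place, so an interrupted write never leaves a truncated file.
# The file name below is made up for illustration.
def _example_torch_save_roundtrip():
    import tempfile
    with tempfile.TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, "checkpoints", "score_dict.pth")
        torch_save(fname, {"epoch": 3, "acc": 0.91})
        return torch_load(fname)  # -> {"epoch": 3, "acc": 0.91}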
class Parallel:
"""Class for run a function in parallel."""
def __init__(self):
"""Constructor"""
self.threadList = []
self.count = 0
self.thread_logs = []
def add(self, func, *args):
"""Add a function to run as a process.
Parameters
----------
func : function
Pointer to the function to parallelize
args : list
            Arguments of the function to parallelize
"""
self.threadList += [threading.Thread(target=func, name="thread-%d" % self.count, args=args)]
self.count += 1
def run(self):
"""Run the added functions in parallel"""
for thread in tqdm.tqdm(self.threadList, desc="Starting threads", leave=False):
thread.daemon = True
thread.start()
def close(self):
"""Finish: wait for all the functions to finish"""
for thread in tqdm.tqdm(self.threadList, desc="Joining threads", leave=False):
thread.join()
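# --- Editor's illustrative usage sketch ---
# How the Parallel helper above is meant to be driven: add() any number of callables
# with their arguments, run() starts one daemon thread per callable, close() joins them.
# The worker function below is made up for illustration.
def _example_parallel_usage():
    results = []
    def _worker(idx, out):
        out.append(idx * idx)
    p = Parallel()
    for i in range(4):
        p.add(_worker, i, results)
    p.run()
    p.close()
    return sorted(results)  # [0, 1, 4, 9]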
def pretty_print_df(df):
# wrap text for prettiness
for c in df.columns:
if df[c].dtype == "O":
# df[c] = df[c].str.wrap(wrap_size)
df[c] = df[c].apply(pprint.pformat)
return df
def flatten_column(result_dict):
new_dict = {}
for k, v in result_dict.items():
new_dict.update(flatten_dict(k, v))
result_dict = new_dict
return result_dict
def sort_df_columns(table, also_first=[]):
first = ["exp_id", "job_state", "job_id", "restarts", "started_at"]
first += also_first
col_list = []
for col in first:
if col in table.columns:
col_list += [col]
for col in table.columns:
if col in first:
continue
col_list += [col]
return table[col_list]
def subprocess_call(cmd_string):
"""Run a terminal process.
Parameters
----------
cmd_string : str
Command to execute in the terminal
Returns
-------
    str
        Decoded standard output of the command; raises CalledProcessError on failure
"""
return subprocess.check_output(shlex.split(cmd_string), shell=False, stderr=subprocess.STDOUT).decode("utf-8")
def copy_code(src_path, dst_path, verbose=1):
"""Copy the code.
    Typically, when you run an experiment, you first copy the code used into the
    experiment folder. This function copies the code using the rsync terminal
    command.
Parameters
----------
src_path : str
Source code directory
dst_path : str
Destination code directory
verbose : int, optional
Verbosity level. If 0 does not print stuff, by default 1
Raises
------
ValueError
[description]
"""
time.sleep(0.5) # TODO: Why? Why?
if verbose:
print(" > Copying code from %s to %s" % (src_path, dst_path))
# Create destination folder
os.makedirs(dst_path, exist_ok=True)
# Define the command for copying the code using rsync
if os.path.exists(os.path.join(src_path, ".havenignore")):
rsync_code = (
"rsync -av -r -q --delete-before --exclude='.*' --exclude-from=%s \
--exclude '__pycache__/' %s %s"
% (os.path.join(src_path, ".havenignore"), src_path, dst_path)
)
else:
rsync_code = (
"rsync -av -r -q --delete-before --exclude='.*' \
--exclude '__pycache__/' %s %s"
% (src_path, dst_path)
)
# Run the command in the terminal
try:
subprocess_call(rsync_code)
except subprocess.CalledProcessError as e:
raise ValueError("Ping stdout output:\n", e.output)
time.sleep(0.5) # TODO: Why?
def zipdir(src_dirname, out_fname, include_list=None):
"""Compress a folder using ZIP.
Parameters
----------
src_dirname : str
Directory to compress
out_fname : str
File name of the compressed file
include_list : list, optional
List of files to include. If None, include all files in the folder, by
default None
"""
    import zipfile  # TODO: Move to the beginning of the file
# TODO: Do we need makedirs?
# Create the zip file
zipf = zipfile.ZipFile(out_fname, "w", zipfile.ZIP_DEFLATED)
# ziph is zipfile handle
for root, dirs, files in os.walk(src_dirname):
for file in files:
            # Discard files if needed
if include_list is not None and file not in include_list:
continue
abs_path = os.path.join(root, file)
rel_path = fname_parent(abs_path) # TODO: fname_parent not defined
print(rel_path)
zipf.write(abs_path, rel_path)
zipf.close()
def zip_score_list(exp_list, savedir_base, out_fname, include_list=None):
"""Compress a list of experiments in zip.
Parameters
----------
exp_list : list
List of experiments to zip
savedir_base : str
Directory where the experiments from the list are saved
out_fname : str
File name for the zip file
include_list : list, optional
List of files to include. If None, include all files in the folder, by
default None
"""
    for exp_dict in exp_list:  # TODO: this keeps only the last experiment, since zipdir overwrites the zip file on each iteration
# Get the experiment id
exp_id = hash_dict(exp_dict)
# Zip folder
zipdir(os.path.join(savedir_base, exp_id), out_fname, include_list=include_list)
def time_to_montreal(fname=None, timezone="US/Eastern"):
"""Get time in Montreal zone.
Returns
-------
str
Current date at the selected timezone in string format
"""
# Get time
os.environ["TZ"] = timezone
try:
time.tzset()
except:
pass
if fname:
tstamp = os.path.getctime(fname)
else:
tstamp = time.time()
time_str = datetime.fromtimestamp(tstamp).strftime("%I:%M %p (%b %d)")
return time_str
def time2mins(time_taken):
"""Convert time into minutes.
Parameters
----------
time_taken : float
Time in seconds
Returns
-------
float
Minutes
"""
return time_taken / 60.0
def n2t(x, dtype="float"): # TODO: dtype is not used!!
"""Array or Numpy array to Pytorch tensor.
Parameters
----------
x : array or Numpy array
Data to transform
dtype : [type]
[description]
Returns
-------
Pytorch tensor
x converted to pytorch tensor format
"""
if isinstance(x, (int, np.int64, float)):
x = np.array([x])
if isinstance(x, np.ndarray):
x = torch.from_numpy(x)
return x
def t2n(x):
"""Pytorch tensor to Numpy array.
Parameters
----------
x : Pytorch tensor
A Pytorch tensor to transform
Returns
-------
Numpy array
x transformed to numpy array
"""
try:
x = x.detach().cpu().numpy()
except Exception:
x = x
return x
def l2f(X):
"""Move the channels from the last dimension to the first dimension.
Parameters
----------
X : Numpy array
Tensor with the channel dimension at the last dimension
Returns
-------
Numpy array
X transformed with the channel dimension at the first dimension
"""
if X.ndim == 3 and (X.shape[0] == 3 or X.shape[0] == 1):
return X
if X.ndim == 4 and (X.shape[1] == 3 or X.shape[1] == 1):
return X
if X.ndim == 4 and (X.shape[1] < X.shape[3]):
return X
# Move the channel dimension from the last position to the first one
if X.ndim == 3:
return np.transpose(X, (2, 0, 1))
if X.ndim == 4:
return np.transpose(X, (0, 3, 1, 2))
return X
def f2l(X):
"""Move the channels from the first dimension to the last dimension.
    Parameters
----------
X : Numpy array
Tensor with the channel dimension at the first dimension
Returns
-------
Numpy array
X transformed with the channel dimension at the last dimension
"""
if X.ndim == 3 and (X.shape[2] == 3 or X.shape[2] == 1):
return X
if X.ndim == 4 and (X.shape[3] == 3 or X.shape[3] == 1):
return X
# Move the channel dimension from the first position to the last one
if X.ndim == 3:
return np.transpose(X, (1, 2, 0))
if X.ndim == 4:
return np.transpose(X, (0, 2, 3, 1))
return X
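# --- Editor's illustrative sketch ---
# What l2f/f2l above do on a dummy HWC image: they move the channel axis between the
# first and last positions without touching the pixel values.
def _example_channel_moves():
    hwc = np.zeros((32, 48, 3))  # height x width x channels
    chw = l2f(hwc)               # -> shape (3, 32, 48)
    back = f2l(chw)              # -> shape (32, 48, 3) again
    return chw.shape, back.shape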
def n2p(image): # TODO: Create p2n function and use it in get_image()
"""Numpy image to PIL image.
Parameters
----------
image : Numpy array
Input image in numpy format
Returns
-------
PIL image
Input image converted into PIL format
"""
image = f2l(image.squeeze())
if image.max() <= 1:
image = image * 255
return Image.fromarray(image.astype("uint8"))
def _denorm(image, mu, var, bgr2rgb=False):
"""Denormalize an image.
Parameters
----------
image : [type]
Image to denormalize
mu : [type]
Mean used to normalize the image
var : [type]
Variance used to normalize the image
bgr2rgb : bool, optional
Whether to also convert from bgr 2 rgb, by default False
Returns
-------
[type]
Denormalized image
"""
if image.ndim == 3:
result = image * var[:, None, None] + mu[:, None, None] # TODO: Is it variance or std?
if bgr2rgb:
result = result[::-1]
else:
result = image * var[None, :, None, None] + mu[None, :, None, None]
if bgr2rgb:
result = result[:, ::-1]
return result
def denormalize(img, mode=0): # TODO: Remove the default value or set to a valid number, complete documentation
"""Denormalize an image.
Parameters
----------
img : [type]
Input image to denormalize
mode : int or str, optional
Predefined denormalizations, by default 0
If 1 or 'rgb'...
        If 2 or 'bgr'...,
If 3 or 'basic'...
Else do nothing
Returns
-------
[type]
Denormalized image
"""
# _img = t2n(img)
# _img = _img.copy()
image = t2n(img).copy().astype("float")
if mode in [1, "rgb"]:
mu = np.array([0.485, 0.456, 0.406])
var = np.array([0.229, 0.224, 0.225])
image = _denorm(image, mu, var)
elif mode in [2, "bgr"]:
mu = np.array([102.9801, 115.9465, 122.7717])
var = np.array([1, 1, 1])
image = _denorm(image, mu, var, bgr2rgb=True).clip(0, 255).round()
elif mode in [3, "basic"]:
mu = np.array([0.5, 0.5, 0.5])
var = np.array([0.5, 0.5, 0.5])
image = _denorm(image, mu, var)
# TODO: Add a case for 0 or None and else raise an error exception.
return image
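# Illustrative usage sketch (not part of the original module): undoing ImageNet
# normalization on a channel-first tensor before visualization.
#   >>> x = torch.randn(3, 8, 8)            # normalized CHW image
#   >>> img = denormalize(x, mode="rgb")    # Numpy array with mean/std undone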
# def get_image(imgs, mask=None, label=False, enlarge=0, gray=False, denorm=0,
# bbox_yxyx=None, annList=None, pretty=False, pointList=None,
# **options): # TODO: Issam, can you document this?
# """[summary]
# Parameters
# ----------
# imgs : [type]
# [description]
# mask : [type], optional
# [description], by default None
# label : bool, optional
# [description], by default False
# enlarge : int, optional
# [description], by default 0
# gray : bool, optional
# [description], by default False
# denorm : int, optional
# [description], by default 0
# bbox_yxyx : [type], optional
# [description], by default None
# annList : [type], optional
# [description], by default None
# pretty : bool, optional
# [description], by default False
# pointList : [type], optional
# [description], by default None
# Returns
# -------
# [type]
# [description]
# """
# # TODO: Comment these transformations and make sure they are correct. Difficult to follow.
# imgs = denormalize(imgs, mode=denorm)
# if isinstance(imgs, Image.Image):
# imgs = np.array(imgs)
# if isinstance(mask, Image.Image):
# mask = np.array(mask)
# imgs = t2n(imgs).copy()
# imgs = l2f(imgs)
# if pointList is not None and len(pointList):
# h, w = pointList[0]["h"], pointList[0]["w"]
# mask_points = np.zeros((h, w))
# for p in pointList:
# y, x = p["y"], p["x"]
# mask_points[int(h*y), int(w*x)] = 1
# imgs = maskOnImage(imgs, mask_points, enlarge=1)
# if pretty or annList is not None:
# imgs = pretty_vis(imgs, annList, **options)
# imgs = l2f(imgs)
# if mask is not None and mask.sum() != 0:
# imgs = maskOnImage(imgs, mask, enlarge)
# if bbox_yxyx is not None:
# _, _, h, w = imgs.shape
# mask = bbox_yxyx_2_mask(bbox_yxyx, h, w)
# imgs = maskOnImage(imgs, mask, enlarge=1)
# # LABEL
# elif (not gray) and (label or imgs.ndim == 2 or
# (imgs.ndim == 3 and imgs.shape[0] != 3) or
# (imgs.ndim == 4 and imgs.shape[1] != 3)):
# imgs = label2Image(imgs)
# if enlarge:
# imgs = zoom(imgs, 11)
# # Make sure it is 4-dimensional
# if imgs.ndim == 3:
# imgs = imgs[np.newaxis]
# return imgs
def show_image(fname): # TODO: Why the input is a filename instead of an image?
"""Load and image from hard disk and plot it.
Parameters
----------
fname : str
Name of an image to load and show
"""
ncols = 1 # TODO: Magic numbers
nrows = 1
height = 12
width = 12
fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(ncols * width, nrows * height))
if not hasattr(axs, "size"): # plt.subplots returns a single Axes (not an array) when nrows == ncols == 1; wrap it so it can be indexed below
axs = [[axs]]
for i in range(ncols):
img = plt.imread(fname)
axs[0][i].imshow(img)
axs[0][i].set_axis_off()
axs[0][i].set_title("%s" % (fname))
plt.axis("off")
plt.tight_layout()
plt.show()
def shrink2roi(img, roi):
"""[summary]
Parameters
----------
img : [type]
[description]
roi : [type]
[description]
Returns
-------
[type]
[description]
"""
ind = np.where(roi != 0)
y_min = min(ind[0])
y_max = max(ind[0])
x_min = min(ind[1])
x_max = max(ind[1])
return img[y_min:y_max, x_min:x_max]
@contextlib.contextmanager
def random_seed(seed):
"""[summary]
Parameters
----------
seed : [type]
[description]
"""
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
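# Illustrative usage sketch (not part of the original module): random_seed makes
# a block reproducible without disturbing the surrounding RNG state.
#   >>> with random_seed(0):
#   ...     a = np.random.rand(3)
#   >>> with random_seed(0):
#   ...     b = np.random.rand(3)
#   >>> np.allclose(a, b)   # True; the previous global state is restored afterwards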
def is_subset(d1, d2, strict=False):
"""[summary]
Parameters
----------
d1 : [type]
[description]
d2 : [type]
[description]
Returns
-------
[type]
[description]
"""
flag = True
for k in d1:
v1, v2 = d1.get(k), d2.get(k)
# if both are values
if not isinstance(v2, dict) and not isinstance(v1, dict):
if v1 != v2:
flag = False
# if both are dicts
elif isinstance(v2, dict) and isinstance(v1, dict):
flag = is_subset(v1, v2)
# if d1 is dict and not d2
elif isinstance(v1, dict) and not isinstance(v2, dict):
flag = False
# if d1 is not and d2 is dict
elif not isinstance(v1, dict) and isinstance(v2, dict):
flag = False
if flag is False:
break
return flag
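# Illustrative usage sketch (not part of the original module): is_subset compares
# (possibly nested) dictionaries key by key.
#   >>> is_subset({"lr": 0.1}, {"lr": 0.1, "opt": "sgd"})                    # True
#   >>> is_subset({"model": {"name": "mlp"}}, {"model": {"name": "mlp"}})    # True
#   >>> is_subset({"lr": 0.1}, {"lr": 0.01})                                 # False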
def as_double_list(v):
"""[summary]
Parameters
----------
v : [type]
[description]
Returns
-------
[type]
[description]
"""
if not isinstance(v, list) and not isinstance(v, np.ndarray):
v = [v]
if not isinstance(v[0], list) and not isinstance(v[0], np.ndarray):
v = [v]
return v
def ignore_duplicates(list_of_dicts):
# ensure no duplicates in exp_list
dict_list = []
hash_list = set()
for data_dict in list_of_dicts:
dict_id = hash_dict(data_dict)
if dict_id in hash_list:
continue
else:
hash_list.add(dict_id)
dict_list += [data_dict]
return dict_list
def filter_duplicates(list_of_dicts):
# ensure no duplicates in exp_list
tmp_list = []
hash_list = set()
for data_dict in list_of_dicts:
dict_id = hash_dict(data_dict)
if dict_id in hash_list:
continue
else:
hash_list.add(dict_id)
tmp_list += [data_dict]
return tmp_list
def check_duplicates(list_of_dicts):
# ensure no duplicates in exp_list
hash_list = set()
for data_dict in list_of_dicts:
dict_id = hash_dict(data_dict)
if dict_id in hash_list:
raise ValueError("duplicated dictionary detected:\n%s" % pprint.pformat(data_dict))
else:
hash_list.add(dict_id)
def load_py(fname):
"""[summary]
Parameters
----------
fname : [type]
[description]
Returns
-------
[type]
[description]
"""
import sys
from importlib import reload
from importlib import import_module
if not os.path.exists(fname):
raise ValueError("%s not found..." % fname)
sys.path.append(os.path.dirname(fname))
name = os.path.split(fname)[-1].replace(".py", "")
module = import_module(name)
reload(module)
sys.path.pop()
return module
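# Illustrative usage sketch (not part of the original module); the file name below
# is hypothetical.
#   >>> configs = load_py("exp_configs.py")   # imports and reloads the module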
def get_exp_list_from_ids(exp_id_list, savedir_base):
exp_list = []
for exp_id in exp_id_list:
exp_list += [load_json(os.path.join(savedir_base, exp_id, "exp_dict.json"))]
return exp_list
def flatten_dict(key_name, v_dict):
if not isinstance(v_dict, dict):
return {key_name: v_dict}
leaf_dict = {}
for k in v_dict:
if key_name != "":
k_new = key_name + "." + k
else:
k_new = k
leaf_dict.update(flatten_dict(key_name=k_new, v_dict=v_dict[k]))
return leaf_dict
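# Illustrative usage sketch (not part of the original module): flatten_dict turns
# nested keys into dotted keys.
#   >>> flatten_dict("", {"model": {"name": "mlp", "depth": 2}, "lr": 0.1})
#   {'model.name': 'mlp', 'model.depth': 2, 'lr': 0.1}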
def get_diff_hparam(exp_list):
df = pd.DataFrame([flatten_column(e) for e in exp_list])
return get_diff_columns(df, min_threshold=2, max_threshold="auto")
def get_diff_columns(df, min_threshold=2, max_threshold="auto"):
df.reset_index()
if max_threshold == "auto":
max_threshold = df.shape[0]
if max_threshold < 0:
max_threshold = df.shape[0] + max_threshold
column_count = []
for column in df.columns:
_set = set([str(v) for v in df[column].values])
column_count.append(len(_set))
indices = np.arange(len(df.columns))
column_count = np.array(column_count)
indices = indices[(column_count >= min_threshold) & (column_count <= max_threshold)]
diff_columns = [df.columns[i] for i in indices]
return diff_columns
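# Illustrative usage sketch (not part of the original module): columns whose
# values are constant across rows are filtered out.
#   >>> df = pd.DataFrame([{"lr": 0.1, "opt": "sgd"}, {"lr": 0.01, "opt": "sgd"}])
#   >>> get_diff_columns(df)   # ['lr']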
def collate_fn(batch, mode="list"):
if mode == "list":
batch_dict = {}
for k in batch[0]:
batch_dict[k] = []
for i in range(len(batch)):
batch_dict[k] += [batch[i][k]]
return batch_dict
elif mode == "default":
return torch.utils.data.dataloader.default_collate(batch)
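# Illustrative usage sketch (not part of the original module): in "list" mode the
# batch is transposed from a list of dicts into a dict of lists.
#   >>> batch = [{"image": "img0", "label": 0}, {"image": "img1", "label": 1}]
#   >>> collate_fn(batch, mode="list")
#   {'image': ['img0', 'img1'], 'label': [0, 1]}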
def timeit(func, n_times=10, **args):
# The first call is treated as a warm-up and is excluded from the timing,
# so n_times must be at least 2.
for i in range(n_times):
if i == 1:
s = time.time()
func(**args)
print("time:", (time.time() - s) / (n_times - 1))
# Create Helper
def make_binary_linear(n, d, margin, separable=True, seed=42):
np.random.seed(seed)
labels = [-1, 1]
w = np.random.randn(d)
w /= np.linalg.norm(w)
p = np.random.randn(d - 1)
l = (-p @ w[: d - 1]) / w[-1]
p = np.append(p, [l])
v0 = p - margin * w
v1 = p + margin * w
yv = np.copy(labels)
# Start generating points with rejection sampling
X = []
y = []
for i in range(n - 2):
s = 1
label = np.random.choice(labels)
# Generate a random point with mean at the center
xi = np.random.randn(d)
xi = (xi / np.linalg.norm(xi)) * s
dist = xi @ w
while dist * label <= margin:
u = v0 - v1 if label == -1 else v1 - v0
u /= np.linalg.norm(u)
xi = xi + u
xi = (xi / np.linalg.norm(xi)) * s
dist = xi @ w
X.append(xi)
y.append(label)
X = np.array(X).astype(float)
y = np.array(y)
# shuffle
ind = np.random.permutation(n - 2)
X = X[ind]
y = y[ind]
# Put the support vectors at the beginning
X = np.r_[np.array([v0, v1]), X]
y = np.r_[np.array(yv), y]
if not separable:
flip_ind = np.random.choice(n, int(n * 0.01))
y[flip_ind] = -y[flip_ind]
y[y == -1] = 0
X = np.c_[np.ones(n), X]
return X, y
def get_split_torch_dataset(X, y, split):
from sklearn.model_selection import train_test_split
splits = train_test_split(X, y, test_size=0.2, shuffle=False, random_state=42)
X_train, X_test, Y_train, Y_test = splits
X_train, X_test = torch.FloatTensor(X_train), torch.FloatTensor(X_test)
Y_train, Y_test = torch.LongTensor(Y_train), torch.LongTensor(Y_test)
if split == "train":
dataset = torch.utils.data.TensorDataset(X_train, Y_train)
elif split == "val":
dataset = torch.utils.data.TensorDataset(X_test, Y_test)
dataset.n_input = X.shape[1]
return dataset
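# Illustrative usage sketch (not part of the original module): generating a
# separable binary dataset and wrapping the training split for Pytorch.
#   >>> X, y = make_binary_linear(n=100, d=5, margin=0.5)
#   >>> train_set = get_split_torch_dataset(X, y, split="train")
#   >>> train_set.n_input   # 6: the d features plus the bias column added by make_binary_linear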
|
python_instance.py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -*- encoding: utf-8 -*-
"""python_instance.py: Python Instance for running python functions
"""
import base64
import os
import signal
import time
try:
import Queue as queue
except:
import queue
import threading
import sys
import re
import pulsar
import contextimpl
import Function_pb2
import log
import util
import InstanceCommunication_pb2
from functools import partial
from collections import namedtuple
from function_stats import Stats
Log = log.Log
# Equivalent of the InstanceConfig in Java
InstanceConfig = namedtuple('InstanceConfig', 'instance_id function_id function_version function_details max_buffered_tuples')
# This is the message that the consumers put on the queue for the function thread to process
InternalMessage = namedtuple('InternalMessage', 'message topic serde consumer')
InternalQuitMessage = namedtuple('InternalQuitMessage', 'quit')
DEFAULT_SERIALIZER = "serde.IdentitySerDe"
PY3 = sys.version_info[0] >= 3
def base64ify(bytes_or_str):
if PY3 and isinstance(bytes_or_str, str):
input_bytes = bytes_or_str.encode('utf8')
else:
input_bytes = bytes_or_str
output_bytes = base64.urlsafe_b64encode(input_bytes)
if PY3:
return output_bytes.decode('ascii')
else:
return output_bytes
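# Illustrative usage sketch (not part of the original file): base64ify accepts
# both str and bytes and returns a url-safe base64 encoding.
#   >>> base64ify(b"hello")   # 'aGVsbG8=' on Python 3
#   >>> base64ify("hello")    # same value; str input is utf-8 encoded first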
class PythonInstance(object):
def __init__(self, instance_id, function_id, function_version, function_details, max_buffered_tuples,
expected_healthcheck_interval, user_code, pulsar_client, secrets_provider, cluster_name):
self.instance_config = InstanceConfig(instance_id, function_id, function_version, function_details, max_buffered_tuples)
self.user_code = user_code
self.queue = queue.Queue(max_buffered_tuples)
self.log_topic_handler = None
if function_details.logTopic is not None and function_details.logTopic != "":
self.log_topic_handler = log.LogTopicHandler(str(function_details.logTopic), pulsar_client)
self.pulsar_client = pulsar_client
self.input_serdes = {}
self.consumers = {}
self.output_serde = None
self.function_class = None
self.function_purefunction = None
self.producer = None
self.execution_thread = None
self.atmost_once = self.instance_config.function_details.processingGuarantees == Function_pb2.ProcessingGuarantees.Value('ATMOST_ONCE')
self.atleast_once = self.instance_config.function_details.processingGuarantees == Function_pb2.ProcessingGuarantees.Value('ATLEAST_ONCE')
self.auto_ack = self.instance_config.function_details.autoAck
self.contextimpl = None
self.last_health_check_ts = time.time()
self.timeout_ms = function_details.source.timeoutMs if function_details.source.timeoutMs > 0 else None
self.expected_healthcheck_interval = expected_healthcheck_interval
self.secrets_provider = secrets_provider
self.metrics_labels = [function_details.tenant,
"%s/%s" % (function_details.tenant, function_details.namespace),
function_details.name,
instance_id, cluster_name,
"%s/%s/%s" % (function_details.tenant, function_details.namespace, function_details.name)]
self.stats = Stats(self.metrics_labels)
def health_check(self):
self.last_health_check_ts = time.time()
health_check_result = InstanceCommunication_pb2.HealthCheckResult()
health_check_result.success = True
return health_check_result
def process_spawner_health_check_timer(self):
if time.time() - self.last_health_check_ts > self.expected_healthcheck_interval * 3:
Log.critical("Haven't received health check from spawner in a while. Stopping instance...")
os.kill(os.getpid(), signal.SIGKILL)
sys.exit(1)
def run(self):
# Setup consumers and input deserializers
mode = pulsar._pulsar.ConsumerType.Shared
if self.instance_config.function_details.source.subscriptionType == Function_pb2.SubscriptionType.Value("FAILOVER"):
mode = pulsar._pulsar.ConsumerType.Failover
subscription_name = str(self.instance_config.function_details.tenant) + "/" + \
str(self.instance_config.function_details.namespace) + "/" + \
str(self.instance_config.function_details.name)
properties = util.get_properties(util.getFullyQualifiedFunctionName(
self.instance_config.function_details.tenant,
self.instance_config.function_details.namespace,
self.instance_config.function_details.name),
self.instance_config.instance_id)
for topic, serde in self.instance_config.function_details.source.topicsToSerDeClassName.items():
if not serde:
serde_kclass = util.import_class(os.path.dirname(self.user_code), DEFAULT_SERIALIZER)
else:
serde_kclass = util.import_class(os.path.dirname(self.user_code), serde)
self.input_serdes[topic] = serde_kclass()
Log.debug("Setting up consumer for topic %s with subname %s" % (topic, subscription_name))
self.consumers[topic] = self.pulsar_client.subscribe(
str(topic), subscription_name,
consumer_type=mode,
message_listener=partial(self.message_listener, self.input_serdes[topic]),
unacked_messages_timeout_ms=int(self.timeout_ms) if self.timeout_ms else None,
properties=properties
)
for topic, consumer_conf in self.instance_config.function_details.source.inputSpecs.items():
if not consumer_conf.serdeClassName:
serde_kclass = util.import_class(os.path.dirname(self.user_code), DEFAULT_SERIALIZER)
else:
serde_kclass = util.import_class(os.path.dirname(self.user_code), consumer_conf.serdeClassName)
self.input_serdes[topic] = serde_kclass()
Log.debug("Setting up consumer for topic %s with subname %s" % (topic, subscription_name))
if consumer_conf.isRegexPattern:
self.consumers[topic] = self.pulsar_client.subscribe(
re.compile(str(topic)), subscription_name,
consumer_type=mode,
message_listener=partial(self.message_listener, self.input_serdes[topic]),
unacked_messages_timeout_ms=int(self.timeout_ms) if self.timeout_ms else None,
properties=properties
)
else:
self.consumers[topic] = self.pulsar_client.subscribe(
str(topic), subscription_name,
consumer_type=mode,
message_listener=partial(self.message_listener, self.input_serdes[topic]),
unacked_messages_timeout_ms=int(self.timeout_ms) if self.timeout_ms else None,
properties=properties
)
function_kclass = util.import_class(os.path.dirname(self.user_code), self.instance_config.function_details.className)
if function_kclass is None:
Log.critical("Could not import User Function Module %s" % self.instance_config.function_details.className)
raise NameError("Could not import User Function Module %s" % self.instance_config.function_details.className)
try:
self.function_class = function_kclass()
except:
self.function_purefunction = function_kclass
self.contextimpl = contextimpl.ContextImpl(self.instance_config, Log, self.pulsar_client,
self.user_code, self.consumers,
self.secrets_provider, self.metrics_labels)
# Now launch a thread that does execution
self.execution_thread = threading.Thread(target=self.actual_execution)
self.execution_thread.start()
# start process spawner health check timer
self.last_health_check_ts = time.time()
if self.expected_healthcheck_interval > 0:
timer = util.FixedTimer(self.expected_healthcheck_interval, self.process_spawner_health_check_timer, name="health-check-timer")
timer.start()
def actual_execution(self):
Log.debug("Started Thread for executing the function")
while True:
try:
msg = self.queue.get(True)
if isinstance(msg, InternalQuitMessage):
break
Log.debug("Got a message from topic %s" % msg.topic)
# deserialize message
input_object = msg.serde.deserialize(msg.message.data())
# set current message in context
self.contextimpl.set_current_message_context(msg.message, msg.topic)
output_object = None
self.saved_log_handler = None
if self.log_topic_handler is not None:
self.saved_log_handler = log.remove_all_handlers()
log.add_handler(self.log_topic_handler)
successfully_executed = False
try:
# get user function start time for statistic calculation
self.stats.set_last_invocation(time.time())
# start timer for process time
self.stats.process_time_start()
if self.function_class is not None:
output_object = self.function_class.process(input_object, self.contextimpl)
else:
output_object = self.function_purefunction.process(input_object)
successfully_executed = True
# stop timer for process time
self.stats.process_time_end()
except Exception as e:
Log.exception("Exception while executing user method")
self.stats.incr_total_user_exceptions(e)
if self.log_topic_handler is not None:
log.remove_all_handlers()
log.add_handler(self.saved_log_handler)
if successfully_executed:
self.process_result(output_object, msg)
self.stats.incr_total_processed_successfully()
except Exception as e:
Log.error("Uncaught exception in Python instance: %s" % e);
self.stats.incr_total_sys_exceptions(e)
def done_producing(self, consumer, orig_message, result, sent_message):
if result == pulsar.Result.Ok and self.auto_ack and self.atleast_once:
consumer.acknowledge(orig_message)
def process_result(self, output, msg):
if output is not None and self.instance_config.function_details.sink.topic != None and \
len(self.instance_config.function_details.sink.topic) > 0:
if self.output_serde is None:
self.setup_output_serde()
if self.producer is None:
self.setup_producer()
# serialize function output
output_bytes = self.output_serde.serialize(output)
if output_bytes is not None:
props = {"__pfn_input_topic__" : str(msg.topic), "__pfn_input_msg_id__" : base64ify(msg.message.message_id().serialize())}
self.producer.send_async(output_bytes, partial(self.done_producing, msg.consumer, msg.message), properties=props)
elif self.auto_ack and self.atleast_once:
msg.consumer.acknowledge(msg.message)
def setup_output_serde(self):
if self.instance_config.function_details.sink.serDeClassName != None and \
len(self.instance_config.function_details.sink.serDeClassName) > 0:
serde_kclass = util.import_class(os.path.dirname(self.user_code), self.instance_config.function_details.sink.serDeClassName)
self.output_serde = serde_kclass()
else:
global DEFAULT_SERIALIZER
serde_kclass = util.import_class(os.path.dirname(self.user_code), DEFAULT_SERIALIZER)
self.output_serde = serde_kclass()
def setup_producer(self):
if self.instance_config.function_details.sink.topic != None and \
len(self.instance_config.function_details.sink.topic) > 0:
Log.debug("Setting up producer for topic %s" % self.instance_config.function_details.sink.topic)
self.producer = self.pulsar_client.create_producer(
str(self.instance_config.function_details.sink.topic),
block_if_queue_full=True,
batching_enabled=True,
batching_max_publish_delay_ms=1,
# set send timeout to be infinity to prevent potential deadlock with consumer
# that might happen when consumer is blocked due to unacked messages
send_timeout_millis=0,
max_pending_messages=100000,
properties=util.get_properties(util.getFullyQualifiedFunctionName(
self.instance_config.function_details.tenant,
self.instance_config.function_details.namespace,
self.instance_config.function_details.name),
self.instance_config.instance_id)
)
def message_listener(self, serde, consumer, message):
# increment number of received records from source
self.stats.incr_total_received()
item = InternalMessage(message, message.topic_name(), serde, consumer)
self.queue.put(item, True)
if self.atmost_once and self.auto_ack:
consumer.acknowledge(message)
def get_and_reset_metrics(self):
# First get any user metrics
metrics = self.get_metrics()
self.reset_metrics()
return metrics
def reset_metrics(self):
self.stats.reset()
self.contextimpl.reset_metrics()
def get_metrics(self):
total_received = self.stats.get_total_received()
total_processed_successfully = self.stats.get_total_processed_successfully()
total_user_exceptions = self.stats.get_total_user_exceptions()
total_sys_exceptions = self.stats.get_total_sys_exceptions()
avg_process_latency_ms = self.stats.get_avg_process_latency()
last_invocation = self.stats.get_last_invocation()
total_received_1min = self.stats.get_total_received_1min()
total_processed_successfully_1min = self.stats.get_total_processed_successfully_1min()
total_user_exceptions_1min = self.stats.get_total_user_exceptions_1min()
total_sys_exceptions_1min = self.stats.get_total_sys_exceptions_1min()
avg_process_latency_ms_1min = self.stats.get_avg_process_latency_1min()
metrics_data = InstanceCommunication_pb2.MetricsData()
# total metrics
metrics_data.receivedTotal = int(total_received) if sys.version_info.major >= 3 else long(total_received)
metrics_data.processedSuccessfullyTotal = int(total_processed_successfully) if sys.version_info.major >= 3 else long(total_processed_successfully)
metrics_data.systemExceptionsTotal = int(total_sys_exceptions) if sys.version_info.major >= 3 else long(total_sys_exceptions)
metrics_data.userExceptionsTotal = int(total_user_exceptions) if sys.version_info.major >= 3 else long(total_user_exceptions)
metrics_data.avgProcessLatency = avg_process_latency_ms
metrics_data.lastInvocation = int(last_invocation) if sys.version_info.major >= 3 else long(last_invocation)
# 1min metrics
metrics_data.receivedTotal_1min = int(total_received_1min) if sys.version_info.major >= 3 else long(total_received_1min)
metrics_data.processedSuccessfullyTotal_1min = int(
total_processed_successfully_1min) if sys.version_info.major >= 3 else long(total_processed_successfully_1min)
metrics_data.systemExceptionsTotal_1min = int(total_sys_exceptions_1min) if sys.version_info.major >= 3 else long(
total_sys_exceptions_1min)
metrics_data.userExceptionsTotal_1min = int(total_user_exceptions_1min) if sys.version_info.major >= 3 else long(
total_user_exceptions_1min)
metrics_data.avgProcessLatency_1min = avg_process_latency_ms_1min
# get any user metrics
user_metrics = self.contextimpl.get_metrics()
for metric_name, value in user_metrics.items():
metrics_data.userMetrics[metric_name] = value
return metrics_data
def add_system_metrics(self, metric_name, value, metrics):
metrics.metrics[metric_name].count = value
metrics.metrics[metric_name].sum = value
metrics.metrics[metric_name].min = 0
metrics.metrics[metric_name].max = value
def get_function_status(self):
status = InstanceCommunication_pb2.FunctionStatus()
status.running = True
total_received = self.stats.get_total_received()
total_processed_successfully = self.stats.get_total_processed_successfully()
total_user_exceptions = self.stats.get_total_user_exceptions()
total_sys_exceptions = self.stats.get_total_sys_exceptions()
avg_process_latency_ms = self.stats.get_avg_process_latency()
last_invocation = self.stats.get_last_invocation()
status.numReceived = int(total_received) if sys.version_info.major >= 3 else long(total_received)
status.numSuccessfullyProcessed = int(total_processed_successfully) if sys.version_info.major >= 3 else long(total_processed_successfully)
status.numUserExceptions = int(total_user_exceptions) if sys.version_info.major >= 3 else long(total_user_exceptions)
status.instanceId = self.instance_config.instance_id
for ex, tm in self.stats.latest_user_exception:
to_add = status.latestUserExceptions.add()
to_add.exceptionString = ex
to_add.msSinceEpoch = tm
status.numSystemExceptions = int(total_sys_exceptions) if sys.version_info.major >= 3 else long(total_sys_exceptions)
for ex, tm in self.stats.latest_sys_exception:
to_add = status.latestSystemExceptions.add()
to_add.exceptionString = ex
to_add.msSinceEpoch = tm
status.averageLatency = avg_process_latency_ms
status.lastInvocationTime = int(last_invocation) if sys.version_info.major >= 3 else long(last_invocation)
return status
def join(self):
self.queue.put(InternalQuitMessage(True), True)
self.execution_thread.join()
self.close()
def close(self):
Log.info("Closing python instance...")
if self.producer:
self.producer.close()
if self.consumers:
for consumer in self.consumers.values():
try:
consumer.close()
except:
pass
if self.pulsar_client:
self.pulsar_client.close()
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import functools
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import unittest
from absl.testing import parameterized
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
# If the below import is made available through the BUILD rule, then this
# function is overridden and will instead return True and cause TensorFlow
# graphs to be compiled with XLA.
def is_xla_enabled():
return False
try:
from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top
except:
pass
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
expected: The `GraphDef` we expected.
actual: The `GraphDef` we have.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True,
hash_table_shared_name=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
hash_table_shared_name: boolean determining whether to ignore randomized
shared_names that appear in HashTableV2 op defs.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2,
hash_table_shared_name)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
if hash_table_shared_name:
_strip_hash_table_shared_name(actual)
_strip_hash_table_shared_name(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEqual(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
_TABLE_SHARED_NAME_PATTERN = r"hash_table_[0-9a-z\-]+"
def _strip_hash_table_shared_name(graph_def):
for node in graph_def.node:
delete_keys = []
if node.op == "HashTableV2" and "shared_name" in node.attr:
if re.match(_TABLE_SHARED_NAME_PATTERN, str(node.attr["shared_name"].s)):
delete_keys.append("shared_name")
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
def IsBuiltWithROCm():
return pywrap_tensorflow.IsBuiltWithROCm()
def GpuSupportsHalfMatMulAndConv():
return pywrap_tensorflow.GpuSupportsHalfMatMulAndConv()
def IsMklEnabled():
return pywrap_tensorflow.IsMklEnabled()
def InstallStackTraceHandler():
pywrap_tensorflow.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
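# Illustrative usage sketch (not part of the original file): both helpers also
# accept plain shape arrays.
#   >>> NHWCToNCHW([8, 224, 224, 3])   # [8, 3, 224, 224]
#   >>> NCHWToNHWC([8, 3, 224, 224])   # [8, 224, 224, 3]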
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
return fn(*args, **kwargs)
return wrapper
return real_skip_if
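# Illustrative usage sketch (not part of the original file): the condition may be
# a value or a callable evaluated at call time.
#   @skip_if(lambda: not gpu_device_name())
#   def testNeedsGpu(self):
#     ...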
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix) and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def assert_no_new_pyobjects_executing_eagerly(f):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C exercised by
a bit of Python.
"""
def decorator(self, **kwargs):
"""Warms up, gets an object count, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
# Run the test 2 times as warmup, in an attempt to fill up caches, which
# should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various versions
# of python2.7.x.
for _ in range(2):
f(self, **kwargs)
gc.collect()
previous_count = len(gc.get_objects())
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, **kwargs)
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
new_count = len(gc.get_objects())
# In some cases (specifically on macOS), new_count is somehow
# smaller than previous_count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert new_count <= previous_count, (
"new_count(%d) is not less than or equal to previous_count(%d)" %
(new_count, previous_count))
gc.enable()
return decorator
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except ReferenceError:
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
result = f(self, **kwargs)
else:
result = f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return result
return decorator
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, blacklist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(blacklist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in blacklist:
if b is obj:
return "<test code>"
if obj is blacklist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human-readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, blacklist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
blacklist: same as blacklist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, blacklist):
return "{}{}".format(get_ignore_reason(obj, blacklist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, blacklist):
"""Builds a reference graph as <referrer> -> <list of refferents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
blacklist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
blacklist = blacklist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, blacklist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, blacklist)
reprs[r_id] = describe(r, blacklist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
result = f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return result
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
if not kwargs:
return [OrderedDict()]
sort_by_key = lambda k: k[0][0]
kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
first = list(kwargs.items())[0]
rest = dict(list(kwargs.items())[1:])
rest_combined = _combine_named_parameters(**rest)
key = first[0]
values = first[1]
if not isinstance(values, list):
values = [values]
combinations = [
OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
for v in values
for combined in rest_combined
]
return combinations
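# Illustrative usage sketch (not part of the original file): each keyword expands
# into one key per generated combination.
#   >>> _combine_named_parameters(mode=["graph", "eager"], use_gpu=True)
#   [OrderedDict([('mode', 'graph'), ('use_gpu', True)]),
#    OrderedDict([('mode', 'eager'), ('use_gpu', True)])]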
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
dictionaries returned. The 'testcase_name' key is a required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) +
[("testcase_name", "_test{}".format(name))]))
return named_combinations
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith(
unittest.TestLoader.testMethodPrefix) and not (
name.startswith("testSkipEager") or
name.startswith("test_skip_eager") or name == "test_session"):
setattr(cls, name, base_decorator(value))
return cls
def build_as_function_and_v1_graph(func=None):
"""Run a test case in v1 graph mode and inside tf.function in eager mode.
WARNING: This decorator can only be used in test cases that statically checks
generated graph. Attempting to evaluate graph or function results via.
session.run() or self.evaluate() will fail.
WARNING: This decorator can only be used for test cases that inherit from
absl.testing.parameterized.TestCase.
Args:
func: Test case function to be decorated.
Returns:
Decorated test case function.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_mode_and_function` only supports test methods.")
@parameterized.named_parameters(("_v1_graph", "v1_graph"),
("_function", "function"))
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
if run_mode == "v1_graph":
with ops.Graph().as_default():
f(self, *args, **kwargs)
elif run_mode == "function":
@def_function.function
def function_in_eager():
f(self, *args, **kwargs)
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
function_in_eager()
ops.dismantle_graph(graph_for_eager_test)
else:
raise ValueError("Unknown run mode %s" % run_mode)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
reset_test=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.compat.v1.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the session
when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
reset_test: If True, tearDown and SetUp the test case between the two
executions of the test (once with and once without eager execution).
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
if reset_test:
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return decorated
if func is not None:
return decorator(func)
return decorator
def py_func_if_in_function(f):
def decorated(*args, **kwds):
if not ops.get_default_graph()._building_function:
return f(*args, **kwds)
tensor_args = []
tensor_indices = []
for i, arg in enumerate(args):
if isinstance(arg, (ops.Tensor, variables.Variable)):
tensor_args.append(arg)
tensor_indices.append(i)
def inner_f(*inner_tensor_args):
my_args = list(args)
for i, n in zip(tensor_indices, inner_tensor_args):
my_args[i] = n
return f(*my_args, **kwds)
return script_ops.py_func(inner_f, tensor_args, [])
return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
"""Runs the decorated test twice--once as is, once inside a tf.function.
This allows you to run a test both in eager execution and inside a
tf.function, exercising the two execution modes supported in tf 2.0. The test
assertions are automatically done inside tf.py_funcs, and tf.function ensures
that they run in the proper order and with the proper side effects.
Currently variable creation is not supported in tests annotated with this
decorator since it's tricky to ensure the variable doesn't get repeatedly
created when retracing the tf.function.
Args:
f: the test method to be decorated
Returns:
The decorated test method, which will run both in eager and inside a
tf.function.
"""
def decorated(*args, **kwds):
def bound_f():
f(*args, **kwds)
with context.eager_mode():
# Running in eager mode
bound_f()
# Running as TF function
# TODO(b/121143941): Remove the autograph override.
def_function.function(bound_f, autograph=False)()
return decorated
def deprecated_graph_mode_only(func=None):
"""Execute the decorated test in graph mode.
This function returns a decorator intended to be applied to tests that are not
compatible with eager mode. When this decorator is applied, the test body will
be run in an environment where API calls construct graphs instead of executing
eagerly.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None, this
      returns the decorator applied to `func`.
Returns:
Returns a decorator that will run the decorated test method in graph mode.
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if tf2.enabled():
with context.graph_mode():
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
run_deprecated_v1 = deprecated_graph_mode_only
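# Editor's note: illustrative sketch only (not part of the original module).
# `run_deprecated_v1` (an alias of `deprecated_graph_mode_only`) is typically
# applied to tests that rely on graph-only APIs such as placeholders. The
# class below is hypothetical and wrapped in a helper so nothing executes at
# import time.
def _editor_example_run_deprecated_v1():
  from tensorflow.python.ops import array_ops
  class PlaceholderExample(googletest.TestCase):
    @run_deprecated_v1
    def test_placeholder_shape(self):
      # The decorator forces graph mode, where placeholders are allowed even
      # when TF 2.x behavior is enabled.
      x = array_ops.placeholder(dtypes.float32, shape=[None, 3])
      self.assertEqual([None, 3], x.shape.as_list())
  return PlaceholderExample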
def run_v1_only(reason, func=None):
"""Execute the decorated test only if running in v1 mode.
This function is intended to be applied to tests that exercise v1 only
functionality. If the test is run in v2 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
reason: string giving a reason for limiting the test to v1 only.
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None, this
      returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
if not isinstance(reason, str):
raise ValueError("'reason' should be string, got {}".format(type(reason)))
def decorator(f):
if tf_inspect.isclass(f):
# To skip an entire test suite class, we only decorate the setUp method
# to skip all tests. There are cases when setUp is not defined (not
# overridden in subclasses of TestCase, so not available in f.__dict__
# below). For those cases, we walk the method resolution order list and
# pick the first setUp method we find (usually this should be the one in
# the parent class since that's the TestCase class).
for cls in type.mro(f):
setup = cls.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
break
return f
else:
# If f is just a function, just create a decorator for it and return it
def decorated(self, *args, **kwargs):
if tf2.enabled():
self.skipTest(reason)
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
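# Editor's note: illustrative sketch only (not part of the original module).
# `run_v1_only` takes a human-readable reason and skips the test whenever TF
# 2.x behavior is enabled. Hypothetical names; wrapped in a helper so nothing
# executes at import time.
def _editor_example_run_v1_only():
  from tensorflow.python.framework import constant_op
  class SessionOnlyExample(googletest.TestCase):
    @run_v1_only("relies on the tf.compat.v1.Session API")
    def test_session_run(self):
      # Skipped with the given reason when TF 2.x is enabled.
      with ops.Graph().as_default():
        two = constant_op.constant(2)
        with session.Session() as sess:
          self.assertEqual(2, sess.run(two))
  return SessionOnlyExample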
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None, this
      returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_v2_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest("Test is only comptaible in v2")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None, this
      returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available():
self.skipTest("Test requires GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
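# Editor's note: illustrative sketch only (not part of the original module).
# A GPU-only test is simply skipped on machines without a GPU. Hypothetical
# names; wrapped in a helper so nothing executes at import time.
def _editor_example_run_gpu_only():
  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import math_ops
  class GpuOnlyExample(googletest.TestCase):
    @run_gpu_only
    def test_matmul_shape_on_gpu(self):
      # Skipped automatically when is_gpu_available() returns False.
      with ops.device("/device:GPU:0"):
        x = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
        y = math_ops.matmul(x, x)
      self.assertEqual([2, 2], y.shape.as_list())
  return GpuOnlyExample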
def run_cuda_only(func=None):
  """Execute the decorated test only if a CUDA GPU is available.
  This function is intended to be applied to tests that require the presence
  of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None, this
      returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_cuda_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest("Test requires CUDA GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
  Warning: if a non-GPU version of the package is installed, the function will
  also return False. Use `tf.test.is_built_with_cuda` to validate if TensorFlow
  was built with CUDA support.
Args:
cuda_only: limit the search to CUDA GPUs.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Returns:
True if a GPU device of the requested kind is available.
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(
local_device.physical_device_desc) >=
min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
except errors_impl.NotFoundError as e:
if not all(x in str(e) for x in ["CUDA", "not find"]):
raise e
else:
logging.error(str(e))
return False
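# Editor's note: illustrative sketch only (not part of the original module).
# `is_gpu_available` can also gate on a minimum CUDA compute capability; the
# (7, 0) threshold below is a hypothetical value.
def _editor_example_is_gpu_available():
  if is_gpu_available(cuda_only=True, min_cuda_compute_capability=(7, 0)):
    return "cuda_gpu_with_cc_7_0_or_higher"
  return "cpu_or_older_gpu"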
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
@contextlib.contextmanager
def force_gpu():
"""Force the gpu to be used."""
with ops.device("/device:GPU:0"):
yield
@contextlib.contextmanager
def force_cpu():
"""Force the cpu to be used."""
with ops.device("/device:CPU:0"):
yield
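# Editor's note: illustrative sketch only (not part of the original module).
# The context managers above are thin wrappers around `ops.device`; a test
# body typically uses them as below. Values are hypothetical, and the sketch
# is wrapped in a helper so nothing executes at import time.
def _editor_example_device_helpers():
  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops import math_ops
  with device(use_gpu=True):  # GPU if one is available, otherwise CPU.
    a = math_ops.add(constant_op.constant(1.0), constant_op.constant(2.0))
  with force_cpu():  # Always pinned to /device:CPU:0.
    b = math_ops.add(constant_op.constant(3.0), constant_op.constant(4.0))
  return a, b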
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class FakeEagerSession(object):
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
  Since the feed_dict is empty when placeholders are not used, we should be able
  to call self.evaluate(); however, that would require rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
  def run(self, fetches, *args, **kwargs):
    """Evaluate `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
      raise RuntimeError(
          "feed_dict is not supported when eager execution is enabled "
          "(in this case, sess.run(t) is shorthand for t.numpy())")
if args or kwargs:
      raise RuntimeError(
          "Optional args are not supported when eager execution is enabled "
          "(in this case, sess.run(t) is shorthand for t.numpy())")
return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run()."""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
      # Note: disable logging for OutOfRangeError, which is used as the signal
      # of completion in tf.data tests and would otherwise make their output
      # hard to read.
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
def use_deterministic_cudnn(func):
"""Disable autotuning during the call to this function.
Some tests want to base assertions on a graph being isomorphic with a copy.
To ensure this, this decorator disables autotuning.
Args:
func: Function to run with CUDNN autotuning turned off.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = os.environ.get("TF_CUDNN_DETERMINISTIC", "")
os.environ["TF_CUDNN_DETERMINISTIC"] = "true"
result = f(self, *args, **kwargs)
os.environ["TF_CUDNN_DETERMINISTIC"] = original_var
return result
return decorated
if func is not None:
return decorator(func)
return decorator
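# Editor's note: illustrative sketch only (not part of the original module).
# `use_deterministic_cudnn` sets TF_CUDNN_DETERMINISTIC around the test body
# and restores the previous value afterwards. Hypothetical names; wrapped in a
# helper so nothing executes at import time.
def _editor_example_use_deterministic_cudnn():
  class DeterministicCudnnExample(googletest.TestCase):
    @use_deterministic_cudnn
    def test_env_var_is_set(self):
      # Inside the decorated body, cuDNN autotuning is disabled.
      self.assertEqual("true", os.environ.get("TF_CUDNN_DETERMINISTIC"))
  return DeterministicCudnnExample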
# The description is just for documentation purposes.
def enable_tf_xla_constant_folding(description):
if not isinstance(description, str):
raise ValueError("'description' should be string, got {}".format(
type(description)))
def enable_tf_xla_constant_folding_impl(func):
"""Enable constant folding during the call to this function.
Some tests fail without constant folding.
Args:
func: Function to run with constant folding turned on.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = pywrap_tensorflow.TF_GetXlaConstantFoldingDisabled()
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(False)
result = f(self, *args, **kwargs)
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(original_var)
return result
return decorated
if func is not None:
return decorator(func)
return decorator
return enable_tf_xla_constant_folding_impl
# The description is just for documentation purposes.
def disable_xla(description):
def disable_xla_impl(func):
"""Execute the test method only if xla is not enabled."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
return
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return disable_xla_impl
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
It is expected for the given decorator to take some arguments and return
a method that is then called on the test method to produce a decorated
method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
Returns: Function that will decorate a given classes test methods with the
decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
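# Editor's note: illustrative sketch only (not part of the original module).
# `for_all_test_methods` turns a method-level decorator factory into a
# class-level decorator; here every test method is wrapped with
# `run_v1_only(...)`. Hypothetical names; wrapped in a helper so nothing
# executes at import time.
def _editor_example_for_all_test_methods():
  @for_all_test_methods(run_v1_only, "the whole suite exercises v1-only APIs")
  class AllV1OnlyExample(googletest.TestCase):
    def test_first(self):
      self.assertEqual(4, 2 + 2)
    def test_second(self):
      self.assertEqual(9, 3 * 3)
  return AllV1OnlyExample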
# The description is just for documentation purposes.
def no_xla_auto_jit(description): # pylint: disable=unused-argument
def no_xla_auto_jit_impl(func):
"""This test is not intended to be run with XLA auto jit enabled."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Skip test if using XLA is forced.
return
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return no_xla_auto_jit_impl
# The description is just for documentation purposes.
def xla_allow_fallback(description): # pylint: disable=unused-argument
def xla_allow_fallback_impl(func):
"""Allow fallback to TF even though testing xla."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Update the global XLABuildOpsPassFlags to enable lazy compilation,
# which allows the compiler to fall back to TF classic. Remember the
# old value so that we can reset it.
old_value = pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(True)
result = func(self, *args, **kwargs)
pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(old_value)
return result
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return xla_allow_fallback_impl
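# Editor's note: illustrative sketch only (not part of the original module).
# `xla_allow_fallback` is used when a test exercises ops without XLA kernels:
# with XLA testing enabled, lazy compilation lets those ops fall back to the
# classic TF executor instead of failing to compile. Hypothetical names;
# wrapped in a helper so nothing executes at import time.
def _editor_example_xla_allow_fallback():
  class FallbackExample(googletest.TestCase):
    @xla_allow_fallback("some ops exercised here have no XLA kernel")
    def test_mixed_kernels(self):
      self.assertEqual(4, 2 + 2)
  return FallbackExample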
class EagerSessionWarner(object):
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
if is_xla_enabled():
pywrap_tensorflow.TF_SetXlaAutoJitMode("2")
pywrap_tensorflow.TF_SetXlaMinClusterSize(1)
pywrap_tensorflow.TF_SetXlaEnableLazyCompilation(False)
# Constant folding secretly runs code on TF:Classic CPU, so we also
# disable it here.
pywrap_tensorflow.TF_SetXlaConstantFoldingDisabled(True)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
    # Reset the summary writer in case another test used set_as_default() with
    # its summary writer.
    context.context().summary_writer = None
    # Avoid treating the poorly named test_session method as a real test.
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
    If you call this method multiple times during a test, it will return the
    same folder. However, across different runs the directories will be
    different. This ensures that tests from different runs cannot pollute
    each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
      tempfile.mkdtemp(dir=self.get_temp_dir())
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via using that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
    Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
    in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
  def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
    """Asserts that message is the same as the parsed expected message.
    Creates a new proto of the same type as message, parses the ascii form
    into it, and then compares the two using self._AssertProtoEquals().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
      self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
tensor.values.numpy(),
tensor.dense_shape.numpy())
elif ragged_tensor.is_ragged(tensor):
return ragged_tensor_value.RaggedTensorValue(
tensor.values.numpy(), tensor.row_splits.numpy())
elif isinstance(tensor, ops.IndexedSlices):
return ops.IndexedSlicesValue(
values=tensor.values.numpy(),
indices=tensor.indices.numpy(),
dense_shape=tensor.dense_shape.numpy())
return tensor.numpy()
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method behaves differently than self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
    of the test (in the tearDown method).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session(use_gpu=True) as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminates due to
          an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
    Checks that |f1 - f2| <= err and asserts a test failure
    if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is a tensor then convert it to ndarray
if isinstance(a, ops.Tensor):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# tell user which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join([str(p) for p in path]) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections.Mapping)
if a_is_dict != isinstance(b, collections.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
      # Try to directly compare a, b as ndarrays; if that fails, traverse
      # through the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
@py_func_if_in_function
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Args:
      a: The expected numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested
        structure of these.
      b: The actual numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested
        structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
    In particular, the tolerance is relaxed to 1e-3 if at least
    one of the arguments is of type float16.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
    # Types with looser tolerances are checked later so that their (larger)
    # tolerances take precedence.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertNotAllClose(self, a, b, **kwargs):
"""Assert that two numpy arrays, or Tensors, do not have near values.
Args:
a: the first value to compare.
b: the second value to compare.
**kwargs: additional keyword arguments to be passed to the underlying
`assertAllClose` call.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError("The two values are close at all elements")
@py_func_if_in_function
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %s. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = {}".format(x))
msgs.append("not equal rhs = {}".format(y))
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
    If limit == N, this method will print up to the first N subscripts on
    separate lines. A line of ellipses (...) will be appended at the end if
    the number of subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
@py_func_if_in_function
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
      raise AssertionError(
          "%d of the %d element(s) are NaN. "
          "Subscript(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound) if open_lower_bound else np.less(
target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
    if not isinstance(target, list):
      arrays = [target]
    else:
      arrays = target
    for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and returns True
(success) or False (please fail the test). Otherwise, the error message
is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" %
(str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = context.context().config
config.allow_soft_placement = allow_soft_placement
elif not allow_soft_placement and config.allow_soft_placement:
config_copy = context.context().config
config = config_copy
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
"PS" stands for "parameter server": a task responsible for storing and
updating the model's parameters. Other tasks send updates to these parameters
as they work on optimizing the parameters. This particular division of labor
between tasks is not required, but is common for distributed training.
Read more at https://www.tensorflow.org/guide/extend/architecture

Figure illustrates the interaction of these components.
"/job:worker/task:0" and "/job:ps/task:0" are both tasks with worker services.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in the
documentation of `tf.distribute.Server`.
worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be
used to instantiate multiple devices etc.
ps_config: (optional) `tf.ConfigProto` to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.distribute.Server` (all running
locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
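# A minimal usage sketch for get_node_def_from_graph (hedged: assumes the
# graph_pb2 protobuf module used by set_producer_version below is importable;
# the node name is hypothetical):
#
#   gdef = graph_pb2.GraphDef()
#   node = gdef.node.add()
#   node.name = "my_const"
#   node.op = "Const"
#   assert get_node_def_from_graph("my_const", gdef) is node
#   assert get_node_def_from_graph("missing", gdef) is None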
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
  assert graph.graph_def_versions.producer == producer_version
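# A minimal usage sketch for set_producer_version (hedged: assumes the
# tensorflow.python.framework.ops module imported by the full file; the
# version number is arbitrary):
#
#   g = ops.Graph()
#   set_producer_version(g, 24)
#   assert g.graph_def_versions.producer == 24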
cli.py
# encoding: utf-8
from __future__ import print_function
import collections
import csv
import multiprocessing as mp
import os
import datetime
import sys
from pprint import pprint
import re
import itertools
import json
import logging
from optparse import OptionConflictError
import traceback
from six import text_type
from six.moves import input, xrange
from six.moves.urllib.error import HTTPError
from six.moves.urllib.parse import urljoin, urlparse
from six.moves.urllib.request import urlopen
import sqlalchemy as sa
import routes
import paste.script
from paste.registry import Registry
from paste.script.util.logging_config import fileConfig
import click
from ckan.config.middleware import make_app
import ckan.logic as logic
import ckan.model as model
import ckan.include.rjsmin as rjsmin
import ckan.include.rcssmin as rcssmin
import ckan.plugins as p
from ckan.common import config
from ckan.common import asbool
# This is a test Flask request context to be used internally.
# Do not use it!
_cli_test_request_context = None
# NB No CKAN imports are allowed until after the config file is loaded.
# i.e. do the imports in methods, after _load_config is called.
# Otherwise loggers get disabled.
def deprecation_warning(message=None):
'''
Print a deprecation warning to STDERR.
If ``message`` is given it is also printed to STDERR.
'''
sys.stderr.write(u'WARNING: This function is deprecated.')
if message:
sys.stderr.write(u' ' + message.strip())
sys.stderr.write(u'\n')
def error(msg):
'''
    Print an error message to STDERR and exit with return code 1.
'''
sys.stderr.write(msg)
if not msg.endswith('\n'):
sys.stderr.write('\n')
sys.exit(1)
def _parse_db_config(config_key=u'sqlalchemy.url'):
db_config = model.parse_db_config(config_key)
if not db_config:
raise Exception(
u'Could not extract db details from url: %r' % config[config_key]
)
return db_config
def user_add(args):
'''Add new user if we use paster sysadmin add
or paster user add
'''
if len(args) < 1:
error('Error: you need to specify the user name.')
username = args[0]
# parse args into data_dict
data_dict = {'name': username}
for arg in args[1:]:
try:
field, value = arg.split('=', 1)
if field == 'sysadmin':
value = asbool(value)
data_dict[field] = value
except ValueError:
raise ValueError(
                'Could not parse arg: %r (expected "<option>=<value>")' % arg
)
# Required
while '@' not in data_dict.get('email', ''):
print('Error: Invalid email address')
data_dict['email'] = input('Email address: ').strip()
if 'password' not in data_dict:
data_dict['password'] = UserCmd.password_prompt()
# Optional
if 'fullname' in data_dict:
data_dict['fullname'] = data_dict['fullname'].decode(
sys.getfilesystemencoding()
)
print('Creating user: %r' % username)
try:
import ckan.logic as logic
import ckan.model as model
site_user = logic.get_action('get_site_user')({
'model': model,
'ignore_auth': True},
{}
)
context = {
'model': model,
'session': model.Session,
'ignore_auth': True,
'user': site_user['name'],
}
user_dict = logic.get_action('user_create')(context, data_dict)
pprint(user_dict)
except logic.ValidationError as e:
error(traceback.format_exc())
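# A minimal usage sketch for user_add (hedged: the user name, email and
# password are hypothetical); extra fields are passed as "key=value" strings:
#
#   user_add(['alice', 'email=alice@example.org', 'password=Password123',
#             'sysadmin=true'])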
## from http://code.activestate.com/recipes/577058/ MIT licence.
## Written by Trent Mick
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is one of "yes" or "no".
"""
valid = {"yes": "yes", "y": "yes", "ye": "yes",
"no": "no", "n": "no"}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while 1:
sys.stdout.write(question + prompt)
choice = input().strip().lower()
if default is not None and choice == '':
return default
elif choice in valid.keys():
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
class MockTranslator(object):
def gettext(self, value):
return value
def ugettext(self, value):
return value
def ungettext(self, singular, plural, n):
if n > 1:
return plural
return singular
def _get_config(config=None):
from paste.deploy import appconfig
if config:
filename = os.path.abspath(config)
config_source = '-c parameter'
elif os.environ.get('CKAN_INI'):
filename = os.environ.get('CKAN_INI')
config_source = '$CKAN_INI'
else:
default_filename = 'development.ini'
filename = os.path.join(os.getcwd(), default_filename)
if not os.path.exists(filename):
# give really clear error message for this common situation
msg = 'ERROR: You need to specify the CKAN config (.ini) '\
'file path.'\
'\nUse the --config parameter or set environment ' \
'variable CKAN_INI or have {}\nin the current directory.' \
.format(default_filename)
exit(msg)
if not os.path.exists(filename):
msg = 'Config file not found: %s' % filename
msg += '\n(Given by: %s)' % config_source
exit(msg)
fileConfig(filename)
return appconfig('config:' + filename)
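# A minimal usage sketch for _get_config (hedged: the .ini path is
# hypothetical); resolution order is the -c/--config path, then $CKAN_INI,
# then ./development.ini in the current directory:
#
#   conf = _get_config('/etc/ckan/default/production.ini')
#   print(conf['sqlalchemy.url'])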
def load_config(config, load_site_user=True):
conf = _get_config(config)
assert 'ckan' not in dir() # otherwise loggers would be disabled
# We have now loaded the config. Now we can import ckan for the
# first time.
from ckan.config.environment import load_environment
load_environment(conf.global_conf, conf.local_conf)
# Set this internal test request context with the configured environment so
# it can be used when calling url_for from the CLI.
global _cli_test_request_context
app = make_app(conf.global_conf, **conf.local_conf)
flask_app = app.apps['flask_app']._wsgi_app
_cli_test_request_context = flask_app.test_request_context()
registry = Registry()
registry.prepare()
import pylons
registry.register(pylons.translator, MockTranslator())
site_user = None
if model.user_table.exists() and load_site_user:
# If the DB has already been initialized, create and register
# a pylons context object, and add the site user to it, so the
# auth works as in a normal web request
c = pylons.util.AttribSafeContextObj()
registry.register(pylons.c, c)
site_user = logic.get_action('get_site_user')({'ignore_auth': True}, {})
pylons.c.user = site_user['name']
pylons.c.userobj = model.User.get(site_user['name'])
## give routes enough information to run url_for
parsed = urlparse(conf.get('ckan.site_url', 'http://0.0.0.0'))
request_config = routes.request_config()
request_config.host = parsed.netloc + parsed.path
request_config.protocol = parsed.scheme
return site_user
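# A minimal usage sketch for load_config (hedged: the .ini path is
# hypothetical); after the call the CKAN environment, model and translator
# registry are ready and the site user (if any) is returned:
#
#   site_user = load_config('/etc/ckan/default/development.ini')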
def paster_click_group(summary):
'''Return a paster command click.Group for paster subcommands
:param command: the paster command linked to this function from
setup.py, used in help text (e.g. "datastore")
:param summary: summary text used in paster's help/command listings
(e.g. "Perform commands to set up the datastore")
'''
class PasterClickGroup(click.Group):
'''A click.Group that may be called like a paster command'''
def __call__(self, ignored_command):
sys.argv.remove(ignored_command)
return super(PasterClickGroup, self).__call__(
prog_name=u'paster ' + ignored_command,
help_option_names=[u'-h', u'--help'],
obj={})
@click.group(cls=PasterClickGroup)
@click.option(
'--plugin',
metavar='ckan',
help='paster plugin (when run outside ckan directory)')
@click_config_option
@click.pass_context
def cli(ctx, plugin, config):
ctx.obj['config'] = config
cli.summary = summary
cli.group_name = u'ckan'
return cli
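# A minimal usage sketch for paster_click_group (hedged: the 'datastore'
# plugin name and 'set-permissions' subcommand are hypothetical):
#
#   datastore = paster_click_group(
#       summary=u'Perform commands to set up the datastore')
#
#   @datastore.command(u'set-permissions')
#   @click.pass_context
#   def set_permissions(ctx):
#       '''Emit an SQL script that sets up the required permissions.'''
#       load_config(ctx.obj['config'])
#       ...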
# common definition for paster ... --config
click_config_option = click.option(
'-c',
'--config',
default=None,
metavar='CONFIG',
help=u'Config file to use (default: development.ini)')
class CkanCommand(paste.script.command.Command):
'''Base class for classes that implement CKAN paster commands to inherit.'''
parser = paste.script.command.Command.standard_parser(verbose=True)
parser.add_option('-c', '--config', dest='config',
help='Config file to use.')
parser.add_option('-f', '--file',
action='store',
dest='file_path',
help="File to dump results to (if needed)")
default_verbosity = 1
group_name = 'ckan'
def _load_config(self, load_site_user=True):
self.site_user = load_config(self.options.config, load_site_user)
class ManageDb(CkanCommand):
'''Perform various tasks on the database.
db create - alias of db upgrade
db init - create and put in default data
db clean - clears db (including dropping tables) and
search index
db upgrade [version no.] - Data migrate
db version - returns current version of data schema
db create-from-model - create database from the model (indexes not made)
    db migrate-filestore - migrate all uploaded data from the 2.1 filestore.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 1
def command(self):
cmd = self.args[0]
self._load_config(cmd != 'upgrade')
import ckan.model as model
import ckan.lib.search as search
if cmd == 'init':
model.repo.init_db()
if self.verbose:
print('Initialising DB: SUCCESS')
elif cmd == 'clean' or cmd == 'drop':
# remove any *.pyc version files to prevent conflicts
v_path = os.path.join(os.path.dirname(__file__),
'..', 'migration', 'versions', '*.pyc')
import glob
filelist = glob.glob(v_path)
for f in filelist:
os.remove(f)
model.repo.clean_db()
search.clear_all()
if self.verbose:
print('Cleaning DB: SUCCESS')
elif cmd == 'upgrade':
model.repo.upgrade_db(*self.args[1:])
elif cmd == 'downgrade':
model.repo.downgrade_db(*self.args[1:])
elif cmd == 'version':
self.version()
elif cmd == 'create-from-model':
model.repo.create_db()
if self.verbose:
print('Creating DB: SUCCESS')
else:
error('Command %s not recognized' % cmd)
def version(self):
from ckan.model import Session
print(Session.execute('select version from '
'migrate_version;').fetchall())
class SearchIndexCommand(CkanCommand):
'''Creates a search index for all datasets
Usage:
search-index [-i] [-o] [-r] [-e] [-q] rebuild [dataset_name] - reindex dataset_name if given, if not then rebuild
full search index (all datasets)
search-index rebuild_fast - reindex using multiprocessing using all cores.
        This acts in the same way as rebuild -r [EXPERIMENTAL]
search-index check - checks for datasets not indexed
search-index show DATASET_NAME - shows index of a dataset
search-index clear [dataset_name] - clears the search index for the provided dataset or
for the whole ckan instance
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 2
min_args = 0
def __init__(self, name):
super(SearchIndexCommand, self).__init__(name)
self.parser.add_option('-i', '--force', dest='force',
action='store_true', default=False,
help='Ignore exceptions when rebuilding the index')
self.parser.add_option('-o', '--only-missing', dest='only_missing',
action='store_true', default=False,
help='Index non indexed datasets only')
self.parser.add_option('-r', '--refresh', dest='refresh',
action='store_true', default=False,
help='Refresh current index (does not clear the existing one)')
self.parser.add_option('-q', '--quiet', dest='quiet',
action='store_true', default=False,
help='Do not output index rebuild progress')
self.parser.add_option('-e', '--commit-each', dest='commit_each',
action='store_true', default=False, help=
'''Perform a commit after indexing each dataset. This ensures that changes are
immediately available on the search, but significantly slows down the process.
Default is false.''')
def command(self):
if not self.args:
# default to printing help
print(self.usage)
return
cmd = self.args[0]
# Do not run load_config yet
if cmd == 'rebuild_fast':
self.rebuild_fast()
return
self._load_config()
if cmd == 'rebuild':
self.rebuild()
elif cmd == 'check':
self.check()
elif cmd == 'show':
self.show()
elif cmd == 'clear':
self.clear()
else:
print('Command %s not recognized' % cmd)
def rebuild(self):
from ckan.lib.search import rebuild, commit
        # By default we don't commit after each request to Solr, as it is
# a really heavy operation and slows things a lot
if len(self.args) > 1:
rebuild(self.args[1])
else:
rebuild(only_missing=self.options.only_missing,
force=self.options.force,
refresh=self.options.refresh,
defer_commit=(not self.options.commit_each),
quiet=self.options.quiet)
if not self.options.commit_each:
commit()
def check(self):
from ckan.lib.search import check
check()
def show(self):
from ckan.lib.search import show
if not len(self.args) == 2:
print('Missing parameter: dataset-name')
return
index = show(self.args[1])
pprint(index)
def clear(self):
from ckan.lib.search import clear, clear_all
package_id = self.args[1] if len(self.args) > 1 else None
if not package_id:
clear_all()
else:
clear(package_id)
def rebuild_fast(self):
        ### Get our config but without starting pylons environment ####
conf = _get_config()
### Get ids using own engine, otherwise multiprocess will balk
db_url = conf['sqlalchemy.url']
engine = sa.create_engine(db_url)
package_ids = []
result = engine.execute("select id from package where state = 'active';")
for row in result:
package_ids.append(row[0])
def start(ids):
            ## load actual environment for each subprocess, so each has its own
            ## sa session
self._load_config()
from ckan.lib.search import rebuild, commit
rebuild(package_ids=ids)
commit()
def chunks(l, n):
""" Yield n successive chunks from l.
"""
newn = int(len(l) / n)
for i in xrange(0, n-1):
yield l[i*newn:i*newn+newn]
yield l[n*newn-newn:]
processes = []
for chunk in chunks(package_ids, mp.cpu_count()):
process = mp.Process(target=start, args=(chunk,))
processes.append(process)
process.daemon = True
process.start()
for process in processes:
process.join()
class Notification(CkanCommand):
'''Send out modification notifications.
In "replay" mode, an update signal is sent for each dataset in the database.
Usage:
notify replay - send out modification signals
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
def command(self):
self._load_config()
from ckan.model import Session, Package, DomainObjectOperation
from ckan.model.modification import DomainObjectModificationExtension
if not self.args:
# default to run
cmd = 'replay'
else:
cmd = self.args[0]
if cmd == 'replay':
dome = DomainObjectModificationExtension()
for package in Session.query(Package):
dome.notify(package, DomainObjectOperation.changed)
else:
print('Command %s not recognized' % cmd)
class RDFExport(CkanCommand):
'''Export active datasets as RDF
This command dumps out all currently active datasets as RDF into the
specified folder.
Usage:
paster rdf-export /path/to/store/output
'''
summary = __doc__.split('\n')[0]
usage = __doc__
def command(self):
self._load_config()
if not self.args:
# default to run
print(RDFExport.__doc__)
else:
self.export_datasets(self.args[0])
def export_datasets(self, out_folder):
'''
Export datasets as RDF to an output folder.
'''
from ckan.common import config
import ckan.model as model
import ckan.logic as logic
import ckan.lib.helpers as h
# Create output folder if not exists
if not os.path.isdir(out_folder):
os.makedirs(out_folder)
fetch_url = config['ckan.site_url']
user = logic.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})
context = {'model': model, 'session': model.Session, 'user': user['name']}
dataset_names = logic.get_action('package_list')(context, {})
for dataset_name in dataset_names:
dd = logic.get_action('package_show')(context, {'id': dataset_name})
if not dd['state'] == 'active':
continue
url = h.url_for('dataset.read', id=dd['name'])
url = urljoin(fetch_url, url[1:]) + '.rdf'
try:
fname = os.path.join(out_folder, dd['name']) + ".rdf"
try:
r = urlopen(url).read()
except HTTPError as e:
if e.code == 404:
error('Please install ckanext-dcat and enable the ' +
'`dcat` plugin to use the RDF serializations')
with open(fname, 'wb') as f:
f.write(r)
except IOError as ioe:
sys.stderr.write(str(ioe) + "\n")
class Sysadmin(CkanCommand):
'''Gives sysadmin rights to a named user
Usage:
sysadmin - lists sysadmins
sysadmin list - lists sysadmins
sysadmin add USERNAME - make an existing user into a sysadmin
sysadmin add USERNAME [FIELD1=VALUE1 FIELD2=VALUE2 ...]
- creates a new user that is a sysadmin
(prompts for password and email if not
supplied).
Field can be: apikey
email
fullname
name (this will be the username)
password
sysadmin remove USERNAME - removes user from sysadmins
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 0
def command(self):
self._load_config()
cmd = self.args[0] if self.args else None
if cmd is None or cmd == 'list':
self.list()
elif cmd == 'add':
self.add()
elif cmd == 'remove':
self.remove()
else:
print('Command %s not recognized' % cmd)
def list(self):
import ckan.model as model
print('Sysadmins:')
sysadmins = model.Session.query(model.User).filter_by(sysadmin=True,
state='active')
print('count = %i' % sysadmins.count())
for sysadmin in sysadmins:
print('%s name=%s email=%s id=%s' % (
sysadmin.__class__.__name__,
sysadmin.name,
sysadmin.email,
sysadmin.id))
def add(self):
import ckan.model as model
if len(self.args) < 2:
print('Need name of the user to be made sysadmin.')
return
username = self.args[1]
user = model.User.by_name(text_type(username))
if not user:
print('User "%s" not found' % username)
makeuser = input('Create new user: %s? [y/n]' % username)
if makeuser == 'y':
user_add(self.args[1:])
user = model.User.by_name(text_type(username))
else:
print('Exiting ...')
return
user.sysadmin = True
model.Session.add(user)
model.repo.commit_and_remove()
print('Added %s as sysadmin' % username)
def remove(self):
import ckan.model as model
if len(self.args) < 2:
            print('Need name of the user to be removed from the sysadmins.')
return
username = self.args[1]
user = model.User.by_name(text_type(username))
if not user:
print('Error: user "%s" not found!' % username)
return
user.sysadmin = False
model.repo.commit_and_remove()
class UserCmd(CkanCommand):
'''Manage users
Usage:
user - lists users
user list - lists users
user USERNAME - shows user properties
user add USERNAME [FIELD1=VALUE1 FIELD2=VALUE2 ...]
- add a user (prompts for email and
password if not supplied).
Field can be: apikey
email
fullname
name (this will be the username)
password
user setpass USERNAME - set user password (prompts)
user remove USERNAME - removes user from users
user search QUERY - searches for a user name
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 0
def command(self):
self._load_config()
if not self.args:
self.list()
else:
cmd = self.args[0]
if cmd == 'add':
self.add()
elif cmd == 'remove':
self.remove()
elif cmd == 'search':
self.search()
elif cmd == 'setpass':
self.setpass()
elif cmd == 'list':
self.list()
else:
self.show()
def get_user_str(self, user):
user_str = 'name=%s' % user.name
if user.name != user.display_name:
user_str += ' display=%s' % user.display_name
return user_str
def list(self):
import ckan.model as model
print('Users:')
users = model.Session.query(model.User).filter_by(state='active')
print('count = %i' % users.count())
for user in users:
print(self.get_user_str(user))
def show(self):
import ckan.model as model
username = self.args[0]
user = model.User.get(text_type(username))
print('User: \n', user)
def setpass(self):
import ckan.model as model
if len(self.args) < 2:
print('Need name of the user.')
return
username = self.args[1]
user = model.User.get(username)
print('Editing user: %r' % user.name)
password = self.password_prompt()
user.password = password
model.repo.commit_and_remove()
print('Done')
def search(self):
import ckan.model as model
if len(self.args) < 2:
print('Need user name query string.')
return
query_str = self.args[1]
query = model.User.search(query_str)
print('%i users matching %r:' % (query.count(), query_str))
for user in query.all():
print(self.get_user_str(user))
@classmethod
def password_prompt(cls):
import getpass
password1 = None
while not password1:
password1 = getpass.getpass('Password: ')
password2 = getpass.getpass('Confirm password: ')
if password1 != password2:
error('Passwords do not match')
return password1
def add(self):
user_add(self.args[1:])
def remove(self):
import ckan.model as model
if len(self.args) < 2:
print('Need name of the user.')
return
username = self.args[1]
p.toolkit.get_action('user_delete')(
{'model': model, 'ignore_auth': True},
{'id': username})
print('Deleted user: %s' % username)
class DatasetCmd(CkanCommand):
'''Manage datasets
Usage:
dataset DATASET_NAME|ID - shows dataset properties
dataset show DATASET_NAME|ID - shows dataset properties
dataset list - lists datasets
dataset delete [DATASET_NAME|ID] - changes dataset state to 'deleted'
dataset purge [DATASET_NAME|ID] - removes dataset from db entirely
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 3
min_args = 0
def command(self):
self._load_config()
if not self.args:
print(self.usage)
else:
cmd = self.args[0]
if cmd == 'delete':
self.delete(self.args[1])
elif cmd == 'purge':
self.purge(self.args[1])
elif cmd == 'list':
self.list()
elif cmd == 'show':
self.show(self.args[1])
else:
self.show(self.args[0])
def list(self):
import ckan.model as model
print('Datasets:')
datasets = model.Session.query(model.Package)
print('count = %i' % datasets.count())
for dataset in datasets:
state = ('(%s)' % dataset.state) if dataset.state != 'active' else ''
print('%s %s %s' % (dataset.id, dataset.name, state))
def _get_dataset(self, dataset_ref):
import ckan.model as model
dataset = model.Package.get(text_type(dataset_ref))
assert dataset, 'Could not find dataset matching reference: %r' % dataset_ref
return dataset
def show(self, dataset_ref):
import pprint
dataset = self._get_dataset(dataset_ref)
pprint.pprint(dataset.as_dict())
def delete(self, dataset_ref):
import ckan.model as model
dataset = self._get_dataset(dataset_ref)
old_state = dataset.state
dataset.delete()
model.repo.commit_and_remove()
dataset = self._get_dataset(dataset_ref)
print('%s %s -> %s' % (dataset.name, old_state, dataset.state))
def purge(self, dataset_ref):
import ckan.logic as logic
dataset = self._get_dataset(dataset_ref)
name = dataset.name
site_user = logic.get_action('get_site_user')({'ignore_auth': True}, {})
context = {'user': site_user['name']}
logic.get_action('dataset_purge')(
context, {'id': dataset_ref})
print('%s purged' % name)
class Ratings(CkanCommand):
'''Manage the ratings stored in the db
Usage:
ratings count - counts ratings
ratings clean - remove all ratings
ratings clean-anonymous - remove only anonymous ratings
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
cmd = self.args[0]
if cmd == 'count':
self.count()
elif cmd == 'clean':
self.clean()
elif cmd == 'clean-anonymous':
self.clean(user_ratings=False)
else:
print('Command %s not recognized' % cmd)
def count(self):
import ckan.model as model
q = model.Session.query(model.Rating)
print("%i ratings" % q.count())
        q = q.filter(model.Rating.user_id.is_(None))
print("of which %i are anonymous ratings" % q.count())
def clean(self, user_ratings=True):
import ckan.model as model
q = model.Session.query(model.Rating)
print("%i ratings" % q.count())
if not user_ratings:
            q = q.filter(model.Rating.user_id.is_(None))
print("of which %i are anonymous ratings" % q.count())
ratings = q.all()
for rating in ratings:
rating.purge()
model.repo.commit_and_remove()
## Used by the Tracking class
_ViewCount = collections.namedtuple("ViewCount", "id name count")
class Tracking(CkanCommand):
'''Update tracking statistics
Usage:
tracking update [start_date] - update tracking stats
tracking export FILE [start_date] - export tracking stats to a csv file
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 3
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
engine = model.meta.engine
cmd = self.args[0]
if cmd == 'update':
start_date = self.args[1] if len(self.args) > 1 else None
self.update_all(engine, start_date)
elif cmd == 'export':
if len(self.args) <= 1:
error(self.__class__.__doc__)
output_file = self.args[1]
start_date = self.args[2] if len(self.args) > 2 else None
self.update_all(engine, start_date)
self.export_tracking(engine, output_file)
else:
error(self.__class__.__doc__)
def update_all(self, engine, start_date=None):
if start_date:
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
else:
# No date given. See when we last have data for and get data
# from 2 days before then in case new data is available.
# If no date here then use 2011-01-01 as the start date
sql = '''SELECT tracking_date from tracking_summary
ORDER BY tracking_date DESC LIMIT 1;'''
result = engine.execute(sql).fetchall()
if result:
start_date = result[0]['tracking_date']
start_date += datetime.timedelta(-2)
# convert date to datetime
combine = datetime.datetime.combine
start_date = combine(start_date, datetime.time(0))
else:
start_date = datetime.datetime(2011, 1, 1)
start_date_solrsync = start_date
end_date = datetime.datetime.now()
while start_date < end_date:
stop_date = start_date + datetime.timedelta(1)
self.update_tracking(engine, start_date)
print('tracking updated for %s' % start_date)
start_date = stop_date
self.update_tracking_solr(engine, start_date_solrsync)
def _total_views(self, engine):
sql = '''
SELECT p.id,
p.name,
COALESCE(SUM(s.count), 0) AS total_views
FROM package AS p
LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
GROUP BY p.id, p.name
ORDER BY total_views DESC
'''
return [_ViewCount(*t) for t in engine.execute(sql).fetchall()]
def _recent_views(self, engine, measure_from):
sql = '''
SELECT p.id,
p.name,
COALESCE(SUM(s.count), 0) AS total_views
FROM package AS p
LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
WHERE s.tracking_date >= %(measure_from)s
GROUP BY p.id, p.name
ORDER BY total_views DESC
'''
return [_ViewCount(*t) for t in engine.execute(sql, measure_from=str(measure_from)).fetchall()]
def export_tracking(self, engine, output_filename):
'''Write tracking summary to a csv file.'''
HEADINGS = [
"dataset id",
"dataset name",
"total views",
"recent views (last 2 weeks)",
]
measure_from = datetime.date.today() - datetime.timedelta(days=14)
recent_views = self._recent_views(engine, measure_from)
total_views = self._total_views(engine)
with open(output_filename, 'w') as fh:
f_out = csv.writer(fh)
f_out.writerow(HEADINGS)
recent_views_for_id = dict((r.id, r.count) for r in recent_views)
f_out.writerows([(r.id,
r.name,
r.count,
recent_views_for_id.get(r.id, 0))
for r in total_views])
def update_tracking(self, engine, summary_date):
PACKAGE_URL = '/dataset/'
# clear out existing data before adding new
sql = '''DELETE FROM tracking_summary
WHERE tracking_date='%s'; ''' % summary_date
engine.execute(sql)
sql = '''SELECT DISTINCT url, user_key,
CAST(access_timestamp AS Date) AS tracking_date,
tracking_type INTO tracking_tmp
FROM tracking_raw
WHERE CAST(access_timestamp as Date)=%s;
INSERT INTO tracking_summary
(url, count, tracking_date, tracking_type)
SELECT url, count(user_key), tracking_date, tracking_type
FROM tracking_tmp
GROUP BY url, tracking_date, tracking_type;
DROP TABLE tracking_tmp;
COMMIT;'''
engine.execute(sql, summary_date)
# get ids for dataset urls
sql = '''UPDATE tracking_summary t
SET package_id = COALESCE(
(SELECT id FROM package p
WHERE p.name = regexp_replace(' ' || t.url, '^[ ]{1}(/\w{2}){0,1}' || %s, ''))
,'~~not~found~~')
WHERE t.package_id IS NULL
AND tracking_type = 'page';'''
engine.execute(sql, PACKAGE_URL)
# update summary totals for resources
sql = '''UPDATE tracking_summary t1
SET running_total = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.url = t2.url
AND t2.tracking_date <= t1.tracking_date
)
,recent_views = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.url = t2.url
AND t2.tracking_date <= t1.tracking_date AND t2.tracking_date >= t1.tracking_date - 14
)
WHERE t1.running_total = 0 AND tracking_type = 'resource';'''
engine.execute(sql)
# update summary totals for pages
sql = '''UPDATE tracking_summary t1
SET running_total = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.package_id = t2.package_id
AND t2.tracking_date <= t1.tracking_date
)
,recent_views = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.package_id = t2.package_id
AND t2.tracking_date <= t1.tracking_date AND t2.tracking_date >= t1.tracking_date - 14
)
WHERE t1.running_total = 0 AND tracking_type = 'page'
AND t1.package_id IS NOT NULL
AND t1.package_id != '~~not~found~~';'''
engine.execute(sql)
def update_tracking_solr(self, engine, start_date):
sql = '''SELECT package_id FROM tracking_summary
where package_id!='~~not~found~~'
and tracking_date >= %s;'''
results = engine.execute(sql, start_date)
package_ids = set()
for row in results:
package_ids.add(row['package_id'])
total = len(package_ids)
not_found = 0
print('%i package index%s to be rebuilt starting from %s' % (total, '' if total < 2 else 'es', start_date))
from ckan.lib.search import rebuild
for package_id in package_ids:
try:
rebuild(package_id)
except logic.NotFound:
print("Error: package %s not found." % (package_id))
not_found += 1
except KeyboardInterrupt:
print("Stopped.")
return
except:
raise
print('search index rebuilding done.' + (' %i not found.' % (not_found) if not_found else ""))
class PluginInfo(CkanCommand):
'''Provide info on installed plugins.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 0
min_args = 0
def command(self):
self.get_info()
def get_info(self):
''' print info about current plugins from the .ini file'''
import ckan.plugins as p
self._load_config()
interfaces = {}
plugins = {}
for name in dir(p):
item = getattr(p, name)
try:
if issubclass(item, p.Interface):
interfaces[item] = {'class': item}
except TypeError:
pass
for interface in interfaces:
for plugin in p.PluginImplementations(interface):
name = plugin.name
if name not in plugins:
plugins[name] = {'doc': plugin.__doc__,
'class': plugin,
'implements': []}
plugins[name]['implements'].append(interface.__name__)
for plugin in plugins:
p = plugins[plugin]
print(plugin + ':')
print('-' * (len(plugin) + 1))
if p['doc']:
print(p['doc'])
print('Implements:')
for i in p['implements']:
extra = None
if i == 'ITemplateHelpers':
extra = self.template_helpers(p['class'])
if i == 'IActions':
extra = self.actions(p['class'])
print(' %s' % i)
if extra:
print(extra)
print()
def actions(self, cls):
''' Return readable action function info. '''
actions = cls.get_actions()
return self.function_info(actions)
def template_helpers(self, cls):
''' Return readable helper function info. '''
helpers = cls.get_helpers()
return self.function_info(helpers)
def function_info(self, functions):
''' Take a dict of functions and output readable info '''
import inspect
output = []
for function_name in functions:
fn = functions[function_name]
args_info = inspect.getargspec(fn)
params = args_info.args
num_params = len(params)
if args_info.varargs:
params.append('*' + args_info.varargs)
if args_info.keywords:
params.append('**' + args_info.keywords)
if args_info.defaults:
offset = num_params - len(args_info.defaults)
for i, v in enumerate(args_info.defaults):
params[i + offset] = params[i + offset] + '=' + repr(v)
            # if this is a classmethod, remove the first (cls) parameter
if inspect.ismethod(fn) and inspect.isclass(fn.__self__):
params = params[1:]
params = ', '.join(params)
output.append(' %s(%s)' % (function_name, params))
# doc string
if fn.__doc__:
bits = fn.__doc__.split('\n')
for bit in bits:
output.append(' %s' % bit)
return ('\n').join(output)
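    # A minimal usage sketch for function_info (hedged: my_helper is a
    # hypothetical helper function); the output is an indented signature line
    # followed by the indented docstring:
    #
    #   def my_helper(value, default=None):
    #       '''Return value or default.'''
    #       return value or default
    #
    #   print(self.function_info({'my_helper': my_helper}))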
class CreateTestDataCommand(CkanCommand):
'''Create test data in the database.
Tests can also delete the created objects easily with the delete() method.
create-test-data - annakarenina and warandpeace
create-test-data search - realistic data to test search
create-test-data gov - government style data
create-test-data family - package relationships data
create-test-data user - create a user 'tester' with api key 'tester'
create-test-data translations - annakarenina, warandpeace, and some test
translations of terms
    create-test-data vocabs - annakarenina, warandpeace, and some test
vocabularies
create-test-data hierarchy - hierarchy of groups
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
def command(self):
self._load_config()
from ckan.lib.create_test_data import CreateTestData
if self.args:
cmd = self.args[0]
else:
cmd = 'basic'
if self.verbose:
print('Creating %s test data' % cmd)
if cmd == 'basic':
CreateTestData.create_basic_test_data()
elif cmd == 'user':
CreateTestData.create_test_user()
print('Created user %r with password %r and apikey %r' %
('tester', 'tester', 'tester'))
elif cmd == 'search':
CreateTestData.create_search_test_data()
elif cmd == 'gov':
CreateTestData.create_gov_test_data()
elif cmd == 'family':
CreateTestData.create_family_test_data()
elif cmd == 'translations':
CreateTestData.create_translations_test_data()
elif cmd == 'vocabs':
CreateTestData.create_vocabs_test_data()
elif cmd == 'hierarchy':
CreateTestData.create_group_hierarchy_test_data()
else:
print('Command %s not recognized' % cmd)
raise NotImplementedError
if self.verbose:
print('Creating %s test data: Complete!' % cmd)
class Profile(CkanCommand):
'''Code speed profiler
Provide a ckan url and it will make the request and record
how long each function call took in a file that can be read
by pstats.Stats (command-line) or runsnakerun (gui).
Usage:
profile URL [username]
e.g. profile /data/search
    The result is saved in ckan.data.search.profile
To view the profile in runsnakerun:
runsnakerun ckan.data.search.profile
    Uses the cProfile module from the Python standard library.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 2
min_args = 1
def _load_config_into_test_app(self):
from paste.deploy import loadapp
import paste.fixture
if not self.options.config:
msg = 'No config file supplied'
raise self.BadCommand(msg)
self.filename = os.path.abspath(self.options.config)
if not os.path.exists(self.filename):
raise AssertionError('Config filename %r does not exist.' % self.filename)
fileConfig(self.filename)
wsgiapp = loadapp('config:' + self.filename)
self.app = paste.fixture.TestApp(wsgiapp)
def command(self):
self._load_config_into_test_app()
import paste.fixture
import cProfile
import re
url = self.args[0]
if self.args[1:]:
user = self.args[1]
else:
user = 'visitor'
def profile_url(url):
try:
res = self.app.get(url, status=[200],
extra_environ={'REMOTE_USER': user})
except paste.fixture.AppError:
print('App error: ', url.strip())
except KeyboardInterrupt:
raise
except Exception:
error(traceback.format_exc())
output_filename = 'ckan%s.profile' % re.sub('[/?]', '.', url.replace('/', '.'))
profile_command = "profile_url('%s')" % url
cProfile.runctx(profile_command, globals(), locals(), filename=output_filename)
import pstats
stats = pstats.Stats(output_filename)
stats.sort_stats('cumulative')
stats.print_stats(0.1) # show only top 10% of lines
print('Only top 10% of lines shown')
print('Written profile to: %s' % output_filename)
class CreateColorSchemeCommand(CkanCommand):
'''Create or remove a color scheme.
After running this, you'll need to regenerate the css files. See paster's less command for details.
color - creates a random color scheme
color clear - clears any color scheme
color <'HEX'> - uses as base color eg '#ff00ff' must be quoted.
color <VALUE> - a float between 0.0 and 1.0 used as base hue
color <COLOR_NAME> - html color name used for base color eg lightblue
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
rules = [
'@layoutLinkColor',
'@mastheadBackgroundColor',
'@btnPrimaryBackground',
'@btnPrimaryBackgroundHighlight',
]
# list of predefined colors
color_list = {
        'aliceblue': '#f0f8ff',
'antiquewhite': '#faebd7',
'aqua': '#00ffff',
'aquamarine': '#7fffd4',
'azure': '#f0ffff',
'beige': '#f5f5dc',
'bisque': '#ffe4c4',
'black': '#000000',
'blanchedalmond': '#ffebcd',
'blue': '#0000ff',
'blueviolet': '#8a2be2',
'brown': '#a52a2a',
'burlywood': '#deb887',
'cadetblue': '#5f9ea0',
'chartreuse': '#7fff00',
'chocolate': '#d2691e',
'coral': '#ff7f50',
'cornflowerblue': '#6495ed',
'cornsilk': '#fff8dc',
'crimson': '#dc143c',
'cyan': '#00ffff',
'darkblue': '#00008b',
'darkcyan': '#008b8b',
'darkgoldenrod': '#b8860b',
'darkgray': '#a9a9a9',
'darkgrey': '#a9a9a9',
'darkgreen': '#006400',
'darkkhaki': '#bdb76b',
'darkmagenta': '#8b008b',
'darkolivegreen': '#556b2f',
'darkorange': '#ff8c00',
'darkorchid': '#9932cc',
'darkred': '#8b0000',
'darksalmon': '#e9967a',
'darkseagreen': '#8fbc8f',
'darkslateblue': '#483d8b',
'darkslategray': '#2f4f4f',
'darkslategrey': '#2f4f4f',
'darkturquoise': '#00ced1',
'darkviolet': '#9400d3',
'deeppink': '#ff1493',
'deepskyblue': '#00bfff',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1e90ff',
'firebrick': '#b22222',
'floralwhite': '#fffaf0',
'forestgreen': '#228b22',
'fuchsia': '#ff00ff',
'gainsboro': '#dcdcdc',
'ghostwhite': '#f8f8ff',
'gold': '#ffd700',
'goldenrod': '#daa520',
'gray': '#808080',
'grey': '#808080',
'green': '#008000',
'greenyellow': '#adff2f',
'honeydew': '#f0fff0',
'hotpink': '#ff69b4',
        'indianred': '#cd5c5c',
        'indigo': '#4b0082',
'ivory': '#fffff0',
'khaki': '#f0e68c',
'lavender': '#e6e6fa',
'lavenderblush': '#fff0f5',
'lawngreen': '#7cfc00',
'lemonchiffon': '#fffacd',
'lightblue': '#add8e6',
'lightcoral': '#f08080',
'lightcyan': '#e0ffff',
'lightgoldenrodyellow': '#fafad2',
'lightgray': '#d3d3d3',
'lightgrey': '#d3d3d3',
'lightgreen': '#90ee90',
'lightpink': '#ffb6c1',
'lightsalmon': '#ffa07a',
'lightseagreen': '#20b2aa',
'lightskyblue': '#87cefa',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#b0c4de',
'lightyellow': '#ffffe0',
'lime': '#00ff00',
'limegreen': '#32cd32',
'linen': '#faf0e6',
'magenta': '#ff00ff',
'maroon': '#800000',
'mediumaquamarine': '#66cdaa',
'mediumblue': '#0000cd',
'mediumorchid': '#ba55d3',
'mediumpurple': '#9370d8',
'mediumseagreen': '#3cb371',
'mediumslateblue': '#7b68ee',
'mediumspringgreen': '#00fa9a',
'mediumturquoise': '#48d1cc',
'mediumvioletred': '#c71585',
'midnightblue': '#191970',
'mintcream': '#f5fffa',
'mistyrose': '#ffe4e1',
'moccasin': '#ffe4b5',
'navajowhite': '#ffdead',
'navy': '#000080',
'oldlace': '#fdf5e6',
'olive': '#808000',
'olivedrab': '#6b8e23',
'orange': '#ffa500',
'orangered': '#ff4500',
'orchid': '#da70d6',
'palegoldenrod': '#eee8aa',
'palegreen': '#98fb98',
'paleturquoise': '#afeeee',
'palevioletred': '#d87093',
'papayawhip': '#ffefd5',
'peachpuff': '#ffdab9',
'peru': '#cd853f',
'pink': '#ffc0cb',
'plum': '#dda0dd',
'powderblue': '#b0e0e6',
'purple': '#800080',
'red': '#ff0000',
'rosybrown': '#bc8f8f',
'royalblue': '#4169e1',
'saddlebrown': '#8b4513',
'salmon': '#fa8072',
'sandybrown': '#f4a460',
'seagreen': '#2e8b57',
'seashell': '#fff5ee',
'sienna': '#a0522d',
'silver': '#c0c0c0',
'skyblue': '#87ceeb',
'slateblue': '#6a5acd',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#fffafa',
'springgreen': '#00ff7f',
'steelblue': '#4682b4',
'tan': '#d2b48c',
'teal': '#008080',
'thistle': '#d8bfd8',
'tomato': '#ff6347',
'turquoise': '#40e0d0',
'violet': '#ee82ee',
'wheat': '#f5deb3',
'white': '#ffffff',
'whitesmoke': '#f5f5f5',
'yellow': '#ffff00',
'yellowgreen': '#9acd32',
}
    def create_colors(self, hue, num_colors=5, saturation=None, lightness=None):
        ''' Create n related colours '''
if saturation is None:
saturation = 0.9
if lightness is None:
lightness = 40
else:
lightness *= 100
import math
saturation -= math.trunc(saturation)
print(hue, saturation)
import colorsys
colors = []
for i in xrange(num_colors):
ix = i * (1.0/num_colors)
_lightness = (lightness + (ix * 40))/100.
if _lightness > 1.0:
_lightness = 1.0
color = colorsys.hls_to_rgb(hue, _lightness, saturation)
hex_color = '#'
for part in color:
hex_color += '%02x' % int(part * 255)
# check and remove any bad values
if not re.match('^\#[0-9a-f]{6}$', hex_color):
hex_color = '#FFFFFF'
colors.append(hex_color)
return colors
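    # A minimal usage sketch for create_colors (hedged: the hue value is
    # arbitrary); returns num_colors '#rrggbb' strings that share the hue and
    # increase in lightness:
    #
    #   colors = self.create_colors(0.6)
    #   assert len(colors) == 5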
def command(self):
hue = None
saturation = None
lightness = None
public = config.get(u'ckan.base_public_folder')
path = os.path.dirname(__file__)
path = os.path.join(path, '..', public, 'base', 'less', 'custom.less')
if self.args:
arg = self.args[0]
rgb = None
if arg == 'clear':
os.remove(path)
print('custom colors removed.')
elif arg.startswith('#'):
color = arg[1:]
if len(color) == 3:
rgb = [int(x, 16) * 16 for x in color]
elif len(color) == 6:
rgb = [int(x, 16) for x in re.findall('..', color)]
else:
print('ERROR: invalid color')
elif arg.lower() in self.color_list:
color = self.color_list[arg.lower()][1:]
rgb = [int(x, 16) for x in re.findall('..', color)]
else:
try:
hue = float(self.args[0])
except ValueError:
print('ERROR argument `%s` not recognised' % arg)
if rgb:
import colorsys
hue, lightness, saturation = colorsys.rgb_to_hls(*rgb)
lightness = lightness / 340
# deal with greys
if not (hue == 0.0 and saturation == 0.0):
saturation = None
else:
import random
hue = random.random()
if hue is not None:
f = open(path, 'w')
colors = self.create_colors(hue, saturation=saturation, lightness=lightness)
for i in xrange(len(self.rules)):
f.write('%s: %s;\n' % (self.rules[i], colors[i]))
print('%s: %s;\n' % (self.rules[i], colors[i]))
            f.close()
print('Color scheme has been created.')
print('Make sure less is run for changes to take effect.')
class TranslationsCommand(CkanCommand):
'''Translation helper functions
trans js - generate the javascript translations
trans mangle - mangle the zh_TW translations for testing
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def command(self):
self._load_config()
from ckan.common import config
from ckan.lib.i18n import build_js_translations
ckan_path = os.path.join(os.path.dirname(__file__), '..')
self.i18n_path = config.get('ckan.i18n_directory',
os.path.join(ckan_path, 'i18n'))
command = self.args[0]
if command == 'mangle':
self.mangle_po()
elif command == 'js':
build_js_translations()
else:
print('command not recognised')
def mangle_po(self):
''' This will mangle the zh_TW translations for translation coverage
testing.
        NOTE: This will destroy the current translations for zh_TW
'''
import polib
pot_path = os.path.join(self.i18n_path, 'ckan.pot')
po = polib.pofile(pot_path)
# we don't want to mangle the following items in strings
# %(...)s %s %0.3f %1$s %2$0.3f [1:...] {...} etc
# sprintf bit after %
spf_reg_ex = "\+?(0|'.)?-?\d*(.\d*)?[\%bcdeufosxX]"
extract_reg_ex = '(\%\([^\)]*\)' + spf_reg_ex + \
'|\[\d*\:[^\]]*\]' + \
'|\{[^\}]*\}' + \
'|<[^>}]*>' + \
'|\%((\d)*\$)?' + spf_reg_ex + ')'
for entry in po:
msg = entry.msgid.encode('utf-8')
matches = re.finditer(extract_reg_ex, msg)
length = len(msg)
position = 0
translation = u''
for match in matches:
translation += '-' * (match.start() - position)
position = match.end()
translation += match.group(0)
translation += '-' * (length - position)
entry.msgstr = translation
out_dir = os.path.join(self.i18n_path, 'zh_TW', 'LC_MESSAGES')
try:
os.makedirs(out_dir)
except OSError:
pass
po.metadata['Plural-Forms'] = "nplurals=1; plural=0\n"
out_po = os.path.join(out_dir, 'ckan.po')
out_mo = os.path.join(out_dir, 'ckan.mo')
po.save(out_po)
po.save_as_mofile(out_mo)
print('zh_TW has been mangled')
class MinifyCommand(CkanCommand):
'''Create minified versions of the given Javascript and CSS files.
Usage:
paster minify [--clean] PATH
for example:
paster minify ckan/public/base
paster minify ckan/public/base/css/*.css
paster minify ckan/public/base/css/red.css
if the --clean option is provided any minified files will be removed.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 1
exclude_dirs = ['vendor']
def __init__(self, name):
super(MinifyCommand, self).__init__(name)
self.parser.add_option('--clean', dest='clean',
action='store_true', default=False,
help='remove any minified files in the path')
def command(self):
clean = getattr(self.options, 'clean', False)
self._load_config()
for base_path in self.args:
if os.path.isfile(base_path):
if clean:
self.clear_minifyed(base_path)
else:
self.minify_file(base_path)
elif os.path.isdir(base_path):
for root, dirs, files in os.walk(base_path):
dirs[:] = [d for d in dirs if not d in self.exclude_dirs]
for filename in files:
path = os.path.join(root, filename)
if clean:
self.clear_minifyed(path)
else:
self.minify_file(path)
else:
                # Path is neither a file nor a dir, so skip it.
continue
def clear_minifyed(self, path):
path_only, extension = os.path.splitext(path)
if extension not in ('.css', '.js'):
# This is not a js or css file.
return
if path_only.endswith('.min'):
print('removing %s' % path)
os.remove(path)
def minify_file(self, path):
'''Create the minified version of the given file.
If the file is not a .js or .css file (e.g. it's a .min.js or .min.css
file, or it's some other type of file entirely) it will not be
        minified.
:param path: The path to the .js or .css file to minify
'''
import ckan.lib.fanstatic_resources as fanstatic_resources
path_only, extension = os.path.splitext(path)
if path_only.endswith('.min'):
# This is already a minified file.
return
if extension not in ('.css', '.js'):
# This is not a js or css file.
return
path_min = fanstatic_resources.min_path(path)
source = open(path, 'r').read()
f = open(path_min, 'w')
if path.endswith('.css'):
f.write(rcssmin.cssmin(source))
elif path.endswith('.js'):
f.write(rjsmin.jsmin(source))
f.close()
print("Minified file '{0}'".format(path))
class LessCommand(CkanCommand):
'''Compile all root less documents into their CSS counterparts
Usage:
paster less
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 0
def command(self):
self._load_config()
self.less()
custom_css = {
'fuchsia': '''
@layoutLinkColor: #E73892;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'green': '''
@layoutLinkColor: #2F9B45;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'red': '''
@layoutLinkColor: #C14531;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'maroon': '''
@layoutLinkColor: #810606;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
}
def less(self):
''' Compile less files '''
import subprocess
command = ('npm', 'bin')
process = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
output = process.communicate()
directory = output[0].strip()
if not directory:
            error('Command "{}" returned nothing. Check that npm is '
                  'installed.'.format(' '.join(command)))
less_bin = os.path.join(directory, 'lessc')
public = config.get(u'ckan.base_public_folder')
root = os.path.join(os.path.dirname(__file__), '..', public, 'base')
root = os.path.abspath(root)
custom_less = os.path.join(root, 'less', 'custom.less')
for color in self.custom_css:
f = open(custom_less, 'w')
f.write(self.custom_css[color])
f.close()
self.compile_less(root, less_bin, color)
f = open(custom_less, 'w')
f.write('// This file is needed in order for `gulp build` to compile in less 1.3.1+\n')
f.close()
self.compile_less(root, less_bin, 'main')
def compile_less(self, root, less_bin, color):
print('compile %s.css' % color)
import subprocess
main_less = os.path.join(root, 'less', 'main.less')
main_css = os.path.join(root, 'css', '%s.css' % color)
command = (less_bin, main_less, main_css)
process = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
output = process.communicate()
print(output)
class FrontEndBuildCommand(CkanCommand):
'''Creates and minifies css and JavaScript files
Usage:
paster front-end-build
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 0
def command(self):
self._load_config()
# Less css
cmd = LessCommand('less')
cmd.options = self.options
cmd.command()
# js translation strings
cmd = TranslationsCommand('trans')
cmd.options = self.options
cmd.args = ('js',)
cmd.command()
# minification
cmd = MinifyCommand('minify')
cmd.options = self.options
public = config.get(u'ckan.base_public_folder')
root = os.path.join(os.path.dirname(__file__), '..', public, 'base')
root = os.path.abspath(root)
ckanext = os.path.join(os.path.dirname(__file__), '..', '..', 'ckanext')
ckanext = os.path.abspath(ckanext)
cmd.args = (root, ckanext)
cmd.command()
class ViewsCommand(CkanCommand):
'''Manage resource views.
Usage:
paster views create [options] [type1] [type2] ...
Create views on relevant resources. You can optionally provide
specific view types (eg `recline_view`, `image_view`). If no types
are provided, the default ones will be used. These are generally
the ones defined in the `ckan.views.default_views` config option.
        Note that in either case, plugins must be loaded (ie added to
        `ckan.plugins`), otherwise the command will stop.
paster views clear [options] [type1] [type2] ...
Permanently delete all views or the ones with the provided types.
paster views clean
Permanently delete views for all types no longer present in the
`ckan.plugins` configuration option.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 1
def __init__(self, name):
super(ViewsCommand, self).__init__(name)
self.parser.add_option('-y', '--yes', dest='assume_yes',
action='store_true',
default=False,
help='''Automatic yes to prompts. Assume "yes"
as answer to all prompts and run non-interactively''')
self.parser.add_option('-d', '--dataset', dest='dataset_id',
action='append',
help='''Create views on a particular dataset.
You can use the dataset id or name, and it can be defined multiple times.''')
self.parser.add_option('--no-default-filters',
dest='no_default_filters',
action='store_true',
default=False,
help='''Do not add default filters for relevant
resource formats for the view types provided. Note that filters are not added
by default anyway if an unsupported view type is provided or when using the
`-s` or `-d` options.''')
self.parser.add_option('-s', '--search', dest='search_params',
action='store',
default=False,
help='''Extra search parameters that will be
used for getting the datasets to create the resource views on. It must be a
JSON object like the one used by the `package_search` API call. Supported
fields are `q`, `fq` and `fq_list`. Check the documentation for examples.
Not used when using the `-d` option.''')
def command(self):
self._load_config()
if not self.args:
print(self.usage)
elif self.args[0] == 'create':
view_plugin_types = self.args[1:]
self.create_views(view_plugin_types)
elif self.args[0] == 'clear':
view_plugin_types = self.args[1:]
self.clear_views(view_plugin_types)
elif self.args[0] == 'clean':
self.clean_views()
else:
print(self.usage)
_page_size = 100
def _get_view_plugins(self, view_plugin_types,
get_datastore_views=False):
'''
        Returns the view plugins that were successfully loaded.
Views are provided as a list of ``view_plugin_types``. If no types are
provided, the default views defined in the ``ckan.views.default_views``
will be created. Only in this case (when the default view plugins are
used) the `get_datastore_views` parameter can be used to get also view
plugins that require data to be in the DataStore.
If any of the provided plugins could not be loaded (eg it was not added
to `ckan.plugins`) the command will stop.
Returns a list of loaded plugin names.
'''
from ckan.lib.datapreview import (get_view_plugins,
get_default_view_plugins
)
log = logging.getLogger(__name__)
view_plugins = []
if not view_plugin_types:
log.info('No view types provided, using default types')
view_plugins = get_default_view_plugins()
if get_datastore_views:
view_plugins.extend(
get_default_view_plugins(get_datastore_views=True))
else:
view_plugins = get_view_plugins(view_plugin_types)
loaded_view_plugins = [view_plugin.info()['name']
for view_plugin in view_plugins]
plugins_not_found = list(set(view_plugin_types) -
set(loaded_view_plugins))
if plugins_not_found:
error('View plugin(s) not found : {0}. '.format(plugins_not_found)
+ 'Have they been added to the `ckan.plugins` configuration'
+ ' option?')
return loaded_view_plugins
def _add_default_filters(self, search_data_dict, view_types):
'''
Adds extra filters to the `package_search` dict for common view types
It basically adds `fq` parameters that filter relevant resource formats
for the view types provided. For instance, if one of the view types is
`pdf_view` the following will be added to the final query:
fq=res_format:"pdf" OR res_format:"PDF"
This obviously should only be used if all view types are known and can
be filtered, otherwise we want all datasets to be returned. If a
non-filterable view type is provided, the search params are not
modified.
Returns the provided data_dict for `package_search`, optionally
modified with extra filters.
'''
from ckanext.imageview.plugin import DEFAULT_IMAGE_FORMATS
from ckanext.textview.plugin import get_formats as get_text_formats
from ckanext.datapusher.plugin import DEFAULT_FORMATS as \
datapusher_formats
filter_formats = []
for view_type in view_types:
if view_type == 'image_view':
for _format in DEFAULT_IMAGE_FORMATS:
filter_formats.extend([_format, _format.upper()])
elif view_type == 'text_view':
formats = get_text_formats(config)
for _format in itertools.chain.from_iterable(formats.values()):
filter_formats.extend([_format, _format.upper()])
elif view_type == 'pdf_view':
filter_formats.extend(['pdf', 'PDF'])
elif view_type in ['recline_view', 'recline_grid_view',
'recline_graph_view', 'recline_map_view']:
if datapusher_formats[0] in filter_formats:
continue
for _format in datapusher_formats:
if '/' not in _format:
filter_formats.extend([_format, _format.upper()])
else:
# There is another view type provided so we can't add any
# filter
return search_data_dict
filter_formats_query = ['+res_format:"{0}"'.format(_format)
for _format in filter_formats]
search_data_dict['fq_list'].append(' OR '.join(filter_formats_query))
return search_data_dict
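    # A minimal illustration of _add_default_filters (hedged: a hypothetical
    # call from within the class); for view_types=['pdf_view'] one fq_list
    # entry is added:
    #
    #   d = {'q': '', 'fq': '', 'fq_list': []}
    #   d = self._add_default_filters(d, ['pdf_view'])
    #   # d['fq_list'] == ['+res_format:"pdf" OR +res_format:"PDF"']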
def _update_search_params(self, search_data_dict):
'''
Update the `package_search` data dict with the user provided parameters
Supported fields are `q`, `fq` and `fq_list`.
If the provided JSON object can not be parsed the process stops with
an error.
Returns the updated data dict
'''
log = logging.getLogger(__name__)
if not self.options.search_params:
return search_data_dict
try:
user_search_params = json.loads(self.options.search_params)
except ValueError as e:
error('Unable to parse JSON search parameters: {0}'.format(e))
if user_search_params.get('q'):
search_data_dict['q'] = user_search_params['q']
if user_search_params.get('fq'):
if search_data_dict['fq']:
search_data_dict['fq'] += ' ' + user_search_params['fq']
else:
search_data_dict['fq'] = user_search_params['fq']
if (user_search_params.get('fq_list') and
isinstance(user_search_params['fq_list'], list)):
            search_data_dict['fq_list'].extend(user_search_params['fq_list'])
        return search_data_dict
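    # A minimal illustration of _update_search_params (hedged: a hypothetical
    # call from within the class, with self.options.search_params set by the
    # -s option); only `q`, `fq` and `fq_list` are honoured:
    #
    #   self.options.search_params = '{"q": "climate"}'
    #   d = self._update_search_params({'q': '', 'fq': '', 'fq_list': []})
    #   # d['q'] == 'climate'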
def _search_datasets(self, page=1, view_types=[]):
'''
Perform a query with `package_search` and return the result
Results can be paginated using the `page` parameter
'''
n = self._page_size
search_data_dict = {
'q': '',
'fq': '',
'fq_list': [],
'include_private': True,
'rows': n,
'start': n * (page - 1),
}
if self.options.dataset_id:
search_data_dict['q'] = ' OR '.join(
['id:{0} OR name:"{0}"'.format(dataset_id)
for dataset_id in self.options.dataset_id]
)
elif self.options.search_params:
self._update_search_params(search_data_dict)
elif not self.options.no_default_filters:
self._add_default_filters(search_data_dict, view_types)
if not search_data_dict.get('q'):
search_data_dict['q'] = '*:*'
query = p.toolkit.get_action('package_search')(
{}, search_data_dict)
return query
def create_views(self, view_plugin_types=[]):
from ckan.lib.datapreview import add_views_to_dataset_resources
log = logging.getLogger(__name__)
datastore_enabled = 'datastore' in config['ckan.plugins'].split()
loaded_view_plugins = self._get_view_plugins(view_plugin_types,
datastore_enabled)
context = {'user': self.site_user['name']}
page = 1
while True:
query = self._search_datasets(page, loaded_view_plugins)
if page == 1 and query['count'] == 0:
error('No datasets to create resource views on, exiting...')
elif page == 1 and not self.options.assume_yes:
msg = ('\nYou are about to check {0} datasets for the ' +
'following view plugins: {1}\n' +
' Do you want to continue?')
confirm = query_yes_no(msg.format(query['count'],
loaded_view_plugins))
if confirm == 'no':
error('Command aborted by user')
if query['results']:
for dataset_dict in query['results']:
if not dataset_dict.get('resources'):
continue
views = add_views_to_dataset_resources(
context,
dataset_dict,
view_types=loaded_view_plugins)
if views:
view_types = list({view['view_type']
for view in views})
msg = ('Added {0} view(s) of type(s) {1} to ' +
'resources from dataset {2}')
log.debug(msg.format(len(views),
', '.join(view_types),
dataset_dict['name']))
if len(query['results']) < self._page_size:
break
page += 1
else:
break
log.info('Done')
def clear_views(self, view_plugin_types=[]):
log = logging.getLogger(__name__)
if not self.options.assume_yes:
if view_plugin_types:
msg = 'Are you sure you want to delete all resource views ' + \
'of type {0}?'.format(', '.join(view_plugin_types))
else:
msg = 'Are you sure you want to delete all resource views?'
result = query_yes_no(msg, default='no')
if result == 'no':
error('Command aborted by user')
context = {'user': self.site_user['name']}
logic.get_action('resource_view_clear')(
context, {'view_types': view_plugin_types})
log.info('Done')
def clean_views(self):
names = []
for plugin in p.PluginImplementations(p.IResourceView):
names.append(str(plugin.info()['name']))
results = model.ResourceView.get_count_not_in_view_types(names)
if not results:
print('No resource views to delete')
return
        print('This command will delete the following resource views:\n')
        for row in results:
            print('%s of type %s' % (row[1], row[0]))
        result = query_yes_no('Do you want to delete these resource views?', default='no')
if result == 'no':
print('Not Deleting.')
return
model.ResourceView.delete_not_in_view_types(names)
model.Session.commit()
print('Deleted resource views.')
class ConfigToolCommand(paste.script.command.Command):
'''Tool for editing options in a CKAN config file
paster config-tool <default.ini> <key>=<value> [<key>=<value> ...]
paster config-tool <default.ini> -f <custom_options.ini>
Examples:
paster config-tool default.ini sqlalchemy.url=123 'ckan.site_title=ABC'
paster config-tool default.ini -s server:main -e port=8080
paster config-tool default.ini -f custom_options.ini
'''
parser = paste.script.command.Command.standard_parser(verbose=True)
default_verbosity = 1
group_name = 'ckan'
usage = __doc__
summary = usage.split('\n')[0]
parser.add_option('-s', '--section', dest='section',
default='app:main', help='Section of the config file')
parser.add_option(
'-e', '--edit', action='store_true', dest='edit', default=False,
        help='Check that the option already exists in the config file')
parser.add_option(
'-f', '--file', dest='merge_filepath', metavar='FILE',
help='Supply an options file to merge in')
def command(self):
from ckan.lib import config_tool
if len(self.args) < 1:
self.parser.error('Not enough arguments (got %i, need at least 1)'
% len(self.args))
config_filepath = self.args[0]
if not os.path.exists(config_filepath):
self.parser.error('Config filename %r does not exist.' %
config_filepath)
if self.options.merge_filepath:
config_tool.config_edit_using_merge_file(
config_filepath, self.options.merge_filepath)
options = self.args[1:]
if not (options or self.options.merge_filepath):
self.parser.error('No options provided')
if options:
for option in options:
if '=' not in option:
error(
'An option does not have an equals sign: %r '
'It should be \'key=value\'. If there are spaces '
'you\'ll need to quote the option.\n' % option)
try:
config_tool.config_edit_using_option_strings(
config_filepath, options, self.options.section,
edit=self.options.edit)
except config_tool.ConfigToolError as e:
error(traceback.format_exc())
class JobsCommand(CkanCommand):
'''Manage background jobs
Usage:
paster jobs worker [--burst] [QUEUES]
Start a worker that fetches jobs from queues and executes
them. If no queue names are given then the worker listens
to the default queue, this is equivalent to
paster jobs worker default
If queue names are given then the worker listens to those
queues and only those:
paster jobs worker my-custom-queue
Hence, if you want the worker to listen to the default queue
and some others then you must list the default queue explicitly:
paster jobs worker default my-custom-queue
If the `--burst` option is given then the worker will exit
as soon as all its queues are empty.
paster jobs list [QUEUES]
List currently enqueued jobs from the given queues. If no queue
names are given then the jobs from all queues are listed.
paster jobs show ID
Show details about a specific job.
paster jobs cancel ID
Cancel a specific job. Jobs can only be canceled while they are
enqueued. Once a worker has started executing a job it cannot
be aborted anymore.
paster jobs clear [QUEUES]
Cancel all jobs on the given queues. If no queue names are
given then ALL queues are cleared.
paster jobs test [QUEUES]
Enqueue a test job. If no queue names are given then the job is
added to the default queue. If queue names are given then a
separate test job is added to each of the queues.
'''
summary = __doc__.split(u'\n')[0]
usage = __doc__
min_args = 0
def __init__(self, *args, **kwargs):
super(JobsCommand, self).__init__(*args, **kwargs)
try:
self.parser.add_option(u'--burst', action='store_true',
default=False,
help=u'Start worker in burst mode.')
except OptionConflictError:
# Option has already been added in previous call
pass
def command(self):
self._load_config()
try:
cmd = self.args.pop(0)
except IndexError:
print(self.__doc__)
sys.exit(0)
if cmd == u'worker':
self.worker()
elif cmd == u'list':
self.list()
elif cmd == u'show':
self.show()
elif cmd == u'cancel':
self.cancel()
elif cmd == u'clear':
self.clear()
elif cmd == u'test':
self.test()
else:
error(u'Unknown command "{}"'.format(cmd))
def worker(self):
from ckan.lib.jobs import Worker
Worker(self.args).work(burst=self.options.burst)
def list(self):
data_dict = {
u'queues': self.args,
}
jobs = p.toolkit.get_action(u'job_list')({}, data_dict)
for job in jobs:
if job[u'title'] is None:
job[u'title'] = ''
else:
job[u'title'] = u'"{}"'.format(job[u'title'])
print(u'{created} {id} {queue} {title}'.format(**job))
def show(self):
if not self.args:
error(u'You must specify a job ID')
id = self.args[0]
try:
job = p.toolkit.get_action(u'job_show')({}, {u'id': id})
except logic.NotFound:
error(u'There is no job with ID "{}"'.format(id))
print(u'ID: {}'.format(job[u'id']))
if job[u'title'] is None:
title = u'None'
else:
title = u'"{}"'.format(job[u'title'])
print(u'Title: {}'.format(title))
print(u'Created: {}'.format(job[u'created']))
print(u'Queue: {}'.format(job[u'queue']))
def cancel(self):
if not self.args:
error(u'You must specify a job ID')
id = self.args[0]
try:
p.toolkit.get_action(u'job_cancel')({}, {u'id': id})
except logic.NotFound:
error(u'There is no job with ID "{}"'.format(id))
print(u'Cancelled job {}'.format(id))
def clear(self):
data_dict = {
u'queues': self.args,
}
queues = p.toolkit.get_action(u'job_clear')({}, data_dict)
queues = (u'"{}"'.format(q) for q in queues)
print(u'Cleared queue(s) {}'.format(u', '.join(queues)))
def test(self):
from ckan.lib.jobs import DEFAULT_QUEUE_NAME, enqueue, test_job
for queue in (self.args or [DEFAULT_QUEUE_NAME]):
job = enqueue(test_job, [u'A test job'], title=u'A test job', queue=queue)
print(u'Added test job {} to queue "{}"'.format(job.id, queue))
|
htcondor_utils.py
|
#=== Imports ===================================================
import re
import time
import threading
import random
import multiprocessing
import tempfile
import functools
import traceback
import xml.etree.ElementTree as ET
try:
import subprocess32 as subprocess
except Exception:
import subprocess
try:
from threading import get_ident
except ImportError:
from thread import get_ident
import six
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestercore.core_utils import SingletonWithID
from pandaharvester.harvestercore.fifos import SpecialFIFOBase
# condor python or command api
try:
import htcondor
except ImportError:
CONDOR_API = 'command'
else:
CONDOR_API = 'python'
#===============================================================
#=== Definitions ===============================================
# logger
baseLogger = core_utils.setup_logger('htcondor_utils')
# module level lock
moduleLock = threading.Lock()
# List of job ads required
CONDOR_JOB_ADS_LIST = [
'ClusterId', 'ProcId', 'JobStatus', 'LastJobStatus',
'JobStartDate', 'EnteredCurrentStatus', 'ExitCode',
'HoldReason', 'LastHoldReason', 'RemoveReason',
'harvesterWorkerID',
]
# harvesterID
harvesterID = harvester_config.master.harvester_id
#===============================================================
#=== Functions =================================================
def synchronize(func):
"""
synchronize decorator
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
with moduleLock:
return func(*args, **kwargs)
return wrapper
def _runShell(cmd):
"""
Run shell function
"""
cmd = str(cmd)
p = subprocess.Popen(cmd.split(), shell=False, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdOut, stdErr = p.communicate()
retCode = p.returncode
return (retCode, stdOut, stdErr)
def condor_job_id_from_workspec(workspec):
"""
Generate condor job id with schedd host from workspec
"""
batchid_str = str(workspec.batchID)
# backward compatibility if workspec.batchID does not contain ProcId
if '.' not in batchid_str:
batchid_str += '.0'
return '{0}#{1}'.format(workspec.submissionHost, batchid_str)
def get_host_batchid_map(workspec_list):
"""
Get a dictionary of submissionHost: list of batchIDs from workspec_list
    return {submissionHost_1: [batchID_1_1, ...], submissionHost_2: [...], ...}
"""
host_batchid_map = {}
for workspec in workspec_list:
host = workspec.submissionHost
batchid = workspec.batchID
if batchid is None:
continue
batchid_str = str(batchid)
# backward compatibility if workspec.batchID does not contain ProcId
if '.' not in batchid_str:
batchid_str += '.0'
try:
host_batchid_map[host].append(batchid_str)
except KeyError:
host_batchid_map[host] = [batchid_str]
return host_batchid_map
def get_batchid_from_job(job_ads_dict):
"""
Get batchID string from condor job dict
"""
batchid = '{0}.{1}'.format(job_ads_dict['ClusterId'], job_ads_dict['ProcId'])
return batchid
def get_job_id_tuple_from_batchid(batchid):
"""
Get tuple (ClusterId, ProcId) from batchID string
"""
batchid_str_list = str(batchid).split('.')
clusterid = batchid_str_list[0]
    procid = batchid_str_list[1] if len(batchid_str_list) >= 2 else 0
    if not procid:
        procid = 0
return (clusterid, procid)
# def jdl_to_map(jdl):
# """
# Transform jdl into dictionary
# The "queue" line (e.g. "queue 1") will be omitted
# """
# # FIXME: not containing "+"
# ret_map = {}
# for line in jdl.split('\n'):
# match = re.search('^(.+) = (.+)$', line)
# if match:
# ret_map[match(1)] = match(2)
# return ret_map
def condor_submit_process(mp_queue, host, jdl_map_list):
"""
Function for new process to submit condor
"""
    # initialization
    tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(host),
                                    method_name='condor_submit_process')
    errStr = ''
    batchIDs_list = []
# parse schedd and pool name
condor_schedd, condor_pool = None, None
if host in ('LOCAL', 'None'):
tmpLog.debug('submissionHost is {0}, treated as local schedd. Skipped'.format(host))
else:
try:
condor_schedd, condor_pool = host.split(',')[0:2]
except ValueError:
tmpLog.error('Invalid submissionHost: {0} . Skipped'.format(host))
# get schedd
try:
if condor_pool:
collector = htcondor.Collector(condor_pool)
else:
collector = htcondor.Collector()
if condor_schedd:
scheddAd = collector.locate(htcondor.DaemonTypes.Schedd, condor_schedd)
else:
scheddAd = collector.locate(htcondor.DaemonTypes.Schedd)
schedd = htcondor.Schedd(scheddAd)
except Exception as e:
errStr = 'create condor collector and schedd failed; {0}: {1}'.format(e.__class__.__name__, e)
else:
submit_obj = htcondor.Submit()
try:
with schedd.transaction() as txn:
# TODO: Currently spool is not supported in htcondor.Submit ...
submit_result = submit_obj.queue_with_itemdata(txn, 1, iter(jdl_map_list))
clusterid = submit_result.cluster()
first_proc = submit_result.first_proc()
num_proc = submit_result.num_procs()
batchIDs_list.extend(['{0}.{1}'.format(clusterid, procid)
for procid in range(first_proc, first_proc + num_proc)])
except RuntimeError as e:
errStr = 'submission failed; {0}: {1}'.format(e.__class__.__name__, e)
mp_queue.put((batchIDs_list, errStr))
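# Note: condor_submit_process() is intended to run in a separate process (see
# CondorJobSubmit.submit_with_python_process below), so that instability in
# repeated in-process htcondor submission does not affect the caller.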
#===============================================================
#=== Classes ===================================================
# Condor queue cache fifo
class CondorQCacheFifo(six.with_metaclass(SingletonWithID, SpecialFIFOBase)):
global_lock_id = -1
def __init__(self, target, *args, **kwargs):
name_suffix = target.split('.')[0]
self.titleName = 'CondorQCache_{0}'.format(name_suffix)
SpecialFIFOBase.__init__(self)
def lock(self, score=None):
lock_key = format(int(random.random() * 2**32), 'x')
if score is None:
score = time.time()
retVal = self.putbyid(self.global_lock_id, lock_key, score)
if retVal:
return lock_key
return None
def unlock(self, key=None, force=False):
peeked_tuple = self.peekbyid(id=self.global_lock_id)
if peeked_tuple.score is None or peeked_tuple.item is None:
return True
elif force or self.decode(peeked_tuple.item) == key:
self.delete([self.global_lock_id])
return True
else:
return False
# Condor client
class CondorClient(object):
@classmethod
def renew_session_and_retry(cls, func):
"""
If RuntimeError, call renew_session and retry
"""
# FIXME: currently hard-coded
to_retry = True
# Wrapper
def wrapper(self, *args, **kwargs):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorClient.renew_session_if_error')
func_name = func.__name__
try:
self.schedd
except AttributeError:
if self.lock.acquire(False):
is_renewed = self.renew_session()
self.lock.release()
if not is_renewed:
errStr = 'failed to communicate with {0}'.format(self.submissionHost)
tmpLog.error(errStr)
                        tmpLog.debug('schedd attribute missing and session renewal failed')
raise Exception(errStr)
try:
ret = func(self, *args, **kwargs)
except RuntimeError as e:
tmpLog.debug('got RuntimeError: {0}'.format(e))
if self.lock.acquire(False):
is_renewed = self.renew_session()
self.lock.release()
if is_renewed:
if to_retry:
tmpLog.debug('condor session renewed. Retrying {0}'.format(func_name))
ret = func(self, *args, **kwargs)
else:
tmpLog.debug('condor session renewed')
raise
else:
tmpLog.error('failed to renew condor session')
raise
else:
tmpLog.debug('another thread is renewing condor session; skipped...')
raise
tmpLog.debug('done')
return ret
return wrapper
def __init__(self, submissionHost, *args, **kwargs):
self.submissionHost = submissionHost
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorClient.__init__')
# Initialize
tmpLog.debug('Initializing client')
self.lock = threading.Lock()
self.condor_api = CONDOR_API
self.condor_schedd = None
self.condor_pool = None
# Parse condor command remote options from workspec
if self.submissionHost in ('LOCAL', 'None'):
tmpLog.debug('submissionHost is {0}, treated as local schedd. Skipped'.format(self.submissionHost))
else:
try:
self.condor_schedd, self.condor_pool = self.submissionHost.split(',')[0:2]
except ValueError:
tmpLog.error('Invalid submissionHost: {0} . Skipped'.format(self.submissionHost))
# Use Python API or fall back to command
if self.condor_api == 'python':
try:
self.secman = htcondor.SecMan()
self.renew_session(init=True)
except Exception as e:
tmpLog.error('Error when using htcondor Python API. Exception {0}: {1}'.format(e.__class__.__name__, e))
raise
tmpLog.debug('Initialized client')
@synchronize
def renew_session(self, retry=3, init=False):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorClient.renew_session')
# Clear security session if not initialization
if not init:
tmpLog.info('Renew condor session')
self.secman.invalidateAllSessions()
# Recreate collector and schedd object
i_try = 1
while i_try <= retry:
try:
tmpLog.info('Try {0}'.format(i_try))
if self.condor_pool:
self.collector = htcondor.Collector(self.condor_pool)
else:
self.collector = htcondor.Collector()
if self.condor_schedd:
self.scheddAd = self.collector.locate(htcondor.DaemonTypes.Schedd, self.condor_schedd)
else:
self.scheddAd = self.collector.locate(htcondor.DaemonTypes.Schedd)
self.schedd = htcondor.Schedd(self.scheddAd)
tmpLog.info('Success')
break
except Exception as e:
tmpLog.warning('Recreate condor collector and schedd failed: {0}'.format(e))
if i_try < retry:
tmpLog.warning('Failed. Retry...')
else:
                    tmpLog.warning('Retried {0} times; still failed. Skipped'.format(i_try))
return False
i_try += 1
self.secman.invalidateAllSessions()
time.sleep(3)
# Sleep
time.sleep(3)
return True
# Condor job query
class CondorJobQuery(six.with_metaclass(SingletonWithID, CondorClient)):
# class lock
classLock = threading.Lock()
# Query commands
orig_comStr_list = [
'condor_q -xml',
'condor_history -xml',
]
    # Bad text of redundant xml roots to eliminate from condor XML
badtext = """
</classads>
<?xml version="1.0"?>
<!DOCTYPE classads SYSTEM "classads.dtd">
<classads>
"""
def __init__(self, cacheEnable=False, cacheRefreshInterval=None, useCondorHistory=True, *args, **kwargs):
self.submissionHost = str(kwargs.get('id'))
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0} thrid={1} oid={2}'.format(self.submissionHost, get_ident(), id(self)), method_name='CondorJobQuery.__init__')
# Initialize
with self.classLock:
tmpLog.debug('Start')
CondorClient.__init__(self, self.submissionHost, *args, **kwargs)
# For condor_q cache
self.cacheEnable = cacheEnable
if self.cacheEnable:
self.cache = ([], 0)
self.cacheRefreshInterval = cacheRefreshInterval
self.useCondorHistory = useCondorHistory
tmpLog.debug('Initialize done')
def get_all(self, batchIDs_list=[], allJobs=False):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobQuery.get_all')
# Get all
tmpLog.debug('Start')
job_ads_all_dict = {}
if self.condor_api == 'python':
try:
job_ads_all_dict = self.query_with_python(batchIDs_list, allJobs)
except Exception as e:
tmpLog.error('Exception {0}: {1}'.format(e.__class__.__name__, e))
raise
else:
job_ads_all_dict = self.query_with_command(batchIDs_list)
return job_ads_all_dict
def query_with_command(self, batchIDs_list=[]):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobQuery.query_with_command')
# Start query
tmpLog.debug('Start query')
job_ads_all_dict = {}
batchIDs_set = set(batchIDs_list)
for orig_comStr in self.orig_comStr_list:
# String of batchIDs
batchIDs_str = ' '.join(list(batchIDs_set))
# Command
if 'condor_q' in orig_comStr or ('condor_history' in orig_comStr and batchIDs_set):
name_opt = '-name {0}'.format(self.condor_schedd) if self.condor_schedd else ''
pool_opt = '-pool {0}'.format(self.condor_pool) if self.condor_pool else ''
ids = batchIDs_str
comStr = '{cmd} {name_opt} {pool_opt} {ids}'.format(cmd=orig_comStr,
name_opt=name_opt,
pool_opt=pool_opt,
ids=ids)
else:
# tmpLog.debug('No batch job left to query in this cycle by this thread')
continue
tmpLog.debug('check with {0}'.format(comStr))
(retCode, stdOut, stdErr) = _runShell(comStr)
if retCode == 0:
# Command succeeded
job_ads_xml_str = '\n'.join(str(stdOut).split(self.badtext))
if '<c>' in job_ads_xml_str:
# Found at least one job
# XML parsing
xml_root = ET.fromstring(job_ads_xml_str)
def _getAttribute_tuple(attribute_xml_element):
# Attribute name
_n = str(attribute_xml_element.get('n'))
# Attribute value text
_t = ' '.join(attribute_xml_element.itertext())
return (_n, _t)
# Every batch job
for _c in xml_root.findall('c'):
job_ads_dict = dict()
# Every attribute
attribute_iter = map(_getAttribute_tuple, _c.findall('a'))
job_ads_dict.update(attribute_iter)
batchid = get_batchid_from_job(job_ads_dict)
condor_job_id = '{0}#{1}'.format(self.submissionHost, batchid)
job_ads_all_dict[condor_job_id] = job_ads_dict
# Remove batch jobs already gotten from the list
if batchid in batchIDs_set:
batchIDs_set.discard(batchid)
else:
# Job not found
tmpLog.debug('job not found with {0}'.format(comStr))
continue
else:
# Command failed
errStr = 'command "{0}" failed, retCode={1}, error: {2} {3}'.format(comStr, retCode, stdOut, stdErr)
tmpLog.error(errStr)
if len(batchIDs_set) > 0:
            # Jobs not found via either condor_q or condor_history; mark them as unknown workers in harvester
for batchid in batchIDs_set:
condor_job_id = '{0}#{1}'.format(self.submissionHost, batchid)
job_ads_all_dict[condor_job_id] = dict()
tmpLog.info( 'Unfound batch jobs of submissionHost={0}: {1}'.format(
self.submissionHost, ' '.join(list(batchIDs_set)) ) )
# Return
return job_ads_all_dict
@CondorClient.renew_session_and_retry
def query_with_python(self, batchIDs_list=[], allJobs=False):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobQuery.query_with_python')
# Start query
tmpLog.debug('Start query')
cache_fifo = CondorQCacheFifo(target=self.submissionHost, id='{0},{1}'.format(self.submissionHost, get_ident()))
job_ads_all_dict = {}
# make id sets
batchIDs_set = set(batchIDs_list)
clusterids_set = set([get_job_id_tuple_from_batchid(batchid)[0] for batchid in batchIDs_list])
# query from cache
def cache_query(requirements=None, projection=CONDOR_JOB_ADS_LIST, timeout=60):
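            # cache_query serves schedd query results from a FIFO-backed cache
            # shared between threads: an entry with id == global_lock_id acts as
            # a refresh lock, entries younger than cacheRefreshInterval are
            # reused, and expired entries trigger update_cache() and
            # cleanup_cache() defined below.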
# query from condor xquery and update cache to fifo
def update_cache(lockInterval=90):
tmpLog.debug('update_cache')
# acquire lock with score timestamp
score = time.time() - self.cacheRefreshInterval + lockInterval
lock_key = cache_fifo.lock(score=score)
if lock_key is not None:
# acquired lock, update from condor schedd
tmpLog.debug('got lock, updating cache')
jobs_iter_orig = self.schedd.xquery(requirements=requirements, projection=projection)
jobs_iter = []
for job in jobs_iter_orig:
try:
jobs_iter.append(dict(job))
except Exception as e:
tmpLog.error('In updating cache schedd xquery; got exception {0}: {1} ; {2}'.format(
e.__class__.__name__, e, repr(job)))
timeNow = time.time()
cache_fifo.put(jobs_iter, timeNow)
self.cache = (jobs_iter, timeNow)
# release lock
retVal = cache_fifo.unlock(key=lock_key)
if retVal:
tmpLog.debug('done update cache and unlock')
else:
tmpLog.warning('cannot unlock... Maybe something wrong')
return jobs_iter
else:
tmpLog.debug('cache fifo locked by other thread. Skipped')
return None
# remove invalid or outdated caches from fifo
def cleanup_cache(timeout=60):
tmpLog.debug('cleanup_cache')
id_list = list()
attempt_timestamp = time.time()
n_cleanup = 0
while True:
if time.time() > attempt_timestamp + timeout:
tmpLog.debug('time is up when cleanup cache. Skipped')
break
peeked_tuple = cache_fifo.peek(skip_item=True)
if peeked_tuple is None:
tmpLog.debug('empty cache fifo')
break
elif peeked_tuple.score is not None \
and time.time() <= peeked_tuple.score + self.cacheRefreshInterval:
tmpLog.debug('nothing expired')
break
elif peeked_tuple.id is not None:
retVal = cache_fifo.delete([peeked_tuple.id])
if isinstance(retVal, int):
n_cleanup += retVal
else:
# problematic
tmpLog.warning('got nothing when cleanup cache, maybe problematic. Skipped')
break
tmpLog.debug('cleaned up {0} objects in cache fifo'.format(n_cleanup))
# start
jobs_iter = tuple()
try:
attempt_timestamp = time.time()
while True:
if time.time() > attempt_timestamp + timeout:
# skip cache_query if too long
tmpLog.debug('cache_query got timeout ({0} seconds). Skipped '.format(timeout))
break
# get latest cache
peeked_tuple = cache_fifo.peeklast(skip_item=True)
if peeked_tuple is not None and peeked_tuple.score is not None:
# got something
if peeked_tuple.id == cache_fifo.global_lock_id:
if time.time() <= peeked_tuple.score + self.cacheRefreshInterval:
# lock
tmpLog.debug('got fifo locked. Wait and retry...')
time.sleep(random.uniform(1, 5))
continue
else:
# expired lock
tmpLog.debug('got lock expired. Clean up and retry...')
cleanup_cache()
continue
elif time.time() <= peeked_tuple.score + self.cacheRefreshInterval:
# got valid cache
_obj, _last_update = self.cache
if _last_update >= peeked_tuple.score:
# valid local cache
tmpLog.debug('valid local cache')
jobs_iter = _obj
else:
# valid fifo cache
tmpLog.debug('update local cache from fifo')
peeked_tuple_with_item = cache_fifo.peeklast()
if peeked_tuple_with_item is not None \
and peeked_tuple.id != cache_fifo.global_lock_id \
and peeked_tuple_with_item.item is not None:
jobs_iter = cache_fifo.decode(peeked_tuple_with_item.item)
self.cache = (jobs_iter, peeked_tuple_with_item.score)
else:
tmpLog.debug('peeked invalid cache fifo object. Wait and retry...')
time.sleep(random.uniform(1, 5))
continue
else:
# cache expired
tmpLog.debug('update cache in fifo')
retVal = update_cache()
if retVal is not None:
jobs_iter = retVal
cleanup_cache()
break
else:
# no cache in fifo, check with size again
if cache_fifo.size() == 0:
if time.time() > attempt_timestamp + random.uniform(10, 30):
# have waited for long enough, update cache
tmpLog.debug('waited enough, update cache in fifo')
retVal = update_cache()
if retVal is not None:
jobs_iter = retVal
break
else:
# still nothing, wait
time.sleep(2)
continue
except Exception as _e:
tb_str = traceback.format_exc()
tmpLog.error('Error querying from cache fifo; {0} ; {1}'.format(_e, tb_str))
return jobs_iter
# query method options
query_method_list = [self.schedd.xquery]
if self.cacheEnable:
query_method_list.insert(0, cache_query)
if self.useCondorHistory:
query_method_list.append(self.schedd.history)
# Go
for query_method in query_method_list:
# Make requirements
clusterids_str = ','.join(list(clusterids_set))
if query_method is cache_query or allJobs:
requirements = 'harvesterID =?= "{0}"'.format(harvesterID)
else:
requirements = 'member(ClusterID, {{{0}}})'.format(clusterids_str)
if allJobs:
tmpLog.debug('Query method: {0} ; allJobs'.format(query_method.__name__))
else:
tmpLog.debug('Query method: {0} ; clusterids: "{1}"'.format(query_method.__name__, clusterids_str))
# Query
jobs_iter = query_method(requirements=requirements, projection=CONDOR_JOB_ADS_LIST)
for job in jobs_iter:
try:
job_ads_dict = dict(job)
                except Exception as e:
                    tmpLog.error('In doing schedd xquery or history; got exception {0}: {1} ; {2}'.format(
                        e.__class__.__name__, e, repr(job)))
                    continue
batchid = get_batchid_from_job(job_ads_dict)
condor_job_id = '{0}#{1}'.format(self.submissionHost, batchid)
job_ads_all_dict[condor_job_id] = job_ads_dict
# Remove batch jobs already gotten from the list
if not allJobs:
batchIDs_set.discard(batchid)
if len(batchIDs_set) == 0 or allJobs:
break
# Remaining
if not allJobs and len(batchIDs_set) > 0:
            # Jobs not found via either condor_q or condor_history; mark them as unknown workers in harvester
for batchid in batchIDs_set:
condor_job_id = '{0}#{1}'.format(self.submissionHost, batchid)
job_ads_all_dict[condor_job_id] = dict()
tmpLog.info( 'Unfound batch jobs of submissionHost={0}: {1}'.format(
self.submissionHost, ' '.join(list(batchIDs_set)) ) )
# Return
return job_ads_all_dict
# Condor job submit
class CondorJobSubmit(six.with_metaclass(SingletonWithID, CondorClient)):
# class lock
classLock = threading.Lock()
def __init__(self, *args, **kwargs):
self.submissionHost = str(kwargs.get('id'))
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0} thrid={1} oid={2}'.format(self.submissionHost, get_ident(), id(self)), method_name='CondorJobSubmit.__init__')
# Initialize
tmpLog.debug('Start')
self.lock = threading.Lock()
CondorClient.__init__(self, self.submissionHost, *args, **kwargs)
tmpLog.debug('Initialize done')
def submit(self, jdl_list, use_spool=False):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobSubmit.submit')
# Get all
tmpLog.debug('Start')
job_ads_all_dict = {}
if self.condor_api == 'python':
try:
# TODO: submit_with_python will meet segfault or c++ error after many times of submission; need help from condor team
                # TODO: submit_with_python_process has no such error but spawns some processes that will not terminate after harvester stops
# TODO: Fall back to submit_with_command for now
# retVal = self.submit_with_python(jdl_list, use_spool)
                # retVal = self.submit_with_python_process(jdl_list, use_spool)
retVal = self.submit_with_command(jdl_list, use_spool)
except Exception as e:
tmpLog.error('Exception {0}: {1}'.format(e.__class__.__name__, e))
raise
else:
retVal = self.submit_with_command(jdl_list, use_spool)
return retVal
def submit_with_command(self, jdl_list, use_spool=False, tmp_str='', keep_temp_sdf=False):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobSubmit.submit_with_command')
# Initialize
errStr = ''
batchIDs_list = []
# make sdf temp file from jdls
tmpFile = tempfile.NamedTemporaryFile(mode='w', delete=(not keep_temp_sdf),
suffix='_{0}_cluster_submit.sdf'.format(tmp_str))
sdf_file = tmpFile.name
tmpFile.write('\n\n'.join(jdl_list))
tmpFile.flush()
# make condor remote options
name_opt = '-name {0}'.format(self.condor_schedd) if self.condor_schedd else ''
pool_opt = '-pool {0}'.format(self.condor_pool) if self.condor_pool else ''
spool_opt = '-remote -spool' if use_spool and self.condor_schedd else ''
# command
comStr = 'condor_submit -single-cluster {spool_opt} {name_opt} {pool_opt} {sdf_file}'.format(
sdf_file=sdf_file, name_opt=name_opt, pool_opt=pool_opt, spool_opt=spool_opt)
# submit
tmpLog.debug('submit with command: {0}'.format(comStr))
try:
p = subprocess.Popen(comStr.split(), shell=False, universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# check return code
stdOut, stdErr = p.communicate()
retCode = p.returncode
except Exception as e:
stdOut = ''
stdErr = core_utils.dump_error_message(tmpLog, no_message=True)
retCode = 1
errStr = '{0}: {1}'.format(e.__class__.__name__, e)
finally:
tmpFile.close()
tmpLog.debug('retCode={0}'.format(retCode))
if retCode == 0:
# extract clusterid and n_jobs
job_id_match = None
for tmp_line_str in stdOut.split('\n'):
                # matching stdout line looks like: "N job(s) submitted to cluster M."
                job_id_match = re.search(r'^(\d+) job[(]s[)] submitted to cluster (\d+)\.$', tmp_line_str)
if job_id_match:
break
if job_id_match is not None:
n_jobs = int(job_id_match.group(1))
clusterid = job_id_match.group(2)
batchIDs_list = ['{0}.{1}'.format(clusterid, procid) for procid in range(n_jobs)]
tmpLog.debug('submitted {0} jobs: {1}'.format(n_jobs, ' '.join(batchIDs_list)))
else:
errStr = 'no job submitted: {0}'.format(errStr)
tmpLog.error(errStr)
else:
tmpLog.error('submission failed: {0} ; {1}'.format(stdErr, errStr))
# Return
return (batchIDs_list, errStr)
@CondorClient.renew_session_and_retry
def submit_with_python(self, jdl_list, use_spool=False):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobSubmit.submit_with_python')
# Start
tmpLog.debug('Start')
# Initialize
errStr = ''
batchIDs_list = []
# Make list of jdl map with dummy submit objects
jdl_map_list = [ dict(htcondor.Submit(jdl).items()) for jdl in jdl_list ]
# Go
submit_obj = htcondor.Submit()
try:
with self.schedd.transaction() as txn:
# TODO: Currently spool is not supported in htcondor.Submit ...
submit_result = submit_obj.queue_with_itemdata(txn, 1, iter(jdl_map_list))
clusterid = submit_result.cluster()
first_proc = submit_result.first_proc()
num_proc = submit_result.num_procs()
batchIDs_list.extend(['{0}.{1}'.format(clusterid, procid)
for procid in range(first_proc, first_proc + num_proc)])
except RuntimeError as e:
errStr = '{0}: {1}'.format(e.__class__.__name__, e)
tmpLog.error('submission failed: {0}'.format(errStr))
raise
if batchIDs_list:
n_jobs = len(batchIDs_list)
tmpLog.debug('submitted {0} jobs: {1}'.format(n_jobs, ' '.join(batchIDs_list)))
elif not errStr:
tmpLog.error('submitted nothing')
tmpLog.debug('Done')
# Return
return (batchIDs_list, errStr)
def submit_with_python_process(self, jdl_list, use_spool=False):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobSubmit.submit_with_python_process')
# Start
tmpLog.debug('Start')
# Make list of jdl map with dummy submit objects
jdl_map_list = [ dict(htcondor.Submit(jdl).items()) for jdl in jdl_list ]
# Go
mp_queue = multiprocessing.Queue()
mp_process = multiprocessing.Process(target=condor_submit_process, args=(mp_queue, self.submissionHost, jdl_map_list))
mp_process.daemon = True
mp_process.start()
(batchIDs_list, errStr) = mp_queue.get()
mp_queue.close()
mp_process.terminate()
mp_process.join()
if batchIDs_list:
n_jobs = len(batchIDs_list)
tmpLog.debug('submitted {0} jobs: {1}'.format(n_jobs, ' '.join(batchIDs_list)))
elif not errStr:
tmpLog.error('submitted nothing')
tmpLog.debug('Done')
# Return
return (batchIDs_list, errStr)
# Condor job remove
class CondorJobManage(six.with_metaclass(SingletonWithID, CondorClient)):
# class lock
classLock = threading.Lock()
def __init__(self, *args, **kwargs):
self.submissionHost = str(kwargs.get('id'))
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0} thrid={1} oid={2}'.format(self.submissionHost, get_ident(), id(self)), method_name='CondorJobManage.__init__')
# Initialize
tmpLog.debug('Start')
self.lock = threading.Lock()
CondorClient.__init__(self, self.submissionHost, *args, **kwargs)
tmpLog.debug('Initialize done')
def remove(self, batchIDs_list=[]):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobManage.remove')
# Get all
tmpLog.debug('Start')
job_ads_all_dict = {}
if self.condor_api == 'python':
try:
retVal = self.remove_with_python(batchIDs_list)
except Exception as e:
tmpLog.error('Exception {0}: {1}'.format(e.__class__.__name__, e))
raise
else:
retVal = self.remove_with_command(batchIDs_list)
return retVal
def remove_with_command(self, batchIDs_list=[]):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobManage.remove_with_command')
# if workspec.batchID is None:
# tmpLog.info('Found workerID={0} has submissionHost={1} batchID={2} . Cannot kill. Skipped '.format(
# workspec.workerID, workspec.submissionHost, workspec.batchID))
# ret_list.append((True, ''))
#
# ## Parse condor remote options
# name_opt, pool_opt = '', ''
# if workspec.submissionHost is None or workspec.submissionHost == 'LOCAL':
# pass
# else:
# try:
# condor_schedd, condor_pool = workspec.submissionHost.split(',')[0:2]
# except ValueError:
# errStr = 'Invalid submissionHost: {0} . Skipped'.format(workspec.submissionHost)
# tmpLog.error(errStr)
# ret_list.append((False, errStr))
# name_opt = '-name {0}'.format(condor_schedd) if condor_schedd else ''
# pool_opt = '-pool {0}'.format(condor_pool) if condor_pool else ''
#
# ## Kill command
# comStr = 'condor_rm {name_opt} {pool_opt} {batchID}'.format(name_opt=name_opt,
# pool_opt=pool_opt,
# batchID=workspec.batchID)
# (retCode, stdOut, stdErr) = _runShell(comStr)
# if retCode != 0:
# comStr = 'condor_q -l {name_opt} {pool_opt} {batchID}'.format(name_opt=name_opt,
# pool_opt=pool_opt,
# batchID=workspec.batchID)
# (retCode, stdOut, stdErr) = _runShell(comStr)
# if ('ClusterId = {0}'.format(workspec.batchID) in str(stdOut) \
# and 'JobStatus = 3' not in str(stdOut)) or retCode != 0:
# ## Force to cancel if batch job not terminated first time
# comStr = 'condor_rm -forcex {name_opt} {pool_opt} {batchID}'.format(name_opt=name_opt,
# pool_opt=pool_opt,
# batchID=workspec.batchID)
# (retCode, stdOut, stdErr) = _runShell(comStr)
# if retCode != 0:
# ## Command failed to kill
# errStr = 'command "{0}" failed, retCode={1}, error: {2} {3}'.format(comStr, retCode, stdOut, stdErr)
# tmpLog.error(errStr)
# ret_list.append((False, errStr))
# ## Found already killed
# tmpLog.info('Found workerID={0} submissionHost={1} batchID={2} already killed'.format(
# workspec.workerID, workspec.submissionHost, workspec.batchID))
# else:
# tmpLog.info('Succeeded to kill workerID={0} submissionHost={1} batchID={2}'.format(
# workspec.workerID, workspec.submissionHost, workspec.batchID))
raise NotImplementedError
@CondorClient.renew_session_and_retry
def remove_with_python(self, batchIDs_list=[]):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobManage.remove_with_python')
# Start
tmpLog.debug('Start')
# Acquire class lock
with self.classLock:
tmpLog.debug('Got class lock')
# Initialize
ret_list = []
retMap = {}
# Go
n_jobs = len(batchIDs_list)
act_ret = self.schedd.act(htcondor.JobAction.Remove, batchIDs_list)
# Check if all jobs clear (off from schedd queue)
is_all_clear = (n_jobs == act_ret['TotalAlreadyDone'] + act_ret['TotalNotFound'] + act_ret['TotalSuccess'])
if act_ret and is_all_clear:
tmpLog.debug('removed {0} jobs: {1}'.format(n_jobs, ','.join(batchIDs_list)))
for batchid in batchIDs_list:
condor_job_id = '{0}#{1}'.format(self.submissionHost, batchid)
retMap[condor_job_id] = (True, '')
else:
tmpLog.error('job removal failed; batchIDs_list={0}, got: {1}'.format(batchIDs_list, act_ret))
# need to query queue for unterminated jobs not removed yet
clusterids_set = set([ get_job_id_tuple_from_batchid(batchid)[0] for batchid in batchIDs_list ])
clusterids_str = ','.join(list(clusterids_set))
requirements = 'member(ClusterID, {{{0}}}) && JobStatus =!= 3 && JobStatus =!= 4'.format(clusterids_str)
jobs_iter = self.schedd.xquery(requirements=requirements, projection=CONDOR_JOB_ADS_LIST)
all_batchid_map = {}
ok_batchid_list = []
ng_batchid_list = []
for job in jobs_iter:
job_ads_dict = dict(job)
batchid = get_batchid_from_job(job_ads_dict)
all_batchid_map[batchid] = job_ads_dict
for batchid in batchIDs_list:
condor_job_id = '{0}#{1}'.format(self.submissionHost, batchid)
if batchid in all_batchid_map:
ng_batchid_list.append(batchid)
retMap[condor_job_id] = (False, 'batchID={0} still unterminated in condor queue'.format(batchid))
else:
ok_batchid_list.append(batchid)
retMap[condor_job_id] = (True, '')
tmpLog.debug('removed {0} jobs: {1} ; failed to remove {2} jobs: {3}'.format(
len(ok_batchid_list), ','.join(ok_batchid_list), len(ng_batchid_list), ','.join(ng_batchid_list)))
tmpLog.debug('Done')
# Return
return retMap
#===============================================================
|
ping-aggregator-E16-server.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import logging
import psutil
import subprocess
import os
import yaml
from threading import Thread
import srvdb
import requests as orig_requests
import time
from flask import Flask
from flask import request
from two1.commands.config import Config
from two1.wallet.two1_wallet import Wallet
from two1.bitserv.flask import Payment
from two1.bitrequests import BitTransferRequests
# set up bitrequest client for BitTransfer requests
wallet = Wallet()
username = Config().username
requests = BitTransferRequests(wallet, username)
app = Flask(__name__)
# app.debug = True
# setup wallet
wallet = Wallet()
payment = Payment(app, wallet)
# logging
logger = logging.getLogger('werkzeug')
# db handle
db = srvdb.SrvDb("./ping-aggregator.db")
def get_payment_amt(request):
"""
    Return the payment amount for the request, based on the number of nodes requested.
"""
# print(request.data)
user_input = json.loads(request.data.decode('UTF-8'))
cost = 1000
nodes = db.get_cheapest_nodes(user_input['nodes'])
for node in nodes:
cost = cost + node['price']
return cost
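# Pricing sketch (illustrative numbers): with the base fee of 1000 satoshis
# and, say, three nodes priced 10, 20 and 30 satoshis, get_payment_amt()
# returns 1000 + 10 + 20 + 30 = 1060.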
@app.route('/', methods=['POST'])
@payment.required(get_payment_amt)
def ping():
"""
    Gets the cheapest N nodes running ping21 and runs each of them against the specified URL.
"""
user_input = json.loads(request.data.decode('UTF-8'))
if 'nodes' not in user_input:
ret_obj = {'success': False, "message": "Missing nodes parameter in post data."}
ret = json.dumps(ret_obj, indent=2)
return (ret, 200, {'Content-length': len(ret), 'Content-type': 'application/json'})
if 'website' not in user_input:
ret_obj = {'success': False, "message": "Missing website parameter in post data."}
ret = json.dumps(ret_obj, indent=2)
return (ret, 200, {'Content-length': len(ret), 'Content-type': 'application/json'})
    # Get the number of nodes the user requested, plus 10 spares in case some of them fail
requested_count = user_input['nodes']
nodes = db.get_cheapest_nodes(requested_count + 10)
# Iterate over the nodes returned from the DB
vals = []
successful_requests = 0
for node in nodes:
# If we have already found as many nodes as the user requested, bail out
if successful_requests >= requested_count:
break
try:
# Get the ping data from the node.
# Use the uri from the user in the request.
# Use the maxprice from the db (last time we saw it), so we don't get suckered.
ret = requests.get(node['url'] + "?uri=" + user_input['website'], max_price=node['price'])
# Get the json for the response
ret_obj = ret.json()
ret_obj['price_paid'] = node['price']
# Strip out sensitive info
del ret_obj['server']['ip']
del ret_obj['server']['hostname']
# Save it off
vals.append(ret_obj)
# Update the success count
successful_requests = successful_requests + 1
except Exception as err:
logger.error("Failure: {0}".format(err))
ret = json.dumps(vals, indent=2)
return (ret, 200, {'Content-length': len(ret), 'Content-type': 'application/json'})
def gather_ping_node_stats():
"""
Iterates over nodes and updates the prices and status.
"""
while True:
# Sleep for 8 hours before reloading the node stats
time.sleep(60 * 60 * 8)
nodes = db.get_node_ips()
for node in nodes:
logger.info("\n\nChecking for ping server on {}".format(node))
node_up = False
# First try port 6002
url = "http://{}:6002/".format(node)
manifest_url = url + "manifest"
try:
                # If the manifest comes back and mentions ping21, the node is up
logger.info("Checking on port 6002 with url: {}".format(manifest_url))
manifest = orig_requests.get(manifest_url, timeout=1)
logger.debug("Got back the manifest")
if "ping21" in manifest.text:
node_up = True
logger.debug("Ping21 is running on 6002 on this node")
else:
logger.debug("Ping21 was not found in the manifest")
except:
node_up = False
# Not found on standard node, see if it is running as a microservice
if not node_up:
url = "http://{}:8080/ping/".format(node)
manifest_url = url + "manifest"
try:
                    # If the manifest comes back and mentions ping21, the node is up
logger.debug("Checking on port 8080")
manifest = orig_requests.get(manifest_url, timeout=1)
logger.debug("Got back the manifest")
if "ping21" in manifest.text:
node_up = True
logger.debug("Ping21 is running on 8080 on this node")
else:
logger.debug("Ping21 was not found on this node")
except:
node_up = False
# if we didn't find the ping21 service, mark the node as down
if not node_up:
logger.debug("Marking this node as down since Ping21 was not found")
db.update_node(node, False, 0, "")
continue
# We found the node and it is running ping21, so hit the endpoint to get the price
try:
                # Hit the ping endpoint to read its current price from the response headers
logger.debug("Getting ping url: {}".format(url))
ping_res = orig_requests.get(url)
price = int(ping_res.headers['Price'])
db.update_node(node, True, price, url)
logger.debug("Updated the price from the endpoint: {}".format(price))
except Exception as err:
logger.error("Failure: {0}".format(err))
db.update_node(node, False, 0, url)
if __name__ == '__main__':
import click
@click.command()
@click.option("-d", "--daemon", default=False, is_flag=True, help="Run in daemon mode.")
@click.option("-l", "--log", default="ERROR", help="Logging level to use (DEBUG, INFO, WARNING, ERROR, CRITICAL)")
def run(daemon, log):
"""
Run the service.
"""
# Set logging level
numeric_level = getattr(logging, log.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % log)
logging.basicConfig(level=numeric_level)
if daemon:
pid_file = './ping-aggregator.pid'
if os.path.isfile(pid_file):
pid = int(open(pid_file).read())
os.remove(pid_file)
try:
p = psutil.Process(pid)
p.terminate()
except:
pass
try:
p = subprocess.Popen(['python3', 'ping-aggregator-E16-server.py'])
open(pid_file, 'w').write(str(p.pid))
except subprocess.CalledProcessError:
raise ValueError("error starting ping-aggregator-E16-server.py daemon")
else:
            # Start background thread that periodically refreshes node stats
cleaner = Thread(target=gather_ping_node_stats, daemon=True)
cleaner.start()
print("Server running...")
app.run(host='::', port=7018)
run()
|
bak.client.py
|
#!/usr/bin/env python
#
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""binary to deploy a cluster by compass client api."""
import os
import re
import sys
import time
import yaml
import netaddr
import requests
import json
import itertools
import threading
from collections import defaultdict
from restful import Client
import log as logging
from oslo_config import cfg
ROLE_UNASSIGNED = True
ROLE_ASSIGNED = False
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def byteify(input):
if isinstance(input, dict):
return dict([(byteify(key), byteify(value))
for key, value in input.iteritems()])
elif isinstance(input, list):
return [byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
opts = [
cfg.StrOpt('expansion',
help='is this an expansion?',
default='false'),
cfg.StrOpt('compass_server',
help='compass server url',
default='http://127.0.0.1/api'),
cfg.StrOpt('compass_user_email',
help='compass user email',
default='admin@huawei.com'),
cfg.StrOpt('compass_user_password',
help='compass user password',
default='admin'),
cfg.StrOpt('switch_ips',
               help='comma separated switch ips',
default=''),
cfg.StrOpt('switch_credential',
help='comma separated <credential key>=<credential value>',
default='version=2c,community=public'),
cfg.IntOpt('switch_max_retries',
help='max retries of poll switch',
default=10),
cfg.IntOpt('switch_retry_interval',
help='interval to repoll switch',
default=10),
cfg.BoolOpt('poll_switches',
help='if the client polls switches',
default=True),
cfg.StrOpt('machines',
help='comma separated mac addresses of machines',
default=''),
cfg.StrOpt('subnets',
               help='comma separated subnets',
default=''),
cfg.StrOpt('adapter_name',
help='adapter name',
default=''),
cfg.StrOpt('adapter_os_pattern',
help='adapter os name',
               default=r'(?i)^centos.*'),
cfg.StrOpt('adapter_target_system_pattern',
help='adapter target system name',
default='^openstack$'),
cfg.StrOpt('adapter_flavor_pattern',
help='adapter flavor name',
default='allinone'),
cfg.StrOpt('cluster_name',
help='cluster name',
default='cluster1'),
cfg.StrOpt('language',
help='language',
default='EN'),
cfg.StrOpt('timezone',
help='timezone',
default='GMT'),
cfg.StrOpt('http_proxy',
help='http proxy',
default=''),
cfg.StrOpt('https_proxy',
help='https proxy',
default=''),
cfg.StrOpt('no_proxy',
help='no proxy',
default=''),
cfg.StrOpt('ntp_server',
help='ntp server',
default=''),
cfg.StrOpt('dns_servers',
help='dns servers',
default=''),
cfg.StrOpt('domain',
help='domain',
default=''),
cfg.StrOpt('search_path',
help='search path',
default=''),
cfg.StrOpt('local_repo_url',
help='local repo url',
default=''),
cfg.StrOpt('default_gateway',
help='default gateway',
default=''),
cfg.StrOpt('server_credential',
help=(
'server credential formatted as '
'<username>=<password>'
),
default='root=root'),
cfg.StrOpt('os_config_json_file',
help='json formatted os config file',
default=''),
cfg.StrOpt('service_credentials',
help=(
                   'comma separated service credentials formatted as '
'<servicename>:<username>=<password>,...'
),
default=''),
cfg.StrOpt('console_credentials',
help=(
                   'comma separated console credentials formatted as '
'<consolename>:<username>=<password>'
),
default=''),
cfg.StrOpt('hostnames',
               help='comma separated hostnames',
default=''),
cfg.StrOpt('host_networks',
help=(
                   'semicolon separated host name and its networks '
'<hostname>:<interface_name>=<ip>|<is_mgmt>|<is_promiscuous>,...' # noqa
),
default=''),
cfg.StrOpt('partitions',
help=(
                   'comma separated partitions '
'<partition name>=<partition_value>'
),
default='tmp:percentage=10%,var:percentage=30%,home:percentage=30%'), # noqa
cfg.StrOpt('network_mapping',
help=(
                   'comma separated network mapping '
'<network_type>=<interface_name>'
),
default=''),
cfg.StrOpt('package_config_json_file',
help='json formatted os config file',
default=''),
cfg.StrOpt('host_roles',
help=(
                   'semicolon separated host roles '
'<hostname>=<comma separated roles>'
),
default=''),
cfg.StrOpt('default_roles',
help=(
                   'comma separated default roles '
'<rolename>'
),
default=''),
cfg.IntOpt('action_timeout',
help='action timeout in seconds',
default=60),
cfg.IntOpt('deployment_timeout',
help='deployment timeout in minutes',
default=60),
cfg.IntOpt('progress_update_check_interval',
help='progress update status check interval in seconds',
default=60),
cfg.StrOpt('dashboard_url',
help='dashboard url',
default=''),
cfg.StrOpt('dashboard_link_pattern',
help='dashboard link pattern',
default=r'(?m)(http://\d+\.\d+\.\d+\.\d+:5000/v2\.0)'),
cfg.StrOpt('cluster_vip',
help='cluster ip address',
default=''),
cfg.StrOpt('enable_secgroup',
help='enable security group',
default='true'),
cfg.StrOpt('enable_vpnaas',
help='enable vpn as service',
default='true'),
cfg.StrOpt('enable_fwaas',
help='enable firewall as service',
default='true'),
cfg.StrOpt('network_cfg',
               help='network config file',
default=''),
cfg.StrOpt('neutron_cfg',
               help='network config file',
default=''),
cfg.StrOpt('cluster_pub_vip',
help='cluster ip address',
default=''),
cfg.StrOpt('cluster_prv_vip',
help='cluster ip address',
default=''),
cfg.StrOpt('repo_name',
help='repo name',
default=''),
cfg.StrOpt('deploy_type',
help='deploy type',
default='virtual'),
cfg.StrOpt('deploy_flag',
help='deploy flag',
default='deploy'),
cfg.StrOpt('rsa_file',
help='ssh rsa key file',
default=''),
cfg.StrOpt('odl_l3_agent',
help='odl l3 agent enable flag',
default='Disable'),
cfg.StrOpt('moon',
help='moon enable flag',
default='Disable'),
cfg.StrOpt('onos_sfc',
help='onos_sfc enable flag',
default='Disable'),
]
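# Illustrative invocation (every value below is hypothetical), showing the
# formats the options above expect:
#   client.py --switch_ips=10.1.0.2 \
#             --machines=00:11:22:33:44:55,00:11:22:33:44:66 \
#             --subnets=10.1.0.0/24 \
#             --hostnames=host1,host2 \
#             --host_networks='host1:eth0=10.1.0.50|is_mgmt;host2:eth0=10.1.0.51|is_mgmt' \
#             --host_roles='host1=controller;host2=compute'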
CONF.register_cli_opts(opts)
def is_role_unassigned(role):
return role
def _load_config(config_filename):
if not config_filename:
return {}
with open(config_filename) as config_file:
content = config_file.read()
return json.loads(content)
class CompassClient(object):
def __init__(self):
LOG.info("xh: compass_server=%s" % CONF.compass_server)
self.client = Client(CONF.compass_server)
self.subnet_mapping = {}
self.role_mapping = {}
self.host_mapping = {}
self.host_ips = defaultdict(list)
self.host_roles = {}
self.login()
def is_ok(self, status):
        return 200 <= status < 300
def login(self):
status, resp = self.client.get_token(
CONF.compass_user_email,
CONF.compass_user_password
)
LOG.info(
'login status: %s, resp: %s',
status, resp
)
if self.is_ok(status):
return resp["token"]
else:
            raise Exception(
                'failed to login %s with user %s' % (
                    CONF.compass_server,
                    CONF.compass_user_email
                )
            )
def get_machines(self):
status, resp = self.client.list_machines()
if not self.is_ok(status):
LOG.error(
'get all machines status: %s, resp: %s', status, resp)
raise RuntimeError('failed to get machines')
machines_to_add = list(set([
machine for machine in CONF.machines.split(',')
if machine
]))
machines_db = [str(m["mac"]) for m in resp]
LOG.info(
'machines in db: %s\n to add: %s',
machines_db,
machines_to_add)
if not set(machines_to_add).issubset(set(machines_db)):
            raise RuntimeError('unidentified machine(s) to add')
return [m["id"] for m in resp if str(m["mac"]) in machines_to_add]
def list_clusters(self):
status, resp = self.client.list_clusters(name=CONF.cluster_name)
if not self.is_ok(status) or not resp:
raise RuntimeError('failed to list cluster')
cluster = resp[0]
return cluster['id']
def get_adapter(self):
"""get adapter."""
status, resp = self.client.list_adapters(name=CONF.adapter_name)
LOG.info(
'get all adapters status: %s, resp: %s',
status, resp
)
if not self.is_ok(status) or not resp:
raise RuntimeError('failed to get adapters')
os_re = re.compile(CONF.adapter_os_pattern)
flavor_re = re.compile(CONF.adapter_flavor_pattern)
adapter_id = None
os_id = None
flavor_id = None
adapter = None
adapter = resp[0]
adapter_id = adapter['id']
for supported_os in adapter['supported_oses']:
if not os_re or os_re.match(supported_os['name']):
os_id = supported_os['os_id']
break
if 'flavors' in adapter:
for flavor in adapter['flavors']:
if not flavor_re or flavor_re.match(flavor['name']):
flavor_id = flavor['id']
break
assert(os_id and flavor_id)
return (adapter_id, os_id, flavor_id)
def add_subnets(self):
subnets = [
subnet for subnet in CONF.subnets.split(',')
if subnet
]
assert(subnets)
subnet_mapping = {}
_, subnets_in_db = self.client.list_subnets()
for subnet in subnets:
try:
netaddr.IPNetwork(subnet)
except:
raise RuntimeError('subnet %s format is invalid' % subnet)
if CONF.expansion == "false":
status, resp = self.client.add_subnet(subnet)
LOG.info('add subnet %s status %s response %s',
subnet, status, resp)
if not self.is_ok(status):
raise RuntimeError('failed to add subnet %s' % subnet)
subnet_mapping[resp['subnet']] = resp['id']
else:
for subnet_in_db in subnets_in_db:
if subnet == subnet_in_db['subnet']:
subnet_mapping[subnet] = subnet_in_db['id']
self.subnet_mapping = subnet_mapping
def add_cluster(self, adapter_id, os_id, flavor_id):
"""add a cluster."""
cluster_name = CONF.cluster_name
assert(cluster_name)
status, resp = self.client.add_cluster(
cluster_name, adapter_id,
os_id, flavor_id)
if not self.is_ok(status):
raise RuntimeError("add cluster failed")
LOG.info('add cluster %s status: %s resp:%s',
cluster_name, status, resp)
if isinstance(resp, list):
cluster = resp[0]
else:
cluster = resp
cluster_id = cluster['id']
flavor = cluster.get('flavor', {})
roles = flavor.get('roles', [])
for role in roles:
if role.get('optional', False):
self.role_mapping[role['name']] = ROLE_ASSIGNED
else:
self.role_mapping[role['name']] = ROLE_UNASSIGNED
return cluster_id
def add_cluster_hosts(self, cluster_id, machines):
hostnames = [
hostname for hostname in CONF.hostnames.split(',')
if hostname
]
machines = machines[-len(hostnames):]
assert(len(machines) == len(hostnames))
machines_dict = []
for machine_id, hostname in zip(machines, hostnames):
machines_dict.append({
'machine_id': machine_id,
'name': hostname
})
# add hosts to the cluster.
status, resp = self.client.add_hosts_to_cluster(
cluster_id,
{'machines': machines_dict})
LOG.info('add machines %s to cluster %s status: %s, resp: %s',
machines_dict, cluster_id, status, resp)
if not self.is_ok(status):
raise RuntimeError("add host to cluster failed")
for host in resp['hosts']:
if host['hostname'] in hostnames:
self.host_mapping[host['hostname']] = host['id']
if CONF.expansion == "false":
assert(len(self.host_mapping) == len(machines))
def set_cluster_os_config(self, cluster_id):
"""set cluster os config."""
os_config = {}
language = CONF.language
timezone = CONF.timezone
http_proxy = CONF.http_proxy
https_proxy = CONF.https_proxy
local_repo_url = CONF.local_repo_url
repo_name = CONF.repo_name
deploy_type = CONF.deploy_type
if not https_proxy and http_proxy:
https_proxy = http_proxy
no_proxy = [
no_proxy for no_proxy in CONF.no_proxy.split(',')
if no_proxy
]
compass_server = CONF.compass_server
if http_proxy:
for hostname, ips in self.host_ips.items():
no_proxy.append(hostname)
no_proxy.extend(ips)
ntp_server = CONF.ntp_server or compass_server
dns_servers = [
dns_server for dns_server in CONF.dns_servers.split(',')
if dns_server
]
if not dns_servers:
dns_servers = [compass_server]
domain = CONF.domain
if not domain:
raise Exception('domain is not defined')
search_path = [
search_path for search_path in CONF.search_path.split(',')
if search_path
]
if not search_path:
search_path = [domain]
default_gateway = CONF.default_gateway
if not default_gateway:
raise Exception('default gateway is not defined')
general_config = {
'language': language,
'timezone': timezone,
'ntp_server': ntp_server,
'dns_servers': dns_servers,
'default_gateway': default_gateway
}
if http_proxy:
general_config['http_proxy'] = http_proxy
if https_proxy:
general_config['https_proxy'] = https_proxy
if no_proxy:
general_config['no_proxy'] = no_proxy
if domain:
general_config['domain'] = domain
if search_path:
general_config['search_path'] = search_path
if local_repo_url:
general_config['local_repo'] = local_repo_url
if repo_name:
general_config['repo_name'] = repo_name
if deploy_type:
general_config['deploy_type'] = deploy_type
os_config["general"] = general_config
server_credential = CONF.server_credential
if '=' in server_credential:
server_username, server_password = server_credential.split('=', 1)
elif server_credential:
server_username = server_password = server_credential
else:
server_username = 'root'
server_password = 'root'
os_config['server_credentials'] = {
'username': server_username,
'password': server_password
}
partitions = [
partition for partition in CONF.partitions.split(',')
if partition
]
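        # Illustrative (hypothetical) CONF.partitions value and the structure the
        # loop below builds from it -- a sketch of the expected format, not taken
        # from a real deployment:
        #   CONF.partitions = '/var=30%,/home=5G'
        #   partition_config == {'/var': {'percentage': 30}, '/home': {'size': '5G'}}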
partition_config = {}
for partition in partitions:
assert("=" in partition)
partition_name, partition_value = partition.split('=', 1)
partition_name = partition_name.strip()
partition_value = partition_value.strip()
assert(partition_name and partition_value)
if partition_value.endswith('%'):
partition_type = 'percentage'
partition_value = int(partition_value[:-1])
else:
partition_type = 'size'
partition_config[partition_name] = {
partition_type: partition_value
}
os_config['partition'] = partition_config
"""
os_config_filename = CONF.os_config_json_file
if os_config_filename:
util.merge_dict(
os_config, _load_config(os_config_filename)
)
"""
status, resp = self.client.update_cluster_config(
cluster_id, os_config=os_config)
LOG.info(
'set os config %s to cluster %s status: %s, resp: %s',
os_config, cluster_id, status, resp)
if not self.is_ok(status):
raise RuntimeError('failed to set os config %s to cluster %s'
% (os_config, cluster_id))
def set_host_networking(self):
"""set cluster hosts networking."""
def get_subnet(ip_str):
try:
LOG.info("subnets: %s" % self.subnet_mapping.keys())
ip = netaddr.IPAddress(ip_str)
for cidr, subnet_id in self.subnet_mapping.items():
subnet = netaddr.IPNetwork(cidr)
if ip in subnet:
return True, subnet_id
LOG.info("ip %s not in %s" % (ip_str, cidr))
return False, None
except:
LOG.exception("ip addr %s is invalid" % ip_str)
return False, None
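        # Hypothetical CONF.host_networks value matching the parsing below
        # ('hostname:interface=ip|flag|...,next interface;next host') -- an
        # illustrative sketch only:
        #   'host1:eth0=10.1.0.50|is_mgmt,eth1=172.16.1.50|is_promiscuous;host2:eth0=10.1.0.51'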
for host_network in CONF.host_networks.split(';'):
hostname, networks_str = host_network.split(':', 1)
hostname = hostname.strip()
networks_str = networks_str.strip()
assert(hostname in self.host_mapping)
host_id = self.host_mapping[hostname]
intf_list = networks_str.split(',')
for intf_str in intf_list:
interface, intf_properties = intf_str.split('=', 1)
intf_properties = intf_properties.strip().split('|')
assert(intf_properties)
ip_str = intf_properties[0]
status, subnet_id = get_subnet(ip_str)
if not status:
raise RuntimeError("ip addr %s is invalid" % ip_str)
properties = dict([
(intf_property, True)
for intf_property in intf_properties[1:]
])
LOG.info(
'add host %s interface %s ip %s network properties %s',
hostname, interface, ip_str, properties)
status, response = self.client.add_host_network(
host_id, interface, ip=ip_str, subnet_id=subnet_id,
**properties
)
LOG.info(
'add host %s interface %s ip %s network properties %s '
'status %s: %s',
hostname, interface, ip_str, properties,
status, response
)
if not self.is_ok(status):
raise RuntimeError("add host network failed")
self.host_ips[hostname].append(ip_str)
def set_cluster_package_config(self, cluster_id):
"""set cluster package config."""
package_config = {"security": {}}
service_credentials = [
service_credential
for service_credential in CONF.service_credentials.split(',')
if service_credential
]
service_credential_cfg = {}
LOG.info(
'service credentials: %s', service_credentials
)
for service_credential in service_credentials:
if ':' not in service_credential:
raise Exception(
'there is no : in service credential %s' % service_credential # noqa
)
service_name, service_pair = service_credential.split(':', 1)
if '=' not in service_pair:
raise Exception(
'there is no = in service %s security' % service_name
)
username, password = service_pair.split('=', 1)
service_credential_cfg[service_name] = {
'username': username,
'password': password
}
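        # Example (hypothetical) CONF.service_credentials value and the mapping
        # built above, shown only to illustrate the expected 'name:user=password'
        # format:
        #   'mysql:root=secret,rabbitmq:guest=guest'
        #   -> {'mysql': {'username': 'root', 'password': 'secret'},
        #       'rabbitmq': {'username': 'guest', 'password': 'guest'}}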
console_credentials = [
console_credential
for console_credential in CONF.console_credentials.split(',')
if console_credential
]
LOG.info(
'console credentials: %s', console_credentials
)
console_credential_cfg = {}
for console_credential in console_credentials:
if ':' not in console_credential:
raise Exception(
'there is no : in console credential %s' % console_credential # noqa
)
console_name, console_pair = console_credential.split(':', 1)
if '=' not in console_pair:
raise Exception(
'there is no = in console %s security' % console_name
)
username, password = console_pair.split('=', 1)
console_credential_cfg[console_name] = {
'username': username,
'password': password
}
package_config["security"] = {"service_credentials": service_credential_cfg, # noqa
"console_credentials": console_credential_cfg} # noqa
network_mapping = dict([
network_pair.split('=', 1)
for network_pair in CONF.network_mapping.split(',')
if '=' in network_pair
])
package_config['network_mapping'] = network_mapping
assert(os.path.exists(CONF.network_cfg))
        network_cfg = yaml.safe_load(open(CONF.network_cfg))
package_config["network_cfg"] = network_cfg
assert(os.path.exists(CONF.neutron_cfg))
        neutron_cfg = yaml.safe_load(open(CONF.neutron_cfg))
package_config["neutron_config"] = neutron_cfg
"""
package_config_filename = CONF.package_config_json_file
if package_config_filename:
util.merge_dict(
package_config, _load_config(package_config_filename)
)
"""
package_config['ha_proxy'] = {}
if CONF.cluster_vip:
package_config["ha_proxy"]["vip"] = CONF.cluster_vip
package_config['enable_secgroup'] = (CONF.enable_secgroup == "true")
package_config['enable_fwaas'] = (CONF.enable_fwaas == "true")
package_config['enable_vpnaas'] = (CONF.enable_vpnaas == "true")
package_config[
'odl_l3_agent'] = "Enable" if CONF.odl_l3_agent == "Enable" else "Disable" # noqa
package_config[
'moon'] = "Enable" if CONF.moon == "Enable" else "Disable"
package_config[
'onos_sfc'] = "Enable" if CONF.onos_sfc == "Enable" else "Disable"
status, resp = self.client.update_cluster_config(
cluster_id, package_config=package_config)
LOG.info(
'set package config %s to cluster %s status: %s, resp: %s',
package_config, cluster_id, status, resp)
if not self.is_ok(status):
raise RuntimeError("set cluster package_config failed")
def set_host_roles(self, cluster_id, host_id, roles):
status, response = self.client.update_cluster_host(
cluster_id, host_id, roles=roles)
LOG.info(
'set cluster %s host %s roles %s status %s: %s',
cluster_id, host_id, roles, status, response
)
if not self.is_ok(status):
raise RuntimeError("set host roles failed")
for role in roles:
if role in self.role_mapping:
self.role_mapping[role] = ROLE_ASSIGNED
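    # set_all_hosts_roles below expects CONF.host_roles in the form
    # 'hostname=role1,role2;hostname2=role3' (illustrative sketch, e.g.
    # 'host1=controller,ha;host2=compute'); hosts left out of the string are
    # filled with any still-unassigned roles and then with CONF.default_roles.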
def set_all_hosts_roles(self, cluster_id):
for host_str in CONF.host_roles.split(';'):
host_str = host_str.strip()
hostname, roles_str = host_str.split('=', 1)
assert(hostname in self.host_mapping)
host_id = self.host_mapping[hostname]
roles = [role.strip() for role in roles_str.split(',') if role]
self.set_host_roles(cluster_id, host_id, roles)
self.host_roles[hostname] = roles
unassigned_hostnames = list(set(self.host_mapping.keys()) - set(self.host_roles.keys())) # noqa
unassigned_roles = [role for role, status in self.role_mapping.items()
if is_role_unassigned(status)]
assert(len(unassigned_hostnames) >= len(unassigned_roles))
        for hostname, role in zip(unassigned_hostnames, unassigned_roles):
host_id = self.host_mapping[hostname]
self.set_host_roles(cluster_id, host_id, [role])
self.host_roles[hostname] = [role]
unassigned_hostnames = list(set(self.host_mapping.keys()) - set(self.host_roles.keys())) # noqa
if not unassigned_hostnames:
return
# assign default roles to unassigned hosts
default_roles = [
role for role in CONF.default_roles.split(',')
if role
]
assert(default_roles)
cycle_roles = itertools.cycle(default_roles)
for hostname in unassigned_hostnames:
host_id = self.host_mapping[hostname]
            roles = [next(cycle_roles)]
self.set_host_roles(cluster_id, host_id, roles)
self.host_roles[hostname] = roles
def deploy_clusters(self, cluster_id):
        host_ids = list(self.host_mapping.values())
status, response = self.client.review_cluster(
cluster_id, review={'hosts': host_ids}
)
LOG.info(
'review cluster %s hosts %s, status %s: %s',
cluster_id, host_ids, status, response
)
        # TODO: what is this doing?
if not self.is_ok(status):
raise RuntimeError("review cluster host failed")
status, response = self.client.deploy_cluster(
cluster_id, deploy={'hosts': host_ids}
)
LOG.info(
'deploy cluster %s hosts %s status %s: %s',
cluster_id, host_ids, status, response
)
if not self.is_ok(status):
raise RuntimeError("deploy cluster failed")
def redeploy_clusters(self, cluster_id):
status, response = self.client.redeploy_cluster(
cluster_id
)
if not self.is_ok(status):
LOG.info(
'deploy cluster %s status %s: %s',
cluster_id, status, response
)
raise RuntimeError("redeploy cluster failed")
def get_cluster_state(self, cluster_id):
for _ in range(10):
try:
status, cluster_state = self.client.get_cluster_state(
cluster_id)
if self.is_ok(status):
break
except:
status = 500
cluster_state = ""
LOG.error("can not get cluster %s's state, try again" % cluster_id)
time.sleep(6)
return status, cluster_state
def get_installing_progress(self, cluster_id):
def _get_installing_progress():
"""get intalling progress."""
deployment_timeout = time.time() + 60 * float(CONF.deployment_timeout) # noqa
current_time = time.time
while current_time() < deployment_timeout:
status, cluster_state = self.get_cluster_state(cluster_id)
if not self.is_ok(status):
raise RuntimeError("can not get cluster state")
elif cluster_state['state'] == 'SUCCESSFUL':
LOG.info(
'get cluster %s state status %s: %s, successful',
cluster_id, status, cluster_state
)
break
                elif cluster_state['state'] == 'ERROR':
                    raise RuntimeError(
                        'get cluster %s state status %s: %s, error' % (
                            cluster_id, status, cluster_state))
time.sleep(5)
if current_time() >= deployment_timeout:
LOG.info("current_time=%s, deployment_timeout=%s"
% (current_time(), deployment_timeout))
LOG.info("cobbler status:")
os.system("docker exec -it compass-cobbler bash -c 'cobbler status'")
raise RuntimeError("installation timeout")
try:
_get_installing_progress()
finally:
# do this twice, make sure process be killed
kill_print_proc()
kill_print_proc()
def check_dashboard_links(self, cluster_id):
dashboard_url = CONF.dashboard_url
if not dashboard_url:
            LOG.info('no dashboard url set')
return
dashboard_link_pattern = re.compile(
CONF.dashboard_link_pattern)
r = requests.get(dashboard_url, verify=False)
r.raise_for_status()
match = dashboard_link_pattern.search(r.text)
if match:
LOG.info(
'dashboard login page for cluster %s can be downloaded',
cluster_id)
else:
msg = (
'%s failed to be downloaded\n'
'the context is:\n%s\n'
) % (dashboard_url, r.text)
raise Exception(msg)
def print_ansible_log():
os.system("docker exec compass-tasks bash -c \
'while ! tail -f \
/var/ansible/run/%s-%s/ansible.log 2>/dev/null; do :; \
sleep 1; done'" %
(CONF.adapter_name, CONF.cluster_name))
def kill_print_proc():
os.system(
"ps aux|grep -v grep|grep -E 'ssh.+root@192.168.200.2'|awk '{print $2}'|xargs kill -9") # noqa
def deploy():
if CONF.expansion == "false":
client = CompassClient()
machines = client.get_machines()
LOG.info('machines are %s', machines)
client.add_subnets()
adapter_id, os_id, flavor_id = client.get_adapter()
cluster_id = client.add_cluster(adapter_id, os_id, flavor_id)
client.add_cluster_hosts(cluster_id, machines)
client.set_host_networking()
client.set_cluster_os_config(cluster_id)
if flavor_id:
client.set_cluster_package_config(cluster_id)
client.set_all_hosts_roles(cluster_id)
client.deploy_clusters(cluster_id)
LOG.info("compass OS installtion is begin")
threading.Thread(target=print_ansible_log).start()
client.get_installing_progress(cluster_id)
client.check_dashboard_links(cluster_id)
else:
client = CompassClient()
machines = client.get_machines()
LOG.info('machines are %s', machines)
client.add_subnets()
status, response = client.client.list_clusters()
cluster_id = 1
for cluster in response:
if cluster['name'] == CONF.cluster_name:
cluster_id = cluster['id']
client.add_cluster_hosts(cluster_id, machines)
client.set_host_networking()
client.set_cluster_os_config(cluster_id)
client.set_cluster_package_config(cluster_id)
client.set_all_hosts_roles(cluster_id)
client.deploy_clusters(cluster_id)
threading.Thread(target=print_ansible_log).start()
client.get_installing_progress(cluster_id)
def redeploy():
client = CompassClient()
cluster_id = client.list_clusters()
client.redeploy_clusters(cluster_id)
client.get_installing_progress(cluster_id)
client.check_dashboard_links(cluster_id)
def main():
if CONF.deploy_flag == "redeploy":
redeploy()
else:
deploy()
if __name__ == "__main__":
CONF(args=sys.argv[1:])
main()
|
run.py
|
import os
import time
import torch
import numpy as np
import numpy.random as rd
import multiprocessing as mp
# from elegantrl.env import build_env
from elegantrl.env import build_isaac_gym_env as build_env # todo isaac
from elegantrl.replay import ReplayBuffer, ReplayBufferMP
from elegantrl.evaluator import Evaluator
"""[ElegantRL.2021.09.09](https://github.com/AI4Finance-LLC/ElegantRL)"""
class Arguments:
def __init__(self, if_on_policy=False):
self.env = None # the environment for training
self.agent = None # Deep Reinforcement Learning algorithm
'''Arguments for training'''
self.gamma = 0.99 # discount factor of future rewards
        self.reward_scale = 2 ** 0  # an approximate target reward, usually close to 256
self.learning_rate = 2 ** -15 # 2 ** -14 ~= 3e-5
self.soft_update_tau = 2 ** -8 # 2 ** -8 ~= 5e-3
self.if_on_policy = if_on_policy
if self.if_on_policy: # (on-policy)
self.net_dim = 2 ** 9 # the network width
self.batch_size = self.net_dim * 2 # num of transitions sampled from replay buffer.
            self.repeat_times = 2 ** 3  # repeatedly update the network to keep the critic's loss small
            self.target_step = 2 ** 12  # collect target_step transitions, then update the network
self.max_memo = self.target_step # capacity of replay buffer
self.if_per_or_gae = False # GAE for on-policy sparse reward: Generalized Advantage Estimation.
else:
self.net_dim = 2 ** 8 # the network width
self.batch_size = self.net_dim # num of transitions sampled from replay buffer.
self.repeat_times = 2 ** 0 # repeatedly update network to keep critic's loss small
self.target_step = 2 ** 10 # collect target_step, then update network
self.max_memo = 2 ** 21 # capacity of replay buffer
self.if_per_or_gae = False # PER for off-policy sparse reward: Prioritized Experience Replay.
'''Arguments for device'''
self.env_num = 1 # The Environment number for each worker. env_num == 1 means don't use VecEnv.
        self.worker_num = 2  # number of rollout workers per GPU (adjust it to get high GPU usage)
        self.thread_num = 8  # number of CPU threads, applied via torch.set_num_threads(self.thread_num)
self.visible_gpu = '0' # for example: os.environ['CUDA_VISIBLE_DEVICES'] = '0, 2,'
self.random_seed = 0 # initialize random seed in self.init_before_training()
'''Arguments for evaluate and save'''
self.cwd = None # current work directory. None means set automatically
self.if_remove = True # remove the cwd folder? (True, False, None:ask me)
self.break_step = 2 ** 20 # break training after 'total_step > break_step'
self.if_allow_break = True # allow break training when reach goal (early termination)
self.eval_env = None # the environment for evaluating. None means set automatically.
self.eval_gap = 2 ** 7 # evaluate the agent per eval_gap seconds
        self.eval_times1 = 2 ** 3  # number of episode returns collected in the first (quick) evaluation
        self.eval_times2 = 2 ** 4  # number of episode returns collected in the second (thorough) evaluation
self.eval_device_id = -1 # -1 means use cpu, >=0 means use GPU
def init_before_training(self, if_main):
np.random.seed(self.random_seed)
torch.manual_seed(self.random_seed)
torch.set_num_threads(self.thread_num)
torch.set_default_dtype(torch.float32)
os.environ['CUDA_VISIBLE_DEVICES'] = str(self.visible_gpu)
'''env'''
if self.env is None:
raise RuntimeError(f'\n| Why env=None? For example:'
f'\n| args.env = XxxEnv()'
f'\n| args.env = str(env_name)'
f'\n| args.env = build_env(env_name), from elegantrl.env import build_env')
if not (isinstance(self.env, str) or hasattr(self.env, 'env_name')):
raise RuntimeError('\n| What is env.env_name? use env=PreprocessEnv(env).')
'''agent'''
if self.agent is None:
raise RuntimeError(f'\n| Why agent=None? Assignment `args.agent = AgentXXX` please.')
if not hasattr(self.agent, 'init'):
raise RuntimeError(f"\n| why hasattr(self.agent, 'init') == False"
f'\n| Should be `agent=AgentXXX()` instead of `agent=AgentXXX`.')
if self.agent.if_on_policy != self.if_on_policy:
raise RuntimeError(f'\n| Why bool `if_on_policy` is not consistent?'
f'\n| self.if_on_policy: {self.if_on_policy}'
f'\n| self.agent.if_on_policy: {self.agent.if_on_policy}')
'''cwd'''
if self.cwd is None:
agent_name = self.agent.__class__.__name__
env_name = getattr(self.env, 'env_name', self.env)
self.cwd = f'./{agent_name}_{env_name}_{self.visible_gpu}'
if if_main:
# remove history according to bool(if_remove)
if self.if_remove is None:
self.if_remove = bool(input(f"| PRESS 'y' to REMOVE: {self.cwd}? ") == 'y')
elif self.if_remove:
import shutil
shutil.rmtree(self.cwd, ignore_errors=True)
print(f"| Remove cwd: {self.cwd}")
os.makedirs(self.cwd, exist_ok=True)
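# Minimal usage sketch for Arguments with the single-process trainer below.
# AgentPPO and the environment name are illustrative assumptions (note that this
# file aliases build_isaac_gym_env as build_env, so a gym-style env may need the
# regular elegantrl.env.build_env instead):
#   from elegantrl.agent import AgentPPO
#   args = Arguments(if_on_policy=True)
#   args.agent = AgentPPO()
#   args.env = build_env('Pendulum-v1')
#   train_and_evaluate(args)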
'''single processing training'''
def train_and_evaluate(args, agent_id=0):
args.init_before_training(if_main=True)
env = build_env(args.env, if_print=False)
'''init: Agent'''
agent = args.agent
agent.init(args.net_dim, env.state_dim, env.action_dim, args.learning_rate, args.if_per_or_gae, args.env_num)
agent.save_or_load_agent(args.cwd, if_save=False)
if env.env_num == 1:
agent.states = [env.reset(), ]
assert isinstance(agent.states[0], np.ndarray)
assert agent.states[0].shape == (env.state_dim,)
else:
agent.states = env.reset()
assert isinstance(agent.states, torch.Tensor)
assert agent.states.shape == (env.env_num, env.state_dim)
'''init Evaluator'''
eval_env = args.eval_env if args.eval_env else build_env(env)
evaluator = Evaluator(args.cwd, agent_id, agent.device, eval_env,
args.eval_gap, args.eval_times1, args.eval_times2)
evaluator.save_or_load_recoder(if_save=False)
'''init ReplayBuffer'''
if agent.if_on_policy:
buffer = list()
def update_buffer(_traj_list):
buffer[:] = _traj_list[0] # (ten_state, ten_reward, ten_mask, ten_action, ten_noise)
_step, _r_exp = get_step_r_exp(ten_reward=buffer[1])
return _step, _r_exp
else:
buffer = ReplayBuffer(max_len=args.max_memo, state_dim=env.state_dim,
action_dim=1 if env.if_discrete else env.action_dim,
if_use_per=args.if_per_or_gae)
buffer.save_or_load_history(args.cwd, if_save=False)
def update_buffer(_traj_list):
ten_state, ten_other = _traj_list[0]
buffer.extend_buffer(ten_state, ten_other)
_steps, _r_exp = get_step_r_exp(ten_reward=ten_other[0]) # other = (reward, mask, action)
return _steps, _r_exp
"""start training"""
cwd = args.cwd
gamma = args.gamma
break_step = args.break_step
batch_size = args.batch_size
target_step = args.target_step
repeat_times = args.repeat_times
reward_scale = args.reward_scale
if_allow_break = args.if_allow_break
soft_update_tau = args.soft_update_tau
del args
'''init ReplayBuffer after training start'''
if not agent.if_on_policy:
if_load = buffer.save_or_load_history(cwd, if_save=False)
if not if_load:
traj_list = agent.explore_env(env, target_step, reward_scale, gamma)
steps, r_exp = update_buffer(traj_list)
evaluator.total_step += steps
'''start training loop'''
if_train = True
while if_train:
with torch.no_grad():
traj_list = agent.explore_env(env, target_step, reward_scale, gamma)
steps, r_exp = update_buffer(traj_list)
logging_tuple = agent.update_net(buffer, batch_size, repeat_times, soft_update_tau)
with torch.no_grad():
temp = evaluator.evaluate_and_save(agent.act, steps, r_exp, logging_tuple)
if_reach_goal, if_save = temp
if_train = not ((if_allow_break and if_reach_goal)
or evaluator.total_step > break_step
or os.path.exists(f'{cwd}/stop'))
print(f'| UsedTime: {time.time() - evaluator.start_time:>7.0f} | SavedDir: {cwd}')
env.close()
agent.save_or_load_agent(cwd, if_save=True)
    if not agent.if_on_policy:
        buffer.save_or_load_history(cwd, if_save=True)
evaluator.save_or_load_recoder(if_save=True)
def get_step_r_exp(ten_reward):
return len(ten_reward), ten_reward.mean().item()
'''multiple processing training'''
class PipeWorker:
def __init__(self, env_num, worker_num):
self.env_num = env_num
self.worker_num = worker_num
self.pipes = [mp.Pipe() for _ in range(worker_num)]
self.pipe1s = [pipe[1] for pipe in self.pipes]
def explore(self, agent):
act_dict = agent.act.state_dict()
for worker_id in range(self.worker_num):
self.pipe1s[worker_id].send(act_dict)
traj_lists = [pipe1.recv() for pipe1 in self.pipe1s]
return traj_lists
def run(self, args, comm_env, worker_id, learner_id):
# print(f'| os.getpid()={os.getpid()} PipeExplore.run {learner_id}')
args.init_before_training(if_main=False)
'''init Agent'''
env = build_env(args.env, if_print=False)
agent = args.agent
agent.init(args.net_dim, env.state_dim, env.action_dim,
args.learning_rate, args.if_per_or_gae, args.env_num, learner_id)
'''loop'''
gamma = args.gamma
target_step = args.target_step
reward_scale = args.reward_scale
del args
if comm_env:
env = comm_env
agent.states = env.reset()
else:
agent.states = [env.reset(), ]
with torch.no_grad():
while True:
act_dict = self.pipes[worker_id][0].recv()
agent.act.load_state_dict(act_dict)
trajectory = agent.explore_env(env, target_step, reward_scale, gamma)
self.pipes[worker_id][0].send(trajectory)
def get_comm_data(agent):
act = list(agent.act.parameters())
cri_optim = get_optim_parameters(agent.cri_optim)
if agent.cri is agent.act:
cri = None
act_optim = None
else:
cri = list(agent.cri.parameters())
act_optim = get_optim_parameters(agent.act_optim)
act_target = list(agent.act_target.parameters()) if agent.if_use_act_target else None
cri_target = list(agent.cri_target.parameters()) if agent.if_use_cri_target else None
return act, act_optim, cri, cri_optim, act_target, cri_target # data
class PipeLearner:
def __init__(self, learner_num):
self.learner_num = learner_num
self.round_num = int(np.log2(learner_num))
self.pipes = [mp.Pipe() for _ in range(learner_num)]
pipes = [mp.Pipe() for _ in range(learner_num)]
self.pipe0s = [pipe[0] for pipe in pipes]
self.pipe1s = [pipe[1] for pipe in pipes]
self.device_list = [torch.device(f'cuda:{i}') for i in range(learner_num)]
if learner_num == 1:
self.idx_l = None
elif learner_num == 2:
self.idx_l = [(1,), (0,), ]
elif learner_num == 4:
self.idx_l = [(1, 2), (0, 3),
(3, 0), (2, 1), ]
elif learner_num == 8:
self.idx_l = [(1, 2, 4), (0, 3, 5),
(3, 0, 6), (2, 1, 7),
(5, 6, 0), (4, 7, 1),
(7, 4, 2), (6, 5, 3), ]
else:
print(f"| LearnerPipe, ERROR: learner_num {learner_num} should in (1, 2, 4, 8)")
exit()
def comm_data(self, data, learner_id, round_id):
if round_id == -1:
learner_jd = self.idx_l[learner_id][round_id]
self.pipes[learner_jd][0].send(data)
return self.pipes[learner_id][1].recv()
else:
learner_jd = self.idx_l[learner_id][round_id]
self.pipe0s[learner_jd].send(data)
return self.pipe1s[learner_id].recv()
def comm_network_optim(self, agent, learner_id):
device = self.device_list[learner_id]
for round_id in range(self.round_num):
data = get_comm_data(agent)
data = self.comm_data(data, learner_id, round_id)
if data:
avg_update_net(agent.act, data[0], device)
avg_update_optim(agent.act_optim, data[1], device) if data[1] else None
avg_update_net(agent.cri, data[2], device) if data[2] else None
avg_update_optim(agent.cri_optim, data[3], device)
avg_update_net(agent.act_target, data[4], device) if agent.if_use_act_target else None
avg_update_net(agent.cri_target, data[5], device) if agent.if_use_cri_target else None
def run(self, args, comm_eva, comm_exp, learner_id=0):
# print(f'| os.getpid()={os.getpid()} PipeLearn.run, {learner_id}')
args.init_before_training(if_main=learner_id == 0)
# env = build_env(args.env, if_print=False)
if_on_policy = args.if_on_policy
'''init Agent'''
agent = args.agent
# agent.init(args.net_dim, env.state_dim, env.action_dim,
# args.learning_rate, args.if_per_or_gae, args.env_num, learner_id)
agent.init(args.net_dim, args.state_dim, args.action_dim,
args.learning_rate, args.if_per_or_gae, args.env_num, learner_id) # todo isaac
agent.save_or_load_agent(args.cwd, if_save=False)
'''init ReplayBuffer'''
if if_on_policy:
buffer = list()
def update_buffer(_traj_list):
_traj_list = list(map(list, zip(*_traj_list)))
_traj_list = [torch.cat(t, dim=0) for t in _traj_list]
buffer[:] = _traj_list # (ten_state, ten_reward, ten_mask, ten_action, ten_noise)
_step, _r_exp = get_step_r_exp(ten_reward=buffer[1])
return _step, _r_exp
else:
buffer_num = args.worker_num * args.env_num
if self.learner_num > 1:
buffer_num *= 2
# buffer = ReplayBufferMP(max_len=args.max_memo, state_dim=env.state_dim,
# action_dim=1 if env.if_discrete else env.action_dim,
# if_use_per=args.if_per_or_gae,
# buffer_num=buffer_num, gpu_id=learner_id)
buffer = ReplayBufferMP(max_len=args.max_memo, state_dim=args.state_dim,
action_dim=1 if args.if_discrete else args.action_dim,
if_use_per=args.if_per_or_gae,
buffer_num=buffer_num, gpu_id=learner_id) # todo isaac
buffer.save_or_load_history(args.cwd, if_save=False)
def update_buffer(_traj_list):
step_sum = 0
r_exp_sum = 0
for buffer_i, (ten_state, ten_other) in enumerate(_traj_list):
buffer.buffers[buffer_i].extend_buffer(ten_state, ten_other)
step_r_exp = get_step_r_exp(ten_reward=ten_other[:, 0]) # other = (reward, mask, action)
step_sum += step_r_exp[0]
r_exp_sum += step_r_exp[1]
return step_sum, r_exp_sum / len(_traj_list)
'''start training'''
cwd = args.cwd
batch_size = args.batch_size
repeat_times = args.repeat_times
soft_update_tau = args.soft_update_tau
del args
if_train = True
while if_train:
traj_lists = comm_exp.explore(agent)
if self.learner_num > 1:
data = self.comm_data(traj_lists, learner_id, round_id=-1)
traj_lists.extend(data)
traj_list = sum(traj_lists, list())
steps, r_exp = update_buffer(traj_list)
del traj_lists
logging_tuple = agent.update_net(buffer, batch_size, repeat_times, soft_update_tau)
if self.learner_num > 1:
self.comm_network_optim(agent, learner_id)
if comm_eva:
if_train, if_save = comm_eva.evaluate_and_save_mp(agent.act, steps, r_exp, logging_tuple)
agent.save_or_load_agent(cwd, if_save=True)
if not if_on_policy:
print(f"| LearnerPipe.run: ReplayBuffer saving in {cwd}")
buffer.save_or_load_history(cwd, if_save=True)
class PipeEvaluator:
def __init__(self):
super().__init__()
self.pipe0, self.pipe1 = mp.Pipe()
def evaluate_and_save_mp(self, agent_act, steps, r_exp, logging_tuple):
if self.pipe1.poll(): # if_evaluator_idle
if_train, if_save = self.pipe1.recv()
act_cpu_dict = {k: v.cpu() for k, v in agent_act.state_dict().items()}
else:
if_train, if_save = True, False
act_cpu_dict = None
self.pipe1.send((act_cpu_dict, steps, r_exp, logging_tuple))
return if_train, if_save
def run(self, args, agent_id):
# print(f'| os.getpid()={os.getpid()} PipeEvaluate.run {agent_id}')
args.init_before_training(if_main=False)
'''init: Agent'''
# eval_env = args.eval_env if args.eval_env else build_env(env, if_print=False)
eval_env = build_env(args.eval_env, if_print=False) # todo isaac
env = eval_env
agent = args.agent
agent.init(args.net_dim, env.state_dim, env.action_dim, args.learning_rate,
args.if_per_or_gae, args.env_num, agent_id=args.eval_device_id)
agent.save_or_load_agent(args.cwd, if_save=False)
act_cpu = agent.act
act_cpu.eval()
[setattr(param, 'requires_grad', False) for param in act_cpu.parameters()]
'''init Evaluator'''
evaluator = Evaluator(args.cwd, agent_id, agent.device, eval_env,
args.eval_gap, args.eval_times1, args.eval_times2)
evaluator.save_or_load_recoder(if_save=False)
del agent
del env
'''loop'''
cwd = args.cwd
break_step = args.break_step
if_allow_break = args.if_allow_break
del args
if_save = False
if_train = True
if_reach_goal = False
with torch.no_grad():
while if_train:
act_cpu_dict, steps, r_exp, logging_tuple = self.pipe0.recv()
if act_cpu_dict:
act_cpu.load_state_dict(act_cpu_dict)
if_reach_goal, if_save = evaluator.evaluate_and_save(act_cpu, steps, r_exp, logging_tuple)
else:
evaluator.total_step += steps
if_train = not ((if_allow_break and if_reach_goal)
or evaluator.total_step > break_step
or os.path.exists(f'{cwd}/stop'))
self.pipe0.send((if_train, if_save))
print(f'| UsedTime: {time.time() - evaluator.start_time:>7.0f} | SavedDir: {cwd}')
evaluator.save_or_load_recoder(if_save=True)
class PipeVectorEnv:
def __init__(self, args):
self.env_num = args.env_num
self.pipes = [mp.Pipe() for _ in range(self.env_num)]
self.pipe0s = [pipe[0] for pipe in self.pipes]
env = build_env(args.eval_env)
self.max_step = env.max_step
self.env_name = env.env_name
self.state_dim = env.state_dim
self.action_dim = env.action_dim
self.action_max = env.action_max
self.if_discrete = env.if_discrete
self.target_return = env.target_return
del env
self.process = list()
for env_id in range(args.env_num):
self.process.append(mp.Process(target=self.run, args=(args, env_id)))
args.random_seed += 1 # set different for each env
# [p.start() for p in self.process]
def reset(self):
vec_state = [pipe0.recv() for pipe0 in self.pipe0s]
return vec_state
def step(self, vec_action): # pipe0_step
for i in range(self.env_num):
self.pipe0s[i].send(vec_action[i])
return [pipe0.recv() for pipe0 in self.pipe0s] # list of (state, reward, done)
def run(self, args, env_id):
np.random.seed(args.random_seed)
env = build_env(args.eval_env, if_print=False)
pipe1 = self.pipes[env_id][1]
del args
state = env.reset()
pipe1.send(state)
while True:
action = pipe1.recv()
state, reward, done, _ = env.step(action)
pipe1.send((env.reset() if done else state, reward, done))
# def check(self):
# vec_state = self.reset()
# ten_state = np.array(vec_state)
# print(ten_state.shape)
#
# vec_action = np.array(((0.0, 1.0, 0.0),
# (0.0, 0.5, 0.0),
# (0.0, 0.1, 0.0),))[:self.env_num]
# assert self.env_num <= 3
#
# trajectory_list = list()
# for _ in range(8):
# s_r_d_list = self.step(vec_action)
# ten_state = np.array([s_r_d[0] for s_r_d in s_r_d_list])
# print(ten_state.shape)
# trajectory_list.append(s_r_d_list)
#
# trajectory_list = list(map(list, zip(*trajectory_list))) # 2D-list transpose
# print('| shape of trajectory_list:', len(trajectory_list), len(trajectory_list[0]))
def train_and_evaluate_mp(args, agent_id=0):
process = list()
    mp.set_start_method(method='spawn', force=True)  # force all multiprocessing to use the 'spawn' start method
'''learner'''
learner_num = get_num_learner(args.visible_gpu)
learner_pipe = PipeLearner(learner_num)
for learner_id in range(learner_num):
'''evaluator'''
if learner_id == learner_num - 1:
evaluator_pipe = PipeEvaluator()
process.append(mp.Process(target=evaluator_pipe.run, args=(args, agent_id)))
else:
evaluator_pipe = None
'''explorer'''
worker_pipe = PipeWorker(args.env_num, args.worker_num)
for worker_id in range(args.worker_num):
if args.env_num == 1:
env_pipe = None
else:
env_pipe = PipeVectorEnv(args)
process.extend(env_pipe.process)
process.append(mp.Process(target=worker_pipe.run, args=(args, env_pipe, worker_id, learner_id)))
process.append(mp.Process(target=learner_pipe.run, args=(args, evaluator_pipe, worker_pipe, learner_id)))
[(p.start(), time.sleep(0.1)) for p in process]
process[-1].join()
process_safely_terminate(process)
"""Utils"""
def get_num_learner(visible_gpu):
    assert isinstance(visible_gpu, str)  # visible_gpu may be in {'0', '1', '1,', '1,2', '1,2,'}
    gpu_ids = [gpu_id for gpu_id in visible_gpu.split(',') if gpu_id]
    num_learner = max(1, len(gpu_ids))  # parse without eval(); trailing commas are ignored
    return num_learner
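# For example: get_num_learner('0') -> 1, get_num_learner('1,2') -> 2,
# get_num_learner('1,2,') -> 2.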
def process_safely_terminate(process):
for p in process:
try:
p.kill()
except OSError as e:
print(e)
pass
def get_optim_parameters(optim): # for avg_update_optim()
params_list = list()
for params_dict in optim.state_dict()['state'].values():
params_list.extend([t for t in params_dict.values() if isinstance(t, torch.Tensor)])
return params_list
def avg_update_optim(dst_optim, src_optim_param, device):
for dst, src in zip(get_optim_parameters(dst_optim), src_optim_param):
dst.data.copy_((dst.data + src.data.to(device)) * 0.5)
# dst.data.copy_(src.data * tau + dst.data * (1 - tau))
def avg_update_net(dst_net, src_net_param, device):
for dst, src in zip(dst_net.parameters(), src_net_param):
dst.data.copy_((dst.data + src.data.to(device)) * 0.5)
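# Illustrative use of the averaging helpers above (a sketch with two throwaway
# torch modules, not part of the training pipeline):
#   net_a, net_b = torch.nn.Linear(4, 2), torch.nn.Linear(4, 2)
#   avg_update_net(net_a, list(net_b.parameters()), torch.device('cpu'))
#   # net_a's parameters now hold the element-wise mean of the two networks.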
|
slack.py
|
from slackclient import SlackClient
import json
import requests
from threading import Thread
from utils import *
slack_token = 'YOUR-SLACK-TOKEN-HERE'
slack_client = SlackClient(slack_token)
#just for example
allowedChannels = ['G7N3MRCCS','G7P2M4CCS']
#slack common utils
def isIntendedMessage(event):
    """Returns True for plain channel messages (no subtype and a channel present)."""
    if 'subtype' in event:
        return False
    return 'channel' in event
def isAllowedChannel(event,allowedChannels):
try:
return event['channel'] in allowedChannels
except:
return False
def isDM(channel):
try:
return channel.startswith("D")
except:
return False
def handle(event,triggerWord,callbacks,restrictions=None):
'''
    Handles all the incoming command messages.
:param event: slack event (json/dict format)
:param triggerWord: The word that triggers the command. (EX: !echo)
:param restrictions: A list of booleans that give the command special rules.
EX: has to be in a certain channel, said by a certain person, etc.
:param callbacks: List of functions to call when the parameters are satisfied.
:return: None
'''
    if restrictions is None:
restrictions = []
if event['text'].startswith(f"!{triggerWord}") and False not in restrictions:
try:
context = event['text'].split(f"!{triggerWord} ")[1]
except:
context = ""
for callback in callbacks:
            Thread(target=callback, args=(context, event), daemon=True).start()
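# For example, handle(event, 'ping', [pong]) would run a hypothetical
# pong(context, event) callback in its own thread whenever a message starts
# with '!ping'; parseInput below wires up the real commands this way.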
def parseInput(events):
for event in events:
        if event['type'] == 'message' and isIntendedMessage(event):
try:
handle(event,'echo',[echo],restrictions=[isAllowedChannel(event,allowedChannels)])
handle(event,'user',[findUserByID],restrictions=[isDM(event['channel'])])
except Exception as e:
log(str(e),prefix="HANDLER ERROR",color='red')
#Example slackbot functions
def echo(context,event):
Thread(
target=slack_client.api_call,
args=("chat.postMessage",),
kwargs=dict(
channel=event['channel'],
text=context
),
daemon=True
).start()
def findUserByID(context,event):
Thread(
target=slack_client.api_call,
args=("chat.postMessage",),
kwargs=dict(
channel=event['channel'],
text=f"User: <@{context}>"
),
daemon=True
).start()
if __name__ == "__main__":
if slack_client.rtm_connect(with_team_state=False):
log("Bot connected and running!",prefix='SYSTEM',color='blue')
# Read bot's user ID by calling Web API method `auth.test`
id = slack_client.api_call("auth.test")["user_id"]
while True:
parseInput(slack_client.rtm_read())
else:
log("Connection failed.",prefix='SYSTEM',color='red')
|
executor.py
|
import functools
import json
import logging
import signal
import time
from threading import Event, Lock, Thread, Timer
import os
import pymesos as pm
import cook
import cook.io_helper as cio
import cook.progress as cp
import cook.subprocess as cs
import cook.util as cu
def get_task_id(task):
"""Retrieves the id of the task.
Parameters
----------
task: dictionary
The task
Returns
-------
the id of the task.
"""
return task['task_id']['value']
class StatusUpdater(object):
"""Sends status updates for the task."""
def __init__(self, driver, task_id):
"""
Parameters
----------
driver: MesosExecutorDriver
The driver to send the status update to.
task_id: dictionary
The task whose status update to send.
"""
self.driver = driver
self.lock = Lock()
self.task_id = task_id
self.terminal_states = {cook.TASK_ERROR, cook.TASK_FAILED, cook.TASK_FINISHED, cook.TASK_KILLED}
self.terminal_status_sent = False
def create_status(self, task_state, reason=None):
"""Creates a dictionary representing the task status.
Parameters
----------
task_state: string
The state of the task to report.
reason: string
The reason for the task state.
Returns
-------
a status dictionary that can be sent to the driver.
"""
task_status = {'state': task_state,
'task_id': {'value': self.task_id},
'timestamp': time.time()}
if reason:
task_status['reason'] = reason
return task_status
def update_status(self, task_state, reason=None):
"""Sends the status using the driver.
Parameters
----------
task_state: string
The state of the task which will be sent to the driver.
reason: string
The reason for the task state.
Returns
-------
True if successfully sent the status update, else False.
"""
with self.lock:
is_terminal_status = task_state in self.terminal_states
if is_terminal_status and self.terminal_status_sent:
logging.info('Terminal state for task already sent, dropping state {}'.format(task_state))
return False
try:
logging.info('Updating task state to {}'.format(task_state))
status = self.create_status(task_state, reason=reason)
self.driver.sendStatusUpdate(status)
self.terminal_status_sent = is_terminal_status
return True
except Exception:
logging.exception('Unable to send task state {}'.format(task_state))
return False
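# Minimal usage sketch (assumes a connected pymesos MesosExecutorDriver named
# `driver`; the task id is illustrative):
#   updater = StatusUpdater(driver, 'task-123')
#   updater.update_status(cook.TASK_RUNNING)
#   updater.update_status(cook.TASK_FINISHED)  # later terminal updates are dropped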
def send_message(driver, error_handler, message):
"""Sends the message, if it is smaller than the max length, using the driver.
Note: This function must rethrow any OSError exceptions that it encounters.
Parameters
----------
driver: MesosExecutorDriver
The driver to send the message to.
error_handler: fn(os_error)
OSError exception handler for out of memory situations.
message: dictionary
The raw message to send.
Returns
-------
whether the message was successfully sent
"""
try:
logging.info('Sending framework message {}'.format(message))
message_string = json.dumps(message).encode('utf8')
encoded_message = pm.encode_data(message_string)
driver.sendFrameworkMessage(encoded_message)
logging.info('Sent framework message')
return True
except Exception as exception:
if cu.is_out_of_memory_error(exception):
error_handler(exception)
else:
logging.exception('Exception while sending message {}'.format(message))
return False
def launch_task(task, environment):
"""Launches the task using the command available in the json map from the data field.
Parameters
----------
task: dictionary
The task to execute.
environment: dictionary
The task environment.
Returns
-------
When command is provided and a process can be started, the process launched.
Else it logs the reason and returns None.
"""
try:
data_string = pm.decode_data(task['data']).decode('utf8')
data_json = json.loads(data_string)
command = str(data_json['command']).strip()
logging.info('Command: {}'.format(command))
return cs.launch_process(command, environment)
except Exception:
logging.exception('Error in launch_task')
return None
def await_process_completion(process, stop_signal, shutdown_grace_period_ms):
"""Awaits process completion. Also sets up the thread that will kill the process if stop_signal is set.
Parameters
----------
process: subprocess.Popen
        The process whose termination to wait on.
stop_signal: Event
Event that determines if the process was requested to terminate
shutdown_grace_period_ms: int
Grace period before forceful kill
Returns
-------
    Nothing. Returns once the process has terminated, either normally or after being killed.
"""
def process_stop_signal():
stop_signal.wait() # wait indefinitely for the stop_signal to be set
if cs.is_process_running(process):
logging.info('Executor has been instructed to terminate running task')
cs.kill_process(process, shutdown_grace_period_ms)
kill_thread = Thread(target=process_stop_signal, args=())
kill_thread.daemon = True
kill_thread.start()
# wait indefinitely for process to terminate (either normally or by being killed)
process.wait()
def await_reregister(reregister_signal, recovery_secs, *disconnect_signals):
"""Awaits reregistration on rerigster_signal, and notifies on stop_signal and disconnect_signal if not set.
Parameters
----------
reregister_signal: Event
Event that notifies on mesos agent reregistration
recovery_secs: int
Number of seconds to wait for reregistration.
disconnect_signals: [Event]
Events to notify if reregistration does not occur
"""
def await_reregister_thread():
reregister_signal.wait(recovery_secs)
if reregister_signal.isSet():
logging.info("Reregistered with mesos agent. Not notifying on disconnect_signals")
else:
logging.warning(
"Failed to reregister within {} seconds. Notifying disconnect_signals".format(recovery_secs))
for signal in disconnect_signals:
signal.set()
await_thread = Thread(target=await_reregister_thread, args=())
await_thread.daemon = True
await_thread.start()
def get_task_state(exit_code):
"""Interprets the exit_code and return the corresponding task status string
Parameters
----------
exit_code: int
An integer that represents the return code of the task.
Returns
-------
A task status string corresponding to the exit code.
"""
if exit_code > 0:
return cook.TASK_FAILED
elif exit_code < 0:
return cook.TASK_KILLED
else:
return cook.TASK_FINISHED
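# For example: get_task_state(0) == cook.TASK_FINISHED, get_task_state(1) == cook.TASK_FAILED,
# and get_task_state(-9) == cook.TASK_KILLED (negative codes mean the process was killed by a signal).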
def set_environment(environment, key, value):
"""Updates an entry in the environment dictionary.
Returns
-------
Nothing.
"""
if key not in environment or environment[key] != value:
logging.info('Setting process environment[{}]={}'.format(key, value))
environment[key] = value
def retrieve_process_environment(config, task, os_environ):
"""Prepares the environment for the subprocess.
The function also ensures that env[config.progress_output_env_variable] is set to config.progress_output_name.
This protects against the scenario where the config.progress_output_env_variable was specified
in the environment, but the progress output file was not specified.
Parameters
----------
config: cook.config.ExecutorConfig
The current executor config.
task: dictionary
The mesos task object
os_environ: dictionary
A dictionary representing the current environment.
Returns
-------
The environment dictionary for the subprocess.
"""
environment = dict(os_environ)
task_env = {}
mesos_task_variables = (task.get('executor', {})
.get('command', {})
.get('environment', {})
.get('variables', []))
for variable in mesos_task_variables:
task_env[variable['name']] = variable['value']
for var in config.reset_vars:
if var in task_env:
environment[var] = task_env[var]
elif var in environment:
del environment[var]
set_environment(environment, config.progress_output_env_variable, config.progress_output_name)
return environment
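# For illustration (hypothetical values): with config.reset_vars = ['HOME'], a task
# that declares HOME in its Mesos environment overrides the executor's HOME for the
# subprocess, while a task that does not declare it causes HOME to be removed; the
# progress output variable is then always forced to config.progress_output_name.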
def output_task_completion(task_id, task_state):
"""Prints and logs the executor completion message."""
cio.print_and_log('Executor completed execution of {} (state={})'.format(task_id, task_state))
def os_error_handler(stop_signal, status_updater, os_error):
"""Exception handler for OSError.
Parameters
----------
stop_signal: threading.Event
Event that determines if the process was requested to terminate.
status_updater: StatusUpdater
Wrapper object that sends task status messages.
os_error: OSError
The current executor config.
Returns
-------
Nothing
"""
stop_signal.set()
logging.exception('OSError generated, requesting process to terminate')
reason = cook.REASON_CONTAINER_LIMITATION_MEMORY if cu.is_out_of_memory_error(os_error) else None
status_updater.update_status(cook.TASK_FAILED, reason=reason)
cu.print_memory_usage()
def manage_task(driver, task, stop_signal, completed_signal, config):
"""Manages the execution of a task waiting for it to terminate normally or be killed.
It also sends the task status updates, sandbox location and exit code back to the scheduler.
Progress updates are tracked on a separate thread and are also sent to the scheduler.
Setting the stop_signal will trigger termination of the task and associated cleanup.
Returns
-------
Nothing
"""
launched_process = None
task_id = get_task_id(task)
cio.print_and_log('Starting task {}'.format(task_id))
status_updater = StatusUpdater(driver, task_id)
inner_os_error_handler = functools.partial(os_error_handler, stop_signal, status_updater)
try:
# not yet started to run the task
status_updater.update_status(cook.TASK_STARTING)
# Use MESOS_DIRECTORY instead of MESOS_SANDBOX, to report the sandbox location outside of the container
sandbox_message = {'sandbox-directory': config.mesos_directory, 'task-id': task_id, 'type': 'directory'}
send_message(driver, inner_os_error_handler, sandbox_message)
environment = retrieve_process_environment(config, task, os.environ)
launched_process = launch_task(task, environment)
if launched_process:
# task has begun running successfully
status_updater.update_status(cook.TASK_RUNNING)
cio.print_and_log('Forked command at {}'.format(launched_process.pid))
else:
# task launch failed, report an error
logging.error('Error in launching task')
status_updater.update_status(cook.TASK_ERROR, reason=cook.REASON_TASK_INVALID)
return
task_completed_signal = Event() # event to track task execution completion
sequence_counter = cp.ProgressSequenceCounter()
send_progress_message = functools.partial(send_message, driver, inner_os_error_handler)
max_message_length = config.max_message_length
sample_interval_ms = config.progress_sample_interval_ms
progress_updater = cp.ProgressUpdater(task_id, max_message_length, sample_interval_ms, send_progress_message)
progress_termination_signal = Event()
def launch_progress_tracker(progress_location, location_tag):
progress_file_path = os.path.abspath(progress_location)
logging.info('Location {} (absolute path={}) tagged as [tag={}]'.format(
progress_location, progress_file_path, location_tag))
progress_tracker = cp.ProgressTracker(config, stop_signal, task_completed_signal, sequence_counter,
progress_updater, progress_termination_signal, progress_location,
location_tag, inner_os_error_handler)
progress_tracker.start()
return progress_tracker
progress_locations = {config.progress_output_name: 'progress',
config.stderr_file(): 'stderr',
config.stdout_file(): 'stdout'}
logging.info('Progress will be tracked from {} locations'.format(len(progress_locations)))
progress_trackers = [launch_progress_tracker(l, progress_locations[l]) for l in progress_locations]
await_process_completion(launched_process, stop_signal, config.shutdown_grace_period_ms)
task_completed_signal.set()
progress_termination_timer = Timer(config.shutdown_grace_period_ms / 1000.0, progress_termination_signal.set)
progress_termination_timer.daemon = True
progress_termination_timer.start()
# propagate the exit code
exit_code = launched_process.returncode
cio.print_and_log('Command exited with status {} (pid: {})'.format(exit_code, launched_process.pid))
exit_message = {'exit-code': exit_code, 'task-id': task_id}
send_message(driver, inner_os_error_handler, exit_message)
# await progress updater termination if executor is terminating normally
if not stop_signal.isSet():
logging.info('Awaiting completion of progress updaters')
[progress_tracker.wait() for progress_tracker in progress_trackers]
logging.info('Progress updaters completed')
# force send the latest progress state if available
[progress_tracker.force_send_progress_update() for progress_tracker in progress_trackers]
# task either completed successfully or aborted with an error
task_state = get_task_state(exit_code)
output_task_completion(task_id, task_state)
status_updater.update_status(task_state)
except Exception as exception:
if cu.is_out_of_memory_error(exception):
inner_os_error_handler(exception)
else:
# task aborted with an error
logging.exception('Error in executing task')
output_task_completion(task_id, cook.TASK_FAILED)
status_updater.update_status(cook.TASK_FAILED, reason=cook.REASON_EXECUTOR_TERMINATED)
finally:
# ensure completed_signal is set so driver can stop
completed_signal.set()
if launched_process and cs.is_process_running(launched_process):
cs.send_signal(launched_process.pid, signal.SIGKILL)
class CookExecutor(pm.Executor):
"""This class is responsible for launching the task sent by the scheduler.
It implements the Executor methods."""
def __init__(self, stop_signal, config):
self.completed_signal = Event()
self.config = config
self.disconnect_signal = Event()
self.stop_signal = stop_signal
self.reregister_signal = None
def registered(self, driver, executor_info, framework_info, agent_info):
logging.info('Executor registered executor={}, framework={}, agent={}'.
format(executor_info['executor_id']['value'], framework_info['id'], agent_info['id']['value']))
env = os.environ
if 'EXECUTOR_TEST_EXIT' in env:
# When running in docker, if the container exits too quickly it's logged in mesos as container launch failed
# instead of mesos executor terminated. This sleep ensures that we have the correct reason code for our
# integration tests.
time.sleep(5)
exit_code = int(env['EXECUTOR_TEST_EXIT'])
logging.warning('Exiting with code {} from EXECUTOR_TEST_EXIT environment variable'.
format(exit_code))
os._exit(exit_code)
def reregistered(self, driver, agent_info):
logging.info('Executor re-registered agent={}'.format(agent_info))
if self.config.checkpoint:
if self.reregister_signal is not None:
logging.info('Executor checkpointing is enabled. Notifying on reregister_signal')
self.reregister_signal.set()
self.reregister_signal = None
else:
logging.error('Checkpointing is enabled but reregister_signal is None. Unable to notify!')
def disconnected(self, driver):
logging.info('Mesos requested executor to disconnect')
if self.config.checkpoint:
if self.reregister_signal is None:
logging.info('Executor checkpointing is enabled. Waiting for agent recovery.')
new_event = Event()
self.reregister_signal = new_event
await_reregister(new_event, self.config.recovery_timeout_ms / 1000, self.stop_signal,
self.disconnect_signal)
else:
logging.info('Checkpointing is enabled. Already launched await_reregister thread.')
else:
logging.info('Executor checkpointing is not enabled. Terminating task.')
self.disconnect_signal.set()
self.stop_signal.set()
def launchTask(self, driver, task):
logging.info('Driver {} launching task {}'.format(driver, task))
stop_signal = self.stop_signal
completed_signal = self.completed_signal
config = self.config
task_thread = Thread(target=manage_task, args=(driver, task, stop_signal, completed_signal, config))
task_thread.daemon = True
task_thread.start()
def killTask(self, driver, task_id):
logging.info('Mesos requested executor to kill task {}'.format(task_id))
task_id_str = task_id['value'] if 'value' in task_id else task_id
grace_period = os.environ.get('MESOS_EXECUTOR_SHUTDOWN_GRACE_PERIOD', '')
cio.print_and_log('Received kill for task {} with grace period of {}'.format(task_id_str, grace_period))
cu.log_thread_stack_traces()
self.stop_signal.set()
def shutdown(self, driver):
logging.info('Mesos requested executor to shutdown')
self.stop_signal.set()
def error(self, driver, message):
logging.error(message)
super().error(driver, message)
def await_completion(self):
"""
Blocks until the internal flag completed_signal is set.
The completed_signal Event is expected to be set by manage_task.
"""
logging.info('Waiting for CookExecutor to complete...')
self.completed_signal.wait()
logging.info('CookExecutor has completed')
def await_disconnect(self):
"""
Blocks until the internal flag disconnect_signal is set or the disconnect grace period expires.
The disconnect grace period is computed based on whether stop_signal is set.
"""
disconnect_grace_secs = cook.TERMINATE_GRACE_SECS if self.stop_signal.isSet() else cook.DAEMON_GRACE_SECS
if not self.disconnect_signal.isSet():
logging.info('Waiting up to {} second(s) for CookExecutor to disconnect'.format(disconnect_grace_secs))
self.disconnect_signal.wait(disconnect_grace_secs)
if not self.disconnect_signal.isSet():
logging.info('CookExecutor did not disconnect in {} seconds'.format(disconnect_grace_secs))
|
stress_test.py
|
# USAGE
# python stress_test.py
# import the necessary packages
from threading import Thread
import requests
import time
# initialize the Keras REST API endpoint URL along with the input
# image path
KERAS_REST_API_URL = "http://localhost:5000/predict"
IMAGE_PATH = "jemma.jpg"
# initialize the number of requests for the stress test along with
# the sleep amount between requests
NUM_REQUESTS = 500
SLEEP_COUNT = 0.05
def call_predict_endpoint(n):
# load the input image and construct the payload for the request
image = open(IMAGE_PATH, "rb").read()
payload = {"input_file": image}
# submit the request
try:
r = requests.post(KERAS_REST_API_URL, files=payload).json()
        # ensure the request was successful
if r["success"]:
print("[INFO] thread {} OK".format(n))
# otherwise, the request failed
else:
print("[INFO] thread {} FAILED".format(n))
    except Exception:
        print("[INFO] thread {} FAILED (bad connection)".format(n))
# loop over the number of threads
for i in range(0, NUM_REQUESTS):
# start a new thread to call the API
t = Thread(target=call_predict_endpoint, args=(i,))
t.daemon = True
t.start()
time.sleep(SLEEP_COUNT)
print("[INFO] all request queued up" )
# insert a long sleep so we can wait until the server is finished
# processing the images
time.sleep(300)
|
gen_cleaned_megaface.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import traceback
from easydict import EasyDict as edict
import time
import sys
import numpy as np
import argparse
import struct
import cv2
import multiprocessing
import sklearn
from sklearn.preprocessing import normalize
import mxnet as mx
from mxnet import ndarray as nd
feature_dim = 512
feature_ext = 1
def put_feature(imgs, nets, out_put_list, q_exc):
try:
count = len(imgs)
data = mx.nd.zeros(shape=(count * 2, 3, imgs[0].shape[1], imgs[0].shape[2]))
for idx, img in enumerate(imgs):
for flipid in [0, 1]:
_img = nd.array(img)
if flipid == 1:
_img = _img[:, :, ::-1]
data[count * flipid + idx] = _img
F = []
for net in nets:
db = mx.io.DataBatch(data=(data,))
net.model.forward(db, is_train=False)
x = net.model.get_outputs()[0].asnumpy()
embedding = x[0:count, :] + x[count:, :]
embedding = sklearn.preprocessing.normalize(embedding)
F.append(embedding)
F = np.concatenate(F, axis=1)
F = sklearn.preprocessing.normalize(F)
for i, k in enumerate(out_put_list):
q_work = q_exc[i % len(q_exc)]
data = (F[i], k)
while True:
if q_work.full():
continue
else:
q_work.put(data)
break
except Exception as e:
traceback.print_exc()
        print('put_feature error:', e)
for q in q_exc:
q.put(None)
return
def write(args, q_exc):
while True:
data = q_exc.get()
if data is None:
break
        v = data[0]
        path, label = data[1][0], data[1][1]
        # noise entries (label == 1) get a 100-valued extension flag, clean entries get 0
        fill_value = 100 if label == 1 else 0
        feature = np.full((feature_dim + feature_ext,), fill_value, dtype=np.float32)
        feature[0:feature_dim] = v
feature = list(feature)
with open(path, 'wb') as f:
f.write(struct.pack('4i', len(feature), 1, 4, 5))
f.write(struct.pack("%df" % len(feature), *feature))
def generate_output_dic(args, img_list):
out_dic = {}
mf_noise_map = {}
for line in open(args.megaface_noises, 'r'):
if line.startswith('#'):
continue
line = line.strip()
_vec = line.split("\t")
if len(_vec) > 1:
line = _vec[1]
mf_noise_map[line] = 1
print("Creating dictionary start")
for line in img_list:
clean_label = 0
line = [i.strip() for i in line.strip().split('\t')]
img_path = line[-1]
image_path = img_path.strip()
_path = image_path.split('/')
a_pre, a, b = _path[-3], _path[-2], _path[-1]
dataset_out = os.path.join(args.output, args.dataset)
out_dir = os.path.join(dataset_out, a_pre, a)
out_path = os.path.join(out_dir, b + "_%s.bin" % (args.algo))
bb = '/'.join([a_pre, a, b])
if bb in mf_noise_map:
clean_label = 1
out_dic[int(line[0])] = (out_path, clean_label)
print("Creating dictionary end, the length of dictionary is", len(out_dic))
return out_dic
def main(args):
print(args)
if len(args.gpu) == 1:
gpuid = int(args.gpu)
ctx = mx.gpu(gpuid)
else:
ctx = [mx.gpu(int(i)) for i in args.gpu.split(',')]
nets = []
image_shape = [int(x) for x in args.image_size.split(',')]
for model in args.model.split('|'):
vec = model.split(',')
assert len(vec) > 1
prefix = vec[0]
epoch = int(vec[1])
print('loading', prefix, epoch)
net = edict()
net.ctx = ctx
net.sym, net.arg_params, net.aux_params = mx.model.load_checkpoint(prefix, epoch)
all_layers = net.sym.get_internals()
net.sym = all_layers['{}fc1_output'.format(args.name_prefix)]
net.model = mx.mod.Module(symbol=net.sym, context=net.ctx, label_names=None)
net.model.bind(data_shapes=[('data', (args.batch_size, 3, image_shape[1], image_shape[2]))])
net.model.set_params(net.arg_params, net.aux_params)
nets.append(net)
with open(args.dataset_lst) as f:
img_lst = f.readlines()
dataset_dic = generate_output_dic(args, img_lst)
total_nums = len(img_lst)
i, j = total_nums // args.batch_size, total_nums % args.batch_size
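    # i full batches; j is the number of leftover images in the final partial
    # batch (used below to trim out_path_list on the last iteration)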
count = 0
q_exc = [multiprocessing.Queue(2048) for v in range(args.num_threads)]
write_process = [multiprocessing.Process(target=write, args=(args, q_exc[v])) \
for v in range(args.num_threads)]
for p in write_process:
p.start()
data_iter = mx.image.ImageIter(batch_size=args.batch_size, data_shape=(3, 112, 112),
path_imgrec=args.rec_path,
                                    path_imgidx=args.idx_path)
data_iter.reset()
while count <= i:
start = time.time()
batch = data_iter.next()
data = batch.data[0]
data.asnumpy()
out_path_list = []
for value in batch.label[0]:
idx = int(value.asnumpy())
out_path = dataset_dic.get(idx)
out_path_list.append(out_path)
if count == i and j != 0:
out_path_list = out_path_list[:j]
put_feature(data, nets, out_path_list, q_exc)
elapse = time.time() - start
print(count, '/', i, 'Total Time used:', elapse)
count += 1
for q in q_exc:
q.put(None)
for p in write_process:
p.join()
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--rec-path', type=str, help='',
default='~/huya_face/face_datasets/megaface_data/eval_set/megaface.rec')
parser.add_argument('--idx-path', type=str, help='',
default='~/huya_face/face_datasets/megaface_data/eval_set/megaface.idx')
parser.add_argument('--batch-size', type=int, help='', default=32)
parser.add_argument('--image-size', type=str, help='', default='3,112,112')
    parser.add_argument('--gpu', type=str, help='', default='0')
parser.add_argument('--num-threads', type=int, help='', default=16)
parser.add_argument('--algo', type=str, help='', default='insightface')
parser.add_argument('--dataset', type=str, help='', default='megaface')
parser.add_argument('--dataset_lst', type=str, help='',
default='~/huya_face/face_datasets/megaface_data/eval_set/megaface.lst')
parser.add_argument('--output', type=str, help='',
default='~/huya_face/feature_out_clean')
parser.add_argument('--model', type=str, help='',
default='~/huya_face/models/model-r100-ii/model,0')
parser.add_argument('--megaface-noises', type=str, help='',
default='~/huya_face/face_datasets/megaface_data/eval_set/megaface_noises.txt')
parser.add_argument('--name_prefix', type=str, default='')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
|
app.py
|
# -*- coding: utf-8 -*-
from flask import Flask, render_template, request,session,abort
from revisit_sayat import fayat
# from flask_sslify import SSLify
import uuid
from threading import Thread
from check import checkAll
app = Flask(__name__)
app.secret_key = 'hellowordl1'
# sslify = SSLify(app, permanent=True)
def random_alphanum(string_length=10):
"""Returns a random string of length string_length."""
random = str(uuid.uuid4()) # Convert UUID format to a Python string.
random = random.upper() # Make all characters uppercase.
random = random.replace("-","") # Remove the UUID '-'.
return random[0:string_length] # Return the random string.
@app.before_request
def csrf_protect():
if request.method == "POST":
token = session.pop('_csrf_token', None)
if not token or token != request.form.get('_csrf_token'):
abort(403)
def generate_csrf_token():
if '_csrf_token' not in session:
session['_csrf_token'] = random_alphanum(10)
return session['_csrf_token']
app.jinja_env.globals['csrf_token'] = generate_csrf_token
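# The POST form template is expected to embed the token so csrf_protect() can
# validate it, e.g. (assumed markup, not shown in this file):
# <input type="hidden" name="_csrf_token" value="{{ csrf_token() }}"/>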
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
id = request.form['uid']
n = request.form['n']
text = request.form['feedback']
print id, n, text
time = int(n) * 1500
        # is this okay?
print time
if checkAll(uid=str(id), n=int(n), text=str(text)):
print "OK"
try:
thread = Thread(target=fayat, args=(str(id), int(n), str(text)))
thread.start()
# fayat(userid=str(id),n=int(n),text=str(text))
return render_template('rocket.html', time=time, uid=str(id))
except:
try:
thread._stop()
except:
pass
return '<script>alert("error");</script>'
else:
print "err?"
return '''<script>alert("We had a problem processing your request! Possible reasons - User does not exist, feedback text length is greater than 200 or total flood rate is greater than 200. ;)");
var meta = document.createElement('meta');
meta.httpEquiv = "REFRESH";
meta.content = "0;URL=https://www.floodsayat.me";
document.getElementsByTagName('head')[0].appendChild(meta);
</script>'''
return render_template('index.html')
@app.route('/test')
def test():
return render_template('rocket.html', time=100000000,uid="test")
@app.errorhandler(404)
def page_not_found(e):
return render_template('error.html'),404
@app.errorhandler(403)
def page_not_found(e):
return render_template('error.html'),403
@app.errorhandler(503)
def page_not_found(e):
return render_template('error.html'),503
if __name__ == "__main__":
app.run(debug=False,threaded=True)
|
semglenny.py
|
# Copyright (c) Lenny Varghese and Unlock contributors.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Unlock nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy
import threading
import collections
import struct
import pyaudio
import time
class EMG:
def __init__(self,numChans=2,sampleRate=44100,chunkSize=512,rmsWindows=5,
rmsMax = numpy.ones((2,),dtype=numpy.float),
rmsMin = numpy.zeros((2,),dtype=numpy.float)):
self.sampleRate = sampleRate
self.chunkSize = chunkSize
self.numChans = numChans
self.rmsWindows = rmsWindows
self.rmsMax = rmsMax
self.rmsMin = rmsMin
self.rmsVals = rmsMin
self.currScore = numpy.zeros((numChans,),dtype = numpy.float)
# instantiate pyAudio and open a stream for recording
self.p = pyaudio.PyAudio()
self.theStream = self.p.open(format=pyaudio.paInt16,
channels = self.numChans,
rate = self.sampleRate,
input = True,
frames_per_buffer = self.chunkSize,
start = False)
# input_device_index=useDevice)
# create the containers to store the acquired data
# using deque because it's supposedly fast and can implement a circular
# buffer efficiently
self.rawData = collections.deque([])
self.rmsBuffer = collections.deque([],maxlen=rmsWindows)
self.timeStamp = collections.deque([])
# other definitions:
# to convert the wave stream data into an array
self.convertSamplesFormat = ('%ih' % (self.rmsWindows*self.chunkSize*
self.numChans) )
# this is supposed to just be a flag to control threads, don't know if
# it will actually work like this or not
self.isAcquiring = False
#### handle the sound card and pyAudio stuff
#### start_recording and stop_recording are just acting as a switch
def start_recording(self):
# start the stream
print 'recording started'
self.theStream.start_stream()
        # pre-fill self.rmsBuffer with placeholder data for now
        print 'stream started'
        for q in range(0, self.rmsWindows):
self.rmsBuffer.append(self.theStream.read(self.chunkSize))
self.thr1 = threading.Thread(target=self.get_data)
self.thr1.start()
self.isAcquiring = True
def stop_recording(self):
self.isAcquiring = False
time.sleep(0.1)
self.theStream.close()
self.p.terminate()
def get_data(self):
while True:
if self.isAcquiring == False:
break
# record the data into the deque array
dataIn = self.theStream.read(self.chunkSize)
# append the deque buffers
self.timeStamp.append(time.time())
self.rmsBuffer.append(dataIn)
self.rawData.append(dataIn)
# compute the rms and the normalized "score"
self.compute_score()
#### now do something with the acquired data
def compute_score(self):
dataString = ''.join(self.rmsBuffer)
# unpack stream (16 bit) and convert to numpy array
convertedData = numpy.array(struct.unpack(self.convertSamplesFormat,
dataString),
dtype=numpy.float)/(2.**15)
# channel data is interleaved (1 2 3 4 1 2 3 4...), so you have to take it apart
convertedData = convertedData.reshape(convertedData.size/self.numChans,self.numChans)
self.rmsVals = ((convertedData**2).mean(0))**0.5
max_v = 0
min_v = 0
for val in self.rmsVals:
if max_v < val:
max_v = val
if min_v > val:
min_v = val
# normalized(x_i) = (x_i - X_min) / (X_max - X_min)
        # this just converts the rms values into a normalized score in [0, 1]
        # to do something with later
# adjust according to min/max rms values
#self.rmsVals[self.rmsVals < self.rmsMin] = (
# self.rmsMin[self.rmsVals < self.rmsMin])
# self.rmsVals[self.rmsVals > self.rmsMax] = (
# self.rmsMax[self.rmsVals > self.rmsMax])
        # map the rms values onto [0, 1]: slope*x + intercept == (x - rmsMin) / (rmsMax - rmsMin)
slope = 1 / (self.rmsMax-self.rmsMin)
intercept = -1*self.rmsMin*slope
self.currScore = slope*self.rmsVals+intercept
|
SwitchHelper.py
|
#!/usr/bin/env python
# encoding: utf-8
# Copyright 2014 Xinyu, He <legendmohe@foxmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import json
import time
import socket
from util.thread import TimerThread
from util.Res import Res
from util.log import *
class SwitchHelper:
HEARTBEAT_RATE = 3
SOCKET_TIMEOUT = 5
RETRY_TIME = 3
SCAN_PORT = 48899
SWITCH_PORT = 8899
BOARDCAST_ADDRESS = "255.255.255.255"
def __init__(self):
init_json = Res.init("init.json")
self.scan_ip = SwitchHelper.BOARDCAST_ADDRESS
self.name2ip = init_json["switchs"]
self._send_lock = threading.Lock()
self.switchs = {}
self._init_heartbeat()
def _init_heartbeat(self):
self._init_heartbeat_socket()
self._send_hb_thread = threading.Thread(target=self._heartbeat_recv)
self._send_hb_thread.daemon = True
self._send_hb_thread.start()
self._heartbeat_thread = TimerThread(
interval=SwitchHelper.HEARTBEAT_RATE,
target=self._heartbeat_send
)
self._heartbeat_thread.start()
def _init_heartbeat_socket(self):
self._hb_sock = self._get_udp_socket()
bind_address = ('0.0.0.0', SwitchHelper.SCAN_PORT)
self._hb_sock.bind(bind_address)
self._hb_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, True)
self._hb_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
# self._hb_sock.settimeout(SwitchHelper.HEARTBEAT_RATE/2)
def ip_for_name(self, name):
return self.name2ip.get(name, None)
def name_for_ip(self, ip):
for name in self.name2ip:
if self.name2ip[name] == ip:
return name
return None
def send_open(self, target_ip):
        if target_ip not in self.switchs:
            ERROR("target_ip does not exist: " + target_ip)
return
cmd = self._get_switch_cmd("ON")
res = self._send_cmd(target_ip, cmd)
INFO("send_open:%s" % res)
if res == "+OK" or res == "+ok":
self.switchs[target_ip]['status'] = "on"
return res
def send_close(self, target_ip):
        if target_ip not in self.switchs:
            ERROR("target_ip does not exist: " + target_ip)
return
cmd = self._get_switch_cmd("OFF")
res = self._send_cmd(target_ip, cmd)
INFO("send_close:%s" % res)
if res == "+OK" or res == "+ok":
self.switchs[target_ip]['status'] = "off"
return res
def show_state(self, target_ip):
        if target_ip not in self.switchs:
            ERROR("target_ip does not exist: " + target_ip)
return None
return self.switchs[target_ip]["status"]
def show_info(self, target_ip):
        if target_ip not in self.switchs:
            ERROR("target_ip does not exist: " + target_ip)
return
cmd = self._get_info_cmd()
recv = self._send_cmd(target_ip, cmd)
if recv is None or len(recv) == 0:
return None
info = recv[5:-1].split(",")
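        # field meanings follow readable_info() below: I=current, U=voltage,
        # F=frequency, P=active power, PQ=reactive power, E/EQ=energy counters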
return {
"I": info[0] if len(info[0]) != 0 else "0",
"U": info[1] if len(info[1]) != 0 else "0",
"F": info[2] if len(info[2]) != 0 else "0",
"P": info[3] if len(info[3]) != 0 else "0",
"PQ": info[4] if len(info[4]) != 0 else "0",
"E": info[5] if len(info[5]) != 0 else "0",
"EQ": info[6] if len(info[6]) != 0 else "0",
}
def readable_info(self, info):
if info is None or len(info) == 0:
return ""
I = "%.2f" % (float(info["I"])/100.0) + "A"
U = "%.2f" % (float(info["U"])/100.0) + "V"
F = "%.2f" % (float(info["F"])/100.0) + "Hz"
P = "%.2f" % (float(info["P"])/10.0) + "W"
PQ = info["P"] + "W"
E = info["E"] + "WH"
EQ = info["EQ"] + "WH"
        return "".join([
            u"功率:%s " % P,   # power
            u"电流:%s " % I,   # current
            u"电压:%s " % U,   # voltage
            # u"频率:%s " % F,       # frequency
            # u"有功功率:%s " % P,    # active power
            # u"无功功率:%s " % PQ,   # reactive power
            # u"有功能量值:%s " % E,   # active energy
            # u"无功能量值:%s" % EQ,   # reactive energy
        ])
def _format_time(self):
return time.strftime("%Y%m%d%H%M%S", time.localtime())
def _get_udp_socket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def _get_cmd_socket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def _get_switch_cmd(self, action):
return "AT+YZSWITCH=1,%s,%s\r\n" % (action, self._format_time())
def _get_info_cmd(self):
return "AT+YZOUT\r\n"
def _get_heartbeat_cmd(self):
return 'YZ-RECOSCAN'
def _send_cmd(self, target_ip, cmd):
if target_ip is None or len(target_ip) == 0:
            ERROR("invalid target_ip.")
return
if cmd is None or len(cmd) == 0:
            ERROR("invalid switch cmd.")
return
with self._send_lock:
# sock = self._get_cmd_socket()
# sock.connect()
INFO("Switch send command:%s to:%s" % (cmd, target_ip))
for i in range(0, SwitchHelper.RETRY_TIME):
try:
sock = socket.create_connection(
(target_ip, SwitchHelper.SWITCH_PORT),
SwitchHelper.SOCKET_TIMEOUT)
time.sleep(0.5)
sock.send(cmd)
recv = sock.recv(512)
sock.close()
return recv.strip()
except socket.timeout:
ERROR("SwitchHelper cmd socket timeout.")
except Exception, ex:
ERROR(ex)
return None
def _heartbeat_send(self):
# for ip in self.switchs:
# self.switchs[ip]["status"] = "-1"
sock = self._hb_sock
address = (self.scan_ip, SwitchHelper.SCAN_PORT)
sock.sendto(self._get_heartbeat_cmd(), address)
DEBUG("send switch heartbeat to:%s" % (address, ))
def _heartbeat_recv(self):
sock = self._hb_sock
while True:
try:
recv, address = sock.recvfrom(512)
DEBUG("recv switch heartbeat:%s from:%s" % (recv, address))
status = recv.strip().split(',')
if len(status) < 5:
continue
switch = {}
switch["ip"] = status[0]
switch["mac"] = status[1]
switch["name"] = self.name_for_ip(status[0])
switch["status"] = "on" if status[4] == "1" else "off"
self.switchs[switch["ip"]] = switch
except socket.timeout:
WARN("heartbeat timeout. ")
|
logger_test.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for worker logging utilities."""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import unicode_literals
import json
import logging
import sys
import threading
import unittest
from builtins import object
from apache_beam.runners.worker import logger
from apache_beam.runners.worker import statesampler
from apache_beam.utils.counters import CounterFactory
class PerThreadLoggingContextTest(unittest.TestCase):
def thread_check_attribute(self, name):
self.assertFalse(name in logger.per_thread_worker_data.get_data())
with logger.PerThreadLoggingContext(**{name: 'thread-value'}):
self.assertEqual(
logger.per_thread_worker_data.get_data()[name], 'thread-value')
self.assertFalse(name in logger.per_thread_worker_data.get_data())
def test_per_thread_attribute(self):
self.assertFalse('xyz' in logger.per_thread_worker_data.get_data())
with logger.PerThreadLoggingContext(xyz='value'):
self.assertEqual(logger.per_thread_worker_data.get_data()['xyz'], 'value')
thread = threading.Thread(
target=self.thread_check_attribute, args=('xyz', ))
thread.start()
thread.join()
self.assertEqual(logger.per_thread_worker_data.get_data()['xyz'], 'value')
self.assertFalse('xyz' in logger.per_thread_worker_data.get_data())
def test_set_when_undefined(self):
self.assertFalse('xyz' in logger.per_thread_worker_data.get_data())
with logger.PerThreadLoggingContext(xyz='value'):
self.assertEqual(logger.per_thread_worker_data.get_data()['xyz'], 'value')
self.assertFalse('xyz' in logger.per_thread_worker_data.get_data())
def test_set_when_already_defined(self):
self.assertFalse('xyz' in logger.per_thread_worker_data.get_data())
with logger.PerThreadLoggingContext(xyz='value'):
self.assertEqual(logger.per_thread_worker_data.get_data()['xyz'], 'value')
with logger.PerThreadLoggingContext(xyz='value2'):
self.assertEqual(
logger.per_thread_worker_data.get_data()['xyz'], 'value2')
self.assertEqual(logger.per_thread_worker_data.get_data()['xyz'], 'value')
self.assertFalse('xyz' in logger.per_thread_worker_data.get_data())
class JsonLogFormatterTest(unittest.TestCase):
SAMPLE_RECORD = {
'created': 123456.789,
'msecs': 789.654321,
'msg': '%s:%d:%.2f',
'args': ('xyz', 4, 3.14),
'levelname': 'WARNING',
'process': 'pid',
'thread': 'tid',
'name': 'name',
'filename': 'file',
'funcName': 'func',
'exc_info': None
}
SAMPLE_OUTPUT = {
'timestamp': {
'seconds': 123456, 'nanos': 789654321
},
'severity': 'WARN',
'message': 'xyz:4:3.14',
'thread': 'pid:tid',
'job': 'jobid',
'worker': 'workerid',
'logger': 'name:file:func'
}
def create_log_record(self, **kwargs):
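    # Minimal stand-in for logging.LogRecord, carrying only the attributes that
    # JsonLogFormatter reads.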
class Record(object):
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
return Record(**kwargs)
def test_basic_record(self):
formatter = logger.JsonLogFormatter(job_id='jobid', worker_id='workerid')
record = self.create_log_record(**self.SAMPLE_RECORD)
self.assertEqual(json.loads(formatter.format(record)), self.SAMPLE_OUTPUT)
def execute_multiple_cases(self, test_cases):
record = self.SAMPLE_RECORD
output = self.SAMPLE_OUTPUT
formatter = logger.JsonLogFormatter(job_id='jobid', worker_id='workerid')
for case in test_cases:
record['msg'] = case['msg']
record['args'] = case['args']
output['message'] = case['expected']
self.assertEqual(
json.loads(formatter.format(self.create_log_record(**record))),
output)
def test_record_with_format_character(self):
test_cases = [
{
'msg': '%A', 'args': (), 'expected': '%A'
},
{
'msg': '%s', 'args': (), 'expected': '%s'
},
{
'msg': '%A%s', 'args': ('xy'), 'expected': '%A%s with args (xy)'
},
{
'msg': '%s%s', 'args': (1), 'expected': '%s%s with args (1)'
},
]
self.execute_multiple_cases(test_cases)
def test_record_with_arbitrary_messages(self):
test_cases = [
{
'msg': ImportError('abc'), 'args': (), 'expected': 'abc'
},
{
'msg': TypeError('abc %s'), 'args': ('def'), 'expected': 'abc def'
},
]
self.execute_multiple_cases(test_cases)
def test_record_with_per_thread_info(self):
self.maxDiff = None
tracker = statesampler.StateSampler('stage', CounterFactory())
statesampler.set_current_tracker(tracker)
formatter = logger.JsonLogFormatter(job_id='jobid', worker_id='workerid')
with logger.PerThreadLoggingContext(work_item_id='workitem'):
with tracker.scoped_state('step', 'process'):
record = self.create_log_record(**self.SAMPLE_RECORD)
log_output = json.loads(formatter.format(record))
expected_output = dict(self.SAMPLE_OUTPUT)
expected_output.update({
'work': 'workitem', 'stage': 'stage', 'step': 'step'
})
self.assertEqual(log_output, expected_output)
statesampler.set_current_tracker(None)
def test_nested_with_per_thread_info(self):
self.maxDiff = None
tracker = statesampler.StateSampler('stage', CounterFactory())
statesampler.set_current_tracker(tracker)
formatter = logger.JsonLogFormatter(job_id='jobid', worker_id='workerid')
with logger.PerThreadLoggingContext(work_item_id='workitem'):
with tracker.scoped_state('step1', 'process'):
record = self.create_log_record(**self.SAMPLE_RECORD)
log_output1 = json.loads(formatter.format(record))
with tracker.scoped_state('step2', 'process'):
record = self.create_log_record(**self.SAMPLE_RECORD)
log_output2 = json.loads(formatter.format(record))
record = self.create_log_record(**self.SAMPLE_RECORD)
log_output3 = json.loads(formatter.format(record))
statesampler.set_current_tracker(None)
record = self.create_log_record(**self.SAMPLE_RECORD)
log_output4 = json.loads(formatter.format(record))
self.assertEqual(
log_output1,
dict(self.SAMPLE_OUTPUT, work='workitem', stage='stage', step='step1'))
self.assertEqual(
log_output2,
dict(self.SAMPLE_OUTPUT, work='workitem', stage='stage', step='step2'))
self.assertEqual(
log_output3,
dict(self.SAMPLE_OUTPUT, work='workitem', stage='stage', step='step1'))
self.assertEqual(log_output4, self.SAMPLE_OUTPUT)
def test_exception_record(self):
formatter = logger.JsonLogFormatter(job_id='jobid', worker_id='workerid')
try:
raise ValueError('Something')
except ValueError:
attribs = dict(self.SAMPLE_RECORD)
attribs.update({'exc_info': sys.exc_info()})
record = self.create_log_record(**attribs)
log_output = json.loads(formatter.format(record))
# Check if exception type, its message, and stack trace information are in.
exn_output = log_output.pop('exception')
self.assertNotEqual(exn_output.find('ValueError: Something'), -1)
self.assertNotEqual(exn_output.find('logger_test.py'), -1)
self.assertEqual(log_output, self.SAMPLE_OUTPUT)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
_simple_stubs_test.py
|
# Copyright 2020 The gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Simple Stubs."""
# TODO(https://github.com/grpc/grpc/issues/21965): Run under setuptools.
import os
_MAXIMUM_CHANNELS = 10
_DEFAULT_TIMEOUT = 1.0
os.environ["GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS"] = "2"
os.environ["GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM"] = str(_MAXIMUM_CHANNELS)
os.environ["GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS"] = str(_DEFAULT_TIMEOUT)
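# NOTE: grpc._simple_stubs reads these environment variables at import time, so
# they must be set before grpc is imported below.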
import contextlib
import datetime
import inspect
import logging
import sys
import threading
import time
from typing import Callable, Optional
import unittest
import grpc
import grpc.experimental
from tests.unit import resources
from tests.unit import test_common
from tests.unit.framework.common import get_socket
_REQUEST = b"0000"
_CACHE_EPOCHS = 8
_CACHE_TRIALS = 6
_SERVER_RESPONSE_COUNT = 10
_CLIENT_REQUEST_COUNT = _SERVER_RESPONSE_COUNT
_STRESS_EPOCHS = _MAXIMUM_CHANNELS * 10
_UNARY_UNARY = "/test/UnaryUnary"
_UNARY_STREAM = "/test/UnaryStream"
_STREAM_UNARY = "/test/StreamUnary"
_STREAM_STREAM = "/test/StreamStream"
_BLACK_HOLE = "/test/BlackHole"
@contextlib.contextmanager
def _env(key: str, value: str):
os.environ[key] = value
yield
del os.environ[key]
def _unary_unary_handler(request, context):
return request
def _unary_stream_handler(request, context):
for _ in range(_SERVER_RESPONSE_COUNT):
yield request
def _stream_unary_handler(request_iterator, context):
request = None
for single_request in request_iterator:
request = single_request
return request
def _stream_stream_handler(request_iterator, context):
for request in request_iterator:
yield request
def _black_hole_handler(request, context):
event = threading.Event()
def _on_done():
event.set()
context.add_callback(_on_done)
while not event.is_set():
time.sleep(0.1)
class _GenericHandler(grpc.GenericRpcHandler):
def service(self, handler_call_details):
if handler_call_details.method == _UNARY_UNARY:
return grpc.unary_unary_rpc_method_handler(_unary_unary_handler)
elif handler_call_details.method == _UNARY_STREAM:
return grpc.unary_stream_rpc_method_handler(_unary_stream_handler)
elif handler_call_details.method == _STREAM_UNARY:
return grpc.stream_unary_rpc_method_handler(_stream_unary_handler)
elif handler_call_details.method == _STREAM_STREAM:
return grpc.stream_stream_rpc_method_handler(_stream_stream_handler)
elif handler_call_details.method == _BLACK_HOLE:
return grpc.unary_unary_rpc_method_handler(_black_hole_handler)
else:
raise NotImplementedError()
def _time_invocation(to_time: Callable[[], None]) -> datetime.timedelta:
start = datetime.datetime.now()
to_time()
return datetime.datetime.now() - start
@contextlib.contextmanager
def _server(credentials: Optional[grpc.ServerCredentials]):
try:
server = test_common.test_server()
target = '[::]:0'
if credentials is None:
port = server.add_insecure_port(target)
else:
port = server.add_secure_port(target, credentials)
server.add_generic_rpc_handlers((_GenericHandler(),))
server.start()
yield port
finally:
server.stop(None)
class SimpleStubsTest(unittest.TestCase):
def assert_cached(self, to_check: Callable[[str], None]) -> None:
"""Asserts that a function caches intermediate data/state.
To be specific, given a function whose caching behavior is
deterministic in the value of a supplied string, this function asserts
that, on average, subsequent invocations of the function for a specific
string are faster than first invocations with that same string.
Args:
to_check: A function returning nothing, that caches values based on
an arbitrary supplied string.
"""
initial_runs = []
cached_runs = []
for epoch in range(_CACHE_EPOCHS):
runs = []
text = str(epoch)
for trial in range(_CACHE_TRIALS):
runs.append(_time_invocation(lambda: to_check(text)))
initial_runs.append(runs[0])
cached_runs.extend(runs[1:])
average_cold = sum((run for run in initial_runs),
datetime.timedelta()) / len(initial_runs)
average_warm = sum((run for run in cached_runs),
datetime.timedelta()) / len(cached_runs)
self.assertLess(average_warm, average_cold)
def assert_eventually(self,
predicate: Callable[[], bool],
*,
timeout: Optional[datetime.timedelta] = None,
message: Optional[Callable[[], str]] = None) -> None:
message = message or (lambda: "Proposition did not evaluate to true")
timeout = timeout or datetime.timedelta(seconds=10)
end = datetime.datetime.now() + timeout
while datetime.datetime.now() < end:
if predicate():
break
time.sleep(0.5)
else:
self.fail(message() + " after " + str(timeout))
def test_unary_unary_insecure(self):
with _server(None) as port:
target = f'localhost:{port}'
response = grpc.experimental.unary_unary(
_REQUEST,
target,
_UNARY_UNARY,
channel_credentials=grpc.experimental.
insecure_channel_credentials(),
timeout=None)
self.assertEqual(_REQUEST, response)
def test_unary_unary_secure(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
response = grpc.experimental.unary_unary(
_REQUEST,
target,
_UNARY_UNARY,
channel_credentials=grpc.local_channel_credentials(),
timeout=None)
self.assertEqual(_REQUEST, response)
def test_channels_cached(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
test_name = inspect.stack()[0][3]
args = (_REQUEST, target, _UNARY_UNARY)
kwargs = {"channel_credentials": grpc.local_channel_credentials()}
def _invoke(seed: str):
run_kwargs = dict(kwargs)
run_kwargs["options"] = ((test_name + seed, ""),)
grpc.experimental.unary_unary(*args, **run_kwargs)
self.assert_cached(_invoke)
def test_channels_evicted(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
response = grpc.experimental.unary_unary(
_REQUEST,
target,
_UNARY_UNARY,
channel_credentials=grpc.local_channel_credentials())
self.assert_eventually(
lambda: grpc._simple_stubs.ChannelCache.get(
)._test_only_channel_count() == 0,
message=lambda:
f"{grpc._simple_stubs.ChannelCache.get()._test_only_channel_count()} remain"
)
def test_total_channels_enforced(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
for i in range(_STRESS_EPOCHS):
# Ensure we get a new channel each time.
options = (("foo", str(i)),)
# Send messages at full blast.
grpc.experimental.unary_unary(
_REQUEST,
target,
_UNARY_UNARY,
options=options,
channel_credentials=grpc.local_channel_credentials())
self.assert_eventually(
lambda: grpc._simple_stubs.ChannelCache.get(
)._test_only_channel_count() <= _MAXIMUM_CHANNELS + 1,
message=lambda:
f"{grpc._simple_stubs.ChannelCache.get()._test_only_channel_count()} channels remain"
)
def test_unary_stream(self):
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
for response in grpc.experimental.unary_stream(
_REQUEST,
target,
_UNARY_STREAM,
channel_credentials=grpc.local_channel_credentials()):
self.assertEqual(_REQUEST, response)
def test_stream_unary(self):
def request_iter():
for _ in range(_CLIENT_REQUEST_COUNT):
yield _REQUEST
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
response = grpc.experimental.stream_unary(
request_iter(),
target,
_STREAM_UNARY,
channel_credentials=grpc.local_channel_credentials())
self.assertEqual(_REQUEST, response)
def test_stream_stream(self):
def request_iter():
for _ in range(_CLIENT_REQUEST_COUNT):
yield _REQUEST
with _server(grpc.local_server_credentials()) as port:
target = f'localhost:{port}'
for response in grpc.experimental.stream_stream(
request_iter(),
target,
_STREAM_STREAM,
channel_credentials=grpc.local_channel_credentials()):
self.assertEqual(_REQUEST, response)
def test_default_ssl(self):
_private_key = resources.private_key()
_certificate_chain = resources.certificate_chain()
_server_certs = ((_private_key, _certificate_chain),)
_server_host_override = 'foo.test.google.fr'
_test_root_certificates = resources.test_root_certificates()
_property_options = ((
'grpc.ssl_target_name_override',
_server_host_override,
),)
cert_dir = os.path.join(os.path.dirname(resources.__file__),
"credentials")
cert_file = os.path.join(cert_dir, "ca.pem")
with _env("GRPC_DEFAULT_SSL_ROOTS_FILE_PATH", cert_file):
server_creds = grpc.ssl_server_credentials(_server_certs)
with _server(server_creds) as port:
target = f'localhost:{port}'
response = grpc.experimental.unary_unary(
_REQUEST, target, _UNARY_UNARY, options=_property_options)
def test_insecure_sugar(self):
with _server(None) as port:
target = f'localhost:{port}'
response = grpc.experimental.unary_unary(_REQUEST,
target,
_UNARY_UNARY,
insecure=True)
self.assertEqual(_REQUEST, response)
def test_insecure_sugar_mutually_exclusive(self):
with _server(None) as port:
target = f'localhost:{port}'
with self.assertRaises(ValueError):
response = grpc.experimental.unary_unary(
_REQUEST,
target,
_UNARY_UNARY,
insecure=True,
channel_credentials=grpc.local_channel_credentials())
def test_default_wait_for_ready(self):
addr, port, sock = get_socket()
sock.close()
target = f'{addr}:{port}'
channel = grpc._simple_stubs.ChannelCache.get().get_channel(
target, (), None, True, None)
rpc_finished_event = threading.Event()
rpc_failed_event = threading.Event()
server = None
def _on_connectivity_changed(connectivity):
nonlocal server
if connectivity is grpc.ChannelConnectivity.TRANSIENT_FAILURE:
self.assertFalse(rpc_finished_event.is_set())
self.assertFalse(rpc_failed_event.is_set())
server = test_common.test_server()
server.add_insecure_port(target)
server.add_generic_rpc_handlers((_GenericHandler(),))
server.start()
channel.unsubscribe(_on_connectivity_changed)
elif connectivity in (grpc.ChannelConnectivity.IDLE,
grpc.ChannelConnectivity.CONNECTING):
pass
else:
self.fail("Encountered unknown state.")
channel.subscribe(_on_connectivity_changed)
def _send_rpc():
try:
response = grpc.experimental.unary_unary(_REQUEST,
target,
_UNARY_UNARY,
timeout=None,
insecure=True)
rpc_finished_event.set()
except Exception as e:
rpc_failed_event.set()
t = threading.Thread(target=_send_rpc)
t.start()
t.join()
self.assertFalse(rpc_failed_event.is_set())
self.assertTrue(rpc_finished_event.is_set())
if server is not None:
server.stop(None)
def assert_times_out(self, invocation_args):
with _server(None) as port:
target = f'localhost:{port}'
with self.assertRaises(grpc.RpcError) as cm:
response = grpc.experimental.unary_unary(_REQUEST,
target,
_BLACK_HOLE,
insecure=True,
**invocation_args)
self.assertEqual(grpc.StatusCode.DEADLINE_EXCEEDED,
cm.exception.code())
def test_default_timeout(self):
not_present = object()
wait_for_ready_values = [True, not_present]
timeout_values = [0.5, not_present]
cases = []
for wait_for_ready in wait_for_ready_values:
for timeout in timeout_values:
case = {}
if timeout is not not_present:
case["timeout"] = timeout
if wait_for_ready is not not_present:
case["wait_for_ready"] = wait_for_ready
cases.append(case)
for case in cases:
with self.subTest(**case):
self.assert_times_out(case)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
unittest.main(verbosity=2)
|
build_imagenet_data.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-01023-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of floats specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of floats specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of floats specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of floats specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around 2.5 hours on an HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
data_dir = 'K:/ImageNet_object_localization/ILSVRC/Data/CLSLOC'
tf.app.flags.DEFINE_string('train_directory', 'K:/ImageNet_object_localization/ILSVRC/Data/CLSLOC/train',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', 'K:/ImageNet_object_localization/ILSVRC/Data/CLSLOC/val',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', 'K:',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels, one per line.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'K:/ImageNet_object_localization/ILSVRC/Data/CLSLOC/imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file contains the mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'K:/ImageNet_object_localization/ILSVRC/Data/CLSLOC/imagenet_metadata.txt',
'ImageNet metadata file')
# This file is the output of process_bounding_boxes.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
'C:/Users/SpiderJaws/Desktop/process_bounding_boxes.csv',
'Bounding box file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
bbox: list of bounding boxes; each box is a list of integers
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = b'RGB'
channels = 3
image_format = b'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(bytes(synset,'utf-8')),
'image/class/text': _bytes_feature(bytes(human,'utf-8')),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(bytes(os.path.basename(filename),'utf-8')),
'image/encoded': _bytes_feature(image_buffer)}))
return example
# 'image/filename': _bytes_feature(os.path.basename(filename)),
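# A minimal sketch (hypothetical helper, not used by this pipeline) of how the
# Example protos written above could be parsed back with the TF1 API; it only
# pulls a few of the fields listed in the module docstring.
def _parse_example_sketch(serialized_example):
  features = {
      'image/encoded': tf.FixedLenFeature([], tf.string),
      'image/class/label': tf.FixedLenFeature([], tf.int64),
      'image/class/synset': tf.FixedLenFeature([], tf.string),
      'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
      'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
      'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
      'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
  }
  parsed = tf.parse_single_example(serialized_example, features)
  image = tf.image.decode_jpeg(parsed['image/encoded'], channels=3)
  return image, parsed['image/class/label']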
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
image_data = tf.gfile.FastGFile(filename, 'rb').read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique batch index to run; within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
      list might contain 0 or more entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
      list might contain 0 or more entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
  # Break all images into batches, each covering [ranges[i][0], ranges[i][1]).
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [l.strip() for l in
tf.gfile.FastGFile(labels_file, 'r').readlines()]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
List of bounding boxes for each image. Note that each entry in this
    list might contain 0 or more entries corresponding to the number of bounding
box annotations for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
Note that there may exist multiple bounding box annotations associated
with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
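# Example: with num_threads=8, train_shards=1024 satisfies this check
# (1024 % 8 == 0, i.e. 128 shards per thread), whereas train_shards=100
# would fail because 100 % 8 != 0.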
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, image_to_bboxes)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, image_to_bboxes)
if __name__ == '__main__':
tf.app.run()
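# A typical invocation might look like the following (the script name, paths and
# shard counts are placeholders; the flag names match the FLAGS used above):
#   python build_imagenet_data.py \
#     --train_directory=/path/to/train \
#     --validation_directory=/path/to/validation \
#     --output_directory=/path/to/output \
#     --imagenet_metadata_file=/path/to/imagenet_metadata.txt \
#     --labels_file=/path/to/synset_labels.txt \
#     --bounding_box_file=/path/to/bounding_boxes.csv \
#     --train_shards=1024 --validation_shards=128 --num_threads=8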
|
data.py
|
# THIS FILE IS FOR EXPERIMENTS, USE image_iter.py FOR NORMAL IMAGE LOADING.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import logging
import sys
import numbers
import math
import sklearn.preprocessing
import datetime
import numpy as np
import cv2
import mxnet as mx
from mxnet import ndarray as nd
#from . import _ndarray_internal as _internal
#from mxnet._ndarray_internal import _cvimresize as imresize
#from ._ndarray_internal import _cvcopyMakeBorder as copyMakeBorder
from mxnet import io
from mxnet import recordio
sys.path.append(os.path.join(os.path.dirname(__file__), 'common'))
import face_preprocess
import multiprocessing
logger = logging.getLogger()
def pick_triplets_impl(q_in, q_out):
more = True
while more:
deq = q_in.get()
if deq is None:
more = False
else:
embeddings, emb_start_idx, nrof_images, alpha = deq
print('running', emb_start_idx, nrof_images, os.getpid())
for j in xrange(1,nrof_images):
a_idx = emb_start_idx + j - 1
neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), 1)
for pair in xrange(j, nrof_images): # For every possible positive pair.
p_idx = emb_start_idx + pair
pos_dist_sqr = np.sum(np.square(embeddings[a_idx]-embeddings[p_idx]))
neg_dists_sqr[emb_start_idx:emb_start_idx+nrof_images] = np.NaN
all_neg = np.where(np.logical_and(neg_dists_sqr-pos_dist_sqr<alpha, pos_dist_sqr<neg_dists_sqr))[0] # FaceNet selection
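# FaceNet-style semi-hard selection: keep negatives n whose squared distance satisfies
# d(a,p)^2 < d(a,n)^2 < d(a,p)^2 + alpha.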
#all_neg = np.where(neg_dists_sqr-pos_dist_sqr<alpha)[0] # VGG Face selection
nrof_random_negs = all_neg.shape[0]
if nrof_random_negs>0:
rnd_idx = np.random.randint(nrof_random_negs)
n_idx = all_neg[rnd_idx]
#triplets.append( (a_idx, p_idx, n_idx) )
q_out.put( (a_idx, p_idx, n_idx) )
#emb_start_idx += nrof_images
print('exit',os.getpid())
class FaceImageIter(io.DataIter):
def __init__(self, batch_size, data_shape,
path_imgrec = None,
shuffle=False, aug_list=None, mean = None,
rand_mirror = False, cutoff = 0,
c2c_threshold = 0.0, output_c2c = 0, c2c_mode = -10, limit = 0,
ctx_num = 0, images_per_identity = 0, data_extra = None, hard_mining = False,
triplet_params = None, coco_mode = False,
mx_model = None,
data_name='data', label_name='softmax_label', **kwargs):
super(FaceImageIter, self).__init__()
assert path_imgrec
if path_imgrec:
logging.info('loading recordio %s...',
path_imgrec)
path_imgidx = path_imgrec[0:-4]+".idx"
self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type
s = self.imgrec.read_idx(0)
header, _ = recordio.unpack(s)
self.idx2cos = {}
self.idx2flag = {}
self.idx2meancos = {}
self.c2c_auto = False
#if output_c2c or c2c_threshold>0.0 or c2c_mode>=-5:
# path_c2c = os.path.join(os.path.dirname(path_imgrec), 'c2c')
# print(path_c2c)
# if os.path.exists(path_c2c):
# for line in open(path_c2c, 'r'):
# vec = line.strip().split(',')
# idx = int(vec[0])
# self.idx2cos[idx] = float(vec[1])
# self.idx2flag[idx] = 1
# if len(vec)>2:
# self.idx2flag[idx] = int(vec[2])
# else:
# self.c2c_auto = True
# self.c2c_step = 10000
print("header.flag", header)
if header.flag>0:
print('header0 label', header.label)
self.header0 = (int(header.label[0]), int(header.label[1]))
#assert(header.flag==1)
self.imgidx = range(1, int(header.label[0]))
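# Layout of the indexed .rec file: record 0 stores label = (identity_start, identity_end);
# records [1, identity_start) are images, and records [identity_start, identity_end) are
# per-identity headers whose label = (first_image_idx, last_image_idx) for that identity.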
if c2c_mode==0:
imgidx2 = []
for idx in self.imgidx:
c = self.idx2cos[idx]
f = self.idx2flag[idx]
if f!=1:
continue
imgidx2.append(idx)
print('idx count', len(self.imgidx), len(imgidx2))
self.imgidx = imgidx2
elif c2c_mode==1:
imgidx2 = []
tmp = []
for idx in self.imgidx:
c = self.idx2cos[idx]
f = self.idx2flag[idx]
if f==1:
imgidx2.append(idx)
else:
tmp.append( (idx, c) )
tmp = sorted(tmp, key = lambda x:x[1])
tmp = tmp[250000:300000]
for _t in tmp:
imgidx2.append(_t[0])
print('idx count', len(self.imgidx), len(imgidx2))
self.imgidx = imgidx2
elif c2c_mode==2:
imgidx2 = []
tmp = []
for idx in self.imgidx:
c = self.idx2cos[idx]
f = self.idx2flag[idx]
if f==1:
imgidx2.append(idx)
else:
tmp.append( (idx, c) )
tmp = sorted(tmp, key = lambda x:x[1])
tmp = tmp[200000:300000]
for _t in tmp:
imgidx2.append(_t[0])
print('idx count', len(self.imgidx), len(imgidx2))
self.imgidx = imgidx2
elif c2c_mode==-2:
imgidx2 = []
for idx in self.imgidx:
c = self.idx2cos[idx]
f = self.idx2flag[idx]
if f==2:
continue
if c<0.73:
continue
imgidx2.append(idx)
print('idx count', len(self.imgidx), len(imgidx2))
self.imgidx = imgidx2
elif c2c_threshold>0.0:
imgidx2 = []
for idx in self.imgidx:
c = self.idx2cos[idx]
f = self.idx2flag[idx]
if c<c2c_threshold:
continue
imgidx2.append(idx)
print(len(self.imgidx), len(imgidx2))
self.imgidx = imgidx2
self.id2range = {}
self.seq_identity = range(int(header.label[0]), int(header.label[1]))
c2c_stat = [0,0]
for identity in self.seq_identity:
s = self.imgrec.read_idx(identity)
header, _ = recordio.unpack(s)
a,b = int(header.label[0]), int(header.label[1])
self.id2range[identity] = (a,b)
count = b-a
if count>=output_c2c:
c2c_stat[1]+=1
else:
c2c_stat[0]+=1
for ii in range(a,b):
self.idx2flag[ii] = count
if len(self.idx2cos)>0:
m = 0.0
for ii in range(a,b):
m+=self.idx2cos[ii]
m/=(b-a)
for ii in range(a,b):
self.idx2meancos[ii] = m
#self.idx2meancos[identity] = m
print('id2range', len(self.id2range))
print(len(self.idx2cos), len(self.idx2meancos), len(self.idx2flag))
print('c2c_stat', c2c_stat)
if limit>0 and limit<len(self.imgidx):
random.seed(727)
prob = float(limit)/len(self.imgidx)
new_imgidx = []
new_ids = 0
for identity in self.seq_identity:
s = self.imgrec.read_idx(identity)
header, _ = recordio.unpack(s)
a,b = int(header.label[0]), int(header.label[1])
found = False
for _idx in range(a,b):
if random.random()<prob:
found = True
new_imgidx.append(_idx)
if found:
new_ids+=1
self.imgidx = new_imgidx
print('new ids', new_ids)
random.seed(None)
#random.Random(727).shuffle(self.imgidx)
#self.imgidx = self.imgidx[0:limit]
else:
self.imgidx = list(self.imgrec.keys)
if shuffle:
self.seq = self.imgidx
self.oseq = self.imgidx
print(len(self.seq))
else:
self.seq = None
self.mean = mean
self.nd_mean = None
if self.mean:
self.mean = np.array(self.mean, dtype=np.float32).reshape(1,1,3)
self.nd_mean = mx.nd.array(self.mean).reshape((1,1,3))
self.check_data_shape(data_shape)
self.provide_data = [(data_name, (batch_size,) + data_shape)]
self.batch_size = batch_size
self.data_shape = data_shape
self.shuffle = shuffle
self.image_size = '%d,%d'%(data_shape[1],data_shape[2])
self.rand_mirror = rand_mirror
print('rand_mirror', rand_mirror)
self.cutoff = cutoff
#self.cast_aug = mx.image.CastAug()
self.color_aug = mx.image.ColorJitterAug(0.4, 0.4, 0.4)
self.ctx_num = ctx_num
self.c2c_threshold = c2c_threshold
self.output_c2c = output_c2c
self.per_batch_size = int(self.batch_size/self.ctx_num)
self.images_per_identity = images_per_identity
if self.images_per_identity>0:
self.identities = int(self.per_batch_size/self.images_per_identity)
self.per_identities = self.identities
self.repeat = 3000000.0/(self.images_per_identity*len(self.id2range))
self.repeat = int(self.repeat)
print(self.images_per_identity, self.identities, self.repeat)
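# 'repeat' is chosen so that one reset() pass yields roughly 3M samples in total:
# each repeat contributes images_per_identity samples for every identity.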
self.data_extra = None
if data_extra is not None:
self.data_extra = nd.array(data_extra)
self.provide_data = [(data_name, (batch_size,) + data_shape), ('extra', data_extra.shape)]
self.hard_mining = hard_mining
self.mx_model = mx_model
if self.hard_mining:
assert self.images_per_identity>0
assert self.mx_model is not None
self.triplet_params = triplet_params
self.triplet_mode = False
self.coco_mode = coco_mode
if len(label_name)>0:
if output_c2c:
self.provide_label = [(label_name, (batch_size,2))]
else:
self.provide_label = [(label_name, (batch_size,))]
else:
self.provide_label = []
print(self.provide_label[0][1])
if self.coco_mode:
assert self.triplet_params is None
assert self.images_per_identity>0
if self.triplet_params is not None:
assert self.images_per_identity>0
assert self.mx_model is not None
self.triplet_bag_size = self.triplet_params[0]
self.triplet_alpha = self.triplet_params[1]
self.triplet_max_ap = self.triplet_params[2]
assert self.triplet_bag_size>0
assert self.triplet_alpha>=0.0
assert self.triplet_alpha<=1.0
self.triplet_mode = True
self.triplet_oseq_cur = 0
self.triplet_oseq_reset()
self.seq_min_size = self.batch_size*2
self.cur = 0
self.nbatch = 0
self.is_init = False
self.times = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
#self.reset()
def ____pick_triplets(self, embeddings, nrof_images_per_class):
emb_start_idx = 0
people_per_batch = len(nrof_images_per_class)
nrof_threads = 8
q_in = multiprocessing.Queue()
q_out = multiprocessing.Queue()
processes = [multiprocessing.Process(target=pick_triplets_impl, args=(q_in, q_out)) \
for i in range(nrof_threads)]
for p in processes:
p.start()
# VGG Face: Choosing good triplets is crucial and should strike a balance between
# selecting informative (i.e. challenging) examples and swamping training with examples that
# are too hard. This is achieved by extending each pair (a, p) to a triplet (a, p, n) by sampling
# the image n at random, but only among the ones that violate the triplet loss margin. The
# latter is a form of hard-negative mining, but it is not as aggressive as (and much cheaper than)
# choosing the maximally violating example, as often done in structured output learning.
for i in xrange(people_per_batch):
nrof_images = int(nrof_images_per_class[i])
job = (embeddings, emb_start_idx, nrof_images, self.triplet_alpha)
emb_start_idx+=nrof_images
q_in.put(job)
for i in xrange(nrof_threads):
q_in.put(None)
print('joining')
for p in processes:
p.join()
print('joined')
q_out.put(None)
triplets = []
more = True
while more:
triplet = q_out.get()
if triplet is None:
more = False
else:
triplets.append(triplet)
np.random.shuffle(triplets)
return triplets
# calculate pairwise distances on a single gpu
def _pairwise_dists(self, embeddings):
nd_embedding = mx.nd.array(embeddings, mx.gpu(0))
pdists = []
for idx in xrange(embeddings.shape[0]):
a_embedding = nd_embedding[idx]
body = mx.nd.broadcast_sub(a_embedding, nd_embedding)
body = body*body
body = mx.nd.sum_axis(body, axis=1)
ret = body.asnumpy()
#print(ret.shape)
pdists.append(ret)
return pdists
def pairwise_dists(self, embeddings):
nd_embedding_list = []
for i in xrange(self.ctx_num):
nd_embedding = mx.nd.array(embeddings, mx.gpu(i))
nd_embedding_list.append(nd_embedding)
nd_pdists = []
pdists = []
for idx in xrange(embeddings.shape[0]):
emb_idx = idx%self.ctx_num
nd_embedding = nd_embedding_list[emb_idx]
a_embedding = nd_embedding[idx]
body = mx.nd.broadcast_sub(a_embedding, nd_embedding)
body = body*body
body = mx.nd.sum_axis(body, axis=1)
nd_pdists.append(body)
if len(nd_pdists)==self.ctx_num or idx==embeddings.shape[0]-1:
for x in nd_pdists:
pdists.append(x.asnumpy())
nd_pdists = []
return pdists
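# pick_triplets() below mirrors the FaceNet selection in pick_triplets_impl(),
# but reuses the per-anchor squared distances precomputed on GPU by pairwise_dists().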
def pick_triplets(self, embeddings, nrof_images_per_class):
emb_start_idx = 0
triplets = []
people_per_batch = len(nrof_images_per_class)
#self.time_reset()
pdists = self.pairwise_dists(embeddings)
#self.times[3] += self.time_elapsed()
for i in xrange(people_per_batch):
nrof_images = int(nrof_images_per_class[i])
for j in xrange(1,nrof_images):
#self.time_reset()
a_idx = emb_start_idx + j - 1
#neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), 1)
neg_dists_sqr = pdists[a_idx]
#self.times[3] += self.time_elapsed()
for pair in xrange(j, nrof_images): # For every possible positive pair.
p_idx = emb_start_idx + pair
#self.time_reset()
pos_dist_sqr = np.sum(np.square(embeddings[a_idx]-embeddings[p_idx]))
#self.times[4] += self.time_elapsed()
#self.time_reset()
neg_dists_sqr[emb_start_idx:emb_start_idx+nrof_images] = np.NaN
if self.triplet_max_ap>0.0:
if pos_dist_sqr>self.triplet_max_ap:
continue
all_neg = np.where(np.logical_and(neg_dists_sqr-pos_dist_sqr<self.triplet_alpha, pos_dist_sqr<neg_dists_sqr))[0] # FaceNet selection
#self.times[5] += self.time_elapsed()
#self.time_reset()
#all_neg = np.where(neg_dists_sqr-pos_dist_sqr<alpha)[0] # VGG Face selection
nrof_random_negs = all_neg.shape[0]
if nrof_random_negs>0:
rnd_idx = np.random.randint(nrof_random_negs)
n_idx = all_neg[rnd_idx]
triplets.append( (a_idx, p_idx, n_idx) )
emb_start_idx += nrof_images
np.random.shuffle(triplets)
return triplets
def __pick_triplets(self, embeddings, nrof_images_per_class):
emb_start_idx = 0
triplets = []
people_per_batch = len(nrof_images_per_class)
for i in xrange(people_per_batch):
nrof_images = int(nrof_images_per_class[i])
if nrof_images<2:
continue
for j in xrange(1,nrof_images):
a_idx = emb_start_idx + j - 1
pcount = nrof_images-1
dists_a2all = np.sum(np.square(embeddings[a_idx] - embeddings), 1) #(N,)
#print(a_idx, dists_a2all.shape)
ba = emb_start_idx
bb = emb_start_idx+nrof_images
sorted_idx = np.argsort(dists_a2all)
#print('assert', sorted_idx[0], a_idx)
#assert sorted_idx[0]==a_idx
#for idx in sorted_idx:
# print(idx, dists_a2all[idx])
p2n_map = {}
pfound = 0
for idx in sorted_idx:
if idx==a_idx: #is anchor
continue
if idx<bb and idx>=ba: #is pos
p2n_map[idx] = [dists_a2all[idx], []] #ap, [neg_list]
pfound+=1
else: # is neg
an = dists_a2all[idx]
if pfound==pcount and len(p2n_map)==0:
break
to_del = []
for p_idx in p2n_map:
v = p2n_map[p_idx]
an_ap = an - v[0]
if an_ap<self.triplet_alpha:
v[1].append(idx)
else:
#output
if len(v[1])>0:
n_idx = random.choice(v[1])
triplets.append( (a_idx, p_idx, n_idx) )
to_del.append(p_idx)
for _del in to_del:
del p2n_map[_del]
for p_idx,v in p2n_map.iteritems():
if len(v[1])>0:
n_idx = random.choice(v[1])
triplets.append( (a_idx, p_idx, n_idx) )
emb_start_idx += nrof_images
np.random.shuffle(triplets)
return triplets
def triplet_oseq_reset(self):
#reset self.oseq by identities seq
self.triplet_oseq_cur = 0
ids = []
for k in self.id2range:
ids.append(k)
random.shuffle(ids)
self.oseq = []
for _id in ids:
v = self.id2range[_id]
_list = list(range(*v))
random.shuffle(_list)
if len(_list)>self.images_per_identity:
_list = _list[0:self.images_per_identity]
self.oseq += _list
print('oseq', len(self.oseq))
def time_reset(self):
self.time_now = datetime.datetime.now()
def time_elapsed(self):
time_now = datetime.datetime.now()
diff = time_now - self.time_now
return diff.total_seconds()
def select_triplets(self):
self.seq = []
while len(self.seq)<self.seq_min_size:
self.time_reset()
embeddings = None
bag_size = self.triplet_bag_size
batch_size = self.batch_size
#data = np.zeros( (bag_size,)+self.data_shape )
#label = np.zeros( (bag_size,) )
tag = []
#idx = np.zeros( (bag_size,) )
print('eval %d images..'%bag_size, self.triplet_oseq_cur)
print('triplet time stat', self.times)
if self.triplet_oseq_cur+bag_size>len(self.oseq):
self.triplet_oseq_reset()
print('eval %d images..'%bag_size, self.triplet_oseq_cur)
self.times[0] += self.time_elapsed()
self.time_reset()
#print(data.shape)
data = nd.zeros( self.provide_data[0][1] )
label = nd.zeros( self.provide_label[0][1] )
ba = 0
while True:
bb = min(ba+batch_size, bag_size)
if ba>=bb:
break
#_batch = self.data_iter.next()
#_data = _batch.data[0].asnumpy()
#print(_data.shape)
#_label = _batch.label[0].asnumpy()
#data[ba:bb,:,:,:] = _data
#label[ba:bb] = _label
for i in range(ba, bb):
_idx = self.oseq[i+self.triplet_oseq_cur]
s = self.imgrec.read_idx(_idx)
header, img = recordio.unpack(s)
img = self.imdecode(img)
data[i-ba][:] = self.postprocess_data(img)
label[i-ba][:] = header.label
tag.append( ( int(header.label), _idx) )
#idx[i] = _idx
db = mx.io.DataBatch(data=(data,), label=(label,))
self.mx_model.forward(db, is_train=False)
net_out = self.mx_model.get_outputs()
#print('eval for selecting triplets',ba,bb)
#print(net_out)
#print(len(net_out))
#print(net_out[0].asnumpy())
net_out = net_out[0].asnumpy()
#print(net_out)
#print('net_out', net_out.shape)
if embeddings is None:
embeddings = np.zeros( (bag_size, net_out.shape[1]))
embeddings[ba:bb,:] = net_out
ba = bb
assert len(tag)==bag_size
self.triplet_oseq_cur+=bag_size
embeddings = sklearn.preprocessing.normalize(embeddings)
self.times[1] += self.time_elapsed()
self.time_reset()
nrof_images_per_class = [1]
for i in range(1, bag_size):
if tag[i][0]==tag[i-1][0]:
nrof_images_per_class[-1]+=1
else:
nrof_images_per_class.append(1)
triplets = self.pick_triplets(embeddings, nrof_images_per_class) # shape=(T,3)
print('found triplets', len(triplets))
ba = 0
while True:
bb = ba+self.per_batch_size//3
if bb>len(triplets):
break
_triplets = triplets[ba:bb]
for i in range(3):
for triplet in _triplets:
_pos = triplet[i]
_idx = tag[_pos][1]
self.seq.append(_idx)
ba = bb
self.times[2] += self.time_elapsed()
def triplet_reset(self):
self.select_triplets()
def hard_mining_reset(self):
#import faiss
from annoy import AnnoyIndex
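# Hard-mining strategy used below: average the embeddings of each identity into one
# class center, index the centers with Annoy, and for every identity sample images
# from its k = per_identities nearest identities, so a batch groups identities that
# are close in embedding space.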
data = nd.zeros( self.provide_data[0][1] )
label = nd.zeros( self.provide_label[0][1] )
#label = np.zeros( self.provide_label[0][1] )
X = None
ba = 0
batch_num = 0
while ba<len(self.oseq):
batch_num+=1
if batch_num%10==0:
print('loading batch',batch_num, ba)
bb = min(ba+self.batch_size, len(self.oseq))
_count = bb-ba
for i in range(_count):
idx = self.oseq[i+ba]
s = self.imgrec.read_idx(idx)
header, img = recordio.unpack(s)
img = self.imdecode(img)
data[i][:] = self.postprocess_data(img)
label[i][:] = header.label
db = mx.io.DataBatch(data=(data,self.data_extra), label=(label,))
self.mx_model.forward(db, is_train=False)
net_out = self.mx_model.get_outputs()
embedding = net_out[0].asnumpy()
nembedding = sklearn.preprocessing.normalize(embedding)
if _count<self.batch_size:
nembedding = nembedding[0:_count,:]
if X is None:
X = np.zeros( (len(self.id2range), nembedding.shape[1]), dtype=np.float32 )
nplabel = label.asnumpy()
for i in range(_count):
ilabel = int(nplabel[i])
#print(ilabel, ilabel.__class__)
X[ilabel] += nembedding[i]
ba = bb
X = sklearn.preprocessing.normalize(X)
d = X.shape[1]
t = AnnoyIndex(d, metric='euclidean')
for i in range(X.shape[0]):
t.add_item(i, X[i])
print('start to build index')
t.build(20)
print(X.shape)
k = self.per_identities
self.seq = []
for i in range(X.shape[0]):
nnlist = t.get_nns_by_item(i, k)
assert nnlist[0]==i
for _label in nnlist:
assert _label<len(self.id2range)
_id = self.header0[0]+_label
v = self.id2range[_id]
_list = range(*v)
if len(_list)<self.images_per_identity:
random.shuffle(_list)
else:
_list = np.random.choice(_list, self.images_per_identity, replace=False)
for i in range(self.images_per_identity):
_idx = _list[i%len(_list)]
self.seq.append(_idx)
#faiss_params = [20,5]
#quantizer = faiss.IndexFlatL2(d) # the other index
#index = faiss.IndexIVFFlat(quantizer, d, faiss_params[0], faiss.METRIC_L2)
#assert not index.is_trained
#index.train(X)
#index.add(X)
#assert index.is_trained
#print('trained')
#index.nprobe = faiss_params[1]
#D, I = index.search(X, k) # actual search
#print(I.shape)
#self.seq = []
#for i in xrange(I.shape[0]):
# #assert I[i][0]==i
# for j in xrange(k):
# _label = I[i][j]
# assert _label<len(self.id2range)
# _id = self.header0[0]+_label
# v = self.id2range[_id]
# _list = range(*v)
# if len(_list)<self.images_per_identity:
# random.shuffle(_list)
# else:
# _list = np.random.choice(_list, self.images_per_identity, replace=False)
# for i in xrange(self.images_per_identity):
# _idx = _list[i%len(_list)]
# self.seq.append(_idx)
def reset_c2c(self):
self.select_triplets()
for identity,v in self.id2range.iteritems():
_list = range(*v)
ocontents = []
for idx in _list:
s = self.imgrec.read_idx(idx)
ocontents.append(s)
embeddings = None
#print(len(ocontents))
ba = 0
while True:
bb = min(ba+self.batch_size, len(ocontents))
if ba>=bb:
break
_batch_size = bb-ba
_batch_size2 = max(_batch_size, self.ctx_num)
data = nd.zeros( (_batch_size2,) + self.data_shape )
label = nd.zeros( (_batch_size2,) )
count = bb-ba
ii=0
for i in range(ba, bb):
header, img = mx.recordio.unpack(ocontents[i])
img = mx.image.imdecode(img)
img = nd.transpose(img, axes=(2, 0, 1))
data[ii][:] = img
label[ii][:] = header.label
ii+=1
while ii<_batch_size2:
data[ii][:] = data[0][:]
label[ii][:] = label[0][:]
ii+=1
db = mx.io.DataBatch(data=(data,), label=(label,))
self.mx_model.forward(db, is_train=False)
net_out = self.mx_model.get_outputs()
net_out = net_out[0].asnumpy()
if embeddings is None:
embeddings = np.zeros( (len(ocontents), net_out.shape[1]))
embeddings[ba:bb,:] = net_out[0:_batch_size,:]
ba = bb
embeddings = sklearn.preprocessing.normalize(embeddings)
embedding = np.mean(embeddings, axis=0, keepdims=True)
embedding = sklearn.preprocessing.normalize(embedding)
sims = np.dot(embeddings, embedding).flatten()
assert len(sims)==len(_list)
for i in range(len(_list)):
_idx = _list[i]
self.idx2cos[_idx] = sims[i]
def reset(self):
"""Resets the iterator to the beginning of the data."""
print('call reset()')
if self.c2c_auto:
self.reset_c2c()
self.cur = 0
if self.images_per_identity>0:
if self.triplet_mode:
self.triplet_reset()
elif not self.hard_mining:
self.seq = []
idlist = []
for _id,v in self.id2range.iteritems():
idlist.append((_id,range(*v)))
for r in range(self.repeat):
if r%10==0:
print('repeat', r)
if self.shuffle:
random.shuffle(idlist)
for item in idlist:
_id = item[0]
_list = item[1]
#random.shuffle(_list)
if len(_list)<self.images_per_identity:
random.shuffle(_list)
else:
_list = np.random.choice(_list, self.images_per_identity, replace=False)
for i in range(self.images_per_identity):
_idx = _list[i%len(_list)]
self.seq.append(_idx)
else:
self.hard_mining_reset()
print('seq len', len(self.seq))
else:
if self.shuffle:
self.seq = list(self.seq)
random.shuffle(self.seq)
if self.seq is None and self.imgrec is not None:
self.imgrec.reset()
def num_samples(self):
return len(self.seq)
def next_sample(self):
"""Helper function for reading in next sample."""
# set the total batch size, for example 1800, and the maximum number of images per identity, for example 45
if self.seq is not None:
while True:
if self.cur >= len(self.seq):
raise StopIteration
idx = self.seq[self.cur]
self.cur += 1
if self.imgrec is not None:
s = self.imgrec.read_idx(idx)
header, img = recordio.unpack(s)
label = header.label
if self.output_c2c:
count = self.idx2flag[idx]
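# Each output_c2c mode maps the identity's image count to a per-sample value v
# (roughly in the 0.3-0.5 range); next() later converts this second label
# channel into cos(v)^2.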
if self.output_c2c==1:
v = np.random.uniform(0.4, 0.5)
elif self.output_c2c==2:
v = np.random.uniform(0.4, 0.5)
if count>=self.output_c2c:
v = np.random.uniform(0.3, 0.4)
elif self.output_c2c==3:
v = (9.5 - math.log(2.0+count))/10.0
v = min(max(v, 0.3), 0.5)
elif self.output_c2c==4:
mu = 0.0
sigma = 0.1
mrange = [0.4,0.5]
v = np.random.normal(mu, sigma)
v = abs(v)*-1.0+mrange[1]
v = max(v, mrange[0])
elif self.output_c2c==5:
v = np.random.uniform(0.41, 0.51)
if count>=175:
v = np.random.uniform(0.37, 0.47)
elif self.output_c2c==6:
v = np.random.uniform(0.41, 0.51)
if count>=175:
v = np.random.uniform(0.38, 0.48)
else:
assert False
label = [label, v]
else:
if not isinstance(label, numbers.Number):
label = label[0]
return label, img, None, None
else:
label, fname, bbox, landmark = self.imglist[idx]
return label, self.read_image(fname), bbox, landmark
else:
s = self.imgrec.read()
if s is None:
raise StopIteration
header, img = recordio.unpack(s)
return header.label, img, None, None
def brightness_aug(self, src, x):
alpha = 1.0 + random.uniform(-x, x)
src *= alpha
return src
def contrast_aug(self, src, x):
alpha = 1.0 + random.uniform(-x, x)
coef = np.array([[[0.299, 0.587, 0.114]]])
gray = src * coef
gray = (3.0 * (1.0 - alpha) / gray.size) * np.sum(gray)
src *= alpha
src += gray
return src
def saturation_aug(self, src, x):
alpha = 1.0 + random.uniform(-x, x)
coef = np.array([[[0.299, 0.587, 0.114]]])
gray = src * coef
gray = np.sum(gray, axis=2, keepdims=True)
gray *= (1.0 - alpha)
src *= alpha
src += gray
return src
def color_aug(self, img, x):
augs = [self.brightness_aug, self.contrast_aug, self.saturation_aug]
random.shuffle(augs)
for aug in augs:
#print(img.shape)
img = aug(img, x)
#print(img.shape)
return img
def mirror_aug(self, img):
_rd = random.randint(0,1)
if _rd==1:
for c in range(img.shape[2]):
img[:,:,c] = np.fliplr(img[:,:,c])
return img
def next(self):
if not self.is_init:
self.reset()
self.is_init = True
"""Returns the next batch of data."""
#print('in next', self.cur, self.labelcur)
self.nbatch+=1
batch_size = self.batch_size
c, h, w = self.data_shape
batch_data = nd.empty((batch_size, c, h, w))
if self.provide_label is not None:
batch_label = nd.empty(self.provide_label[0][1])
i = 0
try:
while i < batch_size:
label, s, bbox, landmark = self.next_sample()
_data = self.imdecode(s)
if self.rand_mirror:
_rd = random.randint(0,1)
if _rd==1:
_data = mx.ndarray.flip(data=_data, axis=1)
if self.nd_mean is not None:
_data = _data.astype('float32')
_data -= self.nd_mean
_data *= 0.0078125
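# Random erasing ("cutout"): blank a cutoff x cutoff patch around a random
# center with the mid-gray value 127.5 (assuming raw pixel values in [0, 255]).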
if self.cutoff>0:
centerh = random.randint(0, _data.shape[0]-1)
centerw = random.randint(0, _data.shape[1]-1)
half = self.cutoff//2
starth = max(0, centerh-half)
endh = min(_data.shape[0], centerh+half)
startw = max(0, centerw-half)
endw = min(_data.shape[1], centerw+half)
_data = _data.astype('float32')
#print(starth, endh, startw, endw, _data.shape)
_data[starth:endh, startw:endw, :] = 127.5
#_npdata = _data.asnumpy()
#if landmark is not None:
# _npdata = face_preprocess.preprocess(_npdata, bbox = bbox, landmark=landmark, image_size=self.image_size)
#if self.rand_mirror:
# _npdata = self.mirror_aug(_npdata)
#if self.mean is not None:
# _npdata = _npdata.astype(np.float32)
# _npdata -= self.mean
# _npdata *= 0.0078125
#nimg = np.zeros(_npdata.shape, dtype=np.float32)
#nimg[self.patch[1]:self.patch[3],self.patch[0]:self.patch[2],:] = _npdata[self.patch[1]:self.patch[3], self.patch[0]:self.patch[2], :]
#_data = mx.nd.array(nimg)
data = [_data]
try:
self.check_valid_image(data)
except RuntimeError as e:
logging.debug('Invalid image, skipping: %s', str(e))
continue
#print('aa',data[0].shape)
data = self.augmentation_transform(data)
#print('bb',data[0].shape)
for datum in data:
assert i < batch_size, 'Batch size must be a multiple of the augmenter output length'
#print(datum.shape)
batch_data[i][:] = self.postprocess_data(datum)
if self.provide_label is not None:
if not self.coco_mode:
if len(batch_label.shape)==1:
batch_label[i][:] = label
else:
for ll in range(batch_label.shape[1]):
v = label[ll]
if ll>0:
#c2c = v
#_param = [0.5, 0.4, 0.85, 0.75]
#_a = (_param[1]-_param[0])/(_param[3]-_param[2])
#m = _param[1]+_a*(c2c-_param[3])
#m = min(_param[0], max(_param[1],m))
#v = math.cos(m)
#v = v*v
m = v
v = math.cos(m)
v = v*v
#print('m', i,m,v)
batch_label[i][ll] = v
else:
batch_label[i][:] = (i%self.per_batch_size)//self.images_per_identity
i += 1
except StopIteration:
if i<batch_size:
raise StopIteration
#print('next end', batch_size, i)
_label = None
if self.provide_label is not None:
_label = [batch_label]
if self.data_extra is not None:
return io.DataBatch([batch_data, self.data_extra], _label, batch_size - i)
else:
return io.DataBatch([batch_data], _label, batch_size - i)
def check_data_shape(self, data_shape):
"""Checks if the input data shape is valid"""
if not len(data_shape) == 3:
raise ValueError('data_shape should have length 3, with dimensions CxHxW')
if not data_shape[0] == 3:
raise ValueError('This iterator expects inputs to have 3 channels.')
def check_valid_image(self, data):
"""Checks if the input data is valid"""
if len(data[0].shape) == 0:
raise RuntimeError('Data shape is wrong')
def imdecode(self, s):
"""Decodes a string or byte string to an NDArray.
See mx.img.imdecode for more details."""
img = mx.image.imdecode(s) #mx.ndarray
return img
def read_image(self, fname):
"""Reads an input image `fname` and returns the decoded raw bytes.
Example usage:
----------
>>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.
"""
with open(os.path.join(self.path_root, fname), 'rb') as fin:
img = fin.read()
return img
def augmentation_transform(self, data):
"""Transforms input data with specified augmentation."""
for aug in self.auglist:
data = [ret for src in data for ret in aug(src)]
return data
def postprocess_data(self, datum):
"""Final postprocessing step before image is loaded into the batch."""
return nd.transpose(datum, axes=(2, 0, 1))
class FaceImageIterList(io.DataIter):
def __init__(self, iter_list):
assert len(iter_list)>0
self.provide_data = iter_list[0].provide_data
self.provide_label = iter_list[0].provide_label
self.iter_list = iter_list
self.cur_iter = None
def reset(self):
self.cur_iter.reset()
def next(self):
self.cur_iter = random.choice(self.iter_list)
while True:
try:
ret = self.cur_iter.next()
except StopIteration:
self.cur_iter.reset()
continue
return ret
|
test_channel.py
|
#!/usr/bin/env python
#
# Server that will accept connections from a Vim channel.
# Used by test_channel.vim.
#
# This requires Python 2.6 or later.
from __future__ import print_function
import json
import socket
import sys
import time
import threading
try:
# Python 3
import socketserver
except ImportError:
# Python 2
import SocketServer as socketserver
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def setup(self):
self.request.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
def handle(self):
print("=== socket opened ===")
while True:
try:
received = self.request.recv(4096).decode('utf-8')
except socket.error:
print("=== socket error ===")
break
except IOError:
print("=== socket closed ===")
break
if received == '':
print("=== socket closed ===")
break
print("received: {0}".format(received))
# We may receive two messages at once. Take the part up to the
# newline, which should be after the matching "]".
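# Each request is a JSON list of the form [<sequence-number>, <payload>],
# e.g. [1, "hello!"]; the reply sent below echoes the same sequence number.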
todo = received
while todo != '':
splitidx = todo.find('\n')
if splitidx < 0:
used = todo
todo = ''
else:
used = todo[:splitidx]
todo = todo[splitidx + 1:]
if used != received:
print("using: {0}".format(used))
try:
decoded = json.loads(used)
except ValueError:
print("json decoding failed")
decoded = [-1, '']
# Send a response if the sequence number is positive.
if decoded[0] >= 0:
if decoded[1] == 'hello!':
# simply send back a string
response = "got it"
elif decoded[1] == 'malformed1':
cmd = '["ex",":"]wrong!["ex","smi"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
# Need to wait for Vim to give up, otherwise it
# sometimes fails on OS X.
time.sleep(0.2)
elif decoded[1] == 'malformed2':
cmd = '"unterminated string'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
# Need to wait for Vim to give up, otherwise the double
# quote in the "ok" response terminates the string.
time.sleep(0.2)
elif decoded[1] == 'malformed3':
cmd = '["ex","missing ]"'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
# Need to wait for Vim to give up, otherwise the ]
# in the "ok" response terminates the list.
time.sleep(0.2)
elif decoded[1] == 'split':
cmd = '["ex","let '
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
time.sleep(0.01)
cmd = 'g:split = 123"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1].startswith("echo "):
# send back the argument
response = decoded[1][5:]
elif decoded[1] == 'make change':
# Send two ex commands at the same time, before
# replying to the request.
cmd = '["ex","call append(\\"$\\",\\"added1\\")"]'
cmd += '["ex","call append(\\"$\\",\\"added2\\")"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'bad command':
cmd = '["ex","foo bar"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'do normal':
# Send a normal command.
cmd = '["normal","G$s more\u001b"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'eval-works':
# Send an eval request. We ignore the response.
cmd = '["expr","\\"foo\\" . 123", -1]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'eval-special':
# Send an eval request. We ignore the response.
cmd = '["expr","\\"foo\x7f\x10\x01bar\\"", -2]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'eval-getline':
# Send an eval request. We ignore the response.
cmd = '["expr","getline(3)", -3]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'eval-fails':
# Send an eval request that will fail.
cmd = '["expr","xxx", -4]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'eval-error':
# Send an eval request that works but the result can't
# be encoded.
cmd = '["expr","function(\\"tr\\")", -5]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'eval-bad':
# Send an eval request missing the third argument.
cmd = '["expr","xxx"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'an expr':
# Send an expr request.
cmd = '["expr","setline(\\"$\\", [\\"one\\",\\"two\\",\\"three\\"])"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'call-func':
cmd = '["call","MyFunction",[1,2,3], 0]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'redraw':
cmd = '["redraw",""]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'redraw!':
cmd = '["redraw","force"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'empty-request':
cmd = '[]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'eval-result':
# Send back the last received eval result.
response = last_eval
elif decoded[1] == 'call me':
cmd = '[0,"we called you"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "ok"
elif decoded[1] == 'call me again':
cmd = '[0,"we did call you"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = ""
elif decoded[1] == 'send zero':
cmd = '[0,"zero index"]'
print("sending: {0}".format(cmd))
self.request.sendall(cmd.encode('utf-8'))
response = "sent zero"
elif decoded[1] == 'close me':
print("closing")
self.request.close()
response = ""
elif decoded[1] == 'wait a bit':
time.sleep(0.2)
response = "waited"
elif decoded[1] == '!quit!':
# we're done
self.server.shutdown()
return
elif decoded[1] == '!crash!':
# Crash!
42 / 0
else:
response = "what?"
if response == "":
print("no response")
else:
encoded = json.dumps([decoded[0], response])
print("sending: {0}".format(encoded))
self.request.sendall(encoded.encode('utf-8'))
# Negative numbers are used for "eval" responses.
elif decoded[0] < 0:
last_eval = decoded
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
def writePortInFile(port):
# Write the port number in Xportnr, so that the test knows it.
f = open("Xportnr", "w")
f.write("{0}".format(port))
f.close()
def main(host, port, server_class=ThreadedTCPServer):
# Wait half a second before opening the port to test waittime in ch_open().
# We do want to report the port number first, but we cannot open the
# socket yet, so guess that a fixed port is free.
if len(sys.argv) >= 2 and sys.argv[1] == 'delay':
port = 13684
writePortInFile(port)
print("Wait for it...")
time.sleep(0.5)
server = server_class((host, port), ThreadedTCPRequestHandler)
ip, port = server.server_address[0:2]
# Start a thread with the server. That thread will then start a new thread
# for each connection.
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
writePortInFile(port)
print("Listening on port {0}".format(port))
# Main thread terminates, but the server continues running
# until server.shutdown() is called.
try:
while server_thread.is_alive():
server_thread.join(1)
except (KeyboardInterrupt, SystemExit):
server.shutdown()
if __name__ == "__main__":
main("localhost", 0)
|
multiProcess.py
|
from scripts import camShow as cvShow, utilities as init, tesseract as runner
from multiprocessing import Process
cvShow.defaultCam = init.getCamera()
lang = init.pickLanguage()
# Pass the callables and their arguments to Process; calling them here would run
# them immediately in the parent process instead of in the child processes.
tesseractProcess = Process(target=runner.main, args=(lang, cvShow.defaultCam))
tesseractProcess.daemon = True
cameraProcess = Process(target=cvShow.show)
cameraProcess.daemon = True
if __name__ == '__main__':
cameraProcess.start()
tesseractProcess.start()
|
main.py
|
import discord
import os
import random
import requests
import sys
import threading
import time
import yaml
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def main():
return "Your Bot Is Ready"
sys.path.append("./objection_engine")
from deletion import Deletion
from discord.ext import commands, tasks
from message import Message
from objection_engine.beans.comment import Comment
from objection_engine.renderer import render_comment_list
from render import Render, State
from typing import List
# Global Variables:
renderQueue = []
deletionQueue = []
intents = discord.Intents.default()
intents.members = True
def loadConfig():
try:
with open("config.yaml") as file:
config = yaml.load(file, Loader=yaml.FullLoader)
global token, prefix, deletionDelay
token = config["token"].strip()
if not token:
raise Exception("The 'token' field is missing in the config file (config.yaml)!")
prefix = config["prefix"].strip()
if not prefix:
raise Exception("The 'prefix' field is missing in the config file (config.yaml)!")
deletionDelay = config["deletionDelay"].strip()
if not deletionDelay:
raise Exception("The 'deletionDelay' field is missing in the config file (config.yaml)!")
return True
except KeyError as keyErrorException:
print(f"The mapping key {keyErrorException} is missing in the config file (config.yaml)!")
except Exception as exception:
print(exception)
return False
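# Example config.yaml layout assumed by loadConfig(); the values below are
# placeholders and are read as strings:
#   token: "YOUR_DISCORD_BOT_TOKEN"
#   prefix: "!"
#   deletionDelay: "30"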
if not loadConfig():
exit()
courtBot = commands.AutoShardedBot(command_prefix=prefix, intents=intents)
# Default 'help' command is removed, we will make our own
courtBot.remove_command("help")
currentActivityText = f"{prefix}help"
async def changeActivity(newActivityText):
try:
global currentActivityText
if currentActivityText == newActivityText:
return
else:
newActivity = discord.Game(newActivityText)
await courtBot.change_presence(activity=newActivity)
currentActivityText = newActivityText
print(f"Activity was changed to {currentActivityText}")
except Exception as exception:
print(f"Error: {exception}")
def addToDeletionQueue(message: discord.Message):
# Only add the message to the deletionQueue if the deletion delay is greater than 0.
if int(deletionDelay) > 0:
newDeletion = Deletion(message, int(deletionDelay))
deletionQueue.append(newDeletion)
@courtBot.event
async def on_message(message):
if message.author is courtBot.user or message.author.bot:
return
if message.channel.type is discord.ChannelType.private:
embedResponse = discord.Embed(description="I won't process any messages via PM.\nIf you have any problems, please go to [the support server](https://discord.gg/pcS4MPbRDU).", color=0xff0000)
await message.channel.send(embed=embedResponse)
return
await courtBot.process_commands(message)
@courtBot.command()
async def help(context):
dummyAmount = random.randint(2, 20)
helpEmbed = discord.Embed(description="Discord bot that turns message chains into ace attorney scenes (~~also handy for server arguments~~).", color=0x3366CC)
helpEmbed.set_footer(text="Do not include these symbols (\"<\" and \">\") when using this command")
helpEmbed.set_author(name=courtBot.user.name, icon_url=courtBot.user.avatar_url)
helpEmbed.add_field(name="How to use?", value=f" {prefix}render <number_of_messages>", inline=False)
helpEmbed.add_field(name="Example", value=f"Turn the last {dummyAmount} messages into an ace attorney scene: {prefix}render {dummyAmount} ", inline=False)
helpEmbed.add_field(name="Starting message", value="By default, the bot will **load the specified number of messages from the last message** (before using the command) going backwards, **if you want the message count to start from another message, reply to it when using the command**.", inline=False)
helpMessage = await context.send(embed=helpEmbed)
addToDeletionQueue(helpMessage)
# This command is only for the bot owner, it will ignore everybody else
@courtBot.command()
@commands.is_owner()
async def queue(context):
filename = "queue.txt"
with open(filename, 'w', encoding="utf-8") as queue:
global renderQueue
renderQueueSize = len(renderQueue)
queue.write(f"There are {renderQueueSize} item(s) in the queue!\n")
for positionInQueue, render in enumerate(iterable=renderQueue):
queue.write(f"\n#{positionInQueue:04}\n")
try: queue.write(f"Requested by: {render.getContext().author.name}#{render.getContext().author.discriminator}\n")
except: pass
try: queue.write(f"Number of messages: {len(render.getMessages())}\n")
except: pass
try: queue.write(f"Guild: {render.getFeedbackMessage().channel.guild.name}\n")
except: pass
try: queue.write(f"Channel: #{render.getFeedbackMessage().channel.name}\n")
except: pass
try: queue.write(f"State: {render.getStateString()}\n")
except: pass
await context.send(file=discord.File(filename))
clean([], filename)
@courtBot.command()
async def render(context, numberOfMessages: int):
global renderQueue
feedbackMessage = await context.send(content=" Fetching messages... ")
try:
if not (numberOfMessages in range(1, 21)):
raise Exception("Currently, to ease queue times, the bot can only render a maximum of 20 messages. We're looking to increase that number soon enough.")
# baseMessage is the message from which the specified number of messages will be fetched, not including itself
baseMessage = context.message.reference.resolved if context.message.reference else context.message
courtMessages = []
discordMessages = []
# If the render command was executed within a reply (baseMessage and context.message aren't the same), we want
# to append the message the user replied to (baseMessage) to the 'discordMessages' list and subtract 1 from
# 'numberOfMessages'; that way the added baseMessage is taken into account and we avoid fetching 1 extra message.
if not baseMessage.id == context.message.id:
numberOfMessages = numberOfMessages - 1
discordMessages.append(baseMessage)
# This will append all messages to the already existing discordMessages, if the message was a reply it should already
# include one message (the one it was replying to), if not: it will be empty at this point.
discordMessages += await context.channel.history(limit=numberOfMessages, oldest_first=False, before=baseMessage).flatten()
for discordMessage in discordMessages:
message = Message(discordMessage)
if message.text.strip():
courtMessages.insert(0, message.to_Comment())
if len(courtMessages) < 1:
raise Exception("There should be at least one person in the conversation.")
newRender = Render(State.QUEUED, context, feedbackMessage, courtMessages)
renderQueue.append(newRender)
except Exception as exception:
exceptionEmbed = discord.Embed(description=str(exception), color=0xff0000)
await feedbackMessage.edit(content="", embed=exceptionEmbed)
addToDeletionQueue(feedbackMessage)
@tasks.loop(seconds=1)
async def deletionQueueLoop():
global deletionQueue
deletionQueueSize = len(deletionQueue)
# Delete message and remove from queue if remaining time is less than (or equal to) 0
if deletionQueueSize > 0:
for index in reversed(range(deletionQueueSize)):
if await deletionQueue[index].update():
deletionQueue.pop(index)
@tasks.loop(seconds=5)
async def renderQueueLoop():
global renderQueue
renderQueueSize = len(renderQueue)
await changeActivity(f"{prefix}help | queue: {renderQueueSize}")
for positionInQueue, render in enumerate(iterable=renderQueue, start=1):
try:
if render.getState() == State.QUEUED:
newFeedback = f"""
Fetching messages... :white_check_mark:!
Position in the queue: #{(positionInQueue)}
"""
await render.updateFeedback(newFeedback)
if render.getState() == State.INPROGRESS:
newFeedback = f"""
Fetching messages... :white_check_mark:!
Your video is being generated...
"""
await render.updateFeedback(newFeedback)
if render.getState() == State.FAILED:
newFeedback = f"""
Fetching messages... :white_check_mark:!
Your video is being generated... :x:!
"""
await render.updateFeedback(newFeedback)
render.setState(State.DONE)
if render.getState() == State.RENDERED:
newFeedback = f"""
Fetching messages... :white_check_mark:!
Your video is being generated... :white_check_mark:!
Uploading file to Discord...
"""
await render.updateFeedback(newFeedback)
render.setState(State.UPLOADING)
# If the file size is lower than the maximum file size allowed in this guild, upload it to Discord
fileSize = os.path.getsize(render.getOutputFilename())
if fileSize < render.getContext().channel.guild.filesize_limit:
await render.getContext().send(content=render.getContext().author.mention, file=discord.File(render.getOutputFilename()))
render.setState(State.DONE)
newFeedback = f"""
Fetching messages... :white_check_mark:!
Your video is being generated... :white_check_mark:!
Uploading file to Discord... :white_check_mark:!
"""
await render.updateFeedback(newFeedback)
else:
try:
newFeedback = f"""
Fetching messages... :white_check_mark:!
Your video is being generated... :white_check_mark:!
Video file too big for your server! {round(fileSize/1000000, 2)} MB
Trying to upload file to an external server...
"""
await render.updateFeedback(newFeedback)
with open(render.getOutputFilename(), 'rb') as videoFile:
files = {'files[]': (render.getOutputFilename(), videoFile)}
response = requests.post('https://uguu.se/upload.php?output=text', files=files).content.decode("utf-8").strip()
newFeedback = f"""
Fetching messages... :white_check_mark:!
Your video is being generated... :white_check_mark:!
Video file too big for your server! {round(fileSize/1000000, 2)} MB
Trying to upload file to an external server... :white_check_mark:!
"""
await render.updateFeedback(newFeedback)
await render.getContext().send(content=f"{render.getContext().author.mention}\n{response}\n_This video will be deleted in 48 hours_")
render.setState(State.DONE)
except Exception as exception:
newFeedback = f"""
Fetching messages... :white_check_mark:!
Your video is being generated... :white_check_mark:!
Video file too big for your server! {round(fileSize/1000000, 2)} MB
Trying to upload file to an external server... :x:!
"""
await render.updateFeedback(newFeedback)
exceptionEmbed = discord.Embed(description=str(exception), color=0xff0000)
exceptionMessage = await render.getContext().send(embed=exceptionEmbed)
addToDeletionQueue(exceptionMessage)
render.setState(State.DONE)
except Exception as exception:
print(f"Error: {exception}")
try:
render.setState(State.DONE)
except:
pass
finally:
if render.getState() == State.DONE:
clean(render.getMessages(), render.getOutputFilename())
addToDeletionQueue(render.getFeedbackMessage())
# Remove from queue if state is DONE
if renderQueueSize > 0:
for index in reversed(range(renderQueueSize)):
if renderQueue[index].getState() == State.DONE:
renderQueue.pop(index)
@courtBot.event
async def on_ready():
global currentActivityText
print("Bot is ready!")
print(f"Logged in as {courtBot.user.name}#{courtBot.user.discriminator} ({courtBot.user.id})")
currentActivityText = f"{prefix}help"
renderQueueLoop.start()
deletionQueueLoop.start()
def clean(thread: List[Comment], filename):
try:
os.remove(filename)
except Exception as exception:
print(f"Error: {exception}")
try:
for comment in thread:
if (comment.evidence_path is not None):
os.remove(comment.evidence_path)
except Exception as exception:
print(f"Error: {exception}")
def renderThread():
global renderQueue
while True:
time.sleep(2)
try:
for render in renderQueue:
if render.getState() == State.QUEUED:
render.setState(State.INPROGRESS)
try:
render_comment_list(render.getMessages(), render.getOutputFilename())
render.setState(State.RENDERED)
except Exception as exception:
print(f"Error: {exception}")
render.setState(State.FAILED)
finally:
break
except Exception as exception:
print(f"Error: {exception}")
def run():
app.run(host="0.0.0.0", port=8000)
def keep_alive():
# Serve the Flask "keep alive" endpoint from a background thread so it does not
# block the Discord bot.
server = Thread(target=run)
server.start()
backgroundThread = threading.Thread(target=renderThread, name="RenderThread")
backgroundThread.start()
keep_alive()
courtBot.run(token)
backgroundThread.join()
|
receiverPubSubMultiprocessing_T1.py
|
"""pub_sub_receive.py -- receive OpenCV stream using PUB SUB."""
from parameters import ParticipantData
from parameters import Parameters
from parameters import OutsourceContract
from parameters import Helperfunctions
import json
from merkletools import MerkleTools
import sys
import videoStramSubscriber as vss
from nacl.signing import SigningKey
from nacl.signing import VerifyKey
import time
import imagezmq
import Responder as re
from utilities.stats import MovingAverage
import numpy as np
import cv2
from PIL import Image
# from absl.flags import FLAGS
# from absl import app, flags, logging
import os
import multiprocessing as mp
# uncomment the line below to suppress tensorflow log output
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def inference(preprocess_queue, inference_queue):
import tensorflow as tf
import core.utils as utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.compat.v1 import InteractiveSession
from tensorflow.compat.v1 import ConfigProto
from core.functions import count_objects, crop_objects
from core.config import cfg
from core.utils import read_class_names
import os
import random
from core.yolov4 import filter_boxes
tf.keras.backend.clear_session()
input_size = Parameters.input_size
model = OutsourceContract.model
framework = Parameters.framework
tiny = OutsourceContract.tiny
weights = Parameters.weights
iou = Parameters.iou
score = Parameters.score
physical_devices = tf.config.experimental.list_physical_devices('GPU')
try:
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
except:
pass
# configure gpu usage
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
# load model
if framework == 'tflite':
interpreter = tf.lite.Interpreter(model_path=weights)
else:
saved_model_loaded = tf.saved_model.load(
weights, tags=[tag_constants.SERVING])
# read in all class names from config
class_names = utils.read_class_names(cfg.YOLO.CLASSES)
count = Parameters.count
info = Parameters.info
crop = Parameters.crop
while True:
if not preprocess_queue.empty():
queueData = preprocess_queue.get()
while not preprocess_queue.empty():
queueData = preprocess_queue.get()
#preprocess_queue.task_done()
images_data = queueData[0]
name = queueData[1]
original_image = queueData[2]
#preprocess_queue.task_done()
if framework == 'tflite':
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
interpreter.set_tensor(input_details[0]['index'], images_data)
interpreter.invoke()
pred = [interpreter.get_tensor(
output_details[i]['index']) for i in range(len(output_details))]
if model == 'yolov3' and tiny == True:
boxes, pred_conf = filter_boxes(
pred[1], pred[0], score_threshold=0.25, input_shape=tf.constant([input_size, input_size]))
else:
boxes, pred_conf = filter_boxes(
pred[0], pred[1], score_threshold=0.25, input_shape=tf.constant([input_size, input_size]))
else:
infer = saved_model_loaded.signatures['serving_default']
batch_data = tf.constant(images_data)
pred_bbox = infer(batch_data)
for key, value in pred_bbox.items():
boxes = value[:, :, 0:4]
pred_conf = value[:, :, 4:]
boxes, scores, classes, valid_detections=tf.image.combined_non_max_suppression(
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
scores=tf.reshape(
pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
max_output_size_per_class=50,
max_total_size=50,
iou_threshold=iou,
score_threshold=score
) # 1.2ms
# format bounding boxes from normalized ymin, xmin, ymax, xmax ---> xmin, ymin, xmax, ymax
original_h, original_w, _=original_image.shape
bboxes=utils.format_boxes(
boxes.numpy()[0], original_h, original_w) # 1ms #-> no tf needed
# hold all detection data in one variable
pred_bbox=[bboxes, scores.numpy()[0], classes.numpy()[0],
valid_detections.numpy()[0]]
# by default allow all classes in .names file
allowed_classes=list(class_names.values())
# custom allowed classes (uncomment line below to allow detections for only people)
# allowed_classes = ['person']
# if crop flag is enabled, crop each detection and save it as new image
if crop:
crop_path=os.path.join(
os.getcwd(), 'detections', 'crop', str(name))  # note: the frame's name tuple is used as the crop folder name
try:
os.mkdir(crop_path)
except FileExistsError:
pass
crop_objects(cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB),
pred_bbox, crop_path, allowed_classes)
if count:
# count objects found
counted_classes=count_objects(
pred_bbox, by_class=False, allowed_classes=allowed_classes)
# loop through dict and print
for key, value in counted_classes.items():
print("Number of {}s: {}".format(key, value))
boxtext, image=utils.draw_bbox(
original_image, pred_bbox, info, counted_classes, allowed_classes=allowed_classes)
else:
boxtext, image=utils.draw_bbox(
original_image, pred_bbox, info, allowed_classes=allowed_classes) # 0.5ms
image=Image.fromarray(image.astype(np.uint8)) # 0.3ms
inference_queue.put((boxtext, image, name))
def preprocessing(preprocess_queue):
vk = VerifyKey(OutsourceContract.public_key_outsourcer)
input_size = Parameters.input_size
merkle_tree_interval = OutsourceContract.merkle_tree_interval
hostname = Parameters.ip_outsourcer # Use to receive from other computer
port = Parameters.port_outsourcer
minimum_receive_rate_from_contractor = Parameters.minimum_receive_rate_from_contractor
contractHash = Helperfunctions.hashContract().encode('latin1')
# configure video stream receiver
receiver = vss.VideoStreamSubscriber(hostname, port)
print('RPi Stream -> Receiver Initialized')
old_imagecount = -1
image_count = 0  # frames processed locally; compared against the outsourcer's acknowledgement counts below
while True:
name, compressed = receiver.receive()
if name == 'abort':
sys.exit('Contract aborted by outsourcer according to custom')
if (merkle_tree_interval == 0 and name[-1] != old_imagecount) or (merkle_tree_interval > 0 and name[-5] != old_imagecount):
decompressedImage = cv2.imdecode(
np.frombuffer(compressed, dtype='uint8'), -1)
if merkle_tree_interval == 0:
old_imagecount = name[-1]
try:
vk.verify(bytes(compressed) + contractHash +
bytes(name[-2]) + bytes(name[-1]), bytes(name[:-2]))
except:
sys.exit(
'Contract aborted: Outsourcer signature does not match input. Possible Consequences for Outsourcer: Blacklist, Bad Review')
# print(vrification_result)
if name[-1] < (image_count-2)*minimum_receive_rate_from_contractor:
sys.exit(
'Contract aborted: Outsourcer did not acknowledge enough outputs. Possible Consequences for Outsourcer: Blacklist, Bad Review')
else:
old_imagecount = name[-5]
# verify if signature matches image, contract hash, and image count, and number of intervals, and random number
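# Expected layout of 'name' in merkle-tree mode (it is unpacked below): name[:-5] is the
# outsourcer signature, name[-5] the image count, name[-4] the number of outputs received,
# name[-3] a random number, name[-2] the interval count, name[-1] the time-to-challenge flag.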
try:
vk.verify(bytes(compressed) + contractHash +
bytes(name[-5]) + bytes(name[-4]) + bytes(name[-3]) + bytes(name[-2]) + bytes(name[-1]), bytes(name[:-5]))
except:
sys.exit(
                        'Contract aborted: Outsourcer signature does not match input. Possible Consequences for Outsourcer: Blacklist, Bad Review')
if name[-4] < (image_count-2)*minimum_receive_rate_from_contractor:
sys.exit(
                        'Contract aborted: Outsourcer did not acknowledge enough outputs. Possible Consequences for Outsourcer: Blacklist, Bad Review')
outsorucer_signature = name[:-5]
outsourcer_image_count = name[-5]
outsourcer_number_of_outputs_received = name[-4]
outsourcer_random_number = name[-3]
outsourcer_interval_count = name[-2]
outsourcer_time_to_challenge = bool(name[-1])
# region
original_image = cv2.cvtColor(decompressedImage, cv2.COLOR_BGR2RGB)
image_data = cv2.resize(
original_image, (input_size, input_size)) # 0.4ms
image_data = image_data / 255. # 2.53ms
images_data = []
for i in range(1):
images_data.append(image_data)
images_data = np.asarray(images_data).astype(np.float32) # 3.15ms
# endregion
            preprocess_queue.put((images_data, name, original_image))
            image_count += 1  # assumed: count each frame forwarded to inference for the checks above
# from object_detection.object_detection import Model
# from utilities.render import Render
# from ecdsa import VerifyingKey
# from ecdsa import SigningKey
# Helper class implementing an IO deamon thread
def dummy():
while True:
#print('jo')
a = 0
def main():
# get paramters and contract details
# print(contractHash)
#preprocess_queue = queue.LifoQueue()
#inference_queue = queue.LifoQueue()
preprocess_queue = mp.Queue()
inference_queue = mp.Queue()
# postprocess_queue = Queue()
p1 = mp.Process(target=inference, args= (preprocess_queue, inference_queue))
p2 = mp.Process(target=preprocessing, args=(preprocess_queue,))
#p1 = Process(target=dummy)
#p2 = Process(target=dummy)
# p3 = Process(target=Show_Image_mp, args=(Processed_frames, show, Final_frames))
p1.start()
p2.start()
# p3.start()
sk = SigningKey(Parameters.private_key_contractor)
contractHash = Helperfunctions.hashContract().encode('latin1')
dont_show = Parameters.dont_show
merkle_tree_interval = OutsourceContract.merkle_tree_interval
hostname = Parameters.ip_outsourcer # Use to receive from other computer
port = Parameters.port_outsourcer
sendingPort = Parameters.sendingPort
#import tensorflow as tf
# time.sleep(1.0)
# configure responder
responder=re.Responder(hostname, sendingPort)
# statistics info
moving_average_points=50
# statistics
moving_average_fps=MovingAverage(moving_average_points)
moving_average_receive_time=MovingAverage(moving_average_points)
moving_average_decompress_time=MovingAverage(moving_average_points)
# moving_average_model_load_image_time = MovingAverage(moving_average_points)
moving_average_img_preprocessing_time=MovingAverage(
moving_average_points)
moving_average_model_inference_time=MovingAverage(moving_average_points)
moving_average_img_postprocessing_time=MovingAverage(
moving_average_points)
moving_average_reply_time=MovingAverage(moving_average_points)
moving_average_image_show_time=MovingAverage(moving_average_points)
moving_average_verify_image_sig_time=MovingAverage(moving_average_points)
moving_average_response_signing_time=MovingAverage(moving_average_points)
image_count=0
a=0
b=0
if merkle_tree_interval > 0:
mt=MerkleTools()
mtOld=MerkleTools()
interval_count=0
mtOld_leaf_indices={}
mt_leaf_indices={}
# rendundancy_counter = 0
# rendundancy_counter2 = 0
current_challenge=1
merkle_root=''
# stringsend = ''
last_challenge=0
image_showed_time=time.perf_counter() # init
while True:
# start_time = time.perf_counter()
if not inference_queue.empty():
queueData=inference_queue.get()
while not inference_queue.empty():
queueData=inference_queue.get()
start_time=image_showed_time
# # boxes, scores, classes, valid_detections, name, original_image
#queueData=inference_queue.get()
#inference_queue.task_done()
# boxes=queueData[0]
# scores=queueData[1]
# classes=queueData[2]
# valid_detections=queueData[3]
# name = queueData[4]
# original_image = queueData[5]
boxtext = queueData[0]
image = queueData[1]
name = queueData[2]
if merkle_tree_interval > 0:
outsorucer_signature=name[:-5]
outsourcer_image_count=name[-5]
outsourcer_number_of_outputs_received=name[-4]
outsourcer_random_number=name[-3]
outsourcer_interval_count=name[-2]
outsourcer_time_to_challenge=bool(name[-1])
received_time = time.perf_counter()
image_preprocessing_time=time.perf_counter()
decompressed_time = time.perf_counter()
verify_time = time.perf_counter()
# inference
# region
# endregion
model_inferenced_time=time.perf_counter()
# image postprocessing
# region
h=time.perf_counter()
# endregion
if merkle_tree_interval == 0:
boxtext='Image' + str(name[-2]) + ':;' + boxtext
else:
boxtext='Image' + str(outsourcer_image_count) + ':;' + boxtext
image_postprocessing_time=time.perf_counter()
            # sign message -> need to add image_count/interval_count (for merkle tree sig) and contract hash to output and verification
if merkle_tree_interval == 0:
# sig = sk.sign_deterministic(boxtext.encode('latin1'))
sig=sk.sign(boxtext.encode('latin1') + contractHash).signature
# sig = list(sig)
sig=sig.decode('latin1')
# send reply
responder.respond(boxtext + ';--' + sig)
else:
# print(image_count)
                # add leaves dynamically to the merkle tree
mt.add_leaf(boxtext, True)
# remember indices for challenge
mt_leaf_indices[outsourcer_image_count]=image_count % merkle_tree_interval
# print(image_count % merkle_tree_interval)
response=boxtext
# time to send a new merkle root
                # e.g. if interval = 128 then all responses from 0-127 are added to the merkle tree
if image_count > 1 and (image_count+1) % merkle_tree_interval == 0:
# print(image_count)
a=time.perf_counter()
# rendundancy_counter = 2
mt.make_tree()
merkle_root=mt.get_merkle_root()
sig=sk.sign(merkle_root.encode(
'latin1') + bytes(interval_count) + contractHash).signature # sign merkle root
                    # respond with merkle root
response += ';--' + str(merkle_root) + \
';--' + sig.decode('latin1')
interval_count += 1
mtOld=mt # save old merkle tree for challenge
# mtOld_leaf_indices.clear() # clear old indices
mtOld_leaf_indices.clear()
mtOld_leaf_indices=mt_leaf_indices.copy() # save old indices for challenge
# print(mtOld_leaf_indices)
mt_leaf_indices.clear() # clear for new indices
# mt_leaf_indices = {}
mt=MerkleTools() # construct new merkle tree for next interval
te=time.perf_counter()-a
# print('1', te, image_count)
else:
# if this is true then the outsourcer has not received the merkle root yet -> send again
if interval_count > outsourcer_image_count:
sig=sk.sign(merkle_root.encode(
'latin1') + bytes(interval_count) + contractHash).signature # sign merkle root
response += ';--' + str(merkle_root) + \
';--' + sig.decode('latin1')
# print('2', image_count)
                    else:  # in this case outsourcer has confirmed to have received the merkle root
# in this case outsourcer has sent a challenge to meet with the old merkle tree, give outsourcer 3 frames time to confirm challenge received before sending again
if outsourcer_time_to_challenge and image_count - last_challenge > 3:
last_challenge=image_count
if outsourcer_random_number in mtOld_leaf_indices:
# if challenge can be found, send proof back
outsourcer_random_number_index=mtOld_leaf_indices[outsourcer_random_number]
else:
# if challenge index cannot be found return leaf 0
outsourcer_random_number_index=0
# print('proof index not found')
proofs=mtOld.get_proof(
outsourcer_random_number_index)
stringsend=''
for proof in proofs:
stringsend += ';--' # indicate start of proof
stringsend += proof.__str__() # send proof
stringsend += ';--'
# send leaf
stringsend += mtOld.get_leaf(
outsourcer_random_number_index)
stringsend += ';--'
stringsend += mtOld.get_merkle_root() # send root
stringarr=[]
stringarr=stringsend.split(';--')
leaf_node=stringarr[-2]
root_node=stringarr[-1]
proof_string=stringarr[0:-2]
# sign proof and contract details
sig=sk.sign(str(stringarr[1:]).encode(
'latin1') + bytes(interval_count-1) + contractHash).signature
# print(str(stringarr).encode('latin1') + bytes(interval_count-1) + contractHash)
# print(stringarr)
# attach signature
response += ';--' + sig.decode('latin1')
response += stringsend # attach challenge response to response
# print('3', te, image_count)
responder.respond(response)
response_signing_time=time.perf_counter()
# print(response_signing_time- image_postprocessing_time)
replied_time=time.perf_counter()
# display image
if not dont_show:
# image.show()
image=cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
cv2.imshow('raspberrypi', image)
if cv2.waitKey(1) == ord('q'):
responder.respond('abort12345:6')
sys.exit(
'Contract aborted: Contractor ended contract according to custom')
image_showed_time=time.perf_counter()
# statistics
moving_average_fps.add(1 / (image_showed_time - start_time))
moving_average_receive_time.add(received_time - start_time)
moving_average_decompress_time.add(
decompressed_time - received_time)
moving_average_verify_image_sig_time.add(
verify_time - decompressed_time)
moving_average_img_preprocessing_time.add(
image_preprocessing_time - verify_time)
moving_average_model_inference_time.add(
model_inferenced_time - image_preprocessing_time)
moving_average_img_postprocessing_time.add(
image_postprocessing_time - model_inferenced_time)
moving_average_response_signing_time.add(
response_signing_time - image_postprocessing_time) # adjust for merkle root
moving_average_reply_time.add(replied_time - response_signing_time)
moving_average_image_show_time.add(
image_showed_time - replied_time)
total_time=moving_average_receive_time.get_moving_average() \
+ moving_average_decompress_time.get_moving_average() \
+ moving_average_verify_image_sig_time.get_moving_average() \
+ moving_average_img_preprocessing_time.get_moving_average() \
+ moving_average_model_inference_time.get_moving_average() \
+ moving_average_img_postprocessing_time.get_moving_average() \
+ moving_average_response_signing_time.get_moving_average() \
+ moving_average_reply_time.get_moving_average() \
+ moving_average_image_show_time.get_moving_average()
if(image_count == 800):
a=time.perf_counter()
if(image_count == 1200):
a=time.perf_counter() - a
print(a)
# terminal prints
if image_count % 20 == 0:
print(" total: %4.1fms (%4.1ffps) "
" receiving %4.1f (%4.1f%%) "
" decoding %4.1f (%4.1f%%) "
" verifying %4.1f (%4.1f%%) "
" preprocessing %4.1f (%4.1f%%) "
" model inference %4.1f (%4.1f%%) "
" postprocessing %4.1f (%4.1f%%) "
" signing %4.1f (%4.1f%%) "
" replying %4.1f (%4.1f%%) "
" display %4.1f (%4.1f%%) "
% (
1000/moving_average_fps.get_moving_average(),
moving_average_fps.get_moving_average(),
moving_average_receive_time.get_moving_average()*1000,
moving_average_receive_time.get_moving_average() / total_time * 100,
moving_average_decompress_time.get_moving_average()*1000,
moving_average_decompress_time.get_moving_average() / total_time * 100,
moving_average_verify_image_sig_time.get_moving_average()*1000,
moving_average_verify_image_sig_time.get_moving_average() / total_time * 100,
moving_average_img_preprocessing_time.get_moving_average()*1000,
moving_average_img_preprocessing_time.get_moving_average() / total_time * 100,
moving_average_model_inference_time.get_moving_average()*1000,
moving_average_model_inference_time.get_moving_average() / total_time * 100,
moving_average_img_postprocessing_time.get_moving_average()*1000,
moving_average_img_postprocessing_time.get_moving_average() / total_time * 100,
moving_average_response_signing_time.get_moving_average()*1000,
moving_average_response_signing_time.get_moving_average() / total_time * 100,
moving_average_reply_time.get_moving_average() * 1000,
moving_average_reply_time.get_moving_average() / total_time * 100,
moving_average_image_show_time.get_moving_average()*1000,
moving_average_image_show_time.get_moving_average() / total_time * 100,), end='\r')
# counter
image_count += 1
# except (KeyboardInterrupt, SystemExit):
# print('Exit due to keyboard interrupt')
# except Exception as ex:
# print('Python error with no Exception handler:')
# print('Traceback error:', ex)
# traceback.print_exc()
# finally:
# receiver.close()
# sys.exit()
if __name__ == '__main__':
# try:
# app.run(main)
# except SystemExit:
# pass
#app.run(main)
main()
|
api_image_test.py
|
import contextlib
import json
import shutil
import socket
import tarfile
import tempfile
import threading
import pytest
import six
from six.moves import BaseHTTPServer
from six.moves import socketserver
import docker
from .base import BaseAPIIntegrationTest, BUSYBOX
class ListImagesTest(BaseAPIIntegrationTest):
def test_images(self):
res1 = self.client.images(all=True)
self.assertIn('Id', res1[0])
res10 = res1[0]
self.assertIn('Created', res10)
self.assertIn('RepoTags', res10)
distinct = []
for img in res1:
if img['Id'] not in distinct:
distinct.append(img['Id'])
self.assertEqual(len(distinct), self.client.info()['Images'])
def test_images_quiet(self):
res1 = self.client.images(quiet=True)
self.assertEqual(type(res1[0]), six.text_type)
class PullImageTest(BaseAPIIntegrationTest):
def test_pull(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
res = self.client.pull('hello-world')
self.tmp_imgs.append('hello-world')
self.assertEqual(type(res), six.text_type)
self.assertGreaterEqual(
len(self.client.images('hello-world')), 1
)
img_info = self.client.inspect_image('hello-world')
self.assertIn('Id', img_info)
def test_pull_streaming(self):
try:
self.client.remove_image('hello-world')
except docker.errors.APIError:
pass
stream = self.client.pull('hello-world', stream=True, decode=True)
self.tmp_imgs.append('hello-world')
for chunk in stream:
assert isinstance(chunk, dict)
self.assertGreaterEqual(
len(self.client.images('hello-world')), 1
)
img_info = self.client.inspect_image('hello-world')
self.assertIn('Id', img_info)
class CommitTest(BaseAPIIntegrationTest):
def test_commit(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
self.assertIn('Id', res)
img_id = res['Id']
self.tmp_imgs.append(img_id)
img = self.client.inspect_image(img_id)
self.assertIn('Container', img)
self.assertTrue(img['Container'].startswith(id))
self.assertIn('ContainerConfig', img)
self.assertIn('Image', img['ContainerConfig'])
self.assertEqual(BUSYBOX, img['ContainerConfig']['Image'])
busybox_id = self.client.inspect_image(BUSYBOX)['Id']
self.assertIn('Parent', img)
self.assertEqual(img['Parent'], busybox_id)
def test_commit_with_changes(self):
cid = self.client.create_container(BUSYBOX, ['touch', '/test'])
self.tmp_containers.append(cid)
self.client.start(cid)
img_id = self.client.commit(
cid, changes=['EXPOSE 8000', 'CMD ["bash"]']
)
self.tmp_imgs.append(img_id)
img = self.client.inspect_image(img_id)
assert 'Container' in img
assert img['Container'].startswith(cid['Id'])
assert '8000/tcp' in img['Config']['ExposedPorts']
assert img['Config']['Cmd'] == ['bash']
class RemoveImageTest(BaseAPIIntegrationTest):
def test_remove(self):
container = self.client.create_container(BUSYBOX, ['touch', '/test'])
id = container['Id']
self.client.start(id)
self.tmp_containers.append(id)
res = self.client.commit(id)
self.assertIn('Id', res)
img_id = res['Id']
self.tmp_imgs.append(img_id)
self.client.remove_image(img_id, force=True)
images = self.client.images(all=True)
res = [x for x in images if x['Id'].startswith(img_id)]
self.assertEqual(len(res), 0)
class ImportImageTest(BaseAPIIntegrationTest):
'''Base class for `docker import` test cases.'''
TAR_SIZE = 512 * 1024
def write_dummy_tar_content(self, n_bytes, tar_fd):
def extend_file(f, n_bytes):
f.seek(n_bytes - 1)
f.write(bytearray([65]))
f.seek(0)
tar = tarfile.TarFile(fileobj=tar_fd, mode='w')
with tempfile.NamedTemporaryFile() as f:
extend_file(f, n_bytes)
tarinfo = tar.gettarinfo(name=f.name, arcname='testdata')
tar.addfile(tarinfo, fileobj=f)
tar.close()
@contextlib.contextmanager
def dummy_tar_stream(self, n_bytes):
'''Yields a stream that is valid tar data of size n_bytes.'''
with tempfile.NamedTemporaryFile() as tar_file:
self.write_dummy_tar_content(n_bytes, tar_file)
tar_file.seek(0)
yield tar_file
@contextlib.contextmanager
def dummy_tar_file(self, n_bytes):
'''Yields the name of a valid tar file of size n_bytes.'''
with tempfile.NamedTemporaryFile(delete=False) as tar_file:
self.write_dummy_tar_content(n_bytes, tar_file)
tar_file.seek(0)
yield tar_file.name
def test_import_from_bytes(self):
with self.dummy_tar_stream(n_bytes=500) as f:
content = f.read()
# The generic import_image() function cannot import in-memory bytes
# data that happens to be represented as a string type, because
# import_image() will try to use it as a filename and usually then
# trigger an exception. So we test the import_image_from_data()
# function instead.
statuses = self.client.import_image_from_data(
content, repository='test/import-from-bytes')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
def test_import_from_file(self):
with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename:
# statuses = self.client.import_image(
# src=tar_filename, repository='test/import-from-file')
statuses = self.client.import_image_from_file(
tar_filename, repository='test/import-from-file')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
self.assertIn('status', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
def test_import_from_stream(self):
with self.dummy_tar_stream(n_bytes=self.TAR_SIZE) as tar_stream:
statuses = self.client.import_image(
src=tar_stream, repository='test/import-from-stream')
# statuses = self.client.import_image_from_stream(
# tar_stream, repository='test/import-from-stream')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
self.assertIn('status', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
def test_import_image_from_data_with_changes(self):
with self.dummy_tar_stream(n_bytes=500) as f:
content = f.read()
statuses = self.client.import_image_from_data(
content, repository='test/import-from-bytes',
changes=['USER foobar', 'CMD ["echo"]']
)
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
img_id = result['status']
self.tmp_imgs.append(img_id)
img_data = self.client.inspect_image(img_id)
assert img_data is not None
assert img_data['Config']['Cmd'] == ['echo']
assert img_data['Config']['User'] == 'foobar'
def test_import_image_with_changes(self):
with self.dummy_tar_file(n_bytes=self.TAR_SIZE) as tar_filename:
statuses = self.client.import_image(
src=tar_filename, repository='test/import-from-file',
changes=['USER foobar', 'CMD ["echo"]']
)
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
assert 'error' not in result
img_id = result['status']
self.tmp_imgs.append(img_id)
img_data = self.client.inspect_image(img_id)
assert img_data is not None
assert img_data['Config']['Cmd'] == ['echo']
assert img_data['Config']['User'] == 'foobar'
@contextlib.contextmanager
def temporary_http_file_server(self, stream):
'''Serve data from an IO stream over HTTP.'''
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'application/x-tar')
self.end_headers()
shutil.copyfileobj(stream, self.wfile)
server = socketserver.TCPServer(('', 0), Handler)
thread = threading.Thread(target=server.serve_forever)
thread.setDaemon(True)
thread.start()
yield 'http://%s:%s' % (socket.gethostname(), server.server_address[1])
server.shutdown()
@pytest.mark.skipif(True, reason="Doesn't work inside a container - FIXME")
def test_import_from_url(self):
# The crappy test HTTP server doesn't handle large files well, so use
# a small file.
tar_size = 10240
with self.dummy_tar_stream(n_bytes=tar_size) as tar_data:
with self.temporary_http_file_server(tar_data) as url:
statuses = self.client.import_image(
src=url, repository='test/import-from-url')
result_text = statuses.splitlines()[-1]
result = json.loads(result_text)
self.assertNotIn('error', result)
self.assertIn('status', result)
img_id = result['status']
self.tmp_imgs.append(img_id)
|
old_startracker.py
|
# startracker.py
# by Umair Khan, from the Portland State Aerospace Society
# based on OpenStarTracker from Andrew Tennenbaum at the University of Buffalo
# openstartracker.org
# Imports - built-ins
import sys
import time
import threading
import glob
import random
import logging
# Imports - external
import numpy as np
import cv2
from pydbus.generic import signal
from pydbus import SystemBus
from gi.repository import GLib
from systemd import journal
# Imports - back-end
import beast
# Set up systemd logger
# modified from https://medium.com/@trstringer/logging-to-systemd-in-python-45150662440a
logger = logging.getLogger("org.OreSat.StarTracker")
journald_handler = journal.JournalHandler()
journald_handler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
logger.addHandler(journald_handler)
logger.setLevel(logging.DEBUG)
# Solver
class StarTracker:
# Initialize everything
def __init__(self):
# Prepare constants
self.P_MATCH_THRESH = 0.99
self.YEAR = 1991.25
self.SAMPLE_DIR = None
self.MEDIAN_IMAGE = None
self.S_DB = None
self.SQ_RESULTS = None
self.S_FILTERED = None
self.C_DB = None
# Startup sequence
def startup(self, median_path, config_path, db_path, sample_dir = None):
# Set the sample directory
logger.info("Beginning startup sequence...")
self.SAMPLE_DIR = sample_dir
# Prepare star tracker
self.MEDIAN_IMAGE = cv2.imread(median_path)
logger.info("Loaded median image from {}".format(median_path))
beast.load_config(config_path)
logger.info("Loaded configuration from {}".format(config_path))
self.S_DB = beast.star_db()
self.S_DB.load_catalog(db_path, self.YEAR)
logger.info("Loaded star database from {}".format(db_path))
self.SQ_RESULTS = beast.star_query(self.S_DB)
self.SQ_RESULTS.kdmask_filter_catalog()
self.SQ_RESULTS.kdmask_uniform_density(beast.cvar.REQUIRED_STARS)
self.S_FILTERED = self.SQ_RESULTS.from_kdmask()
logger.info("Filtered stars")
self.C_DB = beast.constellation_db(self.S_FILTERED, 2 + beast.cvar.DB_REDUNDANCY, 0)
logger.info("Startup sequence complete!")
# Capture an image, or pull one from the sample directory
def capture(self):
# Pull from sample directory
if self.SAMPLE_DIR != None:
path = random.choice(glob.glob(self.SAMPLE_DIR + "*"))
return path, cv2.imread(path)
# Capture an image
return None, None
# See if an image is worth attempting to solve
def preprocess(self, img):
# Generate test parameters
height, width, channels = img.shape
total_pixels = height * width
blur_check = int(total_pixels * 0.99996744)
too_many_check = int(total_pixels * 0.99918619)
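        # These are pixel-count thresholds: if nearly every pixel is dark after
        # thresholding, the image is either blurred or contains too few stars;
        # if too many pixels are bright, the image is rejected as unsuitable.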
# Convert and threshold the image
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ret, threshold = cv2.threshold(img, 80, 255, cv2.THRESH_BINARY)
# Count the number of black pixels in the thresholded image
threshold_black = total_pixels - cv2.countNonZero(threshold)
# Check the test values and return appropriate value
if threshold_black > blur_check:
blur = cv2.Laplacian(img, cv2.CV_64F).var()
if blur != 0 and blur < 5:
return "image too blurry"
else:
return "image contains too few stars"
return 1
elif threshold_black < too_many_check:
return "unsuitable image"
return "good"
# Solution function
def solve(self, orig_img):
# Keep track of solution time
starttime = time.time()
# Create and initialize variables
img_stars = beast.star_db()
match = None
fov_db = None
# Process the image for solving
img = np.clip(orig_img.astype(np.int16) - self.MEDIAN_IMAGE, a_min = 0, a_max = 255).astype(np.uint8)
img_grey = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Remove areas of the image that don't meet our brightness threshold and then extract contours
ret, thresh = cv2.threshold(img_grey, beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE, 255, cv2.THRESH_BINARY)
thresh_contours, contours, hierarchy = cv2.findContours(thresh, 1, 2);
# Process the contours
for c in contours:
M = cv2.moments(c)
if M["m00"] > 0:
# this is how the x and y position are defined by cv2
cx = M["m10"] / M["m00"]
cy = M["m01"] / M["m00"]
# see https://alyssaq.github.io/2015/computing-the-axes-or-orientation-of-a-blob/ for how to convert these into eigenvectors/values
u20 = M["m20"] / M["m00"] - cx ** 2
u02 = M["m02"] / M["m00"] - cy ** 2
u11 = M["m11"] / M["m00"] - cx * cy
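                # For reference (not used below): u20, u02 and u11 are the second-order
                # central moments, i.e. the entries of the blob's covariance matrix
                #     C = [[u20, u11],
                #          [u11, u02]]
                # whose eigenvectors/eigenvalues give the blob's orientation and spread;
                # the orientation angle is 0.5 * atan2(2*u11, u20 - u02).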
# The center pixel is used as the approximation of the brightest pixel
img_stars += beast.star(cx - beast.cvar.IMG_X / 2.0, cy - beast.cvar.IMG_Y / 2.0, float(cv2.getRectSubPix(img_grey, (1,1), (cx,cy))[0,0]), -1)
# We only want to use the brightest MAX_FALSE_STARS + REQUIRED_STARS
img_stars_n_brightest = img_stars.copy_n_brightest(beast.cvar.MAX_FALSE_STARS + beast.cvar.REQUIRED_STARS)
img_const_n_brightest = beast.constellation_db(img_stars_n_brightest, beast.cvar.MAX_FALSE_STARS + 2, 1)
lis = beast.db_match(self.C_DB, img_const_n_brightest)
# Generate the match
if lis.p_match > self.P_MATCH_THRESH and lis.winner.size() >= beast.cvar.REQUIRED_STARS:
x = lis.winner.R11
y = lis.winner.R21
z = lis.winner.R31
r = beast.cvar.MAXFOV / 2
self.SQ_RESULTS.kdsearch(x, y, z, r, beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE)
# Estimate density for constellation generation
self.C_DB.results.kdsearch(x, y, z, r,beast.cvar.THRESH_FACTOR * beast.cvar.IMAGE_VARIANCE)
fov_stars = self.SQ_RESULTS.from_kdresults()
fov_db = beast.constellation_db(fov_stars, self.C_DB.results.r_size(), 1)
self.C_DB.results.clear_kdresults()
self.SQ_RESULTS.clear_kdresults()
img_const = beast.constellation_db(img_stars, beast.cvar.MAX_FALSE_STARS + 2, 1)
near = beast.db_match(fov_db, img_const)
if near.p_match > self.P_MATCH_THRESH:
match = near
# Get solution -- for reference:
# - dec - rotation about the y-axis
# - ra - rotation about the z-axis
# - ori - rotation about the camera axis
if match is not None:
match.winner.calc_ori()
dec = match.winner.get_dec()
ra = match.winner.get_ra()
ori = match.winner.get_ori()
else:
dec, ra, ori = 0.0, 0.0, 0.0
# Calculate how long it took to process
runtime = time.time() - starttime
# Return solution
        return dec, ra, ori, runtime
# Camera control
def modify(self, mod_string):
return 0
# Error processing
def error(self, err_string):
# Handle what we can handle, everything else will be ignored
if err_string == "image too blurry":
self.modify("more sharp")
elif err_string == "image contains too few stars":
self.modify("increase gain")
# We always handle successfully
return 0
# Server
class StarTrackerServer:
# XML definition
dbus = """
<node>
<interface name='org.OreSat.StarTracker'>
<signal name='error'>
<arg type='s' />
</signal>
<property name='coor' type='(dddd)' access='read'>
<annotation name="org.freedesktop.DBus.Property.EmitsChangedSignal" value="true" />
</property>
<property name='filepath' type='s' access='read'>
<annotation name="org.freedesktop.DBus.Property.EmitsChangedSignal" value="true" />
</property>
</interface>
</node>
"""
# Error signal
error = signal()
PropertiesChanged = signal()
# Initialize properties and worker thread
def __init__(self):
# Properties
self.dec = 0.0
self.ra = 0.0
self.ori = 0.0
self.l_solve = 0.0
self.t_solve = 0.0
self.p_solve = ""
self.interface_name = "org.OreSat.StarTracker"
# Set up star tracker solver
self.st = StarTracker()
self.st_thread = threading.Thread(target = self.star_tracker)
self.st_lock = threading.Lock()
self.st_running = True
# Star tracker thread
def star_tracker(self):
# Keep going while we're running
while (self.st_running):
# Capture an image
self.st_lock.acquire()
self.p_solve, img = self.st.capture()
# self.PropertiesChanged(self.interface_name, {"filepath": self.p_solve}, [])
# Check the image
check = self.st.preprocess(img)
            if check != "good":
                self.st.error(check)
                logger.warning(check + " (for {})".format(self.p_solve))
                self.error(check)
                self.st_lock.release()  # release before retrying, otherwise the next acquire() deadlocks
                time.sleep(0.5)
                continue
# Solve the image
self.dec, self.ra, self.ori, self.l_solve = self.st.solve(img)
# self.PropertiesChanged(self.interface_name, {"coor": self.dec}, []) #TODO need to handle the struct
            if self.dec == self.ra == self.ori == 0.0:
                self.st.error("bad solve")
                logger.error("bad solve (for {})".format(self.p_solve))
                self.error("bad solve")
                self.st_lock.release()  # release before retrying, otherwise the next acquire() deadlocks
                time.sleep(0.5)
                continue
# Update the solution timestamp
self.t_solve = time.time()
self.st_lock.release()
# Send property signal
self.PropertiesChanged(self.interface_name, {"filepath": self.p_solve}, [])
time.sleep(0.5)
# Start up solver and server
def start(self, median_path, config_path, db_path, sample_dir = None):
# Start up star tracker
self.st.startup(median_path, config_path, db_path, sample_dir = sample_dir)
time.sleep(20)
self.st_thread.start()
logger.info("Started worker thread")
# Start up D-Bus server
bus = SystemBus()
loop = GLib.MainLoop()
bus.publish(self.interface_name, self)
try:
logger.info("Starting D-Bus loop...")
loop.run()
except KeyboardInterrupt as e:
loop.quit()
logger.info("Ended D-Bus loop")
self.end()
# Stop threads in preparation to exit
def end(self):
self.st_running = False
if self.st_thread.is_alive():
self.st_thread.join()
# Coordinates
@property
def coor(self):
self.st_lock.acquire()
dec, ra, ori, t_solve = self.dec, self.ra, self.ori, self.t_solve
self.st_lock.release()
return (dec, ra, ori, t_solve)
# Filepath of last solved image
@property
def filepath(self):
self.st_lock.acquire()
p_solve = self.p_solve
self.st_lock.release()
return p_solve
##########
# Test if run independently
if __name__ == "__main__":
server = StarTrackerServer()
db_root = "/usr/share/oresat-star-tracker/data/"
data_root = db_root + "downsample/"
server.start(data_root + "median_image.png", data_root + "calibration.txt", db_root + "hip_main.dat", sample_dir = data_root + "samples/")
|
demo3.py
|
"""
【多线程】多线程共享全局变量以及锁机制 2019/11/03 23:50
"""
# TODO: 多线程共享全局变量的问题
"""
多线程都是在同一个进程中运行的。因此在进程中的全局变量所有线程都是可共享的。
这就造成了一个问题,因为线程执行的顺序是无序的。有可能会造成数据错误。
"""
# TODO: 锁机制
"""
为了解决以上使用共享全局变量的问题。threading提供了一个Lock类,
这个类可以在某个线程访问某个变量的时候加锁,其他线程此时就不能进来,
直到当前线程处理完后,把锁释放了,其他线程才能进来处理。
加锁: gLock.acquire()
释放锁: gLock.release()
"""
import threading
VALUE = 0
gLock = threading.Lock()
def add_value():
global VALUE
gLock.acquire()
for x in range(100000):
VALUE += 1
gLock.release()
print(VALUE)
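# A minimal sketch (not part of the original demo): threading.Lock also works as a
# context manager; "with gLock:" is equivalent to the acquire()/release() pair above
# and releases the lock even if the loop raises an exception.
def add_value_with_lock_context():
    global VALUE
    with gLock:
        for x in range(100000):
            VALUE += 1
    print(VALUE)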
def main():
for x in range(2):
t = threading.Thread(target=add_value)
t.start()
if __name__ == '__main__':
main()
|
task_queue.py
|
import sys
import time
import traceback
import threading
class TaskQueue:
# ---------------------------------------- Task -----------------------------------------
class Task:
def __init__(self, name: str):
self.__name = name
def __str__(self):
return 'Task %s [%s]' % (self.name(), self.identity())
def name(self) -> str:
return self.__name
def status(self) -> int:
# TODO: Waiting, Running, Finished, Canceled
pass
def run(self):
pass
def quit(self):
pass
def identity(self) -> str:
pass
# -------------------------------------- Observer ---------------------------------------
class Observer:
def __init__(self):
pass
def on_task_updated(self, task, change: str):
pass
# -------------------------------------- TaskQueue --------------------------------------
def __init__(self):
self.__lock = threading.Lock()
self.__quit_flag = True
self.__observers = []
self.__task_queue = []
self.__task_thread = None
self.__running_task = None
self.__will_task = None
def join(self, timeout: int):
if self.__task_thread is not None:
self.__task_thread.join(timeout)
def quit(self):
self.__lock.acquire()
self.__quit_flag = True
self.__clear_pending_task()
self.__cancel_running_task()
self.__lock.release()
def start(self):
        if self.__task_thread is None or not self.__task_thread.is_alive():
self.__quit_flag = False
self.__task_thread = threading.Thread(target=self.__task_thread_entry)
self.__task_thread.start()
def is_busy(self) -> bool:
return self.__running_task is not None or len(self.__task_queue) > 0
# -------------------------------- Task Related --------------------------------
def get_tasks(self, name: str or None, identity: str or None) -> [Task]:
task_list = []
self.__lock.acquire()
for task in self.__task_queue:
            if (name is None and identity is None) or \
                    (name is not None and task.name() == name):
task_list.append(task)
if self.__running_task is not None:
if identity is None or self.__running_task.identity() == identity:
task_list.insert(0, self.__running_task)
self.__lock.release()
return task_list
def append_task(self, task: Task, unique: bool = True) -> bool:
print('Task queue -> append : ' + str(task))
self.__lock.acquire()
if unique and (task.identity() is not None and
len(self.__find_adapt_tasks(None, task.identity())) > 0):
self.__lock.release()
print('Task queue -> found duplicate, drop.')
return False
self.__task_queue.append(task)
self.__lock.release()
self.notify_task_updated(task, 'append')
return True
def insert_task(self, task: Task, index: int = 0, unique: bool = True):
print('Task queue -> insert : ' + str(task))
self.__lock.acquire()
if unique:
self.__remove_pending_task(task.identity())
self.__check_cancel_running_task(task.identity())
if index >= len(self.__task_queue):
self.__task_queue.append(task)
else:
self.__task_queue.insert(index, task)
self.__lock.release()
self.notify_task_updated(task, 'insert')
def set_will_task(self, task: Task):
self.__will_task = task
def cancel_task(self, identity: str or None):
self.__lock.acquire()
if identity is None:
self.__clear_pending_task()
self.__cancel_running_task()
else:
self.__remove_pending_task(identity)
self.__check_cancel_running_task(identity)
self.__lock.release()
self.notify_task_updated(None, 'canceled')
def cancel_running_task(self):
self.__lock.acquire()
self.__cancel_running_task()
self.__lock.release()
self.notify_task_updated(None, 'canceled')
def find_matching_tasks(self, name: str or None, identity: str or None) -> [Task]:
self.__lock.acquire()
tasks = self.__find_adapt_tasks(name, identity)
self.__lock.release()
return tasks
# ------------------------------------ Observer -------------------------------------
def add_observer(self, ob: Observer):
if ob not in self.__observers:
self.__observers.append(ob)
def notify_task_updated(self, task: Task, action: str):
for ob in self.__observers:
ob.on_task_updated(task, action)
# ------------------------------------- private --------------------------------------
def __adapted_task(self, task: Task, name: str or None, identity: str or None) -> Task or None:
adapt = True
if task is not None:
if name is not None and name != '':
adapt = (adapt and (task.name() == name))
if identity is not None and identity != '':
adapt = (adapt and (task.identity() == identity))
return task if adapt else None
def __find_adapt_tasks(self, name: str or None, identity: str or None) -> [Task]:
tasks = []
for task in self.__task_queue:
if self.__adapted_task(task, name, identity):
tasks.append(task)
if self.__adapted_task(self.__running_task, name, identity):
tasks.append(self.__running_task)
return tasks
def __remove_pending_task(self, identity):
if identity is None:
return
task_queue = self.__task_queue.copy()
for task in task_queue:
if task.identity() == identity:
self.__task_queue.remove(task)
def __clear_pending_task(self):
self.__task_queue.clear()
def __check_cancel_running_task(self, identity: str or None):
if identity is None or \
(self.__running_task is not None and
self.__running_task.identity() == identity):
self.__cancel_running_task()
def __cancel_running_task(self):
if self.__running_task is not None:
self.__running_task.quit()
# ----------------------------------- Thread Entry -----------------------------------
def __task_thread_entry(self):
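        # Worker loop: pops pending tasks FIFO and runs them one at a time; once
        # quit() has been requested it runs the optional "will" task (if set) one
        # last time and then exits.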
quit_thread = False
while not quit_thread:
self.__lock.acquire()
if self.__quit_flag:
quit_thread = True
task = self.__will_task
else:
task = self.__task_queue.pop(0) if len(self.__task_queue) > 0 else None
self.__running_task = task
self.__lock.release()
clock = time.time()
if task is not None:
try:
print('Task queue -> start: ' + str(task))
self.notify_task_updated(task, 'started')
task.run()
except Exception as e:
print('Task queue -> ' + str(task) + ' got exception:')
print(e)
finally:
print('Task queue -> finish: %s, time spending: %.2f ms' %
(str(task), (time.time() - clock) * 1000))
self.notify_task_updated(task, 'finished')
self.__lock.acquire()
self.__running_task = None
self.__lock.release()
else:
time.sleep(0.1)
# ----------------------------------------------------------------------------------------------------------------------
class TestTask(TaskQueue.Task):
LOG_START = []
LOG_FINISH = []
@staticmethod
def reset():
TestTask.LOG_START.clear()
TestTask.LOG_FINISH.clear()
def __init__(self, name: str, _id: str, delay: int):
super(TestTask, self).__init__(name)
self.__id = _id
self.__delay = delay
self.__quit_flag = False
def run(self):
TestTask.LOG_START.append(self.__id)
print('Task %s started' % self.__id)
slept = 0
while not self.__quit_flag and slept < self.__delay:
time.sleep(0.1)
slept += 0.1
TestTask.LOG_FINISH.append(self.__id)
print('Task %s finished' % self.__id)
def quit(self):
self.__quit_flag = True
def identity(self) -> str:
return self.__id
def test_basic_feature():
task_queue = TaskQueue()
task_queue.start()
TestTask.reset()
task_a = TestTask('TaskA', 'task_a', 30)
task_b = TestTask('TaskB', 'task_b', 4)
task_c = TestTask('TaskB', 'task_c', 3)
task_will = TestTask('TaskWill', 'will', 0)
task_blocking = TestTask('TaskBlocking', 'task_blocking', 999)
task_queue.append_task(task_a)
task_queue.append_task(task_b)
task_queue.append_task(task_c)
task_queue.append_task(task_blocking)
task_queue.set_will_task(task_will)
assert task_queue.is_busy()
time.sleep(1)
assert 'task_a' in TestTask.LOG_START
task_queue.cancel_running_task()
time.sleep(1)
assert 'task_a' in TestTask.LOG_FINISH
assert task_queue.is_busy()
time.sleep(1)
assert 'task_b' in TestTask.LOG_START
task_queue.cancel_task('task_c')
assert 'task_b' not in TestTask.LOG_FINISH
assert task_queue.is_busy()
time.sleep(4)
assert 'task_b' in TestTask.LOG_FINISH
assert 'task_c' not in TestTask.LOG_START
assert task_queue.is_busy()
time.sleep(1)
assert 'task_blocking' in TestTask.LOG_START
assert task_queue.is_busy()
task_queue.quit()
task_queue.join(1)
assert not task_queue.is_busy()
assert 'task_blocking' in TestTask.LOG_FINISH
assert 'will' in TestTask.LOG_START
assert 'will' in TestTask.LOG_FINISH
def test_entry() -> bool:
test_basic_feature()
return True
def main():
test_entry()
# ----------------------------------------------------------------------------------------------------------------------
def exception_hook(type, value, tback):
# log the exception here
print('Exception hook triggered.')
print(type)
print(value)
print(tback)
# then call the default handler
sys.__excepthook__(type, value, tback)
sys.excepthook = exception_hook
if __name__ == "__main__":
try:
main()
except Exception as e:
print('Error =>', e)
print('Error =>', traceback.format_exc())
exit()
finally:
pass
|
main.py
|
# libraries
from pytube import *
import os
from tkinter import *
from tkinter.filedialog import *
from tkinter.messagebox import *
from threading import *
file_size = 0
q = input("")
if q == "shutdown":
    os.system("shutdown -s")
# progress callback: updates the download button with the percentage downloaded.
def progress(stream=None, chunk=None, remaining=None):
file_downloaded = (file_size-remaining)
per = round((file_downloaded/file_size)*100, 1)
dBtn.config(text=f'{per}% downloaded')
# startDownload: asks for a destination folder and downloads the selected video.
def startDownload():
global file_size
try:
URL = urlField.get()
dBtn.config(text='Please wait...')
dBtn.config(state=DISABLED)
        path_save = askdirectory()
        if not path_save:  # askdirectory() returns '' when the dialog is cancelled
            return
ob = YouTube(URL, on_progress_callback=progress)
strm = ob.streams[0]
x = ob.description.split("|")
file_size = strm.filesize
dfile_size = file_size
dfile_size /= 1000000
dfile_size = round(dfile_size, 2)
label.config(text='Size: ' + str(dfile_size) + ' MB')
label.pack(side=TOP, pady=10)
        desc.config(text=ob.title + '\n\n' + 'Author: ' + ob.author + '\n\n' + 'Length: ' + str(round(ob.length/60, 1)) + ' mins\n\n'
'Views: ' + str(round(ob.views/1000000, 2)) + 'M')
desc.pack(side=TOP, pady=10)
strm.download(path_save, strm.title)
dBtn.config(state=NORMAL)
showinfo("Download Finished", 'Downloaded Successfully')
urlField.delete(0, END)
label.pack_forget()
desc.pack_forget()
dBtn.config(text='Start Download')
except Exception as e:
print(e)
print('Error!!')
def startDownloadthread():
thread = Thread(target=startDownload)
thread.start()
# main functions
main = Tk()
main.title("My YouTube Downloader")
main.config(bg='#3498DB')
main.iconbitmap('youtube-ios-app.ico')
main.geometry("500x600")
file = PhotoImage(file='photo.png')
headingIcon = Label(main, image=file)
headingIcon.pack(side=TOP)
urlField = Entry(main, font=("Times New Roman", 18), justify=CENTER)
urlField.pack(side=TOP, fill=X, padx=10, pady=15)
dBtn = Button(main, text="Start Download", font=(
"Times New Roman", 18), relief='ridge', activeforeground='red', command=startDownloadthread)
dBtn.pack(side=TOP)
label = Label(main, text='')
desc = Label(main, text='')
author = Label(main, text="@G.S.")
author.config(font=("Courier", 44))
author.pack(side=BOTTOM)
main.mainloop()
|
ftx.py
|
import os
apikey = os.environ['key']#'IO7okj6AU7sgEKq4M_T2Ld-KO1VTXpSuq3WkfUF0'
apisecret = os.environ['secret']#'1DMOnLSW3fk3SF8KVCbsKTTgqxSdl78tRZja_zQo'
divisor=1000
abc = -937.0961358420444
abc = abc / 28 / 10
print(abc)
import requests
import math
from datetime import timedelta
import datetime
import sys
from threading import Timer
import threading
import linecache
import random  # assumed missing import: random.randint is used in doCalc below
from time import sleep
ts = []
import ccxt
binance = ccxt.binance({'enableRateLimit': True,
"options":{"defaultMarket":"futures"},
'urls': {'api': {
'public': 'https://dapi.binance.com/dapi/v1',
'private': 'https://dapi.binance.com/dapi/v1',},}
})
#print(dir(binance))
#sleep(1)
SECONDS_IN_DAY = 3600 * 24
from cryptofeed import FeedHandler
from cryptofeed import FeedHandler
from cryptofeed.callback import BookCallback, TickerCallback, TradeCallback
from cryptofeed.defines import TICKER_FUTURES, TICKER_OKS, BID, ASK, FUNDING, L2_BOOK, OPEN_INTEREST, TICKER, TRADES
from cryptofeed.exchanges import OKEx, KrakenFutures, HuobiDM, BinanceFutures, FTX
fh = FeedHandler()
fundingwinners = []
from flask import Flask
from flask_cors import CORS, cross_origin
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
from flask import jsonify
import threading
print(2)
minArb = 0.015
print(minArb)
minArb = minArb * 75
print(minArb)
minArb = minArb * 365
print(minArb)
premiumwinners = []
@app.route('/json')
def summary():
global fundingwinners, premiumwinners
return jsonify({'premiumwinners': premiumwinners, 'fundingwinners': fundingwinners})
def loop_in_thread():
fh.run()
def loop_in_thread2():
app.run(host='0.0.0.0', port=8080)
def PrintException():
if warmup == False:
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
string = 'EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj)
if 'rder already queued for cancellatio' not in string:
print (string)
##sleep(1)
#if 'UNI' not in string:
##sleep(1)
# if 'Account does not have enough margin for order' not in string:
##sleep(1)
warmup = True
def handle_function():
global warmup
warmup = False
print(warmup)
thread = Timer(25,handle_function)
thread.daemon = True
thread.start()
async def ticker(feed, pair, bid, ask, timestamp, ex):
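    # Normalizes the exchange-specific pair name into a 'NAME-EXPIRY' key
    # ('PERP' for perpetuals) and records the bid, ask and mid price for that
    # instrument in the module-level dictionaries.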
global mids
#print(f'Ex?: {ex} Timestamp: {timestamp} Feed: {feed} Pair: {pair} Bid: {bid} Ask: {ask}')
if 'OKEX' in feed.upper():
ex = 'ftx'
if 'USDT' not in pair:
name = pair.split('-')[0]
if '-' not in pair:
return
dt = pair[-4:]
if dt == 'SWAP':
dt = 'PERP'
#print(pair)
else:
return
elif 'FTX' in feed:
ex = 'ftx'
name = pair.split('-')[0]
if '-' not in pair:
return
dt = pair.split('-')[1]
#print(dt)
elif 'KRAKEN' in feed:
if 'PI' in pair:
p = pair.split('_')[1]
name = p.replace('USD','').replace('XBT','BTC')
dt = 'PERP'
else:
name = pair.split('_')[1].split('_')[0].replace('USD', '').replace('XBT', 'BTC')
dt = pair[-4:]
ex = 'kraken'
elif 'BINANCE' in feed:
#ETH-USD_200925
name = pair.split('-')[0]
dt = pair[-4:]
ex = 'binance'
#print(dt)
# print(feed + '-' + name + '-' + dt +': ' + str( 0.5 * ( float(bid) + float(ask))))
mids[ex][name + '-' + dt] = 0.5 * ( float(ask) + float(bid))
bids[ex][name + '-' + dt] = float(bid)
asks[ex][name + '-' + dt] = float(ask)
async def book(feed, pair, book, timestamp, receipt_timestamp):
global mids
hb = 0
la = 99999999999999
for bid in book[BID]:
if bid > hb:
hb = bid
for ask in book[ASK]:
if ask < la:
la = ask
#print(pair)
dt = pair[-4:]
name = pair.split('20'+dt)[0]
#print(name)
# if 'BTC' in name and lastex != feed and lastbtc != 0.5 * ( float(bid) + float(ask)):
# lastex = feed
# lastbtc = 0.5 * ( float(bid) + float(ask))
#print(feed + '-' + name + '-' + dt +': ' + str( 0.5 * ( float(bid) + float(ask))))
#print(pair)
mids['huobi'][name + '-' + dt] = 0.5 * ( float(la) + float(hb))
#print(mids)
#print(f'Timestamp: {timestamp} Feed: {feed} Pair: {pair} Book Bid Size is {len(book[BID])} Ask Size is {len(book[ASK])}')
def cancelall():
try:
try:
ftx.privateDeleteOrders( )
except Exception as e:
PrintException()
except Exception as e:
PrintException()
arbwinnersavg = []
arbwinnersc = []
maxmax = 0
levs = {}
trading = {}
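# doOrder places a limit order (post-only by default) and retries recursively when the
# exchange rejects it: on 'Size too small' the size is scaled up, on insufficient margin
# it is scaled down, and it gives up once the recent errors keep alternating between the
# two. Orders worth less than 0.1 in quote terms are skipped outright.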
def doOrder(coin, direction, wantingcoin, prc, count, laste=None, postonly=True):
    if laste is None:  # avoid the shared mutable default argument
        laste = []
    if wantingcoin * mids['ftx'][coin] < 0.1:
return
try:
#print(wantingcoin * mids['ftx'][coin])
f = ftx.createOrder( coin, 'limit', direction, wantingcoin, prc, {'postOnly': postonly})
print(f)
except Exception as e:
try:
dummy_event.wait(timeout=((1/25)*1))
if 'Size too small' in str(e):
laste.append('small')
if len(laste) > 4:
if laste[-1] == 'small' and laste[-3] == 'small' and laste[-5] == 'small' and laste[-2] == 'large' and laste[-4] == 'large':
return
#PrintException()
return doOrder(coin, direction, wantingcoin * (3 * count), prc, count + 1, laste, postonly)
elif 'Account does not have enough' in str(e):
laste.append('large')
if len(laste) > 4:
if laste[-2] == 'small' and laste[-4] == 'small' and laste[-1] == 'large' and laste[-3] == 'large' and laste[-5] == 'large':
return
#PrintException()
return doOrder(coin, direction, wantingcoin / (3 * count), prc, count + 1, laste, postonly)
else:
PrintException()
except:
PrintException()
start_time = datetime.datetime.utcnow()- timedelta(hours = 0)
SECONDS_IN_DAY = 3600 * 24
def output_status():
try:
now = datetime.datetime.utcnow()
days = ( now - start_time ).total_seconds() / SECONDS_IN_DAY
print( '********************************************************************' )
#print( '%% Delta: %s%%'% round( self.get_pct_delta() / PCT, 1 ))
#print( 'Total Delta (BTC): %s' % round( sum( self.deltas.values()), 2 ))
#print_dict_of_dicts( {
# k: {
# 'BTC': self.deltas[ k ]
# } for k in self.deltas.keys()
# },
# roundto = 2, title = 'Deltas' )
#print(self.positions)
print('Positions')
for s in usdpos:
if usdpos[s] > balance / 100 or usdpos[s] < -1 * balance / 50:
print(s + ': $' + str(round(usdpos[s] * 100) / 100))
print('Skews')
for s in skews:
if skews[s] > balance / 100 or skews[s] < -1 * balance / 50:
print(s + ': $' + str(round(skews[s] * 100) / 100))
print('Orders #')
print(lenords)
print( 'Start Time: %s' % start_time.strftime( '%Y-%m-%d %H:%M:%S' ))
print( 'Current Time: %s' % now.strftime( '%Y-%m-%d %H:%M:%S' ))
print( 'Days: %s' % round( days, 1 ))
print( 'Hours: %s' % round( days * 24, 1 ))
equity_usd = balance
equity_btc = round(balance / mids['ftx']['BTC-PERP'] * 100000000) / 100000000
pnl_usd = equity_usd - firstbalance
pnl_btc = equity_btc - firstbtc
print( 'Equity ($): %7.2f' % equity_usd)
print( 'P&L ($) %7.2f' % pnl_usd)
print( 'Equity (BTC): %7.4f' % equity_btc)
print( 'P&L (BTC) %7.4f' % pnl_btc)
print('\nLeverage: ' + str(lev))
num = threading.active_count()
print('Threads: ' + str(num) + ' out of ' + str(startnum * 3 + 1))
print('Percs')
print(percs)
t = 0
for r in percs:
t = t + math.fabs(percs[r])
print('t result ' + str(t))
except:
PrintException()
"""
print( '\nMean Loop Time: %s' % round( self.mean_looptime, 2 ))
#self.cancelall()
print( '' )
print(' ')
days = ( datetime.utcnow() - self.start_time ).total_seconds() / SECONDS_IN_DAY
print('Volumes Traded Projected Daily of Required (' + str(days) + ' days passed thus far...)')
print('Equity: $' + str(round(self.equity_usd*100)/100))
btc = self.get_spot('BTC/USDT')
print('btc')
percent = self.equity_usd / btc
volumes = []
tradet = 0
feest = 0
for pair in pairs:
gettrades = self.getTrades(pair, 0, 0, 0)
#print(gettrades)
volume = (gettrades[0] / (gettrades[2]))
feest = feest + gettrades[0] * 0.0002
tradet = tradet + volume * 30
printprint = True
if pair in fifteens:
volume = (volume / 15000)
elif pair in tens:
volume = (volume / 10000)
elif pair in fives:
volume = (volume / 5000)
elif pair in threes:
volume = (volume / 3000)
else:
printprint = False
volume = (volume / 25000)
volumes.append(volume)
#print(volume)
if printprint == True:
print(pair + ': ' + str(round(volume*1000)/10) + '%' + ', (Real) USD traded: $' + str(round(gettrades[0]*100)/100) + ', fees paid: $' + str(round(gettrades[1] * 10000)/10000))
else:
print('(Real) USD traded: $' + str(round(gettrades[0]*100)/100) + ', fees paid: $' + str(round(gettrades[1] * 10000)/10000))
volumes.sort()
h = 100
for i in range(0,5):
if volumes[-i] < h and volumes[-i] > 0:
h = volumes[-i]
if h > 1:
h = 1
try:
h = 1 / h
except:
h = 1
mult = h
h = h * self.equity_usd
print('Approx. traded volumes over 30 days: ' + str(tradet) + ', in BTC: ' + str(round(tradet/btc*1000)/1000))
print('Approx. Min Equity at 25x in USD to Achieve 100% Daily Requirements Across 6 Highest %s Above: $' + str(round(h * 100)/100))
diff = h / self.equity_usd
print('That\'s ' + str(round(diff*100)/100) + 'x the balance now, bringing projected USD/month to: ' + str(round(tradet * diff * 100)/100) + ', and BTC: ' + str(round((tradet * diff / btc)* 100)/100))
apy = 365 / (gettrades[2])
pnl = (((self.equity_usd + feest) / self.equity_usd) -1) * 100
pnl2 = pnl * apy
print('Now, if we were running in a trial mode of Binance Market Maker Program\'s Rebates, or if we had achieved these rates already, we would have earned $' + str(round(feest * 100)/100) + ' by now, or rather earning ' + str(round(pnl*1000)/1000) + '% PnL so far, or ' + str(round(pnl2*1000)/1000) + ' % Annual Percentage Yield!')
btcneed = (((tradet * diff / btc) / 3000) )
if btcneed < 1 and btcneed != 0:
h = h / btcneed
print('For 3000 btc/month volumes, would make the equity minimum approx. $' + str(round(h * 100)/100))
"""
lenords = {}
def aThread(coin):
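    # Per-coin quoting loop: keeps at most one resting order per side, re-prices it via
    # editOrder when the top of book moves, crosses the spread (non-post-only) when the
    # inventory skew gets large, and stops quoting a side while its skew limit is exceeded.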
while True:
try:
global skews, lenords
#print(coin)
dummy_event.wait(timeout=((1/25)*1))
ords = ftx.fetchOpenOrders( coin )
lenords[coin] = len(ords)
#if len(ords) > 0:
#print(coin + ' lenords: ' + str(len(ords)) + ' & lev: ' + str(levs[coin.split('-')[0]]))
direction = 'buy'
prc = bids['ftx'][coin]
go = True
print(wanting)
#sleep(1000)
if coin.split('-')[0] not in skews:
skews[coin.split('-')[0]] = 0
for direction in ['buy', 'sell']:
if direction == 'buy' and skews[coin.split('-')[0]] < -1 * balance * 0.75 or direction == 'sell' and skews[coin.split('-')[0]] > balance * 0.75:
print('wowwwww market out yo ' + coin)
#if 'ETH' in coin:
#print(wanting[coin])
#trading[coin] = True
amt = wanting[coin]
if amt < 0:
amt = amt * -1
if direction == 'buy':
prc = asks['ftx'][coin]
elif direction == 'sell':
prc = bids['ftx'][coin]
doOrder(coin, direction, amt, prc, 1, [], False)
if coin in wanting:
if go == True:
wanting[coin] = (wanting[coin] / percs[coin.split('-')[0]]) * 3
if wanting[coin] < balance / 7:
wanting[coin] = wanting[coin] * 2
# #sleep(1)
try:
#dummy_event.wait(timeout=((1/25)*1))
#ords = ftx.fetchOpenOrders( coin )
gogo = True
for o in ords:
if direction == o['info']['side'] and o['info']['future'] == coin:
gogo = False
qty = o['info']['remainingSize']
if qty < 0:
qty = -1 * qty
print(qty)
#138 < 18 * 0.15
#if 'BTC' in coin:
#print('skews n wanting ' + coin)
#print(direction)
#print(skews[coin.split('-')[0]])
#print(wanting[coin] * 0.15)
##sleep(1)
if direction == 'sell' and -1 * balance / 4 < skews[coin.split('-')[0]] or direction == 'buy' and balance / 4 > skews[coin.split('-')[0]]:
if prc != o['info']['price']:
trading[coin] = True
dummy_event.wait(timeout=((1/25)*1))
try:
e = ftx.editOrder( o['info']['id'], coin, 'limit', direction, float(qty), prc, {'postOnly': True} )
except Exception as e:
PrintException()
try:
e = ftx.cancelOrder(o['info']['id'], coin)
except:
abc=123
##sleep(1)
else:
print(coin + ' skew bad on ' + direction)
#print(e)
print(wanting)
#sleep(1000)
#if 'ETH' in coin:
#print(gogo)
#print(direction)
#print(skews[coin.split('-')[0]])
#print(wanting[coin] * 0.15 )
if gogo == True:
if 'BTC' in coin:
#print('skews n wanting ' + coin)
#print(direction)
#print(skews[coin.split('-')[0]])
#print(wanting[coin] * 0.15)
if direction == 'sell' and -1 * balance / 4 < skews[coin.split('-')[0]] or direction == 'buy' and balance / 4 > skews[coin.split('-')[0]]:
abc=123
else:
print('can\'t ' + direction)
##sleep(10)
if direction == 'sell' and -1 * balance / 4 > skews[coin.split('-')[0]] and direction == 'buy' and balance / 4 < skews[coin.split('-')[0]]:
print('bad can\'t order!')
dummy_event.wait(timeout=5)
if direction == 'sell' and -1 * balance / 4 < skews[coin.split('-')[0]] or direction == 'buy' and balance / 4 > skews[coin.split('-')[0]]:
#if 'ETH' in coin:
#print(wanting[coin])
trading[coin] = True
doOrder(coin, direction, wanting[coin], prc, 1)
else:
print(coin + ' skew bad on ' + direction)
except:
PrintException()
except:
PrintException()
runtimefirst = True
def doCheckTrading():
global trading, wanting, skews
for coin in mids['ftx']:
if coin.split('-')[0] in levs:
try:
if coin.split('-')[0] not in skews:
skews[coin.split('-')[0]] = 0
direction = 'buy'
prc = bids['ftx'][coin]
go = False
if coin in wanting:
if wanting[coin] > 0:
go = True
#print('1')
try:
if skews[coin.split('-')[0]] > wanting[coin] * 0.15:
#print('cancel2! ' + coin)
#ords = ftx.fetchOpenOrders( coin )
gogo = True
#for o in ords:
#ftx.cancelOrder( o['info']['id'] , o['info']['future'])
go = False
except:
PrintException()
if wanting[coin] < 0:
go = True
#print('3')
try:
if skews[coin.split('-')[0]] < wanting[coin] * 0.15 * mids['ftx'][coin]:
#ords = ftx.fetchOpenOrders( coin )
gogo = True
#for o in ords:
# ftx.cancelOrder( o['info']['id'] , o['info']['future'])
#print('cancel! ' + coin)
go = False
except:
PrintException()
wanting[coin] = wanting[coin] * -1
direction = 'sell'
prc = asks['ftx'][coin]
if go == True:
wanting[coin] = (wanting[coin] / levs[coin.split('-')[0]]) * 3
# #sleep(1)
try:
dummy_event.wait(timeout=((1/25)*1))
ords = ftx.fetchOpenOrders( coin )
lenords[coin] = len(ords)
gogo = True
for o in ords:
if direction == o['info']['side'] and o['info']['future'] == coin:
gogo = False
qty = o['info']['remainingSize']
#138 < 18 * 0.15
#if 'BTC' in coin:
#print('skews n wanting ' + coin)
#print(direction)
#print(skews[coin.split('-')[0]])
#print(wanting[coin] * 0.15)
##sleep(1)
if direction == 'sell' and -1 * balance / 4 < skews[coin.split('-')[0]] or direction == 'buy' and balance / 4 > skews[coin.split('-')[0]]:
if prc != o['info']['price']:
trading[coin] = True
#e = ftx.editOrder( o['info']['id'], coin, 'limit', direction, float(qty), prc, {'postOnly': True} )
##sleep(1)
else:
print(coin + ' skew bad on ' + direction)
#print(e)
#if 'ETH' in coin:
#print(gogo)
#print(direction)
#print(skews[coin.split('-')[0]])
#print(wanting[coin] * 0.15 )
if gogo == True:
if direction == 'sell' and -1 * balance / 4 > skews[coin.split('-')[0]] and direction == 'buy' and balance / 4 < skews[coin.split('-')[0]]:
print('bad can\'t order!')
dummy_event.wait(timeout=5)
if direction == 'sell' and -1 * balance / 4 < skews[coin.split('-')[0]] or direction == 'buy' and balance / 4 > skews[coin.split('-')[0]]:
#if 'ETH' in coin:
#print(wanting[coin])
trading[coin] = True
#doOrder(coin, direction, wanting[coin], prc, 1)
else:
print(coin + ' skew bad on ' + direction)
except:
PrintException()
except:
PrintException()
def doCalc():
index = 59
while True:
try:
#sleep(3)
r = random.randint(0, 1000)
if r <= 5:
cancelall()
global levs, premiumwinners, arbwinnersc, arbwinnersavg, maxmax, trading, ts, runtimefirst
#dt_to_string = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
#added = worksheet2.append_row([dt_to_string, '=VALUE(' + str((balance/firstbalance-1)*100) + ')'], 'USER_ENTERED')
doCheckTrading()
rans = []
toran = []
#print('w3')
#print(wanting['BTC-0326'])
#print(wanting['BTC-1225'])
#print(wanting['BTC-PERP'])
donecoins = []
print(len(wanting))
for w in wanting:
toran.append(w)
for i in range(0, len(toran)*4-1):
ran = random.randint(0, len(toran)-1)
coin = toran[ran]
if coin not in donecoins:
print(coin)
donecoins.append(coin)
t = threading.Thread(target=aThread, args=(coin,))
t.daemon = True
ts.append(t)
t.start()
rans = []
donecoins = []
toran = []
for t in trading:
if t in marketList:
if trading[t] == False or result[t.split('-')[0]] <= 1:
toran.append(t)
for i in range(0, len(toran)*4-1):
ran = random.randint(0, len(toran)-1)
coin = toran[ran]
if coin not in donecoins:
donecoins.append(coin)
#print(coin)
if coin in marketList:
t = threading.Thread(target=doMm, args=(coin,))
#t.daemon = True
#ts.append(t)
#t.start()
done = False
#sleep(5)
num = threading.active_count()
print('threads outside loop: ' + str(num) + ' out of ' + str(startnum * 3 + 1))
while done == False:
    #sleep(5)
    num = threading.active_count()  # refresh the count so the wait can actually finish
    #print(num % 4)
    #print('threads: ' + str(num))
    if num <= startnum * 3 + 1:
        done = True
#for t in ts:
# t.exit()
ts = []
index = index + 1
if index >= 10:
index = 0
output_status()
sleep(1)
sleep(1)
except:
PrintException()
ftx = ccxt.ftx({
'apiKey': apikey,
'secret': apisecret,
'enableRateLimit': True
})
markets = ftx.fetchMarkets()
marketList = []
for m in markets:
if 'OIL' not in m['symbol'] and '-' in m['symbol']:
marketList.append(m['symbol'])
print(marketList)
cancelall()
sizeIncrements = {}
r = requests.get('https://ftx.com/api/markets').json()['result']
for m in r:
sizeIncrements[m['name']] = m['sizeIncrement']
markets = binance.fetchMarkets()
futs = '200925'
for m in markets:
#print(m['id'])
try:
binance.dapiPrivatePostLeverage({'symbol': m['id'], 'leverage': 75})
except:
pass
huobi = ccxt.huobipro({"urls": {'api':{'public': 'https://api.hbdm.com/swap-api',
'private': 'https://api.hbdm.com/swap-api'}}})
insts = binance.fetchMarkets()
#print(insts[0])
bin_futures_all = insts
funding = {}
exchanges = ['binance', 'kraken', 'ftx', 'huobi', 'okex']
mids = {}
bids = {}
asks = {}
for ex in exchanges:
funding[ex] = {}
mids[ex] = {}
bids[ex] = {}
asks[ex] = {}
expis = {}
futureends = ["_CW", "_NW", "_CQ", "_NQ"]
precisions = {}
ticksizes = {}
rates = {}
for ex in exchanges:
rates[ex] = {}
"""
huobi = requests.get("https://api.hbdm.com/api/v1/contract_contract_info").json()['data']
huobis = []
dts = []
for market in huobi:
#stri = str(huobi[market])
#if 'usd' in market['quoteId']:
if market['symbol'] not in huobis:
huobis.append(market['symbol'])
dt = market['delivery_date']
expiry = datetime.datetime.strptime(
dt,
'%Y%m%d' )
#print(dt)
dt = dt[-4:]
if dt not in dts:
dts.append(dt)
now = datetime.datetime.utcnow()
days = ( expiry - now ).total_seconds() / SECONDS_IN_DAY
#print(days)
expis[dt] = days
#print(expis)
#print(huobi[market])
bcontracts = []
pairs = requests.get('https://dapi.binance.com/dapi/v1/exchangeInfo').json()
for symbol in pairs['symbols']:
split = len(symbol['baseAsset'])
#if 'BTC' in symbol['symbol']:
#print(symbol['symbol'])
normalized = symbol['symbol'][:split] + '-' + symbol['symbol'][split:]
bcontracts.append(normalized)
config = {TICKER: bcontracts}
fh.add_feed(BinanceFutures(config=config, callbacks={TICKER: TickerCallback(ticker)}))
ofuts = []
oswaps = []
swaps = requests.get('https://www.okex.com/api/swap/v3/instruments').json()
futures = requests.get('https://www.okex.com/api/futures/v3/instruments').json()
for s in swaps:
oswaps.append(s['instrument_id'])
for f in futures:
ofuts.append(f['instrument_id'])
config = {TICKER_OKS: oswaps
,TICKER_FUTURES: ofuts}
fh.add_feed(OKEx(config=config, callbacks={TICKER_FUTURES: TickerCallback(ticker), TICKER_OKS: TickerCallback(ticker)}))
#print(expis)
takens = []
dts.sort()
#print(dts)
times = {"_CW": dts[0], "_NW": dts[1], "_CQ": dts[2], "_NQ": dts[3]}
for fut in bin_futures_all:
try:
split = (fut['info']['symbol']).split('_')[1][-4:]
expi = datetime.datetime.fromtimestamp(fut['info']['deliveryDate'] / 1000)
now = datetime.datetime.utcnow()
days = ( expi - now ).total_seconds() / SECONDS_IN_DAY
#print(days)
#print(days)
expis[split] = days
precisions[fut['info']['symbol']] = 1
ticksizes[fut['info']['symbol']] = 1
for precision in range(0, fut['info']['pricePrecision']):
precisions[fut['info']['symbol']] = precisions[fut['info']['symbol']] / 10
ticksizes[fut['info']['symbol']]= ticksizes[fut['info']['symbol']] / 10
#print(fut['info']['symbol'])
#print(ticksizes_binance[fut['info']['symbol']])
#print(precisions_binance[fut['info']['symbol']])
except:
PrintException()
ftx = requests.get("https://ftx.com/api/funding_rates").json()['result']
doneFtx = {}
for rate in ftx:
doneFtx[rate['future'].replace('-PERP', '')] = False
for rate in ftx:
if rate['future'].replace('-PERP', '') != 'BTC' and rate['future'].replace('-PERP', '') != 'ETH':
if doneFtx[rate['future'].replace('-PERP', '')] == False:
doneFtx[rate['future'].replace('-PERP', '')] = True
rates['ftx'][rate['future'].replace('-PERP', '')] = rate['rate'] * 24
allfuts = []
expiries = {}
hcontracts = []
for contract in huobis:
for futureend in futureends:
hcontracts.append(contract + futureend)
config = {L2_BOOK: hcontracts}
fh.add_feed(HuobiDM(config=config, callbacks={L2_BOOK: BookCallback(book)}))
kcontracts = []
binance = requests.get("https://dapi.binance.com/dapi/v1/premiumIndex").json()
#binance_f = requests.get("https://fapi.binance.com/fapi/v1/premiumIndex").json()
kraken = requests.get("https://futures.kraken.com/derivatives/api/v3/tickers").json()
for market in kraken['tickers']:
if 'tag' in market:
kcontracts.append(market['symbol'].upper())
#print(kcontracts)
config = {TICKER: kcontracts}
fh.add_feed(KrakenFutures(config=config, callbacks={TICKER: TickerCallback(ticker)}))
"""
fcontracts = []
ftxmarkets = requests.get("https://ftx.com/api/futures").json()['result']
excluded = ('MOVE', 'HASH', 'BERNIE', 'TRUMP', 'BIDEN', 'BLOOM', 'PETE', 'WARREN')
for market in ftxmarkets:
    if not any(word in market['name'] for word in excluded):
        fcontracts.append(market['name'])
config = {TICKER: fcontracts}
fh.add_feed(FTX(config=config, callbacks={TICKER: TickerCallback(ticker)}))
#loop = asyncio.get_event_loop()
t = threading.Thread(target=loop_in_thread, args=())
t.start()
sleep(5)
levs = {'EXCH': 0.37553863680201305, 'BTMX': 1.5414075400914378e-07, 'KNC': 6.504949311775132e-05, 'AMPL': 0.0003068704707663269, 'XRP': 0.0006384053495788409, 'ZEC': 0.013748364608799758, 'COMP': 0.0629017519054634, 'ETH': 2.0804287716760284, 'PAXG': 0.06948621343310857, 'FIL': 0.18360624334823186, 'OKB': 0.004431717207269474, 'SUSHI': 0.0007188568091976016, 'LINK': 0.10218120047799369, 'PRIV': 0.05535607927851392, 'EOS': 0.013079940586044732, 'LEO': 1.0682260698596705e-05, 'UNI': 0.006283060901768046, 'ATOM': 0.0039046197880313782, 'TRX': 4.467987974508902e-05, 'BNB': 0.07183256137756255, 'HNT': 1.960033017153904e-05, 'YFI': 2.0804287716760284, 'DEFI': 0.7226054407069601, 'AAVE': 0.011336436999694966, 'TRYB': 2.6519982646385847e-06, 'AVAX': 0.0013048080367771176, 'VET': 1.3166190327889263e-06, 'ALT': 0.5072715134352992, 'ADA': 0.00011662264337981375, 'TOMO': 7.651949823380968e-05, 'BTC': 2.0804287716760284, 'CREAM': 0.029009585298679273, 'DOT': 0.0069686083325276845, 'BCH': 0.8946385933299136, 'XTZ': 0.002295816002623051, 'ETC': 0.0006230217286083596, 'THETA': 0.000824750143370905, 'DOGE': 4.9580533201608386e-08, 'LTC': 0.21036516090203852, 'USDT': 0.00025816775330248843, 'XAUT': 0.05589805457579935, 'DMG': 8.515814764419041e-06, 'MTA': 3.780109516306997e-05, 'SHIT': 0.17053239923391053, 'MKR': 0.01108933548734965, 'CUSDT': 1.3594205858973971e-07, 'CHZ': 1.1212631509476283e-07, 'DRGN': 0.03747354727919051, 'MATIC': 3.849056292476446e-06, 'FLM': 2.4591184576301782e-05, 'BAL': 0.0009743637094156327, 'BSV': 0.6206267187197785, 'BRZ': 1.901169209272247e-06, 'HT': 0.0008406819850498995, 'MID': 0.3298700363253504, 'OMG': 0.0003251759801236203, 'NEO': 0.0024258124136339668, 'ALGO': 0.0002364707420270012, 'RUNE': 6.511742139383555e-05, 'UNISWAP': 0.41328401717525715, 'SXP': 0.002745614771531447, 'SOL': 0.00039568130566015446}
for lev in levs:
if levs[lev] == 0:
levs[lev] = 1
dummy_event = threading.Event()
levsdone = True
def ohlcvs():
global levs, levsdone
while True:
try:
cvs = {}
for arb in mids['ftx']:
if 'PERP' in arb:
try:
if 'PERP' in arb:
dummy_event.wait(timeout=((1/25)*1))
ohlcvs = ftx.fetch_ohlcv(arb)
vs = []
for ohlcv in ohlcvs:
vs.append(ohlcv[-1] * ohlcv[-2])
#print(arb)
#print(sum(vs))
if len(vs) > 0:
cvs[arb.split('-')[0]] = (sum(vs))
else:
cvs[arb.split('-')[0]] = 0
except Exception as e:
print(e)
t = 0
c = 0
maxi = 0
maxi2 = 0
maxi3 = 0
maxi4 = 0
for v in cvs:
t = t + cvs[v]
c = c + 1
if cvs[v] > maxi:
maxi4 = maxi3
maxi3 = maxi2
maxi2 = maxi
maxi = cvs[v]
t = t - maxi - maxi2 - maxi3 - maxi4
avg = t / c
#print(avg)
levs = {}
newlist = {}
t = 0
for v in cvs:
if cvs[v] < maxi3:
t = t + cvs[v]
newlist[v] = cvs[v]
#print(t)
#print('average')
#print(avg)
maxlev = 0
for v in cvs:
levs[v] = (5 * cvs[v] / t)
if levs[v] > 10:
levs[v] = 4.5
if levs[v] > 7:
levs[v] = 3.5
if levs[v] > maxlev:
maxlev = levs[v]
levsnew = {}
t = 0
for l in levs:
t = t + levs[l]
margin = ftx.privateGetAccount()
# marg = 1 / margin['result']['openMarginFraction']
marg = 5
mult = 5
if marg > 20: #((20 / 20.53) * 20) / 2
mult = ((5 / marg) * 5) / 2
print('mult')
print(mult)
#sleep(1000)
for l in levs:
levsnew[l] = (levs[l] / t) * mult# edit to account for int rounddown of percs object later
#levs = levsnew
levsdone = True
#print('levs')
#print(levs)
#levs['BTC'] = 10
#levs['ETH'] = 10
#newlevs = levs
#percs2 = {}
#for l in levs:
# percs2[l] = levs[l] / maxlev
# newlevs[l] = percs2[l] * 7.5
#levs = newlevs
#for lev in levs:
# if levs[lev] == 0:
# levs[lev] = 1
print('levs')
print(levs)
except Exception as e:
print(e)
t = threading.Thread(target=ohlcvs, args=())
t.daemon = True
ts.append(t)
t.start()
#t = threading.Thread(target=loop_in_thread2, args=())
#t.start()
print(expis)
import random, string
import requests
import math
funding = {}
exchanges = ['binance']#['binance', 'kraken', 'ftx', 'phemex', 'okex']
for ex in exchanges:
funding[ex] = {}
def randomword(length):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(length))
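# Illustrative note: randomword(15) returns a 15-character lowercase string such as
# 'kqzrtwplmvxucdy' (example output only); it is used below to build unique
# newClientOrderId values of the form 'x-v0tiKJjj-' + randomword(15).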
def doupdates():
global fundingwinners
#todo: replace with dapi.binance.com/, and change all of the ccxt stuff in ccxt/binance.py to dapi.binance.com
binance2 = requests.get('https://dapi.binance.com/dapi/v1/premiumIndex').json()
for obj in binance2:
try:
funding['binance'][obj['symbol'].replace('USDT', '')] = float(obj['lastFundingRate']) * 3
except:
pass
"""
kraken = requests.get('https://futures.kraken.com/derivatives/api/v3/tickers').json()['tickers']
for obj in kraken:
if 'tag' in obj:
if obj['tag'] == 'perpetual':
funding['kraken'][obj['pair'].replace('XBT','BTC').replace(':USD', '')] = float(obj['fundingRate']) * 3
ftx = requests.get('https://ftx.com/api/funding_rates').json()['result']
takenftx = []
for obj in ftx:
if obj['future'].replace('-PERP','') not in takenftx:
takenftx.append(obj['future'].replace('-PERP',''))
funding['ftx'][obj['future'].replace('-PERP','')] = float(obj['rate']) * 24
phemproducts = requests.get('https://api.phemex.com/exchange/public/cfg/v2/products').json()['data']['products']
phemperps = []
for obj in phemproducts:
if obj['type'] == 'Perpetual':
phemperps.append(obj['symbol'])
for perp in phemperps:
phemex = requests.get('https://api.phemex.com/md/ticker/24hr?symbol=' + perp).json()['result']
funding['phemex'][perp.replace('USD', '')] = float(phemex['fundingRate'])/100000000*3
swaps = requests.get('https://www.okex.com/api/swap/v3/instruments').json()
for s in swaps:
okex = requests.get('https://www.okex.com/api/swap/v3/instruments/' + s['instrument_id'] + '/funding_time').json()
funding['okex'][okex['instrument_id'].replace('-USDT-SWAP', '').replace('-USD-SWAP', '')] = float(okex['funding_rate']) * 3
"""
rates = {}
for ex in funding:
rates[ex] = {}
for coin in funding[ex]:
rates[ex][coin] = []
for ex in funding:
for coin in funding[ex]:
rates[ex][coin].append(float(funding[ex][coin]))
APRS = {}
longshorts = {}
for ex in rates:
APRS[ex] = {}
for coin in rates[ex]:
maximum = max(rates[ex][coin])
minimum = min(rates[ex][coin])
# #print(coin)
# #print(math.fabs(maximum) * 100)
# #print(math.fabs(minimum) * 100)
# #print(str(0.015*3))
# #print(' ')
if math.fabs(maximum) > math.fabs(minimum):
if (math.fabs(maximum) * 365 * 100 * 75 / 2) - minArb > 0:
if maximum < 0:
longshorts[coin] = 'long'
else:
longshorts[coin] = 'short'
APRS[ex][coin] = (math.fabs(maximum) * 365 * 100 * 75 / 2) - minArb
else:
if (math.fabs(minimum) * 365 * 100 * 75 / 2) - minArb > 0:
if minimum < 0:
longshorts[coin] = 'long'
else:
longshorts[coin] = 'short'
APRS[ex][coin] = (math.fabs(minimum) * 365 * 100 * 75 / 2) - minArb
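# Worked example with assumed numbers: a funding rate of 0.0001 per 8-hour period is
# stored above as 0.0001 * 3 = 0.0003 per day, and the APR formula then gives
# 0.0003 * 365 * 100 * 75 / 2 = 410.625 (percent) before minArb is subtracted.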
fundingwinners = []
t = 0
c = 0
for ex in APRS:
maximum = 0
winner = ""
for coin in APRS[ex]:
if APRS[ex][coin] > 0 and ('LINK' in coin or 'BTC' in coin or 'ETH' in coin or 'ADA' in coin):
t = t + APRS[ex][coin]
c = c + 1
fundingwinners.append({'ex': ex, 'coin': coin, 'arb': APRS[ex][coin]})
# #print({'ex': ex, 'coin': coin, 'arb': APRS[ex][coin]})
#print('The Maximum funding opportunity on ' + ex + ' now is ' + winner + ' with ' + str(maximum) + '%!')
percs = {}
tobuy = {}
for ex in APRS:
maximum = 0
winner = ""
for coin in APRS[ex]:
if APRS[ex][coin] > 0 and ('LINK' in coin or 'BTC' in coin or 'ETH' in coin or 'ADA' in coin):
percs[coin] = 1#APRS[ex][coin] / t
#((1000000 * 0.66) * 75 /2) / 10
#((25 * 0.25 ) * 75 / 2) / 10
tobuy[coin] = ((balances[coin.split('_')[0].replace('USD', '')] * percs[coin]) * 75 / 2) / 10
tobuy[coin.replace('PERP', futs)] = tobuy[coin] * -1
elif 'LINK' in coin or 'BTC' in coin or 'ETH' in coin or 'ADA' in coin:
tobuy[coin] = 0
tobuy[coin.replace('PERP', futs)] = 0
#print(percs)
for coin in longshorts:
if longshorts[coin] == 'short':
try:
tobuy[coin] = tobuy[coin] * -1
tobuy[coin.replace('PERP', futs)] = tobuy[coin.replace('PERP', futs)] * -1
except:
pass
#print(tobuy)
##sleep(100)
for coin in tobuy:
#cancelall(coin)
#-100 btc
#-800
#100
#800
try:
if math.fabs((tobuy[coin]) / (balances[coin.split('_')[0].replace('USD', '')] * 75)) > ((1/divisor) * 0.5) / 75:
if 'BTC' in coin:
tobuy[coin] = tobuy[coin] / 10
tobuy[coin] = tobuy[coin] - pos[coin] / 10
else:
tobuy[coin] = tobuy[coin] - pos[coin] / 100
#print(tobuy)
direction = 'BUY'
if tobuy[coin] < 0:
direction = 'SELL'
tobuy[coin] = tobuy[coin] * -1
if tobuy[coin] != 0:
#print(tobuy[coin])
bbo = mids['binance'][coin.replace('USD', '-USD')]
#print(int(tobuy[coin] / divisor))
#print(tobuy[coin])
if direction == 'SELL':
binance.dapiPrivatePostOrder( {'timeInForce': 'GTC', 'symbol': coin, 'side': direction, 'type': 'LIMIT', 'price': bbo['bid'], 'quantity': int(tobuy[coin] / divisor),"newClientOrderId": "x-v0tiKJjj-" + randomword(15)})
else:
binance.dapiPrivatePostOrder( {'timeInForce': 'GTC', 'symbol': coin, 'side': direction, 'type': 'LIMIT', 'price': bbo['ask'], 'quantity': int(tobuy[coin] / divisor),"newClientOrderId": "x-v0tiKJjj-" + randomword(15)})
except:
PrintException()
#print(tobuy)
balances = {}
totrade = ['BTC', 'ETH', 'LINK', 'ADA']
pos = {}
usdpos = {}
skews = {}
for t in totrade:
balances[t] = 0
def updatePositions():
while True:
try:
#sleep(3)
global positions, skews
skewsnew = {}
positions = ftx.privateGetPositions()['result']
for p in positions:
name = p['future']
#if p['entryPrice'] is not None:
# #print(p)
skewsnew[p['future'].split('-')[0]] = 0
size = float(p['size'])
if p['side'] == 'sell':
size = size * -1
pos[p['future']] = size
usdpos[p['future']] = float(p['cost'])
for p in positions:
size = float(p['cost'])
#if p['side'] == 'sell':
# size = size * -1
skewsnew[p['future'].split('-')[0]] = skewsnew[p['future'].split('-')[0]] + size
skews = skewsnew
for skew in skews:
if skews[skew] > 0.75 * balance or skews[skew] < -0.75 * balance:
print('skew ' + skew + ': ' + str(skews[skew]))
#sleep(3)
dummy_event.wait(timeout=((1/25)*1))
except:
#sleep(9)
PrintException()
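# Illustrative example of the skew bookkeeping above, assuming FTX reports 'cost' with
# its sign as the commented-out sign flip suggests: a long BTC-PERP position costing
# +60000 USD and a short BTC-0326 position costing -31000 USD give skews['BTC'] == 29000,
# the net USD exposure per underlying that the quoting code compares against fractions
# of the account balance.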
lev = 0
firstrun = True
firstbalance = 0
firstbtc = 0
def updateBalance():
while True:
try:
#sleep(3)
global firstrun, firstbalance, balance, lev, firstbtc
bal2 = ftx.fetchBalance()
newbal = 0
##print(bal2)
for coin in bal2['info']['result']:
newbal = newbal + coin['usdValue']
t = 0
for pos in usdpos:
t = t + math.fabs(usdpos[pos])
lev = t / newbal
balance = newbal
#print(balance)
#print(lev)
if firstrun == True and balance != 0 and mids['ftx']['BTC-PERP'] > 0:
firstrun = False
firstbalance = balance
firstbtc = round(balance / mids['ftx']['BTC-PERP'] * 100000000) / 100000000
#if random.randint(0,100) <= 1:
# print('balance: ' + str(balance))
#sleep(3)
dummy_event.wait(timeout=((1/25)*1))
except:
#sleep(10)
PrintException()
print(1)
startnum = threading.active_count()
t = threading.Thread(target=updateBalance, args=())
t.daemon = True
ts.append(t)
t.start()
print(3)
#sleep(1)
t = threading.Thread(target=updatePositions, args=())
t.daemon = True
ts.append(t)
t.start()
print(2)
wanting = {}
percs = {}
def wantingThread():
global wanting, maxmax, trading, ts, runtimefirst, expis, result, percs
while True:
try:
for ex in mids:
for dt in mids[ex]:
if dt.split('-')[1] not in expis:
try:
if 'PERP' in dt:
expis[dt.split('-')[1]] = 30000
else:
now = datetime.datetime.utcnow()
expiry = datetime.datetime.strptime(
'2021' + dt.split('-')[1],
'%Y%m%d' )
days = ( expiry - now ).total_seconds() / SECONDS_IN_DAY
#print(days)
#print(dt.split('-')[1])
expis[dt.split('-')[1]] = days
except:
pass
try:
if levsdone == True:
dts = []
coins = []
tempmids = mids
for ex in tempmids:
for coin in tempmids[ex]:
#print(coin)
if coin.split('-')[1] not in dts:
dts.append(coin.split('-')[1])
if coin.split('-')[0] not in coins:
coins.append(coin.split('-')[0])
arbs = {}
exes = {}
#print(expis)
for coin in coins:
arbs[coin] = {}
exes[coin] = {}
for ex in tempmids:
for dt in expis:
arbs[coin][dt] = []
exes[coin][dt] = {}
for coin in coins:
for ex in tempmids:
for dt in tempmids[ex]:
try:
exes[coin][dt.split('-')[1]][tempmids[ex][dt]] = ex
except:
pass
#PrintException()
# print(dt)
if coin in dt:
try:
if '-' in dt:
if 'e' not in str(tempmids[ex][dt]):
arbs[coin][dt.split('-')[1]].append(tempmids[ex][dt])
except:
#PrintException()
pass
perps = {}
lalaexes = {}
for coin in coins:
for ex in tempmids:
for dt in tempmids[ex]:
# print(dt)
if coin in dt and 'PERP' in dt:
perps[coin] = tempmids[ex][dt]
lalaexes[coin] = ex
for coin in arbs:
for dt in arbs[coin]:
try:
if '-' in dt:
if 'e' not in str(perps[coin]):
arbs[coin][dt].append(perps[coin])
exes[coin][dt][perps[coin]] = lalaexes[coin]
except:
# PrintException()
PrintException()
#print(exes)
#print(arbs)
#print(expis)
thearbs = []
coinarbs = {}
"""
print('arbs')
print(len(arbs))
print('expis')
print(len(expis))
print('tempmids')
print(len(tempmids['ftx']))
"""
for coin in arbs:
for dt in expis:
if dt != 'PERP' and coin + '-PERP' in marketList:
trading[coin + '-PERP'] = False
trading[coin + '-' + dt] = False
try:
#print(len(arbs[coin][dt]))
#if len(arbs[coin][dt]) > 0:
minimum = tempmids['ftx'][coin + '-PERP'] #10900/10709 #pos long perp, neg short perp
maximum = tempmids['ftx'][coin + '-' + dt] #pos short fut, neg long fut #pos long perp, neg short perp
# if coin == 'BTC':
## #print(arbs[coin][dt])
# #print(maximum / minimum)
thearb = ((((maximum / minimum)-1)*100)*365*levs[coin]) #1.1/1.05 =
#print(thearb)
#print(expis[dt])
#print('thearb of ' + coin + ' at ' + dt + ' in total ' + str(thearb))
thearb = thearb / expis[dt]
coinarbs[thearb] = coin + '-' + dt
#print(thearb)
#print( ' ' )
#print(thearb)
if thearb > 0.005 or thearb < -0.005:
thearbs.append(thearb)
else:
thearbs.append(0)
"""
if thearb > 10 and coin != 'USDT':
# print(exes[coin][dt])
thearbs.append({'exlong': exes[coin][dt][minimum], 'exshort': exes[coin][dt][maximum], 'coin': coin, 'thearb': thearb, 'dt': dt, 'arbscoindt': arbs[coin][dt]})
#print({'exlong': exes[coin][dt][minimum], 'exshort': exes[coin][dt][maximum], 'coin': coin, 'thearb': thearb, 'dt': dt, 'arbscoindt': arbs[coin][dt]})
"""
# print('and after figuring out daily arb it\'s ' + str(thearb))
except Exception as e:
if coin not in str(e):
PrintException()
pass  #PrintException()
t = 0
c = 0
#print('thearbs')
#print(len(thearbs))
for arb in thearbs:
t = t + math.fabs(arb)
if arb != 0:
c = c + 1
percs = {}
result = {}
wanting = {}
if c > 0:
avg = t / c
c1 = c
t = 0
c = 0
maxi = 0
t = 0
c = 0
maxi = 0
for arb in thearbs:
if math.fabs(arb) > avg:
#print(coinarbs[arb] + ' is a good coin no?')
if arb > maxi:
maxi = arb
if arb > maxmax:
maxmax = arb
t = t + math.fabs(arb)
c = c + 1
cvs = {}
for arb in thearbs:
if arb != 0:
try:
#if math.fabs(arb) > avg:
percs[coinarbs[arb].split('-')[0]] = arb / t
lev = coinarbs[arb].split('-')[0]
percs[lev] = percs[lev] * 10
result[lev] = ((2 * percs[lev] + 2 * levs[lev]) / 4) * 10
if coinarbs[arb] in usdpos: #-0.3 * 10 * 100 + 360 #-0.16*400*10+1826=1186
#-0.16*400*10-266=-906
wanting[coinarbs[arb]] = percs[lev] * balance
wanting[coinarbs[arb]] = wanting[coinarbs[arb]] * -1
wanting[coinarbs[arb]] = wanting[coinarbs[arb]] - usdpos[coinarbs[arb]]
else:
wanting[coinarbs[arb]] = percs[lev] * balance
wanting[coinarbs[arb]] = wanting[coinarbs[arb]] * -1
if coinarbs[arb].split('-')[0] + '-PERP' in usdpos:
wanting[coinarbs[arb].split('-')[0] + '-PERP'] = percs[lev] * balance - usdpos[coinarbs[arb].split('-')[0] + '-PERP']
else:
wanting[coinarbs[arb].split('-')[0] + '-PERP'] = percs[lev] * balance
"""
else:
if arb != 0:
percs[coinarbs[arb].split('-')[0]] = 0
if coinarbs[arb] in usdpos:
wanting[coinarbs[arb]] = -1 * usdpos[coinarbs[arb]]
else:
wanting[coinarbs[arb]] = 0
if coinarbs[arb].split('-')[0] + '-PERP' in usdpos:
wanting[coinarbs[arb].split('-')[0] + '-PERP'] = -1 * usdpos[coinarbs[arb].split('-')[0] + '-PERP']
else:
wanting[coinarbs[arb].split('-')[0] + '-PERP'] = 0
"""
except:
PrintException()
for pos2 in usdpos:
if pos2 not in wanting:
if usdpos[pos2] != 0:
wanting[pos2] = -1 * usdpos[pos2]
#print(pos2)
#print(wanting)
##sleep(10)
#print('w1')
#print(wanting['BTC-0326'])
#print(wanting['BTC-1225'])
#print(wanting['BTC-PERP'])
#pos short fut, neg long fut #pos long perp, neg short perp
for coin in wanting:
wanting[coin] = wanting[coin] / mids['ftx'][coin] / (balance / 200)
# round(0.1 * (0.1)) / 0.1
# abc = -937.0961358420444
# abc = abc / 28 / 10
# abc = ( round(abc * (0.1)) / 0.1)
wanting[coin] = ( round(wanting[coin] * (1/sizeIncrements[coin])) / (1/sizeIncrements[coin]))
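# Illustrative example of the size-increment rounding above (numbers assumed): with
# sizeIncrements[coin] == 0.0001, a raw size of 0.123456 becomes
# round(0.123456 * 10000) / 10000 == 0.1235, i.e. the quantity is snapped to the
# exchange's minimum size step.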
lowers = {}
counts = {}
twanting = {}
#print('w2')
#print(wanting['BTC-0326'])
#print(wanting['BTC-1225'])
#print(wanting['BTC-PERP'])
for arb in wanting:
lowers[arb] = 999999999999999
counts[arb.split('-')[0]] = 0
twanting[arb] = 0
for arb in wanting:
if 'PERP' not in arb:
try:
counts[arb.split('-')[0]] = counts[arb.split('-')[0]] + 1
twanting[arb.split('-')[0] + '-PERP'] = twanting[arb.split('-')[0] + '-PERP'] + wanting[arb]
except:
counts[arb.split('-')[0]] = 1
twanting[arb.split('-')[0] + '-PERP'] = 0 + wanting[arb]
for arb in wanting:
#print(arb)
try:
if counts[arb.split('-')[0]] == 2 and 'PERP' not in arb:
wanting[arb] = wanting[arb] * mids['ftx'][arb] * (balance / 50)
wanting[arb] = wanting[arb] + usdpos[arb]
wanting[arb] = wanting[arb] * -1
twanting[arb] = wanting[arb] / 2
twanting[arb] = twanting[arb] - usdpos[arb]
twanting[arb] = twanting[arb] * -1
twanting[arb] = twanting[arb] / mids['ftx'][arb] / (balance / 50)
elif counts[arb.split('-')[0]] == 2 and 'PERP' in arb:
wanting[arb.split('-')[0] + '-PERP'] = wanting[arb.split('-')[0] + '-PERP'] * mids['ftx'][arb.split('-')[0] + '-PERP'] * (balance / 50)
wanting[arb.split('-')[0] + '-PERP'] = wanting[arb.split('-')[0] + '-PERP'] + usdpos[arb.split('-')[0] + '-PERP']
twanting[arb.split('-')[0] + '-PERP'] = wanting[arb.split('-')[0] + '-PERP'] / 2
wanting[arb.split('-')[0] + '-PERP'] = wanting[arb.split('-')[0] + '-PERP'] - usdpos[arb.split('-')[0] + '-PERP']
twanting[arb.split('-')[0] + '-PERP'] = twanting[arb.split('-')[0] + '-PERP'] / mids['ftx'][arb.split('-')[0] + '-PERP'] / (balance / 50)
else:
twanting[arb] = wanting[arb]
twanting[arb] = round(twanting[arb] * (1/sizeIncrements[arb])) / (1/sizeIncrements[arb])
except:
twanting[arb] = wanting[arb]
wanting = twanting
except:
PrintException()
#sleep(2)
except:
PrintException()
#sleep(2)
t = threading.Thread(target=wantingThread, args=())
t.daemon = True
t.start()
"""
import gspread
spreadsheetId = "1kJIZH2Ku2M_T_Grz6rGqMLfCrJhO_y-V77RCuMh4BeA" # Please set the Spreadsheet ID.
sheetName = 'Sheet1' # Please set the sheet name.
client = gspread.service_account(filename='../google.json')
sh = client.open_by_key(spreadsheetId)
worksheet = sh.worksheet("Sheet1")
try:
worksheet2 = sh.worksheet(apikey)
except:
sh.add_worksheet(apikey, 1, 2)
worksheet2 = sh.worksheet(apikey)
worksheet2.append_row(['datetime', 'balance'])
"""
#sleep(10)
#sleep(2)
#print(levs)
##sleep(1000)
##sleep(1000)
#ohlcvs()
def doMm(coin):
global trading
while True:
try:
skews.setdefault(coin.split('-')[0], 0)  # make sure the coin has a skew entry
for direction in ['buy', 'sell']:
try:
if direction == 'buy':
prc = bids['ftx'][coin]
else:
prc = asks['ftx'][coin]
except:
pass
#ob = ftx.fetchOrderBook(coin, 1)
#print(ob)
dummy_event.wait(timeout=((1/25)*1))
ords = ftx.fetchOpenOrders( coin )
lenords[coin] = len(ords)
gogo = True
for o in ords:
if direction == o['info']['side'] and o['info']['future'] == coin:
gogo = False
#if 'ETH' in coin:
#print('edit')
qty = o['info']['remainingSize']
if qty < 0:
qty = -1 * qty
print(qty)
#138 < 18 * 0.15
##sleep(1)
skews.setdefault(coin.split('-')[0], 0)  # make sure the coin has a skew entry
if direction == 'sell' and -1 * balance / 80 < skews[coin.split('-')[0]] or direction == 'buy' and balance / 80 > skews[coin.split('-')[0]]:
if prc != o['info']['price']:
trading[coin] = True
dummy_event.wait(timeout=((1/25)*1))
try:
e = ftx.editOrder( o['info']['id'], coin, 'limit', direction, float(qty), prc, {'postOnly': True} )
except Exception as e:
PrintException()
try:
e = ftx.cancelOrder(o['info']['id'], coin)
except:
pass
#if direction == 'buy':
#if 'DEFI' in coin:
#print(direction + ' mm edit ' + coin)
#PrintException()
#else:
#if 'DEFI' in coin:
#print('same price mm edit gogo false...')
if gogo == True:
try:
skews.setdefault(coin.split('-')[0], 0)  # make sure the coin has a skew entry
if direction == 'sell' and -1 * balance / 80 < skews[coin.split('-')[0]] or direction == 'buy' and balance / 80 > skews[coin.split('-')[0]]:
#print(10 / mids['ftx'][coin])
mm = doOrder(coin, direction, ((balance / 700)) / mids['ftx'][coin], prc, 1)
#if direction == 'buy':
#print('mm ' + coin)
#print(mm)
except Exception as e:
if coin.split('-')[0] in str(e):
mm = doOrder(coin, direction, 1 / mids['ftx'][coin], prc, 1)
#if 'DEFI' in coin:
#print(direction + ' mm ' + coin)
except:
PrintException()
import sys
print(0)
##sleep(1)
#sleep(7)
doCalc()
##sleep(1)
##sleep(20)
#updateBalance()
##sleep(5)
#doupdates()
##sleep(35)
startnum = 0
test_dag_serialization.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for stringified DAGs."""
import copy
import importlib
import importlib.util
import json
import multiprocessing
import os
from datetime import datetime, timedelta
from glob import glob
from unittest import mock
import pendulum
import pytest
from dateutil.relativedelta import FR, relativedelta
from kubernetes.client import models as k8s
from airflow.exceptions import SerializationError
from airflow.hooks.base import BaseHook
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.models import DAG, Connection, DagBag
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.models.mappedoperator import MappedOperator
from airflow.models.param import Param, ParamsDict
from airflow.models.xcom import XCOM_RETURN_KEY, XCom
from airflow.operators.bash import BashOperator
from airflow.security import permissions
from airflow.serialization.json_schema import load_dag_schema_dict
from airflow.serialization.serialized_objects import (
SerializedBaseOperator,
SerializedDAG,
SerializedTaskGroup,
)
from airflow.timetables.simple import NullTimetable, OnceTimetable
from airflow.utils import timezone
from airflow.utils.context import Context
from airflow.utils.operator_resources import Resources
from airflow.utils.task_group import TaskGroup
from tests.test_utils.mock_operators import CustomOperator, GoogleLink, MockOperator
from tests.test_utils.timetables import CustomSerializationTimetable, cron_timetable, delta_timetable
executor_config_pod = k8s.V1Pod(
metadata=k8s.V1ObjectMeta(name="my-name"),
spec=k8s.V1PodSpec(
containers=[
k8s.V1Container(name="base", volume_mounts=[k8s.V1VolumeMount(name="my-vol", mount_path="/vol/")])
]
),
)
serialized_simple_dag_ground_truth = {
"__version": 1,
"dag": {
"default_args": {
"__type": "dict",
"__var": {
"depends_on_past": False,
"retries": 1,
"retry_delay": {"__type": "timedelta", "__var": 300.0},
"max_retry_delay": {"__type": "timedelta", "__var": 600.0},
"sla": {"__type": "timedelta", "__var": 100.0},
},
},
"start_date": 1564617600.0,
'_task_group': {
'_group_id': None,
'prefix_group_id': True,
'children': {'bash_task': ('operator', 'bash_task'), 'custom_task': ('operator', 'custom_task')},
'tooltip': '',
'ui_color': 'CornflowerBlue',
'ui_fgcolor': '#000',
'upstream_group_ids': [],
'downstream_group_ids': [],
'upstream_task_ids': [],
'downstream_task_ids': [],
},
"is_paused_upon_creation": False,
"_dag_id": "simple_dag",
"doc_md": "### DAG Tutorial Documentation",
"fileloc": None,
"tasks": [
{
"task_id": "bash_task",
"owner": "airflow",
"retries": 1,
"retry_delay": 300.0,
"max_retry_delay": 600.0,
"sla": 100.0,
"downstream_task_ids": [],
"_inlets": [],
"_is_dummy": False,
"_outlets": [],
"ui_color": "#f0ede4",
"ui_fgcolor": "#000",
"template_ext": ['.sh', '.bash'],
"template_fields": ['bash_command', 'env'],
"template_fields_renderers": {'bash_command': 'bash', 'env': 'json'},
"bash_command": "echo {{ task.task_id }}",
"_task_type": "BashOperator",
"_task_module": "airflow.operators.bash",
"pool": "default_pool",
"executor_config": {
'__type': 'dict',
'__var': {
"pod_override": {
'__type': 'k8s.V1Pod',
'__var': PodGenerator.serialize_pod(executor_config_pod),
}
},
},
"doc_md": "### Task Tutorial Documentation",
},
{
"task_id": "custom_task",
"retries": 1,
"retry_delay": 300.0,
"max_retry_delay": 600.0,
"sla": 100.0,
"downstream_task_ids": [],
"_inlets": [],
"_is_dummy": False,
"_outlets": [],
"_operator_extra_links": [{"tests.test_utils.mock_operators.CustomOpLink": {}}],
"ui_color": "#fff",
"ui_fgcolor": "#000",
"template_ext": [],
"template_fields": ['bash_command'],
"template_fields_renderers": {},
"_task_type": "CustomOperator",
"_task_module": "tests.test_utils.mock_operators",
"pool": "default_pool",
},
],
"schedule_interval": {"__type": "timedelta", "__var": 86400.0},
"timezone": "UTC",
"_access_control": {
"__type": "dict",
"__var": {
"test_role": {
"__type": "set",
"__var": [permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT],
}
},
},
"edge_info": {},
"dag_dependencies": [],
"params": {},
},
}
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
CUSTOM_TIMETABLE_SERIALIZED = {
"__type": "tests.test_utils.timetables.CustomSerializationTimetable",
"__var": {"value": "foo"},
}
def make_example_dags(module_path):
"""Loads DAGs from a module for test."""
dagbag = DagBag(module_path)
return dagbag.dags
def make_simple_dag():
"""Make very simple DAG to verify serialization result."""
with DAG(
dag_id='simple_dag',
default_args={
"retries": 1,
"retry_delay": timedelta(minutes=5),
"max_retry_delay": timedelta(minutes=10),
"depends_on_past": False,
"sla": timedelta(seconds=100),
},
start_date=datetime(2019, 8, 1),
is_paused_upon_creation=False,
access_control={"test_role": {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT}},
doc_md="### DAG Tutorial Documentation",
) as dag:
CustomOperator(task_id='custom_task')
BashOperator(
task_id='bash_task',
bash_command='echo {{ task.task_id }}',
owner='airflow',
executor_config={"pod_override": executor_config_pod},
doc_md="### Task Tutorial Documentation",
)
return {'simple_dag': dag}
def make_user_defined_macro_filter_dag():
"""Make DAGs with user defined macros and filters using locally defined methods.
For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.
The examples here test:
(1) functions can be successfully displayed on UI;
(2) templates with function macros have been rendered before serialization.
"""
def compute_next_execution_date(dag, execution_date):
return dag.following_schedule(execution_date)
default_args = {'start_date': datetime(2019, 7, 10)}
dag = DAG(
'user_defined_macro_filter_dag',
default_args=default_args,
user_defined_macros={
'next_execution_date': compute_next_execution_date,
},
user_defined_filters={'hello': lambda name: f'Hello {name}'},
catchup=False,
)
BashOperator(
task_id='echo',
bash_command='echo "{{ next_execution_date(dag, execution_date) }}"',
dag=dag,
)
return {dag.dag_id: dag}
def collect_dags(dag_folder=None):
"""Collects DAGs to test."""
dags = {}
dags.update(make_simple_dag())
dags.update(make_user_defined_macro_filter_dag())
if dag_folder:
if isinstance(dag_folder, (list, tuple)):
patterns = dag_folder
else:
patterns = [dag_folder]
else:
patterns = [
"airflow/example_dags",
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
for pattern in patterns:
for directory in glob(f"{ROOT_FOLDER}/{pattern}"):
dags.update(make_example_dags(directory))
# Filter subdags as they are stored in same row in Serialized Dag table
dags = {dag_id: dag for dag_id, dag in dags.items() if not dag.is_subdag}
return dags
def get_timetable_based_simple_dag(timetable):
"""Create a simple_dag variant that uses timetable instead of schedule_interval."""
dag = collect_dags(["airflow/example_dags"])["simple_dag"]
dag.timetable = timetable
dag.schedule_interval = timetable.summary
return dag
def serialize_subprocess(queue, dag_folder):
"""Validate pickle in a subprocess."""
dags = collect_dags(dag_folder)
for dag in dags.values():
queue.put(SerializedDAG.to_json(dag))
queue.put(None)
@pytest.fixture()
def timetable_plugin(monkeypatch):
"""Patch plugins manager to always and only return our custom timetable."""
from airflow import plugins_manager
monkeypatch.setattr(plugins_manager, "initialize_timetables_plugins", lambda: None)
monkeypatch.setattr(
plugins_manager,
"timetable_classes",
{"tests.test_utils.timetables.CustomSerializationTimetable": CustomSerializationTimetable},
)
class TestStringifiedDAGs:
"""Unit tests for stringified DAGs."""
def setup_method(self):
self.backup_base_hook_get_connection = BaseHook.get_connection
BaseHook.get_connection = mock.Mock(
return_value=Connection(
extra=(
'{'
'"project_id": "mock", '
'"location": "mock", '
'"instance": "mock", '
'"database_type": "postgres", '
'"use_proxy": "False", '
'"use_ssl": "False"'
'}'
)
)
)
self.maxDiff = None
def teardown_method(self):
BaseHook.get_connection = self.backup_base_hook_get_connection
def test_serialization(self):
"""Serialization and deserialization should work for every DAG and Operator."""
dags = collect_dags()
serialized_dags = {}
for _, v in dags.items():
dag = SerializedDAG.to_dict(v)
SerializedDAG.validate_schema(dag)
serialized_dags[v.dag_id] = dag
# Compares with the ground truth of JSON string.
self.validate_serialized_dag(serialized_dags['simple_dag'], serialized_simple_dag_ground_truth)
@pytest.mark.parametrize(
"timetable, serialized_timetable",
[
(
cron_timetable("0 0 * * *"),
{
"__type": "airflow.timetables.interval.CronDataIntervalTimetable",
"__var": {"expression": "0 0 * * *", "timezone": "UTC"},
},
),
(
CustomSerializationTimetable("foo"),
CUSTOM_TIMETABLE_SERIALIZED,
),
],
)
@pytest.mark.usefixtures("timetable_plugin")
def test_dag_serialization_to_timetable(self, timetable, serialized_timetable):
"""Verify a timetable-backed schedule_interval is excluded in serialization."""
dag = get_timetable_based_simple_dag(timetable)
serialized_dag = SerializedDAG.to_dict(dag)
SerializedDAG.validate_schema(serialized_dag)
expected = copy.deepcopy(serialized_simple_dag_ground_truth)
del expected["dag"]["schedule_interval"]
expected["dag"]["timetable"] = serialized_timetable
self.validate_serialized_dag(serialized_dag, expected)
def test_dag_serialization_unregistered_custom_timetable(self):
"""Verify serialization fails without timetable registration."""
dag = get_timetable_based_simple_dag(CustomSerializationTimetable("bar"))
with pytest.raises(SerializationError) as ctx:
SerializedDAG.to_dict(dag)
message = (
"Failed to serialize DAG 'simple_dag': Timetable class "
"'tests.test_utils.timetables.CustomSerializationTimetable' "
"is not registered"
)
assert str(ctx.value) == message
def validate_serialized_dag(self, json_dag, ground_truth_dag):
"""Verify serialized DAGs match the ground truth."""
assert json_dag['dag']['fileloc'].split('/')[-1] == 'test_dag_serialization.py'
json_dag['dag']['fileloc'] = None
def sorted_serialized_dag(dag_dict: dict):
"""
Sorts the "tasks" list and "access_control" permissions in the
serialised dag python dictionary. This is needed as the order of
items should not matter but assertEqual would fail if the order of
items changes in the dag dictionary
"""
dag_dict["dag"]["tasks"] = sorted(dag_dict["dag"]["tasks"], key=lambda x: sorted(x.keys()))
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"] = sorted(
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"]
)
return dag_dict
assert sorted_serialized_dag(ground_truth_dag) == sorted_serialized_dag(json_dag)
def test_deserialization_across_process(self):
"""A serialized DAG can be deserialized in another process."""
# Since we need to parse the dags twice here (once in the subprocess,
# and once here to get a DAG to compare to) we don't want to load all
# dags.
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=serialize_subprocess, args=(queue, "airflow/example_dags"))
proc.daemon = True
proc.start()
stringified_dags = {}
while True:
v = queue.get()
if v is None:
break
dag = SerializedDAG.from_json(v)
assert isinstance(dag, DAG)
stringified_dags[dag.dag_id] = dag
dags = collect_dags("airflow/example_dags")
assert set(stringified_dags.keys()) == set(dags.keys())
# Verify deserialized DAGs.
for dag_id in stringified_dags:
self.validate_deserialized_dag(stringified_dags[dag_id], dags[dag_id])
def test_roundtrip_provider_example_dags(self):
dags = collect_dags(
[
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
)
# Verify deserialized DAGs.
for dag in dags.values():
serialized_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(serialized_dag, dag)
@pytest.mark.parametrize(
"timetable",
[cron_timetable("0 0 * * *"), CustomSerializationTimetable("foo")],
)
@pytest.mark.usefixtures("timetable_plugin")
def test_dag_roundtrip_from_timetable(self, timetable):
"""Verify a timetable-backed serialization can be deserialized."""
dag = get_timetable_based_simple_dag(timetable)
roundtripped = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(roundtripped, dag)
def validate_deserialized_dag(self, serialized_dag, dag):
"""
Verify that all example DAGs work with DAG Serialization by
checking fields between Serialized Dags & non-Serialized Dags
"""
fields_to_check = dag.get_serialized_fields() - {
# Doesn't implement __eq__ properly. Check manually.
'timetable',
'timezone',
# Need to check fields in it, to exclude functions.
'default_args',
"_task_group",
'params',
}
for field in fields_to_check:
assert getattr(serialized_dag, field) == getattr(
dag, field
), f'{dag.dag_id}.{field} does not match'
if dag.default_args:
for k, v in dag.default_args.items():
if callable(v):
# Check we stored _something_.
assert k in serialized_dag.default_args
else:
assert (
v == serialized_dag.default_args[k]
), f'{dag.dag_id}.default_args[{k}] does not match'
assert serialized_dag.timetable.summary == dag.timetable.summary
assert serialized_dag.timetable.serialize() == dag.timetable.serialize()
assert serialized_dag.timezone.name == dag.timezone.name
for task_id in dag.task_ids:
self.validate_deserialized_task(serialized_dag.get_task(task_id), dag.get_task(task_id))
def validate_deserialized_task(
self,
serialized_task,
task,
):
"""Verify non-airflow operators are casted to BaseOperator."""
assert isinstance(serialized_task, SerializedBaseOperator)
assert not isinstance(task, SerializedBaseOperator)
assert isinstance(task, BaseOperator)
# Every task should have a task_group property -- even if it's the DAG's root task group
assert serialized_task.task_group
fields_to_check = task.get_serialized_fields() - {
# Checked separately
'_task_type',
'subdag',
# Type is excluded, so don't check it
'_log',
# List vs tuple. Check separately
'template_ext',
'template_fields',
# We store the string, real dag has the actual code
'on_failure_callback',
'on_success_callback',
'on_retry_callback',
# Checked separately
'resources',
'params',
}
assert serialized_task.task_type == task.task_type
assert set(serialized_task.template_ext) == set(task.template_ext)
assert set(serialized_task.template_fields) == set(task.template_fields)
assert serialized_task.upstream_task_ids == task.upstream_task_ids
assert serialized_task.downstream_task_ids == task.downstream_task_ids
for field in fields_to_check:
assert getattr(serialized_task, field) == getattr(
task, field
), f'{task.dag.dag_id}.{task.task_id}.{field} does not match'
if serialized_task.resources is None:
assert task.resources is None or task.resources == []
else:
assert serialized_task.resources == task.resources
# Ugly hack as some operators override params var in their init
if isinstance(task.params, ParamsDict):
assert serialized_task.params.dump() == task.params.dump()
# Check that for Deserialized task, task.subdag is None for all other Operators
# except for the SubDagOperator where task.subdag is an instance of DAG object
if task.task_type == "SubDagOperator":
assert serialized_task.subdag is not None
assert isinstance(serialized_task.subdag, DAG)
else:
assert serialized_task.subdag is None
@pytest.mark.parametrize(
"dag_start_date, task_start_date, expected_task_start_date",
[
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc),
),
(pendulum.datetime(2019, 8, 1, tz='UTC'), None, pendulum.datetime(2019, 8, 1, tz='UTC')),
],
)
def test_deserialization_start_date(self, dag_start_date, task_start_date, expected_task_start_date):
dag = DAG(dag_id='simple_dag', start_date=dag_start_date)
BaseOperator(task_id='simple_task', dag=dag, start_date=task_start_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_start_date or dag_start_date >= task_start_date:
# If dag.start_date > task.start_date -> task.start_date=dag.start_date
# because of the logic in dag.add_task()
assert "start_date" not in serialized_dag["dag"]["tasks"][0]
else:
assert "start_date" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert simple_task.start_date == expected_task_start_date
def test_deserialization_with_dag_context(self):
with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1, tzinfo=timezone.utc)) as dag:
BaseOperator(task_id='simple_task')
# should not raise RuntimeError: dictionary changed size during iteration
SerializedDAG.to_dict(dag)
@pytest.mark.parametrize(
"dag_end_date, task_end_date, expected_task_end_date",
[
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc),
),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
),
],
)
def test_deserialization_end_date(self, dag_end_date, task_end_date, expected_task_end_date):
dag = DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1), end_date=dag_end_date)
BaseOperator(task_id='simple_task', dag=dag, end_date=task_end_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_end_date or dag_end_date <= task_end_date:
# If dag.end_date < task.end_date -> task.end_date=dag.end_date
# because of the logic in dag.add_task()
assert "end_date" not in serialized_dag["dag"]["tasks"][0]
else:
assert "end_date" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert simple_task.end_date == expected_task_end_date
@pytest.mark.parametrize(
"serialized_timetable, expected_timetable",
[
({"__type": "airflow.timetables.simple.NullTimetable", "__var": {}}, NullTimetable()),
(
{
"__type": "airflow.timetables.interval.CronDataIntervalTimetable",
"__var": {"expression": "@weekly", "timezone": "UTC"},
},
cron_timetable("0 0 * * 0"),
),
({"__type": "airflow.timetables.simple.OnceTimetable", "__var": {}}, OnceTimetable()),
(
{
"__type": "airflow.timetables.interval.DeltaDataIntervalTimetable",
"__var": {"delta": 86400.0},
},
delta_timetable(timedelta(days=1)),
),
(CUSTOM_TIMETABLE_SERIALIZED, CustomSerializationTimetable("foo")),
],
)
@pytest.mark.usefixtures("timetable_plugin")
def test_deserialization_timetable(
self,
serialized_timetable,
expected_timetable,
):
serialized = {
"__version": 1,
"dag": {
"default_args": {"__type": "dict", "__var": {}},
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"timetable": serialized_timetable,
},
}
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
assert dag.timetable == expected_timetable
def test_deserialization_timetable_unregistered(self):
serialized = {
"__version": 1,
"dag": {
"default_args": {"__type": "dict", "__var": {}},
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"timetable": CUSTOM_TIMETABLE_SERIALIZED,
},
}
SerializedDAG.validate_schema(serialized)
with pytest.raises(ValueError) as ctx:
SerializedDAG.from_dict(serialized)
message = (
"Timetable class "
"'tests.test_utils.timetables.CustomSerializationTimetable' "
"is not registered"
)
assert str(ctx.value) == message
@pytest.mark.parametrize(
"serialized_schedule_interval, expected_timetable",
[
(None, NullTimetable()),
("@weekly", cron_timetable("0 0 * * 0")),
("@once", OnceTimetable()),
(
{"__type": "timedelta", "__var": 86400.0},
delta_timetable(timedelta(days=1)),
),
],
)
def test_deserialization_schedule_interval(
self,
serialized_schedule_interval,
expected_timetable,
):
"""Test DAGs serialized before 2.2 can be correctly deserialized."""
serialized = {
"__version": 1,
"dag": {
"default_args": {"__type": "dict", "__var": {}},
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"schedule_interval": serialized_schedule_interval,
},
}
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
assert dag.timetable == expected_timetable
@pytest.mark.parametrize(
"val, expected",
[
(relativedelta(days=-1), {"__type": "relativedelta", "__var": {"days": -1}}),
(relativedelta(month=1, days=-1), {"__type": "relativedelta", "__var": {"month": 1, "days": -1}}),
# Every friday
(relativedelta(weekday=FR), {"__type": "relativedelta", "__var": {"weekday": [4]}}),
# Every second friday
(relativedelta(weekday=FR(2)), {"__type": "relativedelta", "__var": {"weekday": [4, 2]}}),
],
)
def test_roundtrip_relativedelta(self, val, expected):
serialized = SerializedDAG._serialize(val)
assert serialized == expected
round_tripped = SerializedDAG._deserialize(serialized)
assert val == round_tripped
@pytest.mark.parametrize(
"val, expected_val",
[
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
({"param_1": {1, 2, 3}}, {"param_1": {1, 2, 3}}),
],
)
def test_dag_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag', params=val)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag_json = SerializedDAG.to_json(dag)
serialized_dag = json.loads(serialized_dag_json)
assert "params" in serialized_dag["dag"]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
assert expected_val == deserialized_dag.params.dump()
assert expected_val == deserialized_simple_task.params.dump()
def test_invalid_params(self):
"""
Test to make sure that only native Param objects are being passed as dag or task params
"""
class S3Param(Param):
def __init__(self, path: str):
schema = {"type": "string", "pattern": r"s3:\/\/(.+?)\/(.+)"}
super().__init__(default=path, schema=schema)
dag = DAG(dag_id='simple_dag', params={'path': S3Param('s3://my_bucket/my_path')})
with pytest.raises(SerializationError):
SerializedDAG.to_dict(dag)
dag = DAG(dag_id='simple_dag')
BaseOperator(
task_id='simple_task',
dag=dag,
start_date=datetime(2019, 8, 1),
params={'path': S3Param('s3://my_bucket/my_path')},
)
@pytest.mark.parametrize(
'param',
[
Param('my value', description='hello', schema={'type': 'string'}),
Param('my value', description='hello'),
Param(None, description=None),
],
)
def test_full_param_roundtrip(self, param):
"""
Test that a Param object's value, description, and schema survive a full serialization round trip.
"""
dag = DAG(dag_id='simple_dag', params={'my_param': param})
serialized_json = SerializedDAG.to_json(dag)
serialized = json.loads(serialized_json)
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
assert dag.params["my_param"] == param.value
observed_param = dag.params.get_param('my_param')
assert isinstance(observed_param, Param)
assert observed_param.description == param.description
assert observed_param.schema == param.schema
@pytest.mark.parametrize(
"val, expected_val",
[
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
({"param_1": {1, 2, 3}}, {"param_1": {1, 2, 3}}),
],
)
def test_task_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag')
BaseOperator(task_id='simple_task', dag=dag, params=val, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
assert "params" in serialized_dag["dag"]["tasks"][0]
else:
assert "params" not in serialized_dag["dag"]["tasks"][0]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
assert expected_val == deserialized_simple_task.params.dump()
@pytest.mark.parametrize(
("bash_command", "serialized_links", "links"),
[
pytest.param(
"true",
[{'tests.test_utils.mock_operators.CustomOpLink': {}}],
{"Google Custom": "http://google.com/custom_base_link?search=true"},
id="non-indexed-link",
),
pytest.param(
["echo", "true"],
[
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}},
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}},
],
{
"BigQuery Console #1": "https://console.cloud.google.com/bigquery?j=echo",
"BigQuery Console #2": "https://console.cloud.google.com/bigquery?j=true",
},
id="multiple-indexed-links",
),
],
)
def test_extra_serialized_field_and_operator_links(
self, bash_command, serialized_links, links, dag_maker
):
"""
Assert that the extra serialized field exists and that Operator Links defined in
plugins as well as inbuilt Operator Links work. This test also depends on
GoogleLink() being registered as a plugin in tests/plugins/test_plugin.py.

It verifies that when an extra operator link is registered in a plugin via
``operator_extra_links`` and the same link is also defined on the Operator in
``BaseOperator.operator_extra_links``, the correct extra link is used.

If CustomOperator is called with a string argument for bash_command it has a
single link; if called with a list it has one link per element. We use this to
test the serialization of link data.
"""
test_date = timezone.DateTime(2019, 8, 1, tzinfo=timezone.utc)
with dag_maker(dag_id='simple_dag', start_date=test_date) as dag:
CustomOperator(task_id='simple_task', bash_command=bash_command)
serialized_dag = SerializedDAG.to_dict(dag)
assert "bash_command" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert getattr(simple_task, "bash_command") == bash_command
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == serialized_links
# Test all the extra_links are set
assert set(simple_task.extra_links) == {*links, 'airflow', 'github', 'google'}
dr = dag_maker.create_dagrun(execution_date=test_date)
(ti,) = dr.task_instances
XCom.set(
key='search_query',
value=bash_command,
task_id=simple_task.task_id,
dag_id=simple_task.dag_id,
run_id=dr.run_id,
)
# Test Deserialized inbuilt link
for name, expected in links.items():
link = simple_task.get_extra_links(ti, name)
assert link == expected
# Test Deserialized link registered via Airflow Plugin
link = simple_task.get_extra_links(ti, GoogleLink.name)
assert "https://www.google.com" == link
def test_extra_operator_links_logs_error_for_non_registered_extra_links(self, caplog):
"""
Assert that when an OperatorLink is neither registered via plugins nor an inbuilt
Operator Link, the DAG can still be deserialized (it does not error) but an error is logged.
"""
class TaskStateLink(BaseOperatorLink):
"""OperatorLink not registered via Plugins nor a built-in OperatorLink"""
name = 'My Link'
def get_link(self, operator, dttm):
return 'https://www.google.com'
class MyOperator(BaseOperator):
"""Just a DummyOperator using above defined Extra Operator Link"""
operator_extra_links = [TaskStateLink()]
def execute(self, context: Context):
pass
with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1)) as dag:
MyOperator(task_id='blah')
serialized_dag = SerializedDAG.to_dict(dag)
with caplog.at_level("ERROR", logger="airflow.serialization.serialized_objects"):
SerializedDAG.from_dict(serialized_dag)
expected_err_msg = (
"Operator Link class 'tests.serialization.test_dag_serialization.TaskStateLink' not registered"
)
assert expected_err_msg in caplog.text
class ClassWithCustomAttributes:
"""
Class for testing purposes: allows creating objects with custom attributes in a single statement.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return f"{self.__class__.__name__}({str(self.__dict__)})"
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@pytest.mark.parametrize(
"templated_field, expected_field",
[
(None, None),
([], []),
({}, {}),
("{{ task.task_id }}", "{{ task.task_id }}"),
(["{{ task.task_id }}", "{{ task.task_id }}"]),
({"foo": "{{ task.task_id }}"}, {"foo": "{{ task.task_id }}"}),
({"foo": {"bar": "{{ task.task_id }}"}}, {"foo": {"bar": "{{ task.task_id }}"}}),
(
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
),
(
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
),
(
ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
"ClassWithCustomAttributes("
"{'att1': '{{ task.task_id }}', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']})",
),
(
ClassWithCustomAttributes(
nested1=ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
nested2=ClassWithCustomAttributes(
att3="{{ task.task_id }}", att4="{{ task.task_id }}", template_fields=["att3"]
),
template_fields=["nested1"],
),
"ClassWithCustomAttributes("
"{'nested1': ClassWithCustomAttributes({'att1': '{{ task.task_id }}', "
"'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
"'nested2': ClassWithCustomAttributes({'att3': '{{ task.task_id }}', 'att4': "
"'{{ task.task_id }}', 'template_fields': ['att3']}), 'template_fields': ['nested1']})",
),
],
)
def test_templated_fields_exist_in_serialized_dag(self, templated_field, expected_field):
"""
Test that templated_fields exists for all Operators in Serialized DAG
        Since we don't want to inflate arbitrary python objects (it poses an RCE/security risk etc.),
        we want to check that non-"basic" objects are turned into strings after deserializing.
"""
dag = DAG("test_serialized_template_fields", start_date=datetime(2019, 8, 1))
with dag:
BashOperator(task_id="test", bash_command=templated_field)
serialized_dag = SerializedDAG.to_dict(dag)
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_test_task = deserialized_dag.task_dict["test"]
assert expected_field == getattr(deserialized_test_task, "bash_command")
def test_dag_serialized_fields_with_schema(self):
"""
Additional Properties are disabled on DAGs. This test verifies that all the
keys in DAG.get_serialized_fields are listed in Schema definition.
"""
dag_schema: dict = load_dag_schema_dict()["definitions"]["dag"]["properties"]
# The parameters we add manually in Serialization needs to be ignored
ignored_keys: set = {
"is_subdag",
"tasks",
"has_on_success_callback",
"has_on_failure_callback",
"dag_dependencies",
"params",
}
keys_for_backwards_compat: set = {
"_concurrency",
}
dag_params: set = set(dag_schema.keys()) - ignored_keys - keys_for_backwards_compat
assert set(DAG.get_serialized_fields()) == dag_params
def test_operator_subclass_changing_base_defaults(self):
assert (
BaseOperator(task_id='dummy').do_xcom_push is True
), "Precondition check! If this fails the test won't make sense"
class MyOperator(BaseOperator):
def __init__(self, do_xcom_push=False, **kwargs):
super().__init__(**kwargs)
self.do_xcom_push = do_xcom_push
op = MyOperator(task_id='dummy')
assert op.do_xcom_push is False
blob = SerializedBaseOperator.serialize_operator(op)
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert serialized_op.do_xcom_push is False
def test_no_new_fields_added_to_base_operator(self):
"""
        This test verifies that no new fields were added to BaseOperator, and serves as a
        reminder that tests should be added for any new field.
"""
base_operator = BaseOperator(task_id="10")
fields = {k: v for (k, v) in vars(base_operator).items() if k in BaseOperator.get_serialized_fields()}
assert fields == {
'_inlets': [],
'_log': base_operator.log,
'_outlets': [],
'_pre_execute_hook': None,
'_post_execute_hook': None,
'depends_on_past': False,
'downstream_task_ids': set(),
'do_xcom_push': True,
'doc': None,
'doc_json': None,
'doc_md': None,
'doc_rst': None,
'doc_yaml': None,
'email': None,
'email_on_failure': True,
'email_on_retry': True,
'execution_timeout': None,
'executor_config': {},
'max_active_tis_per_dag': None,
'max_retry_delay': None,
'on_execute_callback': None,
'on_failure_callback': None,
'on_retry_callback': None,
'on_success_callback': None,
'owner': 'airflow',
'params': {},
'pool': 'default_pool',
'pool_slots': 1,
'priority_weight': 1,
'queue': 'default',
'resources': None,
'retries': 0,
'retry_delay': timedelta(0, 300),
'retry_exponential_backoff': False,
'run_as_user': None,
'sla': None,
'task_id': '10',
'trigger_rule': 'all_success',
'wait_for_downstream': False,
'weight_rule': 'downstream',
}, """
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ACTION NEEDED! PLEASE READ THIS CAREFULLY AND CORRECT TESTS CAREFULLY
Some fields were added to the BaseOperator! Please add them to the list above and make sure that
you add support for DAG serialization - you should add the field to
`airflow/serialization/schema.json` - they should have correct type defined there.
Note that we do not support versioning yet so you should only add optional fields to BaseOperator.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
def test_operator_deserialize_old_names(self):
blob = {
"task_id": "custom_task",
"_downstream_task_ids": ['foo'],
"template_ext": [],
"template_fields": ['bash_command'],
"template_fields_renderers": {},
"_task_type": "CustomOperator",
"_task_module": "tests.test_utils.mock_operators",
"pool": "default_pool",
"ui_color": "#fff",
"ui_fgcolor": "#000",
}
SerializedDAG._json_schema.validate(blob, _schema=load_dag_schema_dict()['definitions']['operator'])
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert serialized_op.downstream_task_ids == {'foo'}
def test_task_resources(self):
"""
Test task resources serialization/deserialization.
"""
from airflow.operators.dummy import DummyOperator
execution_date = datetime(2020, 1, 1)
task_id = 'task1'
with DAG("test_task_resources", start_date=execution_date) as dag:
task = DummyOperator(task_id=task_id, resources={"cpus": 0.1, "ram": 2048})
SerializedDAG.validate_schema(SerializedDAG.to_dict(dag))
json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
deserialized_task = json_dag.get_task(task_id)
assert deserialized_task.resources == task.resources
assert isinstance(deserialized_task.resources, Resources)
def test_task_group_serialization(self):
"""
Test TaskGroup serialization/deserialization.
"""
from airflow.operators.dummy import DummyOperator
execution_date = datetime(2020, 1, 1)
with DAG("test_task_group_serialization", start_date=execution_date) as dag:
task1 = DummyOperator(task_id="task1")
with TaskGroup("group234") as group234:
_ = DummyOperator(task_id="task2")
with TaskGroup("group34") as group34:
_ = DummyOperator(task_id="task3")
_ = DummyOperator(task_id="task4")
task5 = DummyOperator(task_id="task5")
task1 >> group234
group34 >> task5
dag_dict = SerializedDAG.to_dict(dag)
SerializedDAG.validate_schema(dag_dict)
json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(json_dag, dag)
serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
assert serialized_dag.task_group.children
assert serialized_dag.task_group.children.keys() == dag.task_group.children.keys()
def check_task_group(node):
try:
children = node.children.values()
except AttributeError:
# Round-trip serialization and check the result
expected_serialized = SerializedBaseOperator.serialize_operator(dag.get_task(node.task_id))
expected_deserialized = SerializedBaseOperator.deserialize_operator(expected_serialized)
expected_dict = SerializedBaseOperator.serialize_operator(expected_deserialized)
assert node
assert SerializedBaseOperator.serialize_operator(node) == expected_dict
return
for child in children:
check_task_group(child)
check_task_group(serialized_dag.task_group)
def test_deps_sorted(self):
"""
Tests serialize_operator, make sure the deps is in order
"""
from airflow.operators.dummy import DummyOperator
from airflow.sensors.external_task import ExternalTaskSensor
execution_date = datetime(2020, 1, 1)
with DAG(dag_id="test_deps_sorted", start_date=execution_date) as dag:
task1 = ExternalTaskSensor(
task_id="task1",
external_dag_id="external_dag_id",
mode="reschedule",
)
task2 = DummyOperator(task_id="task2")
task1 >> task2
serialize_op = SerializedBaseOperator.serialize_operator(dag.task_dict["task1"])
deps = serialize_op["deps"]
assert deps == [
'airflow.ti_deps.deps.not_in_retry_period_dep.NotInRetryPeriodDep',
'airflow.ti_deps.deps.not_previously_skipped_dep.NotPreviouslySkippedDep',
'airflow.ti_deps.deps.prev_dagrun_dep.PrevDagrunDep',
'airflow.ti_deps.deps.ready_to_reschedule.ReadyToRescheduleDep',
'airflow.ti_deps.deps.trigger_rule_dep.TriggerRuleDep',
]
def test_task_group_sorted(self):
"""
Tests serialize_task_group, make sure the list is in order
"""
from airflow.operators.dummy import DummyOperator
from airflow.serialization.serialized_objects import SerializedTaskGroup
"""
start
╱ ╲
╱ ╲
task_group_up1 task_group_up2
(task_up1) (task_up2)
╲ ╱
task_group_middle
(task_middle)
╱ ╲
task_group_down1 task_group_down2
(task_down1) (task_down2)
╲ ╱
╲ ╱
end
"""
execution_date = datetime(2020, 1, 1)
with DAG(dag_id="test_task_group_sorted", start_date=execution_date) as dag:
start = DummyOperator(task_id="start")
with TaskGroup("task_group_up1") as task_group_up1:
_ = DummyOperator(task_id="task_up1")
with TaskGroup("task_group_up2") as task_group_up2:
_ = DummyOperator(task_id="task_up2")
with TaskGroup("task_group_middle") as task_group_middle:
_ = DummyOperator(task_id="task_middle")
with TaskGroup("task_group_down1") as task_group_down1:
_ = DummyOperator(task_id="task_down1")
with TaskGroup("task_group_down2") as task_group_down2:
_ = DummyOperator(task_id="task_down2")
end = DummyOperator(task_id='end')
start >> task_group_up1
start >> task_group_up2
task_group_up1 >> task_group_middle
task_group_up2 >> task_group_middle
task_group_middle >> task_group_down1
task_group_middle >> task_group_down2
task_group_down1 >> end
task_group_down2 >> end
task_group_middle_dict = SerializedTaskGroup.serialize_task_group(
dag.task_group.children["task_group_middle"]
)
upstream_group_ids = task_group_middle_dict["upstream_group_ids"]
assert upstream_group_ids == ['task_group_up1', 'task_group_up2']
upstream_task_ids = task_group_middle_dict["upstream_task_ids"]
assert upstream_task_ids == ['task_group_up1.task_up1', 'task_group_up2.task_up2']
downstream_group_ids = task_group_middle_dict["downstream_group_ids"]
assert downstream_group_ids == ['task_group_down1', 'task_group_down2']
task_group_down1_dict = SerializedTaskGroup.serialize_task_group(
dag.task_group.children["task_group_down1"]
)
downstream_task_ids = task_group_down1_dict["downstream_task_ids"]
assert downstream_task_ids == ['end']
def test_edge_info_serialization(self):
"""
Tests edge_info serialization/deserialization.
"""
from airflow.operators.dummy import DummyOperator
from airflow.utils.edgemodifier import Label
with DAG("test_edge_info_serialization", start_date=datetime(2020, 1, 1)) as dag:
task1 = DummyOperator(task_id="task1")
task2 = DummyOperator(task_id="task2")
task1 >> Label("test label") >> task2
dag_dict = SerializedDAG.to_dict(dag)
SerializedDAG.validate_schema(dag_dict)
json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(json_dag, dag)
serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
assert serialized_dag.edge_info == dag.edge_info
@pytest.mark.parametrize("mode", ["poke", "reschedule"])
def test_serialize_sensor(self, mode):
from airflow.sensors.base import BaseSensorOperator
class DummySensor(BaseSensorOperator):
def poke(self, context: Context):
return False
op = DummySensor(task_id='dummy', mode=mode, poke_interval=23)
blob = SerializedBaseOperator.serialize_operator(op)
assert "deps" in blob
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert op.deps == serialized_op.deps
@pytest.mark.parametrize(
"passed_success_callback, expected_value",
[
({"on_success_callback": lambda x: print("hi")}, True),
({}, False),
],
)
def test_dag_on_success_callback_roundtrip(self, passed_success_callback, expected_value):
"""
Test that when on_success_callback is passed to the DAG, has_on_success_callback is stored
in Serialized JSON blob. And when it is de-serialized dag.has_on_success_callback is set to True.
When the callback is not set, has_on_success_callback should not be stored in Serialized blob
and so default to False on de-serialization
"""
dag = DAG(dag_id='test_dag_on_success_callback_roundtrip', **passed_success_callback)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if expected_value:
assert "has_on_success_callback" in serialized_dag["dag"]
else:
assert "has_on_success_callback" not in serialized_dag["dag"]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
assert deserialized_dag.has_on_success_callback is expected_value
@pytest.mark.parametrize(
"passed_failure_callback, expected_value",
[
({"on_failure_callback": lambda x: print("hi")}, True),
({}, False),
],
)
def test_dag_on_failure_callback_roundtrip(self, passed_failure_callback, expected_value):
"""
Test that when on_failure_callback is passed to the DAG, has_on_failure_callback is stored
in Serialized JSON blob. And when it is de-serialized dag.has_on_failure_callback is set to True.
When the callback is not set, has_on_failure_callback should not be stored in Serialized blob
and so default to False on de-serialization
"""
dag = DAG(dag_id='test_dag_on_failure_callback_roundtrip', **passed_failure_callback)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if expected_value:
assert "has_on_failure_callback" in serialized_dag["dag"]
else:
assert "has_on_failure_callback" not in serialized_dag["dag"]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
assert deserialized_dag.has_on_failure_callback is expected_value
@pytest.mark.parametrize(
"object_to_serialized, expected_output",
[
(
['task_1', 'task_5', 'task_2', 'task_4'],
['task_1', 'task_5', 'task_2', 'task_4'],
),
(
{'task_1', 'task_5', 'task_2', 'task_4'},
['task_1', 'task_2', 'task_4', 'task_5'],
),
(
('task_1', 'task_5', 'task_2', 'task_4'),
['task_1', 'task_5', 'task_2', 'task_4'],
),
(
{
"staging_schema": [
{"key:": "foo", "value": "bar"},
{"key:": "this", "value": "that"},
"test_conf",
]
},
{
"staging_schema": [
{"__type": "dict", "__var": {"key:": "foo", "value": "bar"}},
{
"__type": "dict",
"__var": {"key:": "this", "value": "that"},
},
"test_conf",
]
},
),
(
{"task3": "test3", "task2": "test2", "task1": "test1"},
{"task1": "test1", "task2": "test2", "task3": "test3"},
),
(
('task_1', 'task_5', 'task_2', 3, ["x", "y"]),
['task_1', 'task_5', 'task_2', 3, ["x", "y"]],
),
],
)
def test_serialized_objects_are_sorted(self, object_to_serialized, expected_output):
"""Test Serialized Sets are sorted while list and tuple preserve order"""
serialized_obj = SerializedDAG._serialize(object_to_serialized)
if isinstance(serialized_obj, dict) and "__type" in serialized_obj:
serialized_obj = serialized_obj["__var"]
assert serialized_obj == expected_output
def test_params_upgrade(self):
"""when pre-2.2.0 param (i.e. primitive) is deserialized we convert to Param"""
serialized = {
"__version": 1,
"dag": {
"_dag_id": "simple_dag",
"fileloc": '/path/to/file.py',
"tasks": [],
"timezone": "UTC",
"params": {"none": None, "str": "str", "dict": {"a": "b"}},
},
}
dag = SerializedDAG.from_dict(serialized)
assert dag.params["none"] is None
assert isinstance(dag.params.get_param("none"), Param)
assert dag.params["str"] == "str"
def test_params_serialize_default_2_2_0(self):
"""In 2.0.0, param ``default`` was assumed to be json-serializable objects and were not run though
the standard serializer function. In 2.2.2 we serialize param ``default``. We keep this
test only to ensure that params stored in 2.2.0 can still be parsed correctly."""
serialized = {
"__version": 1,
"dag": {
"_dag_id": "simple_dag",
"fileloc": '/path/to/file.py',
"tasks": [],
"timezone": "UTC",
"params": {"str": {"__class": "airflow.models.param.Param", "default": "str"}},
},
}
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
assert isinstance(dag.params.get_param("str"), Param)
assert dag.params["str"] == "str"
def test_params_serialize_default(self):
serialized = {
"__version": 1,
"dag": {
"_dag_id": "simple_dag",
"fileloc": '/path/to/file.py',
"tasks": [],
"timezone": "UTC",
"params": {
"my_param": {
"default": "a string value",
"description": "hello",
"schema": {"__var": {"type": "string"}, "__type": "dict"},
"__class": "airflow.models.param.Param",
}
},
},
}
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
assert dag.params["my_param"] == "a string value"
param = dag.params.get_param('my_param')
assert isinstance(param, Param)
assert param.description == 'hello'
assert param.schema == {'type': 'string'}
def test_kubernetes_optional():
"""Serialisation / deserialisation continues to work without kubernetes installed"""
def mock__import__(name, globals_=None, locals_=None, fromlist=(), level=0):
if level == 0 and name.partition('.')[0] == 'kubernetes':
raise ImportError("No module named 'kubernetes'")
return importlib.__import__(name, globals=globals_, locals=locals_, fromlist=fromlist, level=level)
with mock.patch('builtins.__import__', side_effect=mock__import__) as import_mock:
# load module from scratch, this does not replace any already imported
# airflow.serialization.serialized_objects module in sys.modules
spec = importlib.util.find_spec("airflow.serialization.serialized_objects")
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# if we got this far, the module did not try to load kubernetes, but
# did it try to access airflow.kubernetes.*?
imported_airflow = {
c.args[0].split('.', 2)[1] for c in import_mock.call_args_list if c.args[0].startswith("airflow.")
}
assert "kubernetes" not in imported_airflow
# pod loading is not supported when kubernetes is not available
pod_override = {
'__type': 'k8s.V1Pod',
'__var': PodGenerator.serialize_pod(executor_config_pod),
}
with pytest.raises(RuntimeError):
module.BaseSerialization.from_dict(pod_override)
# basic serialization should succeed
module.SerializedDAG.to_dict(make_simple_dag()["simple_dag"])
def test_mapped_operator_serde():
literal = [1, 2, {'a': 'b'}]
real_op = BashOperator.partial(task_id='a', executor_config={'dict': {'sub': 'value'}}).expand(
bash_command=literal
)
serialized = SerializedBaseOperator._serialize(real_op)
assert serialized == {
'_is_dummy': False,
'_is_mapped': True,
'_task_module': 'airflow.operators.bash',
'_task_type': 'BashOperator',
'downstream_task_ids': [],
'mapped_kwargs': {
'bash_command': [
1,
2,
{"__type": "dict", "__var": {'a': 'b'}},
]
},
'partial_kwargs': {
'executor_config': {
'__type': 'dict',
'__var': {
'dict': {"__type": "dict", "__var": {'sub': 'value'}},
},
},
},
'task_id': 'a',
'operator_extra_links': [],
'template_fields': ['bash_command', 'env'],
'template_ext': ['.sh', '.bash'],
'ui_color': '#f0ede4',
'ui_fgcolor': '#000',
}
op = SerializedBaseOperator.deserialize_operator(serialized)
assert isinstance(op, MappedOperator)
assert op.deps is MappedOperator.deps_for(BaseOperator)
assert op.operator_class == "airflow.operators.bash.BashOperator"
assert op.mapped_kwargs['bash_command'] == literal
assert op.partial_kwargs['executor_config'] == {'dict': {'sub': 'value'}}
def test_mapped_operator_xcomarg_serde():
from airflow.models.xcom_arg import XComArg
with DAG("test-dag", start_date=datetime(2020, 1, 1)) as dag:
task1 = BaseOperator(task_id="op1")
mapped = MockOperator.partial(task_id='task_2').expand(arg2=XComArg(task1))
serialized = SerializedBaseOperator._serialize(mapped)
assert serialized == {
'_is_dummy': False,
'_is_mapped': True,
'_task_module': 'tests.test_utils.mock_operators',
'_task_type': 'MockOperator',
'downstream_task_ids': [],
'mapped_kwargs': {'arg2': {'__type': 'xcomref', '__var': {'task_id': 'op1', 'key': 'return_value'}}},
'partial_kwargs': {},
'task_id': 'task_2',
'template_fields': ['arg1', 'arg2'],
'template_ext': [],
'operator_extra_links': [],
'ui_color': '#fff',
'ui_fgcolor': '#000',
}
op = SerializedBaseOperator.deserialize_operator(serialized)
assert op.deps is MappedOperator.deps_for(BaseOperator)
arg = op.mapped_kwargs['arg2']
assert arg.task_id == 'op1'
assert arg.key == XCOM_RETURN_KEY
serialized_dag: DAG = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
xcom_arg = serialized_dag.task_dict['task_2'].mapped_kwargs['arg2']
assert isinstance(xcom_arg, XComArg)
assert xcom_arg.operator is serialized_dag.task_dict['op1']
def test_task_resources_serde():
"""
Test task resources serialization/deserialization.
"""
from airflow.operators.dummy import DummyOperator
execution_date = datetime(2020, 1, 1)
task_id = 'task1'
with DAG("test_task_resources", start_date=execution_date) as _:
task = DummyOperator(task_id=task_id, resources={"cpus": 0.1, "ram": 2048})
serialized = SerializedBaseOperator._serialize(task)
assert serialized['resources'] == {
"cpus": {"name": "CPU", "qty": 0.1, "units_str": "core(s)"},
"disk": {"name": "Disk", "qty": 512, "units_str": "MB"},
"gpus": {"name": "GPU", "qty": 0, "units_str": "gpu(s)"},
"ram": {"name": "RAM", "qty": 2048, "units_str": "MB"},
}
def test_mapped_decorator_serde():
from airflow.decorators import task
from airflow.models.xcom_arg import XComArg
from airflow.serialization.serialized_objects import _XComRef
with DAG("test-dag", start_date=datetime(2020, 1, 1)) as dag:
op1 = BaseOperator(task_id="op1")
@task(retry_delay=30)
def x(arg1, arg2, arg3):
print(arg1, arg2, arg3)
x.partial(arg1=[1, 2, {"a": "b"}]).expand(arg2={"a": 1, "b": 2}, arg3=XComArg(op1))
original = dag.get_task("x")
serialized = SerializedBaseOperator._serialize(original)
assert serialized == {
'_is_dummy': False,
'_is_mapped': True,
'_task_module': 'airflow.decorators.python',
'_task_type': '_PythonDecoratedOperator',
'downstream_task_ids': [],
'partial_kwargs': {
'op_args': [],
'op_kwargs': {'arg1': [1, 2, {"__type": "dict", "__var": {'a': 'b'}}]},
'retry_delay': {'__type': 'timedelta', '__var': 30.0},
},
'mapped_kwargs': {},
'mapped_op_kwargs': {
'arg2': {"__type": "dict", "__var": {'a': 1, 'b': 2}},
'arg3': {'__type': 'xcomref', '__var': {'task_id': 'op1', 'key': 'return_value'}},
},
'operator_extra_links': [],
'ui_color': '#ffefeb',
'ui_fgcolor': '#000',
'task_id': 'x',
'template_ext': [],
'template_fields': ['op_args', 'op_kwargs'],
}
deserialized = SerializedBaseOperator.deserialize_operator(serialized)
assert isinstance(deserialized, MappedOperator)
assert deserialized.deps is MappedOperator.deps_for(BaseOperator)
assert deserialized.upstream_task_ids == set()
assert deserialized.downstream_task_ids == set()
assert deserialized.mapped_op_kwargs == {
"arg2": {"a": 1, "b": 2},
"arg3": _XComRef("op1", XCOM_RETURN_KEY),
}
assert deserialized.partial_kwargs == {
"op_args": [],
"op_kwargs": {"arg1": [1, 2, {"a": "b"}]},
"retry_delay": timedelta(seconds=30),
}
def test_mapped_task_group_serde():
execution_date = datetime(2020, 1, 1)
literal = [1, 2, {'a': 'b'}]
with DAG("test", start_date=execution_date) as dag:
with TaskGroup("process_one", dag=dag).expand(literal) as process_one:
BaseOperator(task_id='one')
serialized = SerializedTaskGroup.serialize_task_group(process_one)
assert serialized == {
'_group_id': 'process_one',
'children': {'process_one.one': ('operator', 'process_one.one')},
'downstream_group_ids': [],
'downstream_task_ids': [],
'prefix_group_id': True,
'tooltip': '',
'ui_color': 'CornflowerBlue',
'ui_fgcolor': '#000',
'upstream_group_ids': [],
'upstream_task_ids': [],
'mapped_arg': [
1,
2,
{"__type": "dict", "__var": {'a': 'b'}},
],
}
with DAG("test", start_date=execution_date):
SerializedTaskGroup.deserialize_task_group(serialized, None, dag.task_dict)
|
resubmit.py
|
import threading
import time
from automation.ena.ena_brokering import ENASubmission
__author__ = 'Ahmed G. Ali'
experiments = '''ahmed_a7a E-MTAB-3851'''
# MAGE_8337 E-MTAB-94848373'''
def reload_experiment(dir_name, accession):
# conan = ConanPage(url=settings.CONAN_URL)
# conan.login(login_email=settings.CONAN_LOGIN_EMAIL)
# conan.unload_experiment(accession)
# job_status = retrieve_job_status(accession)
# while job_status != 'COMPLETED':
# if job_status == 'FAILED':
# break
# raise Exception('%s Unload Failed' % accession)
# time.sleep(30)
# job_status = retrieve_job_status(accession)
ena = ENASubmission(exp_dir=dir_name, accession=accession, skip_validation=True, new_alias='_resub', replace_idf=True)
ena.submit_experiment()
# conan.load_experiment(accession)
# job_status = retrieve_job_status(accession)
# while job_status != 'COMPLETED':
# if job_status == 'FAILED':
# raise Exception('Loading Failed')
# time.sleep(30)
# job_status = retrieve_job_status(accession)
def main():
threads = []
for line in experiments.split('\n'):
folder, acc = line.strip().split(' ')
t = threading.Thread(target=reload_experiment, args=(folder, acc))
threads.append(t)
t.daemon = False
running = []
while True:
while len(running) <= 3 and threads:
t = threads.pop()
t.start()
running.append(t)
time.sleep(5)
if not running:
break
for t in running:
if not t.is_alive():
                print('removing thread')
running.remove(t)
break
        print('Running Threads: ', len(running))
        print('pending Threads: ', len(threads))
time.sleep(30)
if __name__ == '__main__':
main()
|
manager.py
|
#!/usr/bin/env python3
import datetime
import importlib
import os
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import textwrap
import time
import traceback
from multiprocessing import Process
from typing import Dict, List
from common.basedir import BASEDIR
from common.spinner import Spinner
from common.text_window import TextWindow
from selfdrive.hardware import HARDWARE, EON, PC
from selfdrive.swaglog import cloudlog, add_logentries_handler
os.environ['BASEDIR'] = BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
from common.op_params import opParams
from common.travis_checker import travis
op_params = opParams()
traffic_lights = op_params.get('traffic_lights')
TOTAL_SCONS_NODES = 1040
WEBCAM = os.getenv("WEBCAM") is not None
PREBUILT = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL, fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
  # whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
from common.spinner import Spinner
from common.text_window import TextWindow
if not (os.system("python3 -m pip list | grep 'scipy' ") == 0):
os.system("cd /data/openpilot/installer/scipy_installer/ && ./scipy_installer")
# Run scons
spinner = Spinner(noop=(__name__ != "__main__"))
spinner.update("0")
def build():
for retry in [True, False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else f"-j{nproc - 1}"
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline() # type: ignore
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n') # type: ignore
compile_output += r
if retry:
if not os.getenv("CI"):
print("scons build failed, cleaning in")
for i in range(3, -1, -1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache", ignore_errors=True)
shutil.rmtree("/data/scons_cache", ignore_errors=True)
else:
print("scons build failed after retry")
process = subprocess.check_output(['git', 'pull'])
os.system('reboot')
sys.exit(1)
else:
# Build failed log errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
no_ui = __name__ != "__main__"
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("openpilot failed to build\n \n" + error_s, noop=no_ui) as t:
t.wait_for_exit()
process = subprocess.check_output(['git', 'pull'])
os.system('reboot')
exit(1)
else:
break
if __name__ == "__main__" and not PREBUILT:
build()
import cereal
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from selfdrive.hardware.eon.apk import update_apks, pm_apply_packages, start_offroad
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
"trafficd": ("selfdrive/trafficd", ["./trafficd"]),
"traffic_manager": "selfdrive.trafficd.traffic_manager",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.monitoring.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": "selfdrive.locationd.paramsd",
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"mapd": ("selfdrive/mapd", ["./mapd.py"]),
"rtshield": "selfdrive.rtshield",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes: List[str] = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord']
persistent_processes = [
'thermald',
'logmessaged',
'ui',
'uploader',
'deleter',
]
if not PC:
persistent_processes += [
'updated',
'logcatd',
'tombstoned',
'sensord',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'calibrationd',
'paramsd',
'camerad',
'modeld',
'proclogd',
'ubloxd',
'mapd',
'locationd',
'clocksd',
]
driver_view_processes = [
'camerad',
'dmonitoringd',
'dmonitoringmodeld'
]
if traffic_lights:
car_started_processes += [
'trafficd',
'traffic_manager',
]
if not PC or WEBCAM:
car_started_processes += [
'ubloxd',
'dmonitoringd',
'dmonitoringmodeld',
]
if EON:
car_started_processes += [
'gpsd',
'rtshield',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
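def _example_register_custom_process():
  # Illustrative sketch only, not part of the original manager and never called:
  # a fork could register a hypothetical python process "selfdrive.mylogger" that
  # should only run while the car is started. String entries in managed_processes
  # are launched through selfdrive.launcher; (directory, args) tuple entries run
  # as native binaries via nativelauncher below.
  register_managed_process("mylogger", "selfdrive.mylogger", car_started=True)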
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p, build=False):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "SConscript")) and build:
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["scons", "u", "-j4", "."], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# clean and retry if the build failed
cloudlog.warning("building %s failed, cleaning and retrying" % (proc, ))
subprocess.check_call(["scons", "-u", "-c", "."], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["scons", "-u", "-j4", "."], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
join_process(running[name], 5)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("unkillable process %s failed to die!" % name)
os.system("date >> /data/unkillable_reboot")
os.sync()
HARDWARE.reboot()
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if EON:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
def send_managed_process_signal(name, sig):
if name not in running or name not in managed_processes or \
running[name].exitcode is not None:
return
cloudlog.info(f"sending signal {sig} to {name}")
os.kill(running[name].pid, sig)
# ****************** run loop ******************
def manager_init():
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
# set dongle id
reg_res = register()
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
os.environ['DONGLE_ID'] = dongle_id
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if EON:
os.chmod(BASEDIR, 0o755)
os.chmod("/dev/shm", 0o777)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
shutdownd = Process(name="shutdorwnd",target=launcher,args=("selfdrive.shutdownd",))
shutdownd.start()
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
params = Params()
EnableLogger = int(params.get('OpkrEnableLogger'))
if not EnableLogger:
car_started_processes.remove( 'loggerd' )
persistent_processes.remove( 'logmessaged' )
persistent_processes.remove( 'uploader' )
persistent_processes.remove( 'logcatd' )
persistent_processes.remove( 'updated' )
persistent_processes.remove( 'deleter' )
persistent_processes.remove( 'tombstoned' )
else:
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if EON:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
started_prev = False
logger_dead = False
thermal_sock = messaging.sub_sock('thermal')
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
driver_view = params.get("IsDriverViewEnabled") == b"1"
# TODO: refactor how manager manages processes
for p in reversed(car_started_processes):
if p not in driver_view_processes or not driver_view:
kill_managed_process(p)
for p in driver_view_processes:
if driver_view:
start_managed_process(p)
else:
kill_managed_process(p)
# trigger an update after going offroad
if started_prev:
os.sync()
send_managed_process_signal("updated", signal.SIGHUP)
started_prev = msg.thermal.started
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare(spinner=None):
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
process_cnt = len(managed_processes)
loader_proc = []
params = Params()
spinner_text = "dashcam" if params.get("Passive")=="1" else "프로세스"
for n,p in enumerate(managed_processes):
if os.getenv("PREPAREONLY") is None:
loader_proc.append(subprocess.Popen(["./spinner",
"{0} 로딩: {1}/{2} {3}".format(spinner_text, n+1, process_cnt, p)],
cwd=os.path.join(BASEDIR, "selfdrive", "ui", "spinner"),
close_fds=True))
prepare_managed_process(p)
# end subprocesses here to stop screen flickering
[loader_proc[pc].terminate() for pc in range(process_cnt) if loader_proc]
def main():
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "0"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "1"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("IsGeofenceEnabled", "-1"),
("LimitSetSpeed", "0"),
("LimitSetSpeedNeural", "0"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("LaneChangeEnabled", "1"),
("IsDriverViewEnabled", "0"),
("IsOpenpilotViewEnabled", "0"),
("OpkrAutoShutdown", "2"),
("OpkrAutoScreenOff", "0"),
("OpkrUIBrightness", "0"),
("OpkrEnableDriverMonitoring", "1"),
("OpkrEnableLogger", "0"),
("OpkrEnableGetoffAlert", "1"),
("OpkrAutoResume", "1"),
("OpkrVariableCruise", "0"),
("OpkrLaneChangeSpeed", "60"),
("OpkrAutoLaneChangeDelay", "0"),
("OpkrSteerAngleCorrection", "0"),
("PutPrebuiltOn", "0"),
("FingerprintIssuedFix", "0"),
("LdwsCarFix", "0"),
("LateralControlMethod", "2"),
("CruiseStatemodeSelInit", "1"),
("InnerLoopGain", "30"),
("OuterLoopGain", "20"),
("TimeConstant", "10"),
("ActuatorEffectiveness", "15"),
("Scale", "1750"),
("LqrKi", "10"),
("DcGain", "30"),
("IgnoreZone", "0"),
("PidKp", "20"),
("PidKi", "40"),
("PidKf", "5"),
("CameraOffsetAdj", "60"),
("SteerRatioAdj", "140"),
("SteerActuatorDelayAdj", "35"),
("SteerRateCostAdj", "45"),
("SteerLimitTimerAdj", "40"),
("TireStiffnessFactorAdj", "85"),
("SteerMaxAdj", "380"),
("SteerMaxBaseAdj", "275"),
("SteerDeltaUpAdj", "3"),
("SteerDeltaDownAdj", "7"),
("SteerMaxvAdj", "10"),
("OpkrBatteryChargingControl", "1"),
("OpkrBatteryChargingMin", "70"),
("OpkrBatteryChargingMax", "80"),
("OpkrUiOpen", "0"),
("OpkrDriveOpen", "0"),
("OpkrTuneOpen", "0"),
("OpkrControlOpen", "0"),
("LeftCurvOffsetAdj", "0"),
("RightCurvOffsetAdj", "0"),
("DebugUi1", "0"),
("DebugUi2", "0"),
("OpkrBlindSpotDetect", "1"),
("OpkrMaxAngleLimit", "90"),
("OpkrAutoResumeOption", "1"),
("OpkrAngleOffsetSelect", "0"),
("OpkrSpeedLimitOffset", "0"),
("LimitSetSpeedCamera", "0"),
("OpkrLiveSteerRatio", "0"),
("OpkrVariableSteerMax", "1"),
("OpkrVariableSteerDelta", "0"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this dashcam?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if EON:
update_apks()
manager_init()
manager_prepare(spinner)
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
cloudlog.warning("uninstalling")
HARDWARE.uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n\n" + error
with TextWindow(error) as t:
t.wait_for_exit()
process = subprocess.check_output(['git', 'pull'])
os.system('reboot')
raise
# manual exit because we are forked
sys.exit(0)
|
dewertokin.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# Created By : https://github.com/mishnz
# Created Date: 14/01/2022
# version ='1.0'
# ---------------------------------------------------------------------------
""" DewertOkin HE150 controller module for mqtt-bed
https://github.com/karl0ss/mqtt-bed
I recently purchased a "Napp" https://napp.co.nz/ bed and mattress.
On arrival, the base is an "A.H. Beard" https://ahbeard.com/ base.
Digging into the internals the Bluetooth chips identify themselves as "Okin"
branded.
The controller unit as a whole is branded as a DewertOkin HE150
https://dewertokin.hu/termek/he-150/
The DewertOkin Android application for this is "Comfort Enhancement 2"
aka "Comfort Plus"
https://play.google.com/store/apps/details?id=com.dewertokin.comfortplus
Using this application I intercepted the Bluetooth codes.
I moved from the deprecated pygatt to bluepy due to connectivity issues.
The Bluetooth connection string for this bed uses "random" instead of
"public" like the other beds.
This module ended up being bigger than expected as the HE150 disconnects on a
lack of connectivity and on other unknown conditions.
The additional code manages a keepalive/heartbeat thread.
This module is more verbose than the others to aid in debugging.
Note: This module will work with some other "Okin"/"DewertOkin" models.
"""
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import bluepy.btle as ble
import time
import threading
class dewertokinBLEController:
def __init__(self, addr):
self.charWriteInProgress = False
self.addr = addr
self.commands = {
"Flat Preset": "040210000000",
"ZeroG Preset": "040200004000",
"TV Position": "040200003000",
"Quiet Sleep": "040200008000",
"Memory 1": "040200001000",
"Memory 2": "040200002000",
"Underlight": "040200020000",
"Lift Head": "040200000001",
"Lower Head": "040200000002",
"Lift Foot": "040200000004",
"Lower Foot": "040200000008",
# Note: Wave cycles "On High", "On Medium", "On Low", "Off"
"Wave Massage Cycle": "040280000000",
            # Note: Head and Foot cycles "On Low", "On Medium", "On High", "Off"
"Head Massage Cycle": "040200000800",
"Foot Massage Cycle": "040200400000",
"Massage Off": "040202000000",
"Keepalive NOOP": "040200000000",
}
# Initialise the adapter and connect to the bed before we start waiting for messages.
self.connectBed(ble)
# Start the background polling/keepalive/heartbeat function.
thread = threading.Thread(target=self.bluetoothPoller, args=())
thread.daemon = True
thread.start()
# There seem to be a lot of conditions that cause the bed to disconnect Bluetooth.
# Here we use the value of 040200000000, which seems to be a noop.
# This lets us poll the bed, detect a disconnection and reconnect before the user notices.
def bluetoothPoller(self):
while True:
if self.charWriteInProgress is False:
try:
cmd = self.commands.get("Keepalive NOOP", None)
self.device.writeCharacteristic(0x0013, bytes.fromhex(cmd), withResponse=True)
print("Keepalive success!")
except:
print("Keepalive failed! (1/2)")
try:
# We perform a second keepalive check 0.5 seconds later before reconnecting.
time.sleep(0.5)
cmd = self.commands.get("Keepalive NOOP", None)
self.device.writeCharacteristic(0x0013, bytes.fromhex(cmd), withResponse=True)
print("Keepalive success!")
except:
# If both keepalives failed, we reconnect.
print("Keepalive failed! (2/2)")
self.connectBed(ble)
else:
# To minimise any chance of contention, we don't heartbeat if a charWrite is in progress.
print("charWrite in progress, heartbeat skipped.")
time.sleep(10)
# Separate out the bed connection to an infinite loop that can be called on init (or a communications failure).
def connectBed(self, ble):
while True:
try:
print("Attempting to connect to bed.")
self.device = ble.Peripheral(deviceAddr=self.addr, addrType='random')
print("Connected to bed.")
return
except:
pass
print("Error connecting to bed, retrying in one second.")
time.sleep(1)
# Separate out the command handling.
def sendCommand(self,name):
cmd = self.commands.get(name, None)
if cmd is None:
# print, but otherwise ignore Unknown Commands.
print("Unknown Command, ignoring.")
return
self.charWriteInProgress = True
try:
self.charWrite(cmd)
except:
print("Error sending command, attempting reconnect.")
start = time.time()
self.connectBed(ble)
end = time.time()
if ((end - start) < 5):
try:
                    self.charWrite(cmd)
except:
print("Command failed to transmit despite second attempt, dropping command.")
else:
print("Bluetooth reconnect took more than five seconds, dropping command.")
self.charWriteInProgress = False
# Separate charWrite function.
def charWrite(self, cmd):
print("Attempting to transmit command.")
self.device.writeCharacteristic(0x0013, bytes.fromhex(cmd), withResponse=True)
print("Command sent successfully.")
return
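# Illustrative usage sketch, not part of the original module and never called. The BLE
# MAC address below is a placeholder (it normally comes from the mqtt-bed configuration);
# the sketch only shows the intended flow: constructing the controller connects to the
# bed and starts the keepalive thread, after which any key from self.commands can be
# sent by name.
def _example_usage():
    bed = dewertokinBLEController("00:11:22:33:44:55")  # placeholder address
    bed.sendCommand("Flat Preset")  # any key defined in self.commands
    time.sleep(1)
    bed.sendCommand("Underlight")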
|
bbox_regression.py
|
"""
This file contains functions for generating bounding box regression targets
"""
from __future__ import print_function
import numpy as np
import cv2
import threading
import multiprocessing as mp
from six.moves import queue
from bbox_transform import bbox_overlaps, nonlinear_transform
from rcnn.config import config
bbox_transform = nonlinear_transform
def compute_bbox_regression_targets(rois, overlaps, labels):
"""
given rois, overlaps, gt labels, compute bounding box regression targets
:param rois: roidb[i]['boxes'] k * 4
:param overlaps: roidb[i]['max_overlaps'] k * 1
:param labels: roidb[i]['max_classes'] k * 1
:return: targets[i][class, dx, dy, dw, dh] k * 5
"""
# Ensure ROIs are floats
    rois = rois.astype(np.float64, copy=False)  # np.float alias was removed in newer NumPy
# Sanity check
assert len(rois) == len(overlaps), '#rois != #max_overlaps'
# Indices of ground-truth ROIs
gt_inds = np.where(overlaps == 1)[0]
assert len(gt_inds) > 0, 'zero ground truth rois'
# Indices of examples for which we try to make predictions
ex_inds = np.where(overlaps >= config.TRAIN.BBOX_REGRESSION_THRESH)[0]
# Get IoU overlap between each ex ROI and gt ROI
ex_gt_overlaps = bbox_overlaps(rois[ex_inds, :].astype(np.float32, copy=False),
rois[gt_inds, :].astype(np.float32, copy=False))
# Find which gt ROI each ex ROI has max overlap with:
# this will be the ex ROI's gt target
gt_assignment = ex_gt_overlaps.argmax(axis=1)
gt_rois = rois[gt_inds[gt_assignment], :]
ex_rois = rois[ex_inds, :]
targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
targets[ex_inds, 0] = labels[ex_inds]
targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
return targets
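def _example_bbox_regression_targets():
    """Illustrative sketch only, not part of the original file and never called.
    Shows the [class, dx, dy, dw, dh] layout documented above on a toy example:
    one ground-truth box (overlap 1.0) and one nearby proposal.
    """
    rois = np.array([[10, 10, 50, 50],    # ground-truth box
                     [12, 14, 48, 52]],   # proposal near the ground truth
                    dtype=np.float32)
    overlaps = np.array([1.0, 0.8])
    labels = np.array([3, 3])             # both assigned to class 3
    targets = compute_bbox_regression_targets(rois, overlaps, labels)
    # targets has one row per roi; column 0 is the class, columns 1:5 hold the
    # nonlinear_transform deltas towards the assigned ground-truth box.
    assert targets.shape == (2, 5)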
def add_bbox_regression_targets(roidb):
"""
given roidb, add ['bbox_targets'] and normalize bounding box regression targets
:param roidb: roidb to be processed. must have gone through imdb.prepare_roidb
:return: means, std variances of targets
"""
print('add bounding box regression targets')
assert len(roidb) > 0
assert 'max_classes' in roidb[0]
num_images = len(roidb)
num_classes = config.NUM_CLASSES
for im_i in range(num_images):
rois = roidb[im_i]['boxes']
max_overlaps = roidb[im_i]['max_overlaps']
max_classes = roidb[im_i]['max_classes']
roidb[im_i]['bbox_targets'] = compute_bbox_regression_targets(rois, max_overlaps, max_classes)
if config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
# use fixed / precomputed means and stds instead of empirical values
means = np.tile(np.array(config.TRAIN.BBOX_MEANS), (num_classes, 1))
stds = np.tile(np.array(config.TRAIN.BBOX_STDS), (num_classes, 1))
else:
# compute mean, std values
class_counts = np.zeros((num_classes, 1)) + 1e-14
sums = np.zeros((num_classes, 4))
squared_sums = np.zeros((num_classes, 4))
for im_i in range(num_images):
targets = roidb[im_i]['bbox_targets']
for cls in range(1, num_classes):
cls_indexes = np.where(targets[:, 0] == cls)[0]
if cls_indexes.size > 0:
class_counts[cls] += cls_indexes.size
sums[cls, :] += targets[cls_indexes, 1:].sum(axis=0)
squared_sums[cls, :] += (targets[cls_indexes, 1:] ** 2).sum(axis=0)
means = sums / class_counts
# var(x) = E(x^2) - E(x)^2
stds = np.sqrt(squared_sums / class_counts - means ** 2)
# normalized targets
for im_i in range(num_images):
targets = roidb[im_i]['bbox_targets']
for cls in range(1, num_classes):
cls_indexes = np.where(targets[:, 0] == cls)[0]
roidb[im_i]['bbox_targets'][cls_indexes, 1:] -= means[cls, :]
roidb[im_i]['bbox_targets'][cls_indexes, 1:] /= stds[cls, :]
return means.ravel(), stds.ravel()
def compute_mask_and_label(rois, instances, labels, ins_seg, flipped):
if isinstance(ins_seg, str):
ins_seg = cv2.imread(ins_seg, -1)
if flipped:
ins_seg = ins_seg[:, ::-1]
n_rois = rois.shape[0]
class_id = config.CLASS_ID
mask_target = np.zeros((n_rois, 28, 28), dtype=np.int8)
mask_label = np.zeros((n_rois, ), dtype=np.int8)
# YuntaoChen: hack for double resize overflow
rois[:, 0][rois[:, 0] < 0] = 0
for n in range(n_rois):
target = ins_seg[int(rois[n, 1]): int(rois[n, 3]), int(rois[n, 0]): int(rois[n, 2])]
ins_id = config.SEG_CODE * class_id[labels[n]] + instances[n]
# YuntaoChen: hack for roi less than 1px
if 0 not in target.shape:
mask = np.zeros(target.shape)
else:
mask = np.zeros((1, 1))
idx = np.where(target == ins_id)
mask[idx] = 1
mask = cv2.resize(mask, (28, 28), interpolation=cv2.INTER_NEAREST)
mask_target[n] = mask
mask_label[n] = labels[n]
return mask_target, mask_label
def compute_mask_and_label_fcn(rois, instances, labels, ins_seg, flipped):
ins_seg_lvl = []
if isinstance(ins_seg, str):
ins_seg = cv2.imread(ins_seg, -1)
ins_seg_lvl.append(ins_seg)
ins_seg_lvl.append(cv2.resize(ins_seg, dsize=None, fx=1.0/2, fy=1.0/2, interpolation=cv2.INTER_NEAREST))
ins_seg_lvl.append(cv2.resize(ins_seg, dsize=None, fx=1.0/4, fy=1.0/4, interpolation=cv2.INTER_NEAREST))
ins_seg_lvl.append(cv2.resize(ins_seg, dsize=None, fx=1.0/8, fy=1.0/8, interpolation=cv2.INTER_NEAREST))
if flipped:
for ins_seg in ins_seg_lvl:
ins_seg[...] = ins_seg[:, ::-1]
n_rois = rois.shape[0]
class_id = config.CLASS_ID
mask_target = np.zeros((n_rois, 112, 112), dtype=np.int8)
mask_label = np.zeros((n_rois, ), dtype=np.int8)
for n in range(n_rois):
x1, y1, x2, y2 = rois[n]
long_side = max(x2 - x1, y2 - y1)
if long_side <= 112:
ins_seg = ins_seg_lvl[0]
elif long_side <= 224:
ins_seg = ins_seg_lvl[1]
x1, y1, x2, y2 = x1/2, y1/2, x2/2, y2/2
elif long_side <= 448:
ins_seg = ins_seg_lvl[2]
x1, y1, x2, y2 = x1/4, y1/4, x2/4, y2/4
elif long_side <= 896:
ins_seg = ins_seg_lvl[3]
x1, y1, x2, y2 = x1/8, y1/8, x2/8, y2/8
else:
# do not handle very large instance for now
ins_seg = ins_seg_lvl[0]
x1, y1, x2, y2 = 0, 0, 0, 0
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
target = ins_seg[y1:y2, x1:x2]
new_ins_id = config.SEG_CODE * class_id[labels[n]] + instances[n]
mask = np.full(fill_value=-1, shape=(112, 112), dtype=np.int8)
mask[0:(y2-y1), 0:(x2-x1)] = 0
idx = np.where(target == new_ins_id)
mask[idx] = 1
mask_target[n] = mask
mask_label[n] = labels[n]
return mask_target, mask_label
def compute_bbox_mask_targets_and_label(rois, instances, overlaps, labels, seg, flipped, for_maskfcn):
"""
given rois, overlaps, gt labels, seg, compute bounding box mask targets
:param rois: roidb[i]['boxes'] k * 4
:param overlaps: roidb[i]['max_overlaps'] k * 1
:param labels: roidb[i]['max_classes'] k * 1
    :return: mask_targets, mask_label, fg_indexes for foreground rois
"""
# Ensure ROIs are floats
rois = rois.astype(np.float32, copy=False)
# Sanity check
assert len(rois) == len(overlaps), 'number of proposal ROIs and max overlap with gt bbox does not match'
fg_indexes = np.where(overlaps >= config.TRAIN.BBOX_REGRESSION_THRESH)[0]
fg_rois = rois[fg_indexes, :]
if for_maskfcn:
mask_targets, mask_label = \
compute_mask_and_label_fcn(fg_rois, instances[fg_indexes], labels[fg_indexes], seg, flipped)
else:
mask_targets, mask_label = \
compute_mask_and_label(fg_rois, instances[fg_indexes], labels[fg_indexes], seg, flipped)
return mask_targets, mask_label, fg_indexes
def add_mask_targets(roidb, for_maskfcn=False):
"""
    given roidb, add ['mask_targets'], ['mask_labels'] and ['mask_inds'] for foreground rois
    :param roidb: roidb to be processed. must have gone through imdb.prepare_roidb
    :param for_maskfcn: generate 112x112 masks for maskfcn instead of 28x28 masks for maskrcnn
    :return: None, roidb is modified in place
"""
assert len(roidb) > 0
assert 'max_classes' in roidb[0]
if for_maskfcn:
print('add bounding box mask targets for maskfcn')
else:
print('add bounding box mask targets for maskrcnn')
num_images = len(roidb)
# Multi threads processing
im_quene = queue.Queue(maxsize=0)
for im_i in range(num_images):
im_quene.put(im_i)
def process():
while not im_quene.empty():
im_i = im_quene.get()
if im_i > 0 and im_i % 500 == 0:
print("-----process img {}".format(im_i))
rois = roidb[im_i]['boxes']
max_instances = roidb[im_i]['ins_id']
max_overlaps = roidb[im_i]['max_overlaps']
max_classes = roidb[im_i]['max_classes']
ins_seg = roidb[im_i]['ins_seg']
flipped = roidb[im_i]['flipped']
# gather masks for fore ground rois only
# masks are later reconstructed in sample_rois using
# mask_targets + mask_labels + mask_inds
roidb[im_i]['mask_targets'], roidb[im_i]['mask_labels'], roidb[im_i]['mask_inds'] = \
compute_bbox_mask_targets_and_label(rois, max_instances, max_overlaps, max_classes, ins_seg, flipped,
for_maskfcn)
threads = [threading.Thread(target=process, args=()) for _ in range(mp.cpu_count())]
for t in threads:
t.start()
for t in threads:
t.join()
def add_consistent_targets(roidb):
assert len(roidb) > 0
assert 'max_classes' in roidb[0]
num_images = len(roidb)
# Multi threads processing
im_quene = queue.Queue(maxsize=0)
for im_i in range(num_images):
im_quene.put(im_i)
def process():
while not im_quene.empty():
im_i = im_quene.get()
if im_i > 0 and im_i % 500 == 0:
print("-----process img {}".format(im_i))
rois = roidb[im_i]['boxes']
max_overlaps = roidb[im_i]['max_overlaps']
max_classes = roidb[im_i]['max_classes']
sem_seg = roidb[im_i]['sem_seg']
flipped = roidb[im_i]['flipped']
fg_inds = np.where(max_overlaps >= config.TRAIN.BBOX_REGRESSION_THRESH)[0]
roidb[im_i]['consist_inds'] = fg_inds
# gather masks for foreground rois only
# masks are later reconstructed in sample_rois using
# mask_targets + mask_labels + mask_inds
roidb[im_i]['consist_targets'], roidb[im_i]['consist_labels'], = \
compute_consist_mask_and_label_fcn(rois[fg_inds], max_classes[fg_inds], sem_seg, flipped)
threads = [threading.Thread(target=process, args=()) for _ in range(mp.cpu_count())]
for t in threads:
t.start()
for t in threads:
t.join()
def compute_consist_mask_and_label_fcn(rois, labels, sem_seg, flipped):
sem_seg_lvl = []
if isinstance(sem_seg, str):
sem_seg = cv2.imread(sem_seg, -1)
sem_seg_lvl.append(sem_seg)
sem_seg_lvl.append(cv2.resize(sem_seg, dsize=None, fx=1.0 / 2, fy=1.0 / 2, interpolation=cv2.INTER_NEAREST))
sem_seg_lvl.append(cv2.resize(sem_seg, dsize=None, fx=1.0 / 4, fy=1.0 / 4, interpolation=cv2.INTER_NEAREST))
sem_seg_lvl.append(cv2.resize(sem_seg, dsize=None, fx=1.0 / 8, fy=1.0 / 8, interpolation=cv2.INTER_NEAREST))
if flipped:
for sem_seg in sem_seg_lvl:
sem_seg[...] = sem_seg[:, ::-1]
n_rois = rois.shape[0]
class_id = config.CLASS_ID
mask_target = np.zeros((n_rois, 112, 112), dtype=np.int8)
mask_label = np.zeros((n_rois,), dtype=np.int8)
for n in range(n_rois):
x1, y1, x2, y2 = rois[n]
long_side = max(x2 - x1, y2 - y1)
if long_side <= 112:
sem_seg = sem_seg_lvl[0]
elif long_side <= 224:
sem_seg = sem_seg_lvl[1]
x1, y1, x2, y2 = x1 / 2, y1 / 2, x2 / 2, y2 / 2
elif long_side <= 448:
sem_seg = sem_seg_lvl[2]
x1, y1, x2, y2 = x1 / 4, y1 / 4, x2 / 4, y2 / 4
elif long_side <= 896:
sem_seg = sem_seg_lvl[3]
x1, y1, x2, y2 = x1 / 8, y1 / 8, x2 / 8, y2 / 8
else:
# do not handle very large instance for now
sem_seg = sem_seg_lvl[0]
x1, y1, x2, y2 = 0, 0, 0, 0
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
target = sem_seg[y1:y2, x1:x2]
cls_id = class_id[labels[n]]
mask = np.full(fill_value=-1, shape=(112, 112), dtype=np.int8)
mask[0:(y2 - y1), 0:(x2 - x1)] = 0
idx = np.where(target == cls_id)
mask[idx] = -1
mask_target[n] = mask
mask_label[n] = labels[n]
return mask_target, mask_label
def expand_bbox_regression_targets(bbox_targets_data, num_classes):
"""
expand bbox regression targets from [k, 5] to [k, 4 * num_classes]; only the ground-truth class gets non-zero targets
:param bbox_targets_data: [k, 5] rows of [class, dx, dy, dw, dh]
:param num_classes: number of classes
:return: bbox_targets [k, 4 * num_classes],
bbox_weights [k, 4 * num_classes]; only foreground boxes get non-zero bbox regression weights
"""
classes = bbox_targets_data[:, 0]
# (num_rois, 4 * num_classes)
bbox_targets = np.zeros((classes.size, 4 * num_classes), dtype=np.float32)
bbox_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
# find all indexes where class is not background
indexes = np.where(classes > 0)[0]
for index in indexes:
cls = classes[index]
start = int(4 * cls)
end = start + 4
bbox_targets[index, start:end] = bbox_targets_data[index, 1:]
# each roi has 4*num_classes box targets, but only the gt class will have regression target value
bbox_weights[index, start:end] = config.TRAIN.BBOX_WEIGHTS
return bbox_targets, bbox_weights
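# A small worked example for expand_bbox_regression_targets, assuming num_classes = 3 and
# config.TRAIN.BBOX_WEIGHTS = (1.0, 1.0, 1.0, 1.0):
#
#   bbox_targets_data = np.array([[1, 0.1, 0.2, 0.3, 0.4],    # foreground row of class 1
#                                 [0, 0.0, 0.0, 0.0, 0.0]])   # background row stays all zero
#   targets, weights = expand_bbox_regression_targets(bbox_targets_data, num_classes=3)
#   # targets.shape == (2, 12); only columns 4:8 of row 0 hold (0.1, 0.2, 0.3, 0.4),
#   # and only those columns of weights are set to BBOX_WEIGHTS.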
|
test_request_safety.py
|
import threading
import asyncio
import aiohttp_jinja2
from urllib import request
from aiohttp.test_utils import unittest_run_loop
from ddtrace.pin import Pin
from ddtrace.provider import DefaultContextProvider
from ddtrace.contrib.aiohttp.patch import patch, unpatch
from ddtrace.contrib.aiohttp.middlewares import trace_app
from .utils import TraceTestCase
from ... import assert_is_measured
class TestAiohttpSafety(TraceTestCase):
"""
Ensure that if the ``AsyncioTracer`` is not properly configured,
bad traces are produced but the ``Context`` object will not
leak memory.
"""
def enable_tracing(self):
# aiohttp TestCase with the wrong context provider
trace_app(self.app, self.tracer)
patch()
Pin.override(aiohttp_jinja2, tracer=self.tracer)
self.tracer.configure(context_provider=DefaultContextProvider())
def disable_tracing(self):
unpatch()
@unittest_run_loop
@asyncio.coroutine
def test_full_request(self):
# it should create a root span when there is a handler hit
# with the proper tags
request = yield from self.client.request("GET", "/template/")
assert 200 == request.status
yield from request.text()
# the trace is created
traces = self.tracer.writer.pop_traces()
assert 1 == len(traces)
assert 2 == len(traces[0])
request_span = traces[0][0]
template_span = traces[0][1]
# request
assert_is_measured(request_span)
assert "aiohttp-web" == request_span.service
assert "aiohttp.request" == request_span.name
assert "GET /template/" == request_span.resource
# template
assert "aiohttp-web" == template_span.service
assert "aiohttp.template" == template_span.name
assert "aiohttp.template" == template_span.resource
@unittest_run_loop
@asyncio.coroutine
def test_multiple_full_request(self):
NUMBER_REQUESTS = 10
responses = []
# it should produce a wrong trace, but the Context must
# be finished
def make_requests():
url = self.client.make_url("/delayed/")
response = request.urlopen(str(url)).read().decode("utf-8")
responses.append(response)
# blocking call executed in different threads
ctx = self.tracer.get_call_context()
threads = [threading.Thread(target=make_requests) for _ in range(NUMBER_REQUESTS)]
for t in threads:
t.start()
# yield back to the event loop until all requests are processed
while len(responses) < NUMBER_REQUESTS:
yield from asyncio.sleep(0.001)
for response in responses:
assert "Done" == response
for t in threads:
t.join()
# the trace is wrong but the Context is finished
spans = self.tracer.writer.pop()
assert NUMBER_REQUESTS == len(spans)
assert 0 == len(ctx._trace)
|
feature_computation.py
|
import networkx as nx
import pandas as pd
import numpy as np
import os
import json
import h5py
import multiprocessing
import pdb
from sklearn import preprocessing
# 0th Order Features
def feature_matrix_0(disease_row, fold, disease_analyze, coexp_sim_matrix, go_genes_normalize):
disease_prob = disease_score_matrix[fold, disease_row[disease_analyze],:]
feature_mat = np.zeros((len(gene_nodes),4),dtype='float32')
feature_mat[:,0] = disease_prob
for gene in range(len(disease_prob)):
coexp_score = coexp_sim_matrix[gene]
go_score = go_genes_normalize[gene]
disease_rwr = np.delete(disease_prob, gene)
coexp_score = np.delete(coexp_score,gene)
go_score = np.delete(go_score, gene)
feature_mat[gene,1] = max(disease_rwr*coexp_score)
feature_mat[gene,2] = max(disease_rwr*go_score)
feature_mat[gene,3] = max(disease_rwr*go_score*coexp_score)
return feature_mat
# Higher Order Features
def feature_matrix_1(disease_row, fold, disease_analyze, go_coexp, coexp_go, go_2nd_order, coexp_2nd_order):
disease_prob = disease_score_matrix[fold, disease_row[disease_analyze],:]
feature_mat = list()
for gene in range(len(disease_prob)):
go_coexp_score = go_coexp[gene]
coexp_go_score = coexp_go[gene]
go_2nd_score = go_2nd_order[gene]
coexp_2nd_score = coexp_2nd_order[gene]
go_coexp_score = np.delete(go_coexp_score, gene)
coexp_go_score = np.delete(coexp_go_score,gene)
go_2nd_score = np.delete(go_2nd_score, gene)
coexp_2nd_score = np.delete(coexp_2nd_score, gene)
disease_rwr = np.delete(disease_prob, gene)
feature_mat.append(np.array([max(disease_rwr*go_coexp_score),max(disease_rwr*coexp_go_score),max(disease_rwr*go_2nd_score),max(disease_rwr*coexp_2nd_score)],dtype='float32'))
feature_mat = np.array(feature_mat, dtype='float32')
return feature_mat
# Higher-order matrix products (2nd-order powers and GO/coexpression cross-products)
def power_matrix_features(coexp_sim_matrix, go_genes_normalize):
coexp_2nd_order = np.linalg.matrix_power(coexp_sim_matrix,2)
go_2nd_order = np.linalg.matrix_power(go_genes_normalize,2)
coexp_go = np.dot(coexp_sim_matrix, go_genes_normalize)
go_coexp = np.dot(go_genes_normalize, coexp_sim_matrix)
return (go_coexp, coexp_go, go_2nd_order, coexp_2nd_order)
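# A tiny sketch of what power_matrix_features returns, using 2x2 stand-in matrices (illustrative only):
#
#   A = np.array([[0., 1.], [1., 0.]], dtype='float32')  # stand-in for the coexpression similarity matrix
#   B = np.eye(2, dtype='float32')                       # stand-in for the normalized GO similarity matrix
#   go_coexp, coexp_go, go_2nd, coexp_2nd = power_matrix_features(A, B)
#   # coexp_2nd = A @ A (two-hop coexpression), go_2nd = B @ B,
#   # coexp_go = A @ B and go_coexp = B @ A mix the two similarity networks.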
def parallel_disease(disease_row, disease, fold, coexp_matrix, go_genes_normalize, go_coexp, coexp_go, go_2nd_order, coexp_2nd_order):
feature_mat = feature_matrix_0(disease_row, fold, disease, coexp_matrix, go_genes_normalize)
feature_mat_2 = feature_matrix_1(disease_row, fold, disease, go_coexp, coexp_go, go_2nd_order, coexp_2nd_order)
features = np.concatenate((feature_mat, feature_mat_2), axis=1)
df = pd.DataFrame(features)
df.to_csv(feat_directory + '/' + disease + '.txt', sep='\t', header=None)
DATA_ROOT = '../CM-for-disease-gene-association/data/'
with h5py.File(DATA_ROOT + 'random_walk_score.h5', 'r') as hf:
disease_score_matrix = hf.get('disease_probability')
disease_score_matrix = np.array(disease_score_matrix, dtype='float32')
coexp_sim_matrix = np.load(DATA_ROOT + 'Coexpression_score.npy').astype('float32')
go_sim_matrix = np.load(DATA_ROOT + 'GO_similarity.npy').astype('float32')
# Scale matrices to range [0, 1]
min_max_go_scaler = preprocessing.MinMaxScaler()
go_sim_scaled_matrix = min_max_go_scaler.fit_transform(go_sim_matrix)
min_max_co_scaler = preprocessing.MinMaxScaler()
coexp_sim_scaled_matrix = min_max_co_scaler.fit_transform(coexp_sim_matrix)
with open(DATA_ROOT + 'disgenet/train_disease_gene.dict','r') as f:
train_disease_gene_map = json.load(f)
with open(DATA_ROOT + 'disgenet/test_disease_gene.dict','r') as f:
test_disease_gene_map = json.load(f)
with open(DATA_ROOT + 'disgenet/gene_nodes.dict','r') as f:
gene_nodes = json.load(f)
with open(DATA_ROOT + 'disgenet/disease2id.dict','r') as f:
disease2id = json.load(f)
inv_gene_nodes = {v:n for n,v in gene_nodes.items()}
feature_directory = DATA_ROOT + 'multi-modal-features/'
nm_folds = 3
print('Computing Higher Order Coexpression and GO Matrix')
go_coexp, coexp_go, go_2nd_order, coexp_2nd_order = power_matrix_features(coexp_sim_scaled_matrix, go_sim_scaled_matrix)
for fold in range(nm_folds):
feat_directory = feature_directory + feat + '/fold'+ str(fold)
if len(os.listdir(feat_directory))<len(disease2id[str(fold)]):
print('Creating Features, type: {} fold {} '.format(feat, fold))
disease_row = disease2id[str(fold)]
processes = [multiprocessing.Process(target=parallel_disease, args=(disease_row, disease, fold, coexp_sim_scaled_matrix, go_sim_scaled_matrix, go_coexp, coexp_go, go_2nd_order, coexp_2nd_order)) for disease in disease_row]
for p in processes:
p.start()
for p in processes:
p.join()
|
datasets.py
|
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This dataset module supports various formats of datasets, including ImageNet, TFData,
MNIST, Cifar10/100, Manifest, MindRecord, and more. This module loads data with
high performance and parses data precisely. Some of the operations that are
provided to users to preprocess data include shuffle, batch, repeat, map, and zip.
"""
import atexit
import builtins
import glob
import json
import math
import os
import signal
import stat
import time
import uuid
import multiprocessing
from multiprocessing.pool import RUN
import queue
from enum import Enum
from functools import partial
from importlib import import_module
import sys
import threading
import copy
import weakref
import platform
import psutil
import numpy as np
from scipy.io import loadmat
from PIL import Image
import mindspore._c_dataengine as cde
from mindspore._c_expression import typing
from mindspore.common import Tensor
from mindspore import log as logger
from mindspore.parallel._ps_context import _is_role_pserver, _is_role_sched
from mindspore.parallel._utils import _get_device_num
from mindspore.dataset.engine.offload import GetOffloadModel, op_to_model
import mindspore.dataset.transforms.py_transforms as py_transforms
from . import samplers
from .iterators import DictIterator, TupleIterator, DummyIterator, check_iterator_cleanup, _set_iterator_cleanup, \
ITERATORS_LIST, _unset_iterator_cleanup
from .queue import _SharedQueue
from .validators import check_batch, check_shuffle, check_map, check_filter, check_repeat, check_skip, check_zip, \
check_rename, check_numpyslicesdataset, check_device_send, check_take, check_project, check_imagefolderdataset, \
check_mnist_cifar_dataset, check_manifestdataset, check_tfrecorddataset, check_vocdataset, check_cocodataset, \
check_celebadataset, check_minddataset, check_generatordataset, check_sync_wait, check_zip_dataset, \
check_add_column, check_textfiledataset, check_concat, check_random_dataset, check_split, \
check_bucket_batch_by_length, check_cluedataset, check_save, check_csvdataset, check_paddeddataset, \
check_tuple_iterator, check_dict_iterator, check_schema, check_to_device_send, check_flickr_dataset, \
check_sb_dataset, check_flowers102dataset, check_cityscapes_dataset, check_usps_dataset, check_div2k_dataset, \
check_sbu_dataset, check_qmnist_dataset, check_emnist_dataset, check_fake_image_dataset, check_places365_dataset, \
check_photo_tour_dataset, check_ag_news_dataset, check_dbpedia_dataset, check_lj_speech_dataset, \
check_yes_no_dataset
from ..core.config import get_callback_timeout, _init_device_info, get_enable_shared_mem, get_num_parallel_workers, \
get_prefetch_size, get_auto_offload
from ..core.datatypes import mstype_to_detype, mstypelist_to_detypelist
from ..core.validator_helpers import replace_none
from ..core.py_util_helpers import ExceptionHandler
from ..transforms.py_transforms_util import FuncWrapper
try:
context = import_module("mindspore.context")
except ModuleNotFoundError:
context = None
if platform.system().lower() == "darwin":
multiprocessing.set_start_method("fork")
class Shuffle(str, Enum):
GLOBAL: str = "global"
FILES: str = "files"
INFILE: str = "infile"
ShuffleToShuffleMode = {Shuffle.FILES: cde.ShuffleMode.FILES,
Shuffle.GLOBAL: cde.ShuffleMode.GLOBAL,
Shuffle.INFILE: cde.ShuffleMode.INFILE}
def get_offloadable_ops(operations):
"""
Check if operations are supported by offload hardware accelerator.
Args:
operations: list of operations.
Returns:
Dictionary with boolean key for each operation for offload support.
"""
is_offloadable = {}
if not isinstance(operations, list):
operations = [operations]
for op in operations:
name = op.__class__.__name__
if name in op_to_model:
is_offloadable[name] = True
else:
is_offloadable[name] = False
return is_offloadable
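# A small sketch of get_offloadable_ops (illustrative; which ops appear in op_to_model depends on the
# offload model registry, so the concrete booleans below are an assumption):
#
#   # ops = [c_vision.Decode(), c_vision.HWC2CHW()]
#   # get_offloadable_ops(ops)
#   # -> {'Decode': False, 'HWC2CHW': True}   # True only for ops registered in op_to_model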
def check_offload_map(operations, output_columns):
"""
Check if operations are supported by offload hardware accelerator. If not, see if list of operations can be split
into two: not offload supported and offload supported
Args:
operations: list of operations.
output_columns: list of names assigned to the columns outputted by the last operation.
Returns:
bool, indicates whether to use offload hardware accelerator.
bool, indicates whether list of map operations can be split.
list, first group of non-offload supported operations.
list, second group of offload supported operations.
"""
offloadable_ops = get_offloadable_ops(operations)
offload = True
can_split = False
offload_ops = []
non_offload_ops = []
invalid_ops = []
for op in offloadable_ops:
if offloadable_ops[op] is not True:
offload = False
invalid_ops.append(op)
if not offload:
logger.warning(("In map(), offload is set to True, but offload is not supported for the following "
"operation(s): {}").format(*invalid_ops))
if output_columns:
# Cannot split (currently), unsure which side of operations would alter the output columns
logger.warning("Since output_columns is specified, the list of operations cannot be split. "
"Unsure which operation(s) alter the columns. Setting offload to False.")
else:
# See if the map operator can be split and then offloaded
size = len(offloadable_ops)
idx = size
split_idx = size
op_names = list(offloadable_ops.keys())
for op_name in reversed(op_names):
if not offloadable_ops[op_name]:
# From reverse order, this op cannot be offloaded, therefore split here.
split_idx = idx
break
idx = idx - 1
if split_idx == size:
# The last op in the list cannot be offloaded, therefore nothing can be offloaded.
# Nothing to split.
logger.warning(("The last operation, {}, is not supported by offload, setting offload"
" to False").format(op_names[split_idx - 1]))
elif split_idx != 0:
# There is at least one offloadable op at the end of the list.
# Split map() after the last non-offloadable op and only offload the second list of operations.
can_split = True
non_offload_ops = operations[:split_idx]
offload_ops = operations[split_idx:]
logger.warning(("The list of operations in map() can be split into two: {}, {}\n"
"The second list of operations will be run with offload=True"
).format(op_names[:split_idx], op_names[split_idx:]))
return offload, can_split, non_offload_ops, offload_ops
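# A sketch of how check_offload_map splits a map() pipeline (illustrative; it assumes RandomCrop is not
# offloadable while Normalize and HWC2CHW are, which again depends on op_to_model):
#
#   # ops = [c_vision.RandomCrop(32), c_vision.Normalize(mean, std), c_vision.HWC2CHW()]
#   # offload, can_split, non_offload_ops, offload_ops = check_offload_map(ops, output_columns=None)
#   # -> offload is False, can_split is True,
#   #    non_offload_ops == [RandomCrop], offload_ops == [Normalize, HWC2CHW]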
def shuffle_to_shuffle_mode(shuffle):
"""
Shuffle Enum to Shuffle Mode
Args:
shuffle (Shuffle): shuffle flag to shuffle mode in C layer
Returns:
ShuffleMode, shuffle mode
"""
shuffle_mode = cde.ShuffleMode.GLOBAL # Global shuffle
if not isinstance(shuffle, Shuffle):
if shuffle is None or shuffle:
shuffle_mode = cde.ShuffleMode.GLOBAL # Global shuffle
else:
shuffle_mode = cde.ShuffleMode.FALSE # No shuffle
else:
shuffle_mode = ShuffleToShuffleMode[shuffle]
return shuffle_mode
def shuffle_to_bool(shuffle):
"""
Shuffle Enum to bool
Args:
shuffle (Shuffle): shuffle flag to bool
Returns:
bool, True / False
"""
shuffle_bool = True
if not isinstance(shuffle, Shuffle):
if shuffle is None:
shuffle_bool = None
elif shuffle:
shuffle_bool = True
else:
shuffle_bool = False
else:
shuffle_bool = True
return shuffle_bool
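# Quick reference for the two helpers above (the values follow directly from the code):
#
#   # shuffle_to_shuffle_mode(None)           -> cde.ShuffleMode.GLOBAL
#   # shuffle_to_shuffle_mode(False)          -> cde.ShuffleMode.FALSE
#   # shuffle_to_shuffle_mode(Shuffle.FILES)  -> cde.ShuffleMode.FILES
#   # shuffle_to_bool(None)                   -> None
#   # shuffle_to_bool(Shuffle.INFILE)         -> True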
@check_zip
def zip(datasets):
"""
Zip the datasets in the input tuple of datasets.
Args:
datasets (tuple of class Dataset): A tuple of datasets to be zipped together.
The number of datasets must be more than 1.
Returns:
ZipDataset, dataset zipped.
Raises:
ValueError: If the number of datasets is 1.
TypeError: If datasets is not a tuple.
Examples:
>>> # Create a dataset which is the combination of dataset_1 and dataset_2
>>> dataset = ds.zip((dataset_1, dataset_2))
"""
if len(datasets) <= 1:
raise ValueError(
"Can't zip empty or just one dataset!")
for dataset in datasets:
if not isinstance(dataset, Dataset):
raise TypeError("Invalid dataset, expected Dataset object, but got %s!" % type(dataset))
return ZipDataset(datasets)
def _get_operator_process():
"""
Inner implemented method, mainly for passing sub-process id in C layer
Returns:
dict, mapping dict of operator id and corresponding process id.
"""
global _OP_PROCESS
process_info = _OP_PROCESS
op_process = dict()
keys = process_info.keys()
fetched_all = True
for key in keys:
op_process[key] = list(process_info[key][1])
item_full = (len(process_info[key][1]) == process_info[key][0])
fetched_all = fetched_all and item_full
return op_process, fetched_all
def _set_dataset_permissions(file_name, num_files):
"""
set saved dataset files' permissions to 600
the naming rule for dataset files must match the one used in the C++ layer.
"""
num_digits = len(str(num_files - 1))
if num_files == 1:
paths = [file_name]
else:
paths = ["{}{}".format(file_name, str(x).rjust(num_digits, '0')) for x in range(num_files)]
for item in paths:
if os.path.exists(item):
os.chmod(item, stat.S_IRUSR | stat.S_IWUSR)
index_file = item + ".db"
if os.path.exists(index_file):
os.chmod(index_file, stat.S_IRUSR | stat.S_IWUSR)
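# A sketch of the naming handled above (follows directly from the code; the file name is a placeholder):
#
#   # with file_name="imagenet.mindrecord" and num_files=4, num_digits is 1, so the files
#   # "imagenet.mindrecord0" ... "imagenet.mindrecord3" (plus their ".db" index files, if present)
#   # get their permissions set to 600 (stat.S_IRUSR | stat.S_IWUSR).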
class Dataset:
"""
Abstract class to represent a dataset in DataEngine's data pipeline.
This class is the base class of SourceDataset and Dataset, and represents
a node in the data flow graph.
Args:
num_parallel_workers (int, optional): Number of workers to process the dataset in parallel
(default=None).
"""
def __init__(self, children=None, num_parallel_workers=None, cache=None):
# Note: children and parent are internal variables, not recommended for external using.
self.children = replace_none(children, [])
if isinstance(self.children, tuple):
self.children = list(self.children)
if not isinstance(self.children, list):
self.children = [self.children]
self.parent = []
for child in self.children:
child.parent.append(weakref.ref(self))
self.num_parallel_workers = num_parallel_workers
self.cache = cache
self._device_iter = 0
self._input_indexs = ()
self.saved_output_types = None
self.saved_output_shapes = None
self.dynamic_setting = [False, None]
self.saved_min_shapes = None
self.saved_max_shapes = None
self._col_names = None
self.dataset_size = None
self._batch_size = None
self._num_classes = None
self._repeat_count = None
self._class_indexing = None
self._sync = False
def create_ir_tree(self):
"""
Internal method to build an IR tree.
Returns:
DatasetNode, the root node of the IR tree.
Dataset, the root dataset of the IR tree.
"""
parent = self.parent
self.parent = []
dataset = copy.deepcopy(self)
global _OP_NAME
_OP_NAME = Dataset._get_operator_id(dataset)
ir_tree = dataset.parse_tree()
self.parent = parent
_init_device_info()
return ir_tree, dataset
def close_pool(self):
"""
Close the multiprocessing pool in the dataset. If you are familiar with the multiprocessing library, you can
regard this as a destructor for the processing pool object.
"""
if hasattr(self, 'process_pool') and self.process_pool is not None:
self.process_pool.close()
for child in self.children:
child.close_pool()
def notify_watchdog(self):
"""
Close the watchdog thread in the dataset. GeneratorDataset/map/batch use a thread named watch_dog to monitor
their worker processes. For get_dataset_size/output_shapes/output_types/get_col_name/num_classes, the
watch_dog thread has to be closed manually by calling notify_watchdog.
"""
if hasattr(self, 'sample_fn') and self.sample_fn is not None:
if self.sample_fn.multi_process:
self.sample_fn._abort_watchdog() # pylint: disable=W0212
if hasattr(self, 'watch_dog') and self.watch_dog is not None and hasattr(self, 'eot') and self.eot is not None:
self._abort_watchdog()
for child in self.children:
child.notify_watchdog()
@staticmethod
def _get_operator_id(dataset):
"""
Internal method to iterate the tree and obtain op_id of each operator.
Returns:
Dataset, the root dataset of the tree.
"""
op_name = dict()
generator_process = dict()
op_name[str(dataset)] = 0
op_id = 1
def process_name(datasets, operator_id):
if not datasets:
return 0
temp = []
for item in datasets:
for d in item.children:
temp.append(d)
op_name[str(d)] = operator_id
if isinstance(d, GeneratorDataset) and d.sample_fn and d.sample_fn.pids:
generator_process[operator_id] = [d.num_parallel_workers, set(d.sample_fn.pids)]
operator_id = operator_id + 1
return process_name(temp, operator_id)
process_name([dataset], op_id)
if generator_process:
global _OP_PROCESS
_OP_PROCESS.update(generator_process)
return op_name
def parse_tree(self):
"""
Internal method to parse the API tree into an IR tree.
Returns:
DatasetNode, the root node of the IR tree.
"""
if len(self.parent) > 1:
raise ValueError("The data pipeline is not a tree (i.e., one node has 2 consumers)")
ir_children = [d.parse_tree() for d in self.children]
# Bootstrap can only be performed on a copy of the original dataset node.
# Bootstrap on original dataset node will make all iterators share the same process pool
self.iterator_bootstrap()
ir_node = self.parse(ir_children)
ir_node = self.post_parse(ir_node)
return ir_node
def __safe_deepcopy__(self, memodict, exclude=()):
if id(self) in memodict:
return memodict[id(self)]
cls = self.__class__
new_op = cls.__new__(cls)
memodict[id(self)] = new_op
for arg, value in self.__dict__.items():
if arg in exclude:
setattr(new_op, arg, value)
else:
try:
setattr(new_op, arg, copy.deepcopy(value, memodict))
except TypeError:
setattr(new_op, arg, value)
return new_op
def iterator_bootstrap(self):
pass
@staticmethod
def _noop_mode():
if _is_role_sched() or _is_role_pserver():
return True
return False
def __add__(self, datasets):
return self.concat(datasets)
def to_json(self, filename=""):
"""
Serialize a pipeline into JSON string and dump into file if filename is provided.
Args:
filename (str): filename of JSON file to be saved as.
Returns:
dict, the pipeline description parsed from JSON (also dumped to filename if one is provided).
"""
ir_tree, _ = self.create_ir_tree()
return json.loads(ir_tree.to_json(filename))
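# A minimal usage sketch for to_json (illustrative; "pipeline.json" is a placeholder path):
#
#   # dataset = ds.MnistDataset(mnist_dataset_dir).batch(32)
#   # pipeline = dataset.to_json("pipeline.json")   # also dumps the JSON to the given file
#   # isinstance(pipeline, dict)                    # the parsed pipeline description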
@check_bucket_batch_by_length
def bucket_batch_by_length(self, column_names, bucket_boundaries, bucket_batch_sizes, element_length_function=None,
pad_info=None, pad_to_bucket_boundary=False, drop_remainder=False):
"""
Bucket elements according to their lengths. Each bucket will be padded and batched when
they are full.
A length function is called on each row in the dataset. The row is then
bucketed based on its length and bucket boundaries. When a bucket reaches its
corresponding size specified in bucket_batch_sizes, the entire bucket will be
padded according to pad_info, and then form a batch.
Each batch will be full, except one special case: the last batch for each bucket may not be full.
Args:
column_names (list[str]): Columns passed to element_length_function.
bucket_boundaries (list[int]): A list consisting of the upper boundaries
of the buckets. Must be strictly increasing. If there are n boundaries,
n+1 buckets are created: One bucket for [0, bucket_boundaries[0]), one
bucket for [bucket_boundaries[i], bucket_boundaries[i+1]) for each
0 <= i < n-1, and the last bucket for [bucket_boundaries[n-1], inf).
bucket_batch_sizes (list[int]): A list consisting of the batch sizes for
each bucket. Must contain len(bucket_boundaries)+1 elements.
element_length_function (Callable, optional): A function that takes in
M arguments, where M = len(column_names), and returns an integer. If no value is
provided, then M (i.e., len(column_names)) must be 1, and the size of the first
dimension of that column will be taken as the length (default=None).
pad_info (dict, optional): The information about how to batch each column. The key
corresponds to the column name, and the value must be a tuple of 2 elements.
The first element corresponds to the shape to pad to, and the second
element corresponds to the value to pad with. If a column is not
specified, then that column will be padded to the longest in the current
batch, and 0 will be used as the padding value. Any None dimensions will
be padded to the longest in the current batch, unless
pad_to_bucket_boundary is True. If no padding is wanted, set pad_info
to None (default=None).
pad_to_bucket_boundary (bool, optional): If True, will pad each None
dimension in pad_info to the bucket_boundary minus 1. If there are any
elements that fall into the last bucket, an error will occur
(default=False).
drop_remainder (bool, optional): If True, will drop the last batch for each
bucket if it is not a full batch (default=False).
Returns:
BucketBatchByLengthDataset, dataset bucketed and batched by length.
Examples:
>>> # Bucket rows by length, pad and batch each bucket, and pad column "col2"
>>> # up to the boundary of its bucket.
>>> import numpy as np
>>> def generate_2_columns(n):
... for i in range(n):
... yield (np.array([i]), np.array([j for j in range(i + 1)]))
>>>
>>> column_names = ["col1", "col2"]
>>> dataset = ds.GeneratorDataset(generate_2_columns(8), column_names)
>>> bucket_boundaries = [5, 10]
>>> bucket_batch_sizes = [2, 1, 1]
>>> element_length_function = (lambda col1, col2: max(len(col1), len(col2)))
>>> # Will pad col2 to shape [bucket_boundaries[i]] where i is the
>>> # index of the bucket that is currently being batched.
>>> pad_info = {"col2": ([None], -1)}
>>> pad_to_bucket_boundary = True
>>> dataset = dataset.bucket_batch_by_length(column_names, bucket_boundaries,
... bucket_batch_sizes,
... element_length_function, pad_info,
... pad_to_bucket_boundary)
"""
return BucketBatchByLengthDataset(self, column_names, bucket_boundaries, bucket_batch_sizes,
element_length_function, pad_info, pad_to_bucket_boundary, drop_remainder)
@check_batch
def batch(self, batch_size, drop_remainder=False, num_parallel_workers=None, per_batch_map=None,
input_columns=None, output_columns=None, column_order=None, pad_info=None,
python_multiprocessing=False, max_rowsize=16):
"""
Combine batch_size number of consecutive rows into batches.
For any child node, a batch is treated as a single row.
For any column, all the elements within that column must have the same shape.
If a per_batch_map callable is provided, it will be applied to the batches of tensors.
Note:
The order of using repeat and batch reflects the number of batches and the behavior of per_batch_map.
It is recommended that the repeat operation be applied after the batch operation.
Args:
batch_size (int or function): The number of rows each batch is created with. An
int or callable object which takes exactly 1 parameter, BatchInfo.
drop_remainder (bool, optional): Determines whether or not to drop the last block
whose data row number is less than batch size (default=False). If True, and if there are less
than batch_size rows available to make the last batch, then those rows will
be dropped and not propagated to the child node.
num_parallel_workers (int, optional): Number of workers(threads) to process the dataset in parallel
(default=None).
per_batch_map (callable, optional): Per batch map callable. A callable which takes
(list[Tensor], list[Tensor], ..., BatchInfo) as input parameters. Each list[Tensor] represents a batch
of Tensors on a given column. The number of lists should match with number of entries in input_columns.
The last parameter of the callable should always be a BatchInfo object. Per_batch_map should return
(list[Tensor], list[Tensor], ...). The length of each list in output should be same as the input.
output_columns is required if the number of output lists is different from input.
input_columns (Union[str, list[str]], optional): List of names of the input columns. The size of the list
should match with signature of per_batch_map callable (default=None).
output_columns (Union[str, list[str]], optional): List of names assigned to the columns
outputted by the last operation. This parameter is mandatory if len(input_columns) !=
len(output_columns). The size of this list must match the number of output
columns of the last operation. (default=None, output columns will have the same
name as the input columns, i.e., the columns will be replaced).
column_order (Union[str, list[str]], optional): Specifies the list of all the columns you need in the whole
dataset. The parameter is required when len(input_column) != len(output_column). Caution: the list here
is not just the columns specified in parameter input_columns and output_columns.
pad_info (dict, optional): Whether to perform padding on selected columns. pad_info={"col1":([224,224],0)}
would pad column with name "col1" to a tensor of size [224,224] and fill the missing with 0
(default=None).
python_multiprocessing (bool, optional): Parallelize Python function per_batch_map with multi-processing.
This option could be beneficial if the function is computational heavy (default=False).
max_rowsize(int, optional): Maximum size of row in MB that is used for shared memory allocation to copy
data between processes. This is only used if python_multiprocessing is set to True (default=16).
Returns:
BatchDataset, dataset batched.
Examples:
>>> # Create a dataset where every 100 rows are combined into a batch
>>> # and drops the last incomplete batch if there is one.
>>> dataset = dataset.batch(100, True)
>>> # resize image according to its batch number, if it's 5-th batch, resize to (5^2, 5^2) = (25, 25)
>>> def np_resize(col, batchInfo):
... output = col.copy()
... s = (batchInfo.get_batch_num() + 1) ** 2
... index = 0
... for c in col:
... img = Image.fromarray(c.astype('uint8')).convert('RGB')
... img = img.resize((s, s), Image.ANTIALIAS)
... output[index] = np.array(img)
... index += 1
... return (output,)
>>> dataset = dataset.batch(batch_size=8, input_columns=["image"], per_batch_map=np_resize)
"""
return BatchDataset(self, batch_size, drop_remainder, num_parallel_workers, per_batch_map, input_columns,
output_columns, column_order, pad_info, python_multiprocessing, max_rowsize)
@check_sync_wait
def sync_wait(self, condition_name, num_batch=1, callback=None):
"""
Add a blocking condition to the input Dataset. A synchronize action will be applied.
Args:
condition_name (str): The condition name that is used to toggle sending next row.
num_batch (int): the number of batches without blocking at the start of each epoch.
callback (function): The callback function that will be invoked when sync_update is called.
Returns:
SyncWaitDataset, dataset added a blocking condition.
Raises:
RuntimeError: If condition name already exists.
Examples:
>>> import numpy as np
>>> def gen():
... for i in range(100):
... yield (np.array(i),)
>>>
>>> class Augment:
... def __init__(self, loss):
... self.loss = loss
...
... def preprocess(self, input_):
... return input_
...
... def update(self, data):
... self.loss = data["loss"]
>>>
>>> batch_size = 4
>>> dataset = ds.GeneratorDataset(gen, column_names=["input"])
>>>
>>> aug = Augment(0)
>>> dataset = dataset.sync_wait(condition_name="policy", callback=aug.update)
>>> dataset = dataset.map(operations=[aug.preprocess], input_columns=["input"])
>>> dataset = dataset.batch(batch_size)
>>> count = 0
>>> for data in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
... assert data["input"][0] == count
... count += batch_size
... data = {"loss": count}
... dataset.sync_update(condition_name="policy", data=data)
"""
return SyncWaitDataset(self, condition_name, num_batch, callback)
@check_shuffle
def shuffle(self, buffer_size):
"""
Randomly shuffles the rows of this dataset using the following policy:
1. Make a shuffle buffer that contains the first buffer_size rows.
2. Randomly select an element from the shuffle buffer to be the next row
propagated to the child node.
3. Get the next row (if any) from the parent node and put it in the shuffle buffer.
4. Repeat steps 2 and 3 until there are no more rows left in the shuffle buffer.
A random seed can be provided to be used on the first epoch. In every subsequent
epoch, the seed is changed to a new, randomly generated value.
Args:
buffer_size (int): The size of the buffer (must be larger than 1) for
shuffling. Setting buffer_size equal to the number of rows in the entire
dataset will result in a global shuffle.
Returns:
ShuffleDataset, dataset shuffled.
Raises:
RuntimeError: If a sync operator exists before shuffle.
Examples:
>>> # dataset is an instance object of Dataset
>>> # Optionally set the seed for the first epoch
>>> ds.config.set_seed(58)
>>> # Create a shuffled dataset using a shuffle buffer of size 4
>>> dataset = dataset.shuffle(4)
"""
return ShuffleDataset(self, buffer_size)
def flat_map(self, func):
"""
Map `func` to each row in dataset and flatten the result.
The specified `func` is a function that must take one 'Ndarray' as input
and return a 'Dataset'.
Args:
func (function): A function that must take one 'Ndarray' as an argument and
return a 'Dataset'.
Returns:
Dataset, dataset applied by the function.
Examples:
>>> # use NumpySlicesDataset as an example
>>> dataset = ds.NumpySlicesDataset([[0, 1], [2, 3]])
>>>
>>> def flat_map_func(array):
... # create a NumpySlicesDataset with the array
... dataset = ds.NumpySlicesDataset(array)
... # repeat the dataset twice
... dataset = dataset.repeat(2)
... return dataset
>>>
>>> dataset = dataset.flat_map(flat_map_func)
>>> # [[0, 1], [0, 1], [2, 3], [2, 3]]
Raises:
TypeError: If `func` is not a function.
TypeError: If `func` doesn't return a Dataset.
"""
dataset = None
if not hasattr(func, '__call__'):
logger.critical("func must be a function.")
raise TypeError("func must be a function.")
for row_data in self.create_tuple_iterator(output_numpy=True):
if dataset is None:
dataset = func(row_data)
else:
dataset += func(row_data)
if not isinstance(dataset, Dataset):
logger.critical("flat_map must return a Dataset object.")
raise TypeError("flat_map must return a Dataset object.")
return dataset
@check_map
def map(self, operations, input_columns=None, output_columns=None, column_order=None,
num_parallel_workers=None, python_multiprocessing=False, cache=None, callbacks=None,
max_rowsize=16, offload=None):
"""
Apply each operation in operations to this dataset.
The order of operations is determined by the position of each operation in the operations parameter.
operations[0] will be applied first, then operations[1], then operations[2], etc.
Each operation will be passed one or more columns from the dataset as input, and zero or
more columns will be outputted. The first operation will be passed the columns specified
in input_columns as input. If there is more than one operator in operations, the outputted
columns of the previous operation are used as the input columns for the next operation.
The columns outputted by the very last operation will be assigned names specified by
output_columns.
Only the columns specified in column_order will be propagated to the child node. These
columns will be in the same order as specified in column_order.
Args:
operations (Union[list[TensorOp], list[functions]]): List of operations to be
applied on the dataset. Operations are applied in the order they appear in this list.
input_columns (Union[str, list[str]], optional): List of the names of the columns that will be passed to
the first operation as input. The size of this list must match the number of
input columns expected by the first operator. (default=None, the first
operation will be passed however many columns that are required, starting from
the first column).
output_columns (Union[str, list[str]], optional): List of names assigned to the columns outputted by
the last operation. This parameter is mandatory if len(input_columns) !=
len(output_columns). The size of this list must match the number of output
columns of the last operation. (default=None, output columns will have the same
name as the input columns, i.e., the columns will be replaced).
column_order (list[str], optional): Specifies the list of all the columns you need in the whole
dataset. The parameter is required when len(input_column) != len(output_column). Caution: the list here
is not just the columns specified in parameter input_columns and output_columns.
num_parallel_workers (int, optional): Number of threads used to process the dataset in
parallel (default=None, the value from the configuration will be used).
python_multiprocessing (bool, optional): Parallelize Python operations with multiple worker processes. This
option could be beneficial if the Python operation is computational heavy (default=False).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
callbacks (DSCallback, list[DSCallback], optional): List of Dataset callbacks to be called (Default=None).
max_rowsize (int, optional): Maximum size of row in MB that is used for shared memory allocation to copy
data between processes. This is only used if python_multiprocessing is set to True (Default=16).
offload (bool, optional): Flag to indicate whether offload is used (Default=None).
Returns:
MapDataset, dataset after mapping operation.
Examples:
>>> # dataset is an instance of Dataset which has 2 columns, "image" and "label".
>>>
>>> # Define two operations, where each operation accepts 1 input column and outputs 1 column.
>>> decode_op = c_vision.Decode(rgb=True)
>>> random_jitter_op = c_vision.RandomColorAdjust(brightness=(0.8, 0.8), contrast=(1, 1),
... saturation=(1, 1), hue=(0, 0))
>>>
>>> # 1) Simple map example.
>>>
>>> # Apply decode_op on column "image". This column will be replaced by the outputted
>>> # column of decode_op. Since column_order is not provided, both columns "image"
>>> # and "label" will be propagated to the child node in their original order.
>>> dataset = dataset.map(operations=[decode_op], input_columns=["image"])
>>>
>>> # Decode and rename column "image" to "decoded_image".
>>> dataset = dataset.map(operations=[decode_op], input_columns=["image"], output_columns=["decoded_image"])
>>>
>>> # Specify the order of the output columns.
>>> dataset = dataset.map(operations=[decode_op], input_columns=["image"],
... output_columns=None, column_order=["label", "image"])
>>>
>>> # Rename column "image" to "decoded_image" and also specify the order of the output columns.
>>> dataset = dataset.map(operations=[decode_op], input_columns=["image"],
... output_columns=["decoded_image"], column_order=["label", "decoded_image"])
>>>
>>> # Rename column "image" to "decoded_image" and keep only this column.
>>> dataset = dataset.map(operations=[decode_op], input_columns=["image"],
... output_columns=["decoded_image"], column_order=["decoded_image"])
>>>
>>> # A simple example for mapping pyfunc. Renaming columns and specifying column order
>>> # work in the same way as the previous examples.
>>> dataset = ds.NumpySlicesDataset(data=[[0, 1, 2]], column_names=["data"])
>>> dataset = dataset.map(operations=[(lambda x: x + 1)], input_columns=["data"])
>>>
>>> # 2) Map example with more than one operation.
>>>
>>> # Create a dataset where the images are decoded, then randomly color jittered.
>>> # decode_op takes column "image" as input and outputs one column. The column
>>> # outputted by decode_op is passed as input to random_jitter_op.
>>> # random_jitter_op will output one column. Column "image" will be replaced by
>>> # the column outputted by random_jitter_op (the very last operation). All other
>>> # columns are unchanged. Since column_order is not specified, the order of the
>>> # columns will remain the same.
>>> dataset = dataset.map(operations=[decode_op, random_jitter_op], input_columns=["image"])
>>>
>>> # Rename the column outputted by random_jitter_op to "image_mapped".
>>> # Specifying column order works in the same way as examples in 1).
>>> dataset = dataset.map(operations=[decode_op, random_jitter_op], input_columns=["image"],
... output_columns=["image_mapped"])
>>>
>>> # Map with multiple operations using pyfunc. Renaming columns and specifying column order
>>> # work in the same way as examples in 1).
>>> dataset = ds.NumpySlicesDataset(data=[[0, 1, 2]], column_names=["data"])
>>> dataset = dataset.map(operations=[(lambda x: x * x), (lambda x: x - 1)], input_columns=["data"],
... output_columns=["data_mapped"])
>>>
>>> # 3) Example where number of input columns is not equal to number of output columns.
>>>
>>> # operations[0] is a lambda that takes 2 columns as input and outputs 3 columns.
>>> # operations[1] is a lambda that takes 3 columns as input and outputs 1 column.
>>> # operations[2] is a lambda that takes 1 column as input and outputs 4 columns.
>>> #
>>> # Note: The number of output columns of operation[i] must equal the number of
>>> # input columns of operation[i+1]. Otherwise, this map call will also result
>>> # in an error.
>>> operations = [(lambda x, y: (x, x + y, x + y + 1)),
... (lambda x, y, z: x * y * z),
... (lambda x: (x % 2, x % 3, x % 5, x % 7))]
>>>
>>> # Note: Since the number of input columns is not the same as the number of
>>> # output columns, the output_columns and column_order parameters must be
>>> # specified. Otherwise, this map call will also result in an error.
>>>
>>> dataset = ds.NumpySlicesDataset(data=([[0, 1, 2]], [[3, 4, 5]]), column_names=["x", "y"])
>>>
>>> # Propagate all columns to the child node in this order:
>>> dataset = dataset.map(operations, input_columns=["x", "y"],
... output_columns=["mod2", "mod3", "mod5", "mod7"],
... column_order=["mod2", "mod3", "mod5", "mod7"])
>>>
>>> # Propagate some columns to the child node in this order:
>>> dataset = dataset.map(operations, input_columns=["x", "y"],
... output_columns=["mod2", "mod3", "mod5", "mod7"],
... column_order=["mod7", "mod3", "mod2"])
"""
can_split = False
non_offload_ops = []
offload_ops = []
if offload is not None:
offload_flag = offload
else:
offload_flag = get_auto_offload()
if offload_flag:
offload_flag, can_split, non_offload_ops, offload_ops = check_offload_map(operations, output_columns)
if can_split:
non_offload_map_ds = MapDataset(self, non_offload_ops, input_columns, output_columns, column_order,
num_parallel_workers, python_multiprocessing, cache, callbacks,
max_rowsize, offload=False)
return MapDataset(non_offload_map_ds, offload_ops, input_columns, output_columns, column_order,
num_parallel_workers, python_multiprocessing, cache, callbacks, max_rowsize,
offload=True)
return MapDataset(self, operations, input_columns, output_columns, column_order, num_parallel_workers,
python_multiprocessing, cache, callbacks, max_rowsize, offload_flag)
@check_filter
def filter(self, predicate, input_columns=None, num_parallel_workers=None):
"""
Filter dataset by predicate.
Note:
If input_columns is not provided or is empty, the predicate will be applied to all columns in the dataset.
Args:
predicate (callable): Python callable which returns a boolean value. If False then filter the element.
input_columns (Union[str, list[str]], optional): List of names of the input columns, when
default=None, the predicate will be applied on all columns in the dataset.
num_parallel_workers (int, optional): Number of workers to process the dataset
in parallel (default=None).
Returns:
FilterDataset, dataset filtered.
Examples:
>>> # generator data(0 ~ 63)
>>> # filter the data that greater than or equal to 11
>>> dataset = dataset.filter(predicate=lambda data: data < 11, input_columns = ["data"])
"""
return FilterDataset(self, predicate, input_columns, num_parallel_workers)
@check_repeat
def repeat(self, count=None):
"""
Repeat this dataset `count` times. Repeat infinitely if the count is None or -1.
Note:
The order of using repeat and batch reflects the number of batches. It is recommended that
the repeat operation be used after the batch operation.
Args:
count (int): Number of times the dataset is going to be repeated (default=None).
Returns:
RepeatDataset, dataset repeated.
Examples:
>>> # dataset is an instance object of Dataset
>>>
>>> # Create a dataset where the dataset is repeated for 50 epochs
>>> dataset = dataset.repeat(50)
>>>
>>> # Create a dataset where each epoch is shuffled individually
>>> dataset = dataset.shuffle(10)
>>> dataset = dataset.repeat(50)
>>>
>>> # Create a dataset where the dataset is first repeated for
>>> # 50 epochs before shuffling. The shuffle operator will treat
>>> # the entire 50 epochs as one big dataset.
>>> dataset = dataset.repeat(50)
>>> dataset = dataset.shuffle(10)
"""
return RepeatDataset(self, count)
@check_skip
def skip(self, count):
"""
Skip the first N elements of this dataset.
Args:
count (int): Number of elements in the dataset to be skipped.
Returns:
SkipDataset, a dataset containing the rows that remain after the first count rows are skipped.
Examples:
>>> # dataset is an instance object of Dataset
>>> # Create a dataset which skips first 3 elements from data
>>> dataset = dataset.skip(3)
"""
return SkipDataset(self, count)
@check_take
def take(self, count=-1):
"""
Takes at most given numbers of elements from the dataset.
Note:
1. If count is greater than the number of elements in the dataset or equal to -1,
all the elements in dataset will be taken.
2. The order of using take and batch matters. If take is before batch operation,
then take given number of rows; otherwise take given number of batches.
Args:
count (int, optional): Number of elements to be taken from the dataset (default=-1).
Returns:
TakeDataset, dataset taken.
Examples:
>>> # dataset is an instance object of Dataset
>>> # Create a dataset where the dataset includes 50 elements.
>>> dataset = dataset.take(50)
"""
return TakeDataset(self, count)
def _get_absolute_split_sizes(self, sizes):
"""
Internal method called by split to calculate absolute split sizes and to
do some error checking after calculating absolute split sizes.
Returns:
int, absolute split sizes of the dataset.
"""
# Call get_dataset_size here and check input here because
# don't want to call this once in check_split and another time in
# here again
dataset_size = self.get_dataset_size()
if dataset_size is None or dataset_size <= 0:
raise RuntimeError("dataset_size is unknown, unable to split.")
if not isinstance(sizes, list):
raise RuntimeError("sizes must be a list.")
all_int = all(isinstance(item, int) for item in sizes)
if all_int:
sizes_sum = sum(sizes)
if sizes_sum != dataset_size:
raise RuntimeError("Sum of split sizes {} is not equal to dataset size {}."
.format(sizes_sum, dataset_size))
return sizes
absolute_sizes = []
for item in sizes:
absolute_size = int(round(item * dataset_size))
if absolute_size == 0:
raise RuntimeError("Split percentage {} is too small.".format(item))
absolute_sizes.append(absolute_size)
absolute_sizes_sum = sum(absolute_sizes)
# if we still need more rows, give them to the first split.
# if we have too many rows, remove the extras from the first split that has
# enough rows.
size_difference = int(dataset_size - absolute_sizes_sum)
if size_difference > 0:
absolute_sizes[0] += size_difference
else:
for i, _ in enumerate(absolute_sizes):
if absolute_sizes[i] + size_difference > 0:
absolute_sizes[i] += size_difference
break
if sum(absolute_sizes) != dataset_size:
raise RuntimeError("Sum of calculated split sizes {} is not equal to dataset size {}."
.format(absolute_sizes_sum, dataset_size))
return absolute_sizes
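# A worked example of the rounding/adjustment above (illustrative numbers):
#
#   # with get_dataset_size() == 7 and sizes == [0.5, 0.5]:
#   #   round(0.5 * 7) == 4 for both splits, so the raw sum is 8;
#   #   size_difference == 7 - 8 == -1 is subtracted from the first split that stays positive,
#   #   giving absolute sizes [3, 4], which again sum to the dataset size.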
@check_split
def split(self, sizes, randomize=True):
"""
Split the dataset into smaller, non-overlapping datasets.
This is a general purpose split function which can be called from any operator in the pipeline.
There is another, optimized split function, which will be called automatically if ds.split is
called where ds is a MappableDataset.
Args:
sizes (Union[list[int], list[float]]): If a list of integers [s1, s2, …, sn] is
provided, the dataset will be split into n datasets of size s1, size s2, …, size sn
respectively. If the sum of all input sizes does not equal the original dataset size, an
error will be raised.
If a list of floats [f1, f2, …, fn] is provided, all floats must be between 0 and 1
and must sum to 1, otherwise an error will be raised. The dataset will be split into n
Datasets of size round(f1*K), round(f2*K), …, round(fn*K) where K is the size of the
original dataset.
If after rounding:
- Any size equals 0, an error will occur.
- The sum of split sizes < K, the difference of K - sigma(round(fi * k)) will be added to the first
split.
- The sum of split sizes > K, the difference of sigma(round(fi * K)) - K will be removed from the first
large enough split such that it will have at least 1 row after removing the difference.
randomize (bool, optional): Determines whether or not to split the data randomly (default=True).
If True, the data will be randomly split. Otherwise, each split will be created with
consecutive rows from the dataset.
Note:
1. Dataset cannot be sharded if split is going to be called.
2. It is strongly recommended to not shuffle the dataset, but use randomize=True instead.
Shuffling the dataset may not be deterministic, which means the data in each split
will be different in each epoch.
Raises:
RuntimeError: If get_dataset_size returns None or is not supported for this dataset.
RuntimeError: If `sizes` is list of integers and sum of all elements in sizes does not
equal the dataset size.
RuntimeError: If `sizes` is list of float and there is a split with size 0 after calculations.
RuntimeError: If the dataset is sharded prior to calling split.
ValueError: If `sizes` is list of float and not all floats are between 0 and 1, or if the
floats don't sum to 1.
Returns:
tuple(Dataset), a tuple of datasets that have been split.
Examples:
>>> # TextFileDataset is not a mappable dataset, so this non-optimized split will be called.
>>> # Since many datasets have shuffle on by default, set shuffle to False if split will be called!
>>> dataset = ds.TextFileDataset(text_file_dataset_dir, shuffle=False)
>>> train_dataset, test_dataset = dataset.split([0.9, 0.1])
"""
if self.is_shuffled():
logger.warning("Dataset is shuffled before split.")
if self.is_sharded():
raise RuntimeError("Dataset should not be sharded before split.")
absolute_sizes = self._get_absolute_split_sizes(sizes)
splits = []
rows_to_skip = 0
for size in absolute_sizes:
ds = copy.deepcopy(self)
if randomize:
# want to shuffle the same way every epoch before split
# in alter_tree, shuffle buffer is minimum 10000, so use 10000 here
ds = ds.shuffle(10000)
ds.reshuffle_each_epoch = False
if rows_to_skip > 0:
ds = ds.skip(rows_to_skip)
ds = ds.take(size)
splits.append(ds)
rows_to_skip += size
return tuple(splits)
@check_zip_dataset
def zip(self, datasets):
"""
Zip this dataset with the datasets in the input tuple. Columns in the input datasets must have different
names.
Args:
datasets (Union[tuple, class Dataset]): A tuple of datasets or a single class Dataset
to be zipped together with this dataset.
Returns:
ZipDataset, dataset zipped.
Examples:
>>> # Create a dataset which is the combination of dataset and dataset_1
>>> dataset = dataset.zip(dataset_1)
"""
if isinstance(datasets, tuple):
datasets = (self, *datasets)
elif isinstance(datasets, Dataset):
datasets = (self, datasets)
else:
raise TypeError("Invalid datasets, expected Dataset object or tuple of Dataset, but got %s!" % datasets)
return ZipDataset(datasets)
@check_concat
def concat(self, datasets):
"""
Concatenate the dataset objects in the input list.
Performing "+" operation on dataset objects can achieve the same effect.
Note:
The column names, and the rank and type of the column data, must be the same across the input datasets.
Args:
datasets (Union[list, class Dataset]): A list of datasets or a single class Dataset
to be concatenated together with this dataset.
Returns:
ConcatDataset, dataset concatenated.
Examples:
>>> # Create a dataset by concatenating dataset_1 and dataset_2 with "+" operator
>>> dataset = dataset_1 + dataset_2
>>> # Create a dataset by concatenating dataset_1 and dataset_2 with concat operation
>>> dataset = dataset_1.concat(dataset_2)
"""
if isinstance(datasets, Dataset):
datasets = [self] + [datasets]
elif isinstance(datasets, list):
datasets = [self] + datasets
else:
raise TypeError("Invalid datasets, expected Dataset object or list of Dataset, but got %s!" % datasets)
return ConcatDataset(datasets)
@check_rename
def rename(self, input_columns, output_columns):
"""
Rename the columns in input datasets.
Args:
input_columns (Union[str, list[str]]): List of names of the input columns.
output_columns (Union[str, list[str]]): List of names of the output columns.
Returns:
RenameDataset, dataset renamed.
Examples:
>>> # dataset is an instance object of Dataset
>>> input_columns = ["input_col1", "input_col2", "input_col3"]
>>> output_columns = ["output_col1", "output_col2", "output_col3"]
>>>
>>> # Create a dataset where input_col1 is renamed to output_col1, and
>>> # input_col2 is renamed to output_col2, and input_col3 is renamed
>>> # to output_col3.
>>> dataset = dataset.rename(input_columns=input_columns, output_columns=output_columns)
"""
return RenameDataset(self, input_columns, output_columns)
@check_project
def project(self, columns):
"""
Project certain columns in input dataset.
The specified columns will be selected from the dataset and passed into
the pipeline with the order specified. The other columns are discarded.
Args:
columns(Union[str, list[str]]): List of names of the columns to project.
Returns:
ProjectDataset, dataset projected.
Examples:
>>> # dataset is an instance object of Dataset
>>> columns_to_project = ["column3", "column1", "column2"]
>>>
>>> # Create a dataset that consists of column3, column1, column2
>>> # in that order, regardless of the original order of columns.
>>> dataset = dataset.project(columns=columns_to_project)
"""
return ProjectDataset(self, columns)
def build_vocab(self, columns, freq_range, top_k, special_tokens, special_first):
"""
Function to create a Vocab from source dataset
Build a vocab from a dataset. This would collect all the unique words in a dataset and return a vocab
which contains top_k most frequent words (if top_k is specified)
Args:
columns(Union[str, list[str]]): Column names to get words from.
freq_range(tuple[int]): A tuple of integers (min_frequency, max_frequency). Words within the frequency
range will be stored.
Naturally 0 <= min_frequency <= max_frequency <= total_words. min_frequency/max_frequency
can be left at their defaults, which correspond to 0/total_words respectively.
top_k(int): Number of words to be built into the vocab. The top_k most frequent words are
taken. top_k is applied after freq_range; if fewer than top_k words remain, all of them are taken.
special_tokens(list[str]): A list of strings, each one is a special token.
special_first(bool): Whether special_tokens will be prepended (or appended) to the vocab. If special_tokens
is specified and special_first is left at its default, special_tokens will be prepended.
Returns:
Vocab, vocab built from the dataset.
Examples:
>>> import numpy as np
>>>
>>> def gen_corpus():
... # key: word, value: number of occurrences, reason for using letters is so their order is apparent
... corpus = {"Z": 4, "Y": 4, "X": 4, "W": 3, "U": 3, "V": 2, "T": 1}
... for k, v in corpus.items():
... yield (np.array([k] * v, dtype='S'),)
>>> column_names = ["column1"]
>>> dataset = ds.GeneratorDataset(gen_corpus, column_names)
>>> dataset = dataset.build_vocab(columns=["column1"],
... freq_range=(1, 10), top_k=5,
... special_tokens=["<pad>", "<unk>"],
... special_first=True)
"""
vocab = cde.Vocab()
columns = replace_none(columns, [])
if not isinstance(columns, list):
columns = [columns]
freq_range = replace_none(freq_range, (0, 9223372036854775807))
if freq_range[0] is None:
freq_range = (0, freq_range[1])
if freq_range[1] is None:
freq_range = (freq_range[0], 9223372036854775807)
special_tokens = replace_none(special_tokens, [])
top_k = replace_none(top_k, 9223372036854775807)
ir_tree, api_tree = self.create_ir_tree()
# vocab node
vocab_node = cde.BuildVocabNode(ir_tree, vocab, columns, freq_range, top_k, special_tokens, special_first)
runtime_context = cde.PythonRuntimeContext()
runtime_context.Init()
# build vocab
consumer = cde.PythonBuildVocabConsumer()
consumer.Init(vocab_node)
runtime_context.AssignConsumer(consumer)
consumer.Start()
del api_tree
return vocab
def build_sentencepiece_vocab(self, columns, vocab_size, character_coverage, model_type, params):
"""
Create a SentencePieceVocab from the source dataset.
Args:
columns(list[str]): Column names to get words from.
vocab_size(int): Vocabulary size.
character_coverage(float): Fraction of characters covered by the model, must be between
0.98 and 1.0. Good defaults are 0.9995 for languages with rich character sets like
Japanese or Chinese, and 1.0 for other languages with small character sets
like English or Latin.
model_type(SentencePieceModel): Model type. Choose from unigram (default), bpe, char, or word.
The input sentence must be pretokenized when using word type.
params(dict): Any extra optional parameters for the sentencepiece library, depending on your raw data.
Returns:
SentencePieceVocab, vocab built from the dataset.
Examples:
>>> from mindspore.dataset.text import SentencePieceModel
>>>
>>> # DE_C_INTER_SENTENCEPIECE_MODE is a mapping dict
>>> from mindspore.dataset.text.utils import DE_C_INTER_SENTENCEPIECE_MODE
>>> dataset = ds.TextFileDataset("/path/to/sentence/piece/vocab/file", shuffle=False)
>>> dataset = dataset.build_sentencepiece_vocab(["text"], 5000, 0.9995,
... DE_C_INTER_SENTENCEPIECE_MODE[SentencePieceModel.UNIGRAM],
... {})
"""
vocab = cde.SentencePieceVocab()
ir_tree, api_tree = self.create_ir_tree()
# vocab node
vocab_node = cde.BuildSentenceVocabNode(ir_tree, vocab, columns, vocab_size, character_coverage, model_type,
params)
runtime_context = cde.PythonRuntimeContext()
runtime_context.Init()
# build vocab
consumer = cde.PythonBuildVocabConsumer()
consumer.Init(vocab_node)
runtime_context.AssignConsumer(consumer)
consumer.Start()
del api_tree
return vocab
def apply(self, apply_func):
"""
Apply a function to this dataset.
Args:
apply_func (function): A function that must take one 'Dataset' as an argument and
return a preprocessed 'Dataset'.
Returns:
Dataset, dataset applied by the function.
Examples:
>>> # dataset is an instance object of Dataset
>>>
>>> # Declare an apply_func function which returns a Dataset object
>>> def apply_func(data):
... data = data.batch(2)
... return data
>>>
>>> # Use apply to call apply_func
>>> dataset = dataset.apply(apply_func)
Raises:
TypeError: If apply_func is not a function.
TypeError: If apply_func doesn't return a Dataset.
"""
if not hasattr(apply_func, '__call__'):
raise TypeError("apply_func must be a function.")
dataset = apply_func(self)
if not isinstance(dataset, Dataset):
raise TypeError("apply_func must return a dataset.")
return dataset
@check_device_send
def device_que(self, send_epoch_end=True, create_data_info_queue=False):
"""
Return a TransferDataset that transfers data to the device.
Args:
send_epoch_end (bool, optional): Whether to send end of sequence to device or not (default=True).
create_data_info_queue (bool, optional): Whether to create queue which stores
types and shapes of data or not(default=False).
Note:
If the device is Ascend, features of the data will be transferred one by one. The amount of data
transferred at a time is limited to 256M.
Returns:
TransferDataset, dataset for transferring.
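Examples:
>>> # A minimal, hedged sketch: assumes dataset is an existing Dataset instance and a device queue is available.
>>> transfer_dataset = dataset.device_que()
>>> transfer_dataset.send()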
"""
return self.to_device(send_epoch_end=send_epoch_end, create_data_info_queue=create_data_info_queue)
@check_device_send
def to_device(self, send_epoch_end=True, create_data_info_queue=False):
"""
Transfer data from CPU to GPU or Ascend or other devices.
Args:
send_epoch_end (bool, optional): Whether to send the end of sequence to device or not (default=True).
create_data_info_queue (bool, optional): Whether to create queue which stores
types and shapes of data or not(default=False).
Note:
If the device is Ascend, features of the data will be transferred one by one. The amount of data
transferred at a time is limited to 256M.
Returns:
TransferDataset, dataset for transferring.
Raises:
RuntimeError: If distribution file path is given but failed to read.
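Examples:
>>> # A minimal, hedged sketch: assumes dataset is an existing Dataset instance and the target device
>>> # (GPU or Ascend) supports the data queue.
>>> transfer_dataset = dataset.to_device()
>>> transfer_dataset.send()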
"""
return TransferDataset(self, send_epoch_end, create_data_info_queue)
@check_save
def save(self, file_name, num_files=1, file_type='mindrecord'):
"""
Save the dynamic data processed by the dataset pipeline in common dataset format.
Supported dataset format: 'mindrecord' only.
Implicit type casting exists when saving data as 'mindrecord'. The table below shows how types are cast.
.. list-table:: Implicit Type Casting when Saving as 'mindrecord'
:widths: 25 25 50
:header-rows: 1
* - Type in 'dataset'
- Type in 'mindrecord'
- Details
* - bool
- None
- Not supported
* - int8
- int32
-
* - uint8
- bytes(1D uint8)
- Drop dimension
* - int16
- int32
-
* - uint16
- int32
-
* - int32
- int32
-
* - uint32
- int64
-
* - int64
- int64
-
* - uint64
- None
- Not supported
* - float16
- float32
-
* - float32
- float32
-
* - float64
- float64
-
* - string
- string
- Multi-dimensional string not supported
Note:
1. To save the samples in order, set dataset's shuffle to False and num_files to 1.
2. Before calling the function, do not use batch operator, repeat operator or data augmentation operators
with random attribute in map operator.
3. When an array dimension is variable, only one-dimensional arrays or
multi-dimensional arrays whose dimension 0 is the variable dimension are supported.
4. Mindrecord does not support DE_UINT64, multi-dimensional DE_UINT8(drop dimension) nor
multi-dimensional DE_STRING.
Args:
file_name (str): Path to dataset file.
num_files (int, optional): Number of dataset files (default=1).
file_type (str, optional): Dataset format (default='mindrecord').
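Examples:
>>> # A minimal, hedged sketch: assumes dataset is an existing Dataset instance and the output path is writable.
>>> dataset.save("/path/to/saved_file.mindrecord", num_files=1)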
"""
ir_tree, api_tree = self.create_ir_tree()
runtime_context = cde.PythonRuntimeContext()
runtime_context.Init()
consumer = cde.PythonSaveToDisk(file_name, num_files, file_type)
consumer.Init(ir_tree)
runtime_context.AssignConsumer(consumer)
consumer.Save()
_set_dataset_permissions(file_name, num_files)
del api_tree
@check_tuple_iterator
def create_tuple_iterator(self, columns=None, num_epochs=-1, output_numpy=False, do_copy=True):
"""
Create an iterator over the dataset. The data retrieved will be a list of ndarrays.
To specify which columns to include and the order needed, use the columns parameter. If columns
is not provided, the order of the columns will remain unchanged.
Args:
columns (list[str], optional): List of columns to be used to specify the order of columns
(default=None, means all columns).
num_epochs (int, optional): Maximum number of epochs that iterator can be iterated.
(default=-1, iterator can be iterated infinite number of epochs)
output_numpy (bool, optional): Whether or not to output NumPy datatype.
If output_numpy=False, iterator will output MSTensor (default=False).
do_copy (bool, optional): When the output data type is mindspore.Tensor, this parameter selects the
conversion method; set it to False for better performance (default=True).
Returns:
TupleIterator, tuple iterator over the dataset.
Examples:
>>> # dataset is an instance object of Dataset
>>> iterator = dataset.create_tuple_iterator()
>>> for item in iterator:
... # item is a list
... print(type(item))
... break
<class 'list'>
"""
if output_numpy is None:
output_numpy = False
if Dataset._noop_mode():
return DummyIterator(self, 'tuple')
return TupleIterator(self, columns, num_epochs, output_numpy, do_copy)
@check_dict_iterator
def create_dict_iterator(self, num_epochs=-1, output_numpy=False):
"""
Create an iterator over the dataset. The data retrieved will be a dictionary datatype.
The order of the columns in the dictionary may not be the same as the original order.
Args:
num_epochs (int, optional): Maximum number of epochs that iterator can be iterated
(default=-1, iterator can be iterated infinite number of epochs).
output_numpy (bool, optional): Whether or not to output NumPy datatype,
if output_numpy=False, iterator will output MSTensor (default=False).
Returns:
DictIterator, dictionary iterator over the dataset.
Examples:
>>> # dataset is an instance object of Dataset
>>> iterator = dataset.create_dict_iterator()
>>> for item in iterator:
... # item is a dict
... print(type(item))
... break
<class 'dict'>
"""
if output_numpy is None:
output_numpy = False
if Dataset._noop_mode():
return DummyIterator(self, 'dict')
return DictIterator(self, num_epochs, output_numpy)
def __iter__(self):
"""Create an iterator over the dataset."""
return self.create_tuple_iterator(num_epochs=1)
@property
def input_indexs(self):
"""
Get the input index information.
Returns:
tuple, tuple of the input index information.
Examples:
>>> # dataset is an instance object of Dataset
>>> # set input_indexs
>>> dataset.input_indexs = 10
>>> print(dataset.input_indexs)
10
"""
if self._input_indexs != ():
return self._input_indexs
# find input_indexes of children
children_input_index = [child.input_indexs for child in self.children]
# in case of more than one child, return the first input_indexes
for cix in children_input_index:
if cix != ():
return cix
# if all children's input_indexes are () or the node is a leaf
return self._input_indexs
@input_indexs.setter
def input_indexs(self, value):
self._input_indexs = value
def copy_batch_size(self, value):
self._batch_size = value
def _init_tree_getters(self):
"""
Get pipeline information.
"""
ir_tree, api_tree = self.create_ir_tree()
runtime_context = cde.PythonRuntimeContext()
runtime_context.Init()
getter = cde.TreeGetters()
getter.Init(ir_tree)
runtime_context.AssignConsumer(getter)
return getter, runtime_context, api_tree
def __init_size_getter(self):
"""
Get pipeline information.
"""
ir_tree, api_tree = self.create_ir_tree()
runtime_context = cde.PythonRuntimeContext()
runtime_context.Init()
getter = cde.DatasetSizeGetters()
getter.Init(ir_tree)
runtime_context.AssignConsumer(getter)
return getter, runtime_context, api_tree
def get_col_names(self):
"""
Return the names of the columns in dataset.
Returns:
list, list of column names in the dataset.
Examples:
>>> # dataset is an instance object of Dataset
>>> col_names = dataset.get_col_names()
"""
if self._col_names is None:
runtime_getter = self._init_tree_getters()
self._col_names = runtime_getter[0].GetColumnNames()
self.close_pool()
runtime_getter[2].notify_watchdog()
return self._col_names
def output_shapes(self):
"""
Get the shapes of output data.
Returns:
list, list of shapes of each column.
Examples:
>>> # dataset is an instance object of Dataset
>>> output_shapes = dataset.output_shapes()
"""
if self.saved_output_shapes is None:
runtime_getter = self._init_tree_getters()
self.saved_output_shapes = runtime_getter[0].GetOutputShapes()
self.saved_output_types = runtime_getter[0].GetOutputTypes()
self.close_pool()
runtime_getter[2].notify_watchdog()
if self.dynamic_setting[0]:
self.saved_output_shapes, self.saved_min_shapes, self.saved_max_shapes = self._dynamic_output_shapes()
return self.saved_output_shapes
def output_types(self):
"""
Get the types of output data.
Returns:
list, list of data types.
Examples:
>>> # dataset is an instance object of Dataset
>>> output_types = dataset.output_types()
"""
if self.saved_output_types is None:
runtime_getter = self._init_tree_getters()
self.saved_output_shapes = runtime_getter[0].GetOutputShapes()
self.saved_output_types = runtime_getter[0].GetOutputTypes()
self.close_pool()
runtime_getter[2].notify_watchdog()
if self.dynamic_setting[0]:
self.saved_output_shapes, self.saved_min_shapes, self.saved_max_shapes = self._dynamic_output_shapes()
return self.saved_output_types
def get_dataset_size(self):
"""
Return the number of batches in an epoch.
Returns:
int, number of batches.
Examples:
>>> # dataset is an instance object of Dataset
>>> dataset_size = dataset.get_dataset_size()
"""
if self.dataset_size is None:
runtime_getter = self.__init_size_getter()
self.dataset_size = runtime_getter[0].GetDatasetSize(False)
self.close_pool()
runtime_getter[2].notify_watchdog()
return self.dataset_size
def set_dynamic_columns(self, columns=None):
"""
Set dynamic shape information of the source data. It should be set after the pipeline is defined.
Args:
columns (dict): A dict containing the shape information of each column in the dataset.
A value of :py:obj:`None` in shape[i] indicates that dimension i of the data is dynamic.
Examples:
>>> import numpy as np
>>>
>>> def generator1():
...     for i in range(1, 100):
...         yield np.ones((16, i, 83)), np.array(i)
>>>
>>> dataset = ds.GeneratorDataset(generator1, ["data1", "data2"])
>>> dataset.set_dynamic_columns(columns={"data1": [16, None, 83], "data2": []})
"""
if not isinstance(columns, dict):
raise TypeError("Pass a dict to set dynamic shape, example: {\"data1\": [16, None, 256]}")
self.dynamic_setting[0] = True
self.dynamic_setting[1] = columns
def dynamic_min_max_shapes(self):
"""
Get minimum and maximum data length of dynamic source data, for dynamic graph compilation.
Returns:
lists, min_shapes, max_shapes of source data.
Examples:
>>> import numpy as np
>>>
>>> def generator1():
...     for i in range(1, 100):
...         yield np.ones((16, i, 83)), np.array(i)
>>>
>>> dataset = ds.GeneratorDataset(generator1, ["data1", "data2"])
>>> dataset.set_dynamic_columns(columns={"data1": [16, None, 83], "data2": []})
>>> min_shapes, max_shapes = dataset.dynamic_min_max_shapes()
"""
if self.saved_min_shapes is None or self.saved_max_shapes is None:
self.saved_output_shapes, self.saved_min_shapes, self.saved_max_shapes = self._dynamic_output_shapes()
return self.saved_min_shapes, self.saved_max_shapes
@staticmethod
def __check_dynamic_column_name(dynamic_columns, dataset_columns):
for column in dynamic_columns:
if column not in dataset_columns:
raise RuntimeError("dynamic column [" + column + "] does not match any column in dataset: " +
str(dataset_columns))
@staticmethod
def __check_dynamic_column_shape(data, col, dynamic_columns):
shape_mismatch = "dynamic column [" + col + "] with shape " + str(dynamic_columns[col]) + \
" does not match dataset column [" + col + "] with shape " + str(list(data[col].shape))
if data[col].ndim != len(dynamic_columns[col]):
raise RuntimeError(shape_mismatch)
for dim in range(len(dynamic_columns[col])):
if dynamic_columns[col][dim] is not None and dynamic_columns[col][dim] != data[col].shape[dim]:
raise RuntimeError(shape_mismatch)
def _dynamic_output_shapes(self):
"""
Get dynamic information of source data.
Returns:
lists, dynamic_shapes, min_shapes, max_shapes of source data.
"""
if not self.dynamic_setting[1]:
raise RuntimeError("dynamic_columns is not set, call set_dynamic_columns() by final Dataset Op.")
if self.saved_output_shapes is not None and self.saved_min_shapes is not None and \
self.saved_max_shapes is not None:
return self.saved_output_shapes, self.saved_min_shapes, self.saved_max_shapes
logger.warning("Calculating dynamic shape of input data, this will take a few minutes...")
# Assume data1 shape is dynamic and data2 shape is fixed
# {"data1": [batch_size, None, feat_len], "data2": [batch_size, feat_len]}
dynamic_columns = self.dynamic_setting[1]
# ["data1", "data2"]
dataset_columns = self.get_col_names()
Dataset.__check_dynamic_column_name(dynamic_columns, dataset_columns)
# Shape[1] of data1 is variable
# {"data1": {(batch_size, 100, feat_len), (16, 200, 83)}, "data2": {(batch_size, feat_len)}}
column_shape_set = {col: set() for col in dataset_columns}
dataset_size_counter = 0
for data in self.create_dict_iterator(num_epochs=1, output_numpy=True):
dataset_size_counter += 1
for col in data.keys():
if col in dynamic_columns:
Dataset.__check_dynamic_column_shape(data, col, dynamic_columns)
column_shape_set[col].add(tuple(data[col].shape))
# we get dataset_size after dryrun
self.dataset_size = dataset_size_counter
min_shapes, max_shapes, dynamic_shapes = list(), list(), list()
for col, shape_set in column_shape_set.items():
if len(shape_set) > 1:
if col not in dynamic_columns:
raise RuntimeError("column [" + col + "] has dynamic shape but not set by set_dynamic_columns()" +
", shapes of [" + col + "]: " + str(list(shape_set)))
shape_npy = np.array(list(shape_set))
max_shape = shape_npy.max(axis=0)
min_shape = shape_npy.min(axis=0)
# Set min shape to 1 due to unknown shuffle
min_shape = np.where(np.equal(dynamic_columns[col], None), 1, min_shape)
# Set dynamic dim to -1 for ME
dynamic_shape = np.where(np.equal(dynamic_columns[col], None), -1, dynamic_columns[col])
max_shapes.append(max_shape.tolist())
min_shapes.append(min_shape.tolist())
dynamic_shapes.append(dynamic_shape.tolist())
else:
# Also append the fixed shape to keep the order of column shapes
fix_shape = list(list(shape_set)[0])
max_shapes.append(fix_shape)
min_shapes.append(fix_shape)
dynamic_shapes.append(fix_shape)
if col in dynamic_columns:
logger.warning("column [" + col + "] has no dynamic shape but set by set_dynamic_columns()")
# Set min shape to 1 due to unknown shuffle
min_shapes[-1] = np.where(np.equal(dynamic_columns[col], None), 1, fix_shape).tolist()
# Set dynamic dim to -1 for ME
dynamic_shapes[-1] = np.where(np.equal(dynamic_columns[col], None), -1, fix_shape).tolist()
return dynamic_shapes, min_shapes, max_shapes
def num_classes(self):
"""
Get the number of classes in a dataset.
Returns:
int, number of classes.
Examples:
>>> # dataset is an instance object of Dataset
>>> num_classes = dataset.num_classes()
"""
if self._num_classes is None:
runtime_getter = self._init_tree_getters()
self._num_classes = runtime_getter[0].GetNumClasses()
self.close_pool()
runtime_getter[2].notify_watchdog()
if self._num_classes == -1:
return None
return self._num_classes
def get_sync_notifiers(self):
if self.children:
return self.children[0].get_sync_notifiers()
return {}
def disable_sync(self):
if self.children:
return self.children[0].disable_sync()
return {}
def is_sync(self):
if self.children:
return self.children[0].is_sync()
return False
def sync_update(self, condition_name, num_batch=None, data=None):
"""
Release a blocking condition and trigger callback with given data.
Args:
condition_name (str): The condition name that is used to toggle sending next row.
num_batch (Union[int, None]): The number of batches (rows) that are released.
When num_batch is None, it will default to the number specified by the
sync_wait operator (default=None).
data (Any): The data passed to the callback, user defined (default=None).
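Examples:
>>> # A minimal, hedged sketch: "cb" is a placeholder condition name; assumes the pipeline was built with
>>> # dataset.sync_wait(condition_name="cb", num_batch=1) before batching.
>>> for _ in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
...     dataset.sync_update(condition_name="cb")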
"""
if (not isinstance(num_batch, int) and num_batch is not None) or \
(isinstance(num_batch, int) and num_batch <= 0):
# throwing exception, disable all sync_wait in pipeline
self.disable_sync()
raise RuntimeError("Sync_update batch size can only be positive integer, got : {}.".format(num_batch))
notifiers_dict = self.get_sync_notifiers()
if not isinstance(condition_name, str):
raise TypeError("Argument condition_name with value {} is not of type str, but got {}."
.format(condition_name, type(condition_name)))
if condition_name not in notifiers_dict:
# throwing exception, disable all sync_wait in pipeline
self.disable_sync()
raise RuntimeError("Condition name not found.")
if num_batch is not None:
num_batch *= self.get_batch_size()
notifiers_dict[condition_name](num_batch, data)
def get_batch_size(self):
"""
Return the size of batch.
Returns:
int, the number of data in a batch.
Examples:
>>> # dataset is an instance object of Dataset
>>> batch_size = dataset.get_batch_size()
"""
if self._batch_size is None:
runtime_getter = self._init_tree_getters()
self._batch_size = runtime_getter[0].GetBatchSize()
if self._batch_size is None:
self._batch_size = 1
return self._batch_size
def get_repeat_count(self):
"""
Get the number of times the dataset is repeated by RepeatDataset (default is 1).
Returns:
int, the count of repeat.
Examples:
>>> # dataset is an instance object of Dataset
>>> repeat_count = dataset.get_repeat_count()
"""
if self._repeat_count is None:
runtime_getter = self._init_tree_getters()
self._repeat_count = runtime_getter[0].GetRepeatCount()
if self._repeat_count is None:
self._repeat_count = 1
return self._repeat_count
def get_class_indexing(self):
"""
Return the class index.
Returns:
dict, a str-to-int mapping from label name to index.
dict, a str-to-list<int> mapping from label name to index for Coco ONLY. The second number
in the list is used to indicate the super category.
Examples:
>>> # dataset is an instance object of Dataset
>>> class_indexing = dataset.get_class_indexing()
"""
if self.children:
return self.children[0].get_class_indexing()
return {}
def reset(self):
"""Reset the dataset for next epoch."""
def is_shuffled(self):
"""Returns True if the dataset or its children is shuffled."""
for input_dataset in self.children:
if input_dataset.is_shuffled():
return True
return False
def is_sharded(self):
"""Returns True if the dataset or its children is sharded."""
for input_dataset in self.children:
if input_dataset.is_sharded():
return True
return False
def parse(self, children=None):
raise NotImplementedError("Dataset has to implement parse method.")
def post_parse(self, ir_node):
if self.cache:
ir_node = ir_node.set_cache_client(self.cache.cache_client)
if self.num_parallel_workers:
ir_node = ir_node.set_num_workers(self.num_parallel_workers)
return ir_node
class SourceDataset(Dataset):
"""
Abstract class to represent a source dataset which produces content for the data pipeline.
"""
def __init__(self, num_parallel_workers=None, num_samples=None, shuffle=True, num_shards=None, shard_id=None,
cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, cache=cache)
self.num_samples = replace_none(num_samples, 0)
self.num_shards = replace_none(num_shards, 1)
self.shard_id = replace_none(shard_id, 0)
if shuffle is not None and not isinstance(shuffle, (bool, Shuffle)):
raise TypeError("shuffle must be of boolean or enum of 'Shuffle' values like 'Shuffle.GLOBAL' or "
"'Shuffle.FILES' or 'Shuffle.INFILE'.")
self.shuffle_flag = 2 # Global shuffle
if not isinstance(shuffle, Shuffle):
if shuffle is None or shuffle:
self.shuffle_flag = 2 # Global shuffle
else:
self.shuffle_flag = 0 # No shuffle
else:
if shuffle == Shuffle.GLOBAL:
self.shuffle_flag = 2 # Global shuffle
elif shuffle == Shuffle.FILES:
self.shuffle_flag = 1 # Files shuffle
elif shuffle == Shuffle.INFILE:
self.shuffle_flag = 3 # Infile shuffle
def parse(self, children=None):
raise NotImplementedError("Dataset has to implement parse method.")
@staticmethod
def _find_files(patterns):
"""
Utility function to search for files with the given glob patterns.
Args:
patterns (Union[str, list[str]]): String or list of patterns to be searched.
Returns:
list, list of files.
"""
if not isinstance(patterns, list):
patterns = [patterns]
file_list = []
unmatched_patterns = []
for pattern in patterns:
matches = [match for match in glob.glob(pattern, recursive=True) if os.path.isfile(match)]
if matches:
file_list.extend(matches)
else:
unmatched_patterns.append(pattern)
if unmatched_patterns:
raise ValueError("The following patterns did not match any files: {}.".format(unmatched_patterns))
if file_list: # not empty
return file_list
raise ValueError("The list of path names matching the patterns is empty.")
def is_shuffled(self):
return self.shuffle_flag > 0
def is_sharded(self):
if self.num_shards is not None:
return self.num_shards > 1
return False
class MappableDataset(SourceDataset):
"""
Abstract class to represent a source dataset which supports use of samplers.
"""
def parse(self, children=None):
raise NotImplementedError("Dataset has to implement parse method.")
def __init__(self, num_parallel_workers=None, sampler=None, num_samples=None, shuffle=None, num_shards=None,
shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.shuffle_flag = replace_none(shuffle, True)
self.sampler = samplers.select_sampler(num_samples, sampler, shuffle, num_shards, shard_id)
def add_sampler(self, new_sampler):
"""
Add a sampler for the current dataset.
Args:
new_sampler (Sampler): The sampler to be added as the parent sampler for current dataset.
Examples:
>>> # dataset is an instance object of Dataset
>>> # use a DistributedSampler instead
>>> new_sampler = ds.DistributedSampler(10, 2)
>>> dataset.add_sampler(new_sampler)
"""
# note: By adding a sampler, the sampled IDs will flow to new_sampler
# after first passing through the current samplers attached to this dataset.
self.dataset_size = None
new_sampler.add_child(self.sampler)
self.sampler = new_sampler
def use_sampler(self, new_sampler):
"""
Make the current dataset use the new_sampler provided by other API.
Args:
new_sampler (Sampler): The sampler to use for the current dataset.
Examples:
>>> # dataset is an instance object of Dataset
>>> # use a DistributedSampler instead
>>> new_sampler = ds.DistributedSampler(10, 2)
>>> dataset.use_sampler(new_sampler)
"""
if new_sampler is None:
raise TypeError("Input sampler can not be None.")
if not isinstance(new_sampler, (samplers.BuiltinSampler, samplers.Sampler)):
raise TypeError("Input sampler is not an instance of a sampler.")
self.dataset_size = None
self.sampler = self.sampler.child_sampler
self.add_sampler(new_sampler)
def is_shuffled(self):
return self.sampler.is_shuffled()
def is_sharded(self):
return self.sampler.is_sharded()
@check_split
def split(self, sizes, randomize=True):
"""
Split the dataset into smaller, non-overlapping datasets.
Args:
sizes (Union[list[int], list[float]]): If a list of integers [s1, s2, …, sn] is
provided, the dataset will be split into n datasets of size s1, size s2, …, size sn
respectively. If the sum of all sizes does not equal the original dataset size, an
error will occur.
If a list of floats [f1, f2, …, fn] is provided, all floats must be between 0 and 1
and must sum to 1, otherwise an error will occur. The dataset will be split into n
Datasets of size round(f1*K), round(f2*K), …, round(fn*K) where K is the size of the
original dataset.
If after rounding:
- Any size equals 0, an error will occur.
- The sum of split sizes < K, the difference will be added to the first split.
- The sum of split sizes > K, the difference will be removed from the first large
enough split such that it will have at least 1 row after removing the difference.
randomize (bool, optional): Determines whether or not to split the data randomly (default=True).
If True, the data will be randomly split. Otherwise, each split will be created with
consecutive rows from the dataset.
Note:
1. There is an optimized split function, which will be called automatically when the dataset
that calls this function is a MappableDataset.
2. Dataset should not be sharded if split is going to be called. Instead, create a
DistributedSampler and specify a split to shard after splitting. If the dataset is
sharded after a split, it is strongly recommended to set the same seed in each instance
of execution, otherwise each shard may not be part of the same split (see Examples).
3. It is strongly recommended to not shuffle the dataset, but use randomize=True instead.
Shuffling the dataset may not be deterministic, which means the data in each split
will be different in each epoch. Furthermore, if sharding occurs after split, each
shard may not be part of the same split.
Raises:
RuntimeError: If get_dataset_size returns None or is not supported for this dataset.
RuntimeError: If `sizes` is list of integers and sum of all elements in sizes does not
equal the dataset size.
RuntimeError: If `sizes` is list of float and there is a split with size 0 after calculations.
RuntimeError: If the dataset is sharded prior to calling split.
ValueError: If `sizes` is list of float and not all floats are between 0 and 1, or if the
floats don't sum to 1.
Returns:
tuple(Dataset), a tuple of datasets that have been split.
Examples:
>>> # Since many datasets have shuffle on by default, set shuffle to False if split will be called!
>>> dataset = ds.ImageFolderDataset(image_folder_dataset_dir, shuffle=False)
>>>
>>> # Set the seed, and tell split to use this seed when randomizing.
>>> # This is needed because sharding will be done later
>>> ds.config.set_seed(58)
>>> train_dataset, test_dataset = dataset.split([0.9, 0.1])
>>>
>>> # To shard the train dataset, use a DistributedSampler
>>> train_sampler = ds.DistributedSampler(10, 2)
>>> train_dataset.use_sampler(train_sampler)
"""
if self.is_shuffled():
logger.warning("Dataset is shuffled before split.")
if self.is_sharded():
raise RuntimeError("Dataset should not be sharded before split.")
absolute_sizes = self._get_absolute_split_sizes(sizes)
splits = []
current_split_start_index = 0
for size in absolute_sizes:
ds = copy.deepcopy(self)
ds.dataset_size = None
if randomize:
# We want to shuffle the same way every epoch before the split; we assume
# that the user will call set_seed
random_sampler = samplers.RandomSampler()
random_sampler.reshuffle_each_epoch = False
ds.add_sampler(random_sampler)
subset_sampler = samplers.SequentialSampler(current_split_start_index, size)
ds.add_sampler(subset_sampler)
# Add a sequential sampler so that if the user calls use_sampler, we will
# get rid of the sequential sampler instead of a sampler we need
ds.add_sampler(samplers.SequentialSampler())
splits.append(ds)
current_split_start_index += size
return tuple(splits)
class BucketBatchByLengthDataset(Dataset):
"""
The result of applying BucketBatchByLength operator to the input dataset.
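Examples:
>>> # A hedged sketch of the public API that creates this node; assumes dataset has a 1-D column named "text".
>>> dataset = dataset.bucket_batch_by_length(column_names=["text"],
...                                          bucket_boundaries=[5, 10],
...                                          bucket_batch_sizes=[2, 2, 2])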
"""
def __init__(self, input_dataset, column_names, bucket_boundaries, bucket_batch_sizes, element_length_function,
pad_info, pad_to_bucket_boundary, drop_remainder):
super().__init__(children=input_dataset)
self.column_names = to_list(column_names)
self.bucket_boundaries = replace_none(bucket_boundaries, [])
self.bucket_batch_sizes = replace_none(bucket_batch_sizes, [])
self.element_length_function = element_length_function
self.pad_info = replace_none(pad_info, {})
self.pad_to_bucket_boundary = replace_none(pad_to_bucket_boundary, False)
self.drop_remainder = replace_none(drop_remainder, False)
def parse(self, children=None):
return cde.BucketBatchByLengthNode(children[0], self.column_names, self.bucket_boundaries,
self.bucket_batch_sizes, self.element_length_function, self.pad_info,
self.pad_to_bucket_boundary, self.drop_remainder)
class BatchDataset(Dataset):
"""
The result of applying Batch operator to the input dataset.
Args:
input_dataset (Dataset): Input Dataset to be batched.
batch_size (Union[int, function]): The number of rows each batch is created with. An
int or callable which takes exactly 1 parameter, BatchInfo.
drop_remainder (bool, optional): Determines whether or not to drop the last
possibly incomplete batch (default=False). If True, and if there are less
than batch_size rows available to make the last batch, then those rows will
be dropped and not propagated to the child node.
num_parallel_workers (int, optional): Number of workers to process the dataset in parallel (default=None).
per_batch_map (callable, optional): Per batch map callable. A callable which takes
(list[Tensor], list[Tensor], ..., BatchInfo) as input parameters. Each list[Tensor] represents a batch of
Tensors on a given column. The number of lists should match with number of entries in input_columns. The
last parameter of the callable must always be a BatchInfo object.
input_columns (Union[str, list[str]], optional): List of names of the input columns. The size of the list must
match with signature of per_batch_map callable.
output_columns (Union[str, list[str]], optional): List of names assigned to the columns outputted by
the last operation. This parameter is mandatory if len(input_columns) !=
len(output_columns). The size of this list must match the number of output
columns of the last operation. (default=None, output columns will have the same
name as the input columns, i.e., the columns will be replaced).
column_order (Union[str, list[str]], optional): Specifies the list of all the columns you need in the whole
dataset. The parameter is required when len(input_columns) != len(output_columns). Caution: the list here
is not just the columns specified in parameter input_columns and output_columns.
pad_info (dict, optional): Whether to perform padding on selected columns. pad_info={"col1":([224,224],0)}
will pad column with name "col1" to a tensor of size [224,224] and fill the missing with 0.
max_rowsize(int, optional): Maximum size of row in MB that is used for shared memory allocation to copy
data between processes. This is only used if python_multiprocessing is set to True (default=16).
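Examples:
>>> # A hedged sketch of the public batch() API that creates this node; assumes dataset has a numeric
>>> # column named "data". add_one is a hypothetical per_batch_map callable.
>>> def add_one(col, batch_info):
...     return ([x + 1 for x in col],)
>>> dataset = dataset.batch(batch_size=2, drop_remainder=True,
...                         input_columns=["data"], per_batch_map=add_one)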
"""
def __init__(self, input_dataset, batch_size, drop_remainder=False, num_parallel_workers=None, per_batch_map=None,
input_columns=None, output_columns=None, column_order=None, pad_info=None,
python_multiprocessing=False, max_rowsize=16):
super().__init__(children=input_dataset, num_parallel_workers=num_parallel_workers)
if BatchDataset._is_ancestor_of_repeat(input_dataset):
logger.warning("Repeat is located before batch, data from two epochs can be batched together.")
BatchDataset._update_batch_size_for_syncwait(input_dataset, batch_size)
# if batch_size is callable, set batch_size to 1 and batch_size_func to that callable function
self.batch_size = batch_size if not callable(batch_size) else 1
self.batch_size_func = None if not callable(batch_size) else batch_size
self.drop_remainder = replace_none(drop_remainder, False)
self.per_batch_map = per_batch_map
self.input_columns = to_list(input_columns)
self.output_columns = to_list(output_columns)
self.column_order = to_list(column_order)
self.pad = bool(pad_info is not None)
self.pad_info = replace_none(pad_info, dict())
self.python_multiprocessing = python_multiprocessing
self.process_pool = None
self.hook = None
self.pids = []
self.eot = None
self.watch_dog = None
self.max_rowsize = max_rowsize
def parse(self, children=None):
return cde.BatchNode(children[0], self.batch_size, self.drop_remainder, self.pad, self.input_columns,
self.output_columns, self.column_order, self.batch_size_func, self.per_batch_map,
self.pad_info)
@staticmethod
def _is_ancestor_of_repeat(dataset):
"""
Utility function to find the case where repeat is used before batch.
Args:
dataset (Dataset): Dataset to be checked.
Returns:
bool, whether repeat is used before batch.
"""
if isinstance(dataset, RepeatDataset):
return True
flag = False
for input_dataset in dataset.children:
flag = flag | BatchDataset._is_ancestor_of_repeat(input_dataset)
return flag
@staticmethod
def _update_batch_size_for_syncwait(dataset, batch_size):
"""
Utility function to notify batch size to sync_wait.
Args:
dataset (Dataset): Dataset to be checked.
batch_size (int): batch size to notify.
"""
if isinstance(dataset, SyncWaitDataset):
dataset.update_sync_batch_size(batch_size)
for input_dataset in dataset.children:
BatchDataset._update_batch_size_for_syncwait(input_dataset, batch_size)
def __deepcopy__(self, memodict):
return self.__safe_deepcopy__(memodict, exclude=("per_batch_map", "batch_size_func", "__transfer_dataset__"))
# Iterator bootstrap will be called on iterator construction.
# A deep copy of Dataset object is created prior of iterator_bootstrap.
# This method will create per iterator process pool and bind pyfunc execution to the pool.
def iterator_bootstrap(self):
"""
Per iterator bootstrap callback.
"""
if self.python_multiprocessing:
if self.per_batch_map is None:
logger.warning("per_batch_map is None so python_multiprocessing does not work.")
return
arg_q_list = []
res_q_list = []
# If user didn't specify num_parallel_workers, set it to default
if self.num_parallel_workers is not None:
num_parallel = self.num_parallel_workers
else:
num_parallel = get_num_parallel_workers()
if get_enable_shared_mem():
_check_shm_usage(num_parallel, 1, self.max_rowsize * self.batch_size, 2)
for _ in range(num_parallel):
arg_q_list.append(_SharedQueue(1, max_rowsize=self.max_rowsize * self.batch_size))
res_q_list.append(_SharedQueue(1, max_rowsize=self.max_rowsize * self.batch_size))
# Construct pool with the callable list
# The callable list and _pyfunc_worker_init are used to pass lambda function in to subprocesses
self.process_pool = multiprocessing.Pool(processes=num_parallel,
initializer=_pyfunc_worker_init,
initargs=([self.per_batch_map], arg_q_list, res_q_list))
idx = 0
global _OP_NAME, _OP_PROCESS, _LOCK
op_id = _OP_NAME[str(self)]
process_id = {op_id: [self.num_parallel_workers, set()]}
# obtain process id from multiprocessing.pool
for pool in self.process_pool._pool: # pylint: disable=W0212
process_id[op_id][1].add(pool.pid)
self.pids.append(pool.pid)
with _LOCK:
_OP_PROCESS.update(process_id)
# Wrap per_batch_map into _PythonCallable
self.per_batch_map = _PythonCallable(self.per_batch_map, idx, self.process_pool, arg_q_list, res_q_list)
self.hook = _ExceptHookHandler()
# batch will launch a watch dog thread to monitor subprocesses
self._launch_watch_dog()
atexit.register(_mp_pool_exit_preprocess)
# If the Python version is 3.8 or higher, we need to close the pool in atexit to avoid unclean pool teardown.
if sys.version_info >= (3, 8):
atexit.register(self.process_pool.close)
else:
if self.per_batch_map is not None:
self.per_batch_map = FuncWrapper(self.per_batch_map)
def _launch_watch_dog(self):
if platform.system().lower() != 'windows':
self.eot = threading.Event()
self.watch_dog = threading.Thread(target=_watch_dog, args=(self.eot, self.pids))
self.watch_dog.daemon = True
self.watch_dog.start()
def _abort_watchdog(self):
if not self.eot.is_set():
self.eot.set()
def __del__(self):
if hasattr(self, 'process_pool') and self.process_pool is not None:
self.process_pool.close()
if hasattr(self, 'watch_dog') and self.watch_dog is not None and hasattr(self, 'eot') and self.eot is not None:
self._abort_watchdog()
class BatchInfo(cde.CBatchInfo):
"""
The information object associated with the current batch of tensors.
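Examples:
>>> # A hedged sketch: a callable batch_size receives a BatchInfo object, e.g. to grow the batch each epoch.
>>> def batch_func(batch_info):
...     return batch_info.get_epoch_num() + 1
>>> dataset = dataset.batch(batch_size=batch_func)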
"""
def get_batch_num(self):
"""
Return the batch number of the current batch.
"""
return
def get_epoch_num(self):
"""
Return the epoch number of the current batch.
"""
return
class BlockReleasePair:
"""
The blocking condition class used by SyncWaitDataset.
Args:
init_release_rows (int): Number of lines to allow through the pipeline.
callback (function): The callback function that will be called when release is called (default=None).
"""
def __init__(self, init_release_rows, callback=None):
if isinstance(init_release_rows, int) and init_release_rows <= 0:
raise ValueError("release_rows need to be greater than 0.")
self.row_count = -init_release_rows
self.cv = threading.Condition()
self.callback = callback
self.default_rows = init_release_rows
self.disable = False
def __deepcopy__(self, memodict):
return self
def reset(self):
with self.cv:
self.row_count = -self.default_rows
self.cv.notify_all()
def update_batched_size(self, batch_size):
# sanity check
if isinstance(batch_size, int) and batch_size <= 0:
raise ValueError("batch_size need to be greater than 0.")
# should only use before the pipeline creates
self.row_count *= batch_size
self.default_rows *= batch_size
def block_func(self):
"""
Function for handling the blocking condition.
Returns:
bool, True.
"""
with self.cv:
# if disable is true, always evaluate to true
not_time_out = self.cv.wait_for(lambda: (self.row_count < 0 or self.disable),
timeout=get_callback_timeout())
# not_time_out will be False if a timeout occurs
if not not_time_out:
logger.warning("Timeout happened in sync_wait, maybe dataset.sync_update(condition=...) "
"is not added after dataset.create_dict_iterator(...), now disabling lock.")
self.disable = True
self.row_count += 1
return True
def release_func(self, pass_rows=None, data=None):
with self.cv:
if pass_rows is None:
pass_rows = self.default_rows
self.row_count -= pass_rows
if self.callback is not None:
self.callback(data)
self.cv.notify_all()
def disable_lock(self):
with self.cv:
self.disable = True
self.cv.notify_all()
class SyncWaitDataset(Dataset):
"""
The result of adding a blocking condition to the input Dataset.
Args:
input_dataset (Dataset): Input dataset to apply flow control.
condition_name (str): Condition name that is used to toggle sending next row.
num_batch (int): Number of batches without blocking at the start of each epoch.
callback (function): Callback function that will be invoked when sync_update is called (default=None).
Raises:
RuntimeError: If condition name already exists.
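Examples:
>>> # A hedged sketch of the public sync_wait() API that creates this node; "cb" is a placeholder name.
>>> dataset = dataset.sync_wait(condition_name="cb", num_batch=1)
>>> # Remember to call dataset.sync_update(condition_name="cb") while iterating, otherwise the pipeline blocks.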
"""
def __init__(self, input_dataset, condition_name, num_batch, callback=None):
super().__init__(children=input_dataset)
# set to the default value, waiting for the batch to update it
self._condition_name = condition_name
if isinstance(num_batch, int) and num_batch <= 0:
raise ValueError("num_batch need to be greater than 0.")
self._pair = BlockReleasePair(num_batch, callback)
if self._condition_name in self.children[0].get_sync_notifiers():
raise RuntimeError("Condition name is already in use.")
logger.info("Please remember to add dataset.sync_update(condition=%s), otherwise hanging will result. "
"If dataset.sync_update(condition=%s) has already been added, you can ignore the info.",
condition_name, condition_name)
def parse(self, children=None):
return cde.SyncWaitNode(children[0], self._condition_name, self._pair.block_func)
def get_sync_notifiers(self):
return {**self.children[0].get_sync_notifiers(), **{self._condition_name: self._pair.release_func}}
def is_sync(self):
return True
def update_sync_batch_size(self, batch_size):
if isinstance(batch_size, int) and batch_size <= 0:
raise ValueError("num_batch need to be greater than 0.")
self._pair.update_batched_size(batch_size)
def disable_sync(self):
logger.info("Disabling Sync")
self._pair.disable_lock()
@staticmethod
def _is_ancestor_of_batch(dataset):
"""
Utility function to find the case where sync_wait is used before batch.
Args:
dataset (Dataset): Dataset to be checked.
Returns:
bool, whether sync_wait is used before batch.
"""
if isinstance(dataset, BatchDataset):
return True
flag = False
for input_dataset in dataset.children:
flag = flag | SyncWaitDataset._is_ancestor_of_batch(input_dataset)
return flag
def iterator_bootstrap(self):
self._pair.reset()
class ShuffleDataset(Dataset):
"""
The result of applying Shuffle operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be shuffled.
buffer_size (int): Size of the buffer.
Raises:
RuntimeError: If a sync operator exists before the shuffle.
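Examples:
>>> # A hedged sketch of the public shuffle() API that creates this node.
>>> dataset = dataset.shuffle(buffer_size=4)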
"""
def __init__(self, input_dataset, buffer_size):
super().__init__(children=input_dataset)
self.buffer_size = buffer_size
self.reshuffle_each_epoch = True
if self.is_sync():
raise RuntimeError("No shuffle after sync operators.")
def parse(self, children=None):
return cde.ShuffleNode(children[0], self.buffer_size, self.reshuffle_each_epoch)
def is_shuffled(self):
return True
# This wait function is for cleaning zombie subprocesses
def wait_pid():
"""
This function is used by the main process to release subprocess resources.
"""
try:
while True:
child_pid, _ = os.waitpid(-1, os.WNOHANG)
if child_pid == 0:
break
except OSError:
# waitpid may fail for some reason, so we ignore this error
pass
# Dataset needs the _watch_dog thread to monitor forked multiprocessing workers,
# and the thread can't be a member function, otherwise Python won't collect and release resources.
def _watch_dog(eot, pids):
"""
This thread is for monitoring subprocesses forked by GeneratorDataset/map/batch
"""
while not eot.is_set():
subprocess_exit_num = 0
# Monitor and count how many subprocesses have already exited
for pid in pids:
try:
p = psutil.Process(pid)
if p.status() == psutil.STATUS_ZOMBIE:
subprocess_exit_num += 1
except psutil.NoSuchProcess:
subprocess_exit_num += 1
# If any subprocess has exited, wait for 30s and do some waitpid operations
if subprocess_exit_num > 0:
start = time.time()
while time.time() - start < 30:
# We need to distinguish between the case where get_dataset_size or training finished normally
# and the case where the main process hangs. If get_dataset_size or training finished normally,
# _stop_subprocess can be executed and self.need_abort is set to True. If the main process hangs
# in get(), self.need_abort will never be set to True, so we wait for 30s and kill the main process.
if eot.is_set():
return
# Sometimes a subprocess may become a zombie, so during the 30s we can wait and do some useful work (waitpid).
wait_pid()
# multiprocessing.Queue may hang in .get() forever when the put() process was killed.
# We have to exit the main process, otherwise the main process will hang.
logger.critical("The subprocess of dataset may exit unexpected or be killed, "
"main process will exit.")
os.kill(os.getpid(), signal.SIGTERM)
# Pyfunc collection for multiprocess pyfunc
# This global variable will only be used within subprocesses
_GLOBAL_PYFUNC_LIST = []
_ARGS_QUEUE = []
_RET_QUEUE = []
_OP_NAME = dict()
_OP_PROCESS = dict()
_LOCK = threading.Lock()
# Pyfunc worker init function
# The Python multiprocessing library forbids sending lambda functions through a pipe.
# This init function allows us to add all Python functions to a global collection and then fork afterwards.
def _pyfunc_worker_init(pyfunc_list, args_queue, ret_queue):
global _GLOBAL_PYFUNC_LIST
global _ARGS_QUEUE
global _RET_QUEUE
_GLOBAL_PYFUNC_LIST = pyfunc_list
_ARGS_QUEUE = args_queue
_RET_QUEUE = ret_queue
# Pyfunc worker execution function
# All exceptions will be raised to the main process
def _pyfunc_worker_exec(index, qid, *args):
"""
Internal function to call a certain pyfunc in a Python subprocess.
"""
# Some threads in multiprocessing.pool can't process the SIGINT signal and will hang,
# so Ctrl+C is passed to the parent process.
signal.signal(signal.SIGINT, signal.SIG_IGN)
if qid != -1:
# Pass arguments through the Queue instead of directly to remote process
args = _ARGS_QUEUE[qid].get()
try:
r = _GLOBAL_PYFUNC_LIST[index](*args)
except Exception:
return ExceptionHandler(where="in map(or batch) worker and execute python function")
if isinstance(r, tuple):
_RET_QUEUE[qid].put(r)
else:
_RET_QUEUE[qid].put((r,))
return [qid]
# not using shared memory for passing arguments, call function directly
result = None
try:
result = _GLOBAL_PYFUNC_LIST[index](*args)
except Exception:
result = ExceptionHandler(where="in map(or batch) worker and execute python function")
return result
# PythonCallable wrapper for multiprocess pyfunc
class _PythonCallable:
"""
Internal Python function wrapper for multiprocessing pyfunc.
"""
def __init__(self, py_callable, idx, pool=None, arg_q=None, res_q=None):
# Original Python callable from user.
self.py_callable = py_callable
# Process pool created for current iterator.
self.pool = pool
# Python callable index for subprocess _GLOBAL_PYFUNC_LIST
self.idx = idx
if pool is not None:
self.queuemap = {}
self.arg_q = arg_q
self.res_q = res_q
self.next_queue = 0
def __call__(self, *args):
if self._pool_is_running() and check_iterator_cleanup() is False:
result, qid, ret = self._send(*args)
if ret:
return result
# todo this check might be wrong
while check_iterator_cleanup() is False:
try:
return self._receive(result, qid)
except multiprocessing.TimeoutError:
continue
except KeyboardInterrupt:
_set_iterator_cleanup()
self.pool.close()
self.pool.join()
raise Exception("Multiprocess MapOp worker receives KeyboardInterrupt.")
return (None,)
# Invoke original Python callable in master process in case the pool is gone.
return self.py_callable(*args)
def to_json(self):
return self.py_callable.to_json()
def _send(self, *args):
"""
The map/batch operator will use multiprocessing-pool apply_async interface to execute python function
in a sub process, apply_async will release GIL temporarily. For better performance, we use shared memory
feature and pass shared queue instead of multiprocess args.
"""
ret = False
qid = None
if self.arg_q != []:
tid = threading.get_ident()
# Need to register each thread to use a different queue to send data to pool
if tid not in self.queuemap:
qid = self.next_queue
self.next_queue = self.next_queue + 1
self.queuemap[tid] = qid
else:
qid = self.queuemap[tid]
self.arg_q[qid].put(args)
# This call will send the tensors along with Python callable index to the process pool.
# Block, yield GIL. Current thread will reacquire GIL once result is returned.
if self._pool_is_running() and check_iterator_cleanup() is False:
result = self.pool.apply_async(_pyfunc_worker_exec, [self.idx, qid, []])
else:
ret = True
result = self.py_callable(*args)
else:
result = self.pool.apply_async(_pyfunc_worker_exec, [self.idx, -1, *args])
return result, qid, ret
def _receive(self, result, qid):
"""
The map/batch operator will use multiprocessing-pool get interface to sync output data from a sub process,
get interface will reacquire GIL. For better performance, we use shared memory feature and get data from
shared queue directly.
"""
if self.arg_q != []:
r = result.get(30)
if isinstance(r, ExceptionHandler):
r.reraise()
if r[0] != qid:
raise Exception("In PyCallable, got results from wrong thread")
r = self.res_q[qid].get()
return r
r = result.get(30)
if isinstance(r, ExceptionHandler):
r.reraise()
return r
def _pool_is_running(self):
# note here: the RUN state of python3.7 and python3.8 is different:
# python3.7: RUN = 0
# python3.8: RUN = "RUN"
# so we use self.pool._state == RUN instead and we can't use _state == 0 any more.
if self.pool is not None and self.pool._state == RUN: # pylint: disable=W0212
return True
return False
def _mp_pool_exit_preprocess():
if check_iterator_cleanup() is False:
# Set the iterator_cleanup flag to True before exiting, and wait 3s for all apply_async calls
# submitted to the multiprocessing pool, to prevent multiprocessing from hanging on exit
_set_iterator_cleanup()
time.sleep(3)
class _ExceptHookHandler:
def __init__(self):
sys.excepthook = self.__handler_exception
def __handler_exception(self, ex_type, value, tb):
logger.critical("Uncaught exception: ", exc_info=(ex_type, value, tb))
_mp_pool_exit_preprocess()
class MapDataset(Dataset):
"""
The result of applying the Map operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be mapped.
operations (TensorOp): A function mapping a nested structure of tensors
to another nested structure of tensors (default=None).
input_columns (Union[str, list[str]]): List of names of the input columns
(default=None, the operations will be applied on the first columns in the dataset).
The size of the list should match the number of inputs of the first operator.
output_columns (Union[str, list[str]], optional): List of names of the output columns.
The size of the list should match the number of outputs of the last operator
(default=None, output columns will be the input columns, i.e., the columns will
be replaced).
column_order (list[str], optional): Specifies the list of all the columns you need in the whole
dataset. The parameter is required when len(input_columns) != len(output_columns). Caution: the list here
is not just the columns specified in parameter input_columns and output_columns.
num_parallel_workers (int, optional): Number of workers to process the dataset
in parallel (default=None).
python_multiprocessing (bool, optional): Parallelize Python operations with multiple worker processes. This
option could be beneficial if the Python operation is computationally heavy (default=False).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
callbacks (DSCallback, list[DSCallback], optional): List of Dataset callbacks to be called (Default=None)
max_rowsize(int, optional): Maximum size of row in MB that is used for shared memory allocation to copy
data between processes. This is only used if python_multiprocessing is set to True (default=16).
offload (bool, optional): Flag to indicate whether offload is used (Default=False).
Raises:
ValueError: If len(input_columns) != len(output_columns) and column_order is not specified.
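Examples:
>>> # A hedged sketch of the public map() API that creates this node; assumes dataset has a numeric
>>> # column named "data".
>>> dataset = dataset.map(operations=[(lambda x: x + 1)], input_columns=["data"])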
"""
def __init__(self, input_dataset, operations=None, input_columns=None, output_columns=None, column_order=None,
num_parallel_workers=None, python_multiprocessing=False, cache=None, callbacks=None, max_rowsize=16,
offload=False):
super().__init__(children=input_dataset, num_parallel_workers=num_parallel_workers, cache=cache)
self.operations = to_list(operations)
self.operations = py_transforms.Compose.reduce(self.operations)
self.input_columns = to_list(input_columns)
self.output_columns = to_list(output_columns)
self.column_order = replace_none(column_order, [])
# If output_columns were not provided then use input_columns
self.output_columns = self.input_columns if not self.output_columns else self.output_columns
if self.input_columns and self.output_columns \
and len(self.input_columns) != len(self.output_columns) \
and not self.column_order:
raise ValueError("When length of input_columns and output_columns are not equal,"
" column_order must be specified.")
self.python_multiprocessing = python_multiprocessing
self.process_pool = None
self.hook = None
self.pids = []
self.eot = None
self.watch_dog = None
self.callbacks = to_list(callbacks)
self.max_rowsize = max_rowsize
self.offload = offload
def parse(self, children=None):
operations = []
for op in self.operations:
if op and getattr(op, 'parse', None):
operations.append(op.parse())
else:
operations.append(op)
callbacks = [cb.create_runtime_obj() for cb in self.callbacks]
return cde.MapNode(children[0], operations, self.input_columns, self.output_columns, self.column_order,
callbacks, self.max_rowsize, self.offload)
def __deepcopy__(self, memodict):
return self.__safe_deepcopy__(memodict, exclude=("operations", "callbacks", "__transfer_dataset__"))
# Iterator bootstrap will be called on iterator construction.
# A deep copy of Dataset object is created prior of iterator_bootstrap.
# This method will create per iterator process pool and bind pyfunc execution to the pool.
def iterator_bootstrap(self):
"""
Per iterator bootstrap callback.
"""
if self.python_multiprocessing:
iter_specific_operations = []
callable_list = []
arg_q_list = []
res_q_list = []
# If user didn't specify num_parallel_workers, set it to default
num_parallel = get_num_parallel_workers()
if self.num_parallel_workers is not None:
num_parallel = self.num_parallel_workers
if get_enable_shared_mem():
_check_shm_usage(num_parallel, 1, self.max_rowsize, 2)
for _ in range(num_parallel):
arg_q_list.append(_SharedQueue(1, max_rowsize=self.max_rowsize))
res_q_list.append(_SharedQueue(1, max_rowsize=self.max_rowsize))
# Pass #1, look for Python callables and build list
for op in self.operations:
# Our C transforms are now callable and should not be run in Python multithreading
if MapDataset.__operation_valid_for_multiprocessing(op):
callable_list.append(op)
if callable_list:
# Construct pool with the callable list
# The callable list and _pyfunc_worker_init are used to pass lambda function in to subprocesses
self.process_pool = multiprocessing.Pool(processes=num_parallel,
initializer=_pyfunc_worker_init,
initargs=(callable_list, arg_q_list, res_q_list))
# Pass #2
idx = 0
global _OP_NAME, _OP_PROCESS, _LOCK
op_id = _OP_NAME[str(self)]
# obtain process id from multiprocessing.pool
process_id = {op_id: [self.num_parallel_workers, set()]}
for pool in self.process_pool._pool: # pylint: disable=W0212
process_id[op_id][1].add(pool.pid)
self.pids.append(pool.pid)
with _LOCK:
_OP_PROCESS.update(process_id)
for op in self.operations:
# Our C transforms are now callable and should not be run in Python multithreading
if MapDataset.__operation_valid_for_multiprocessing(op):
# Wrap Python callable into _PythonCallable
iter_specific_operations.append(_PythonCallable(op, idx, self.process_pool,
arg_q_list, res_q_list))
idx += 1
else:
# CPP ops remain the same
iter_specific_operations.append(op)
self.operations = iter_specific_operations
self.hook = _ExceptHookHandler()
# Map multiprocessing will launch a watch dog thread for monitoring sub processes
self._launch_watch_dog()
atexit.register(_mp_pool_exit_preprocess)
# For Python 3.8 and later, the process pool must also be closed in atexit to avoid an unclean pool teardown.
if sys.version_info >= (3, 8):
atexit.register(self.process_pool.close)
@staticmethod
def __operation_valid_for_multiprocessing(op):
if callable(op) and str(op).find("c_transform") < 0:
return True
return False
def _launch_watch_dog(self):
if platform.system().lower() != 'windows':
self.eot = threading.Event()
self.watch_dog = threading.Thread(target=_watch_dog, args=(self.eot, self.pids))
self.watch_dog.daemon = True
self.watch_dog.start()
def _abort_watchdog(self):
if not self.eot.is_set():
self.eot.set()
def __del__(self):
if hasattr(self, 'process_pool') and self.process_pool is not None:
self.process_pool.close()
self.process_pool.join()
if hasattr(self, 'watch_dog') and self.watch_dog is not None and hasattr(self, 'eot') and self.eot is not None:
self._abort_watchdog()
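# Illustrative sketch (not part of the original module): MapDataset is normally created through the
# public Dataset.map() API rather than constructed directly. The column name and the per-element
# callable below are hypothetical; python_multiprocessing=True is what triggers the worker-pool
# setup performed in iterator_bootstrap() above.
def _example_map_usage():
    import numpy as np
    import mindspore.dataset as ds
    data = ds.NumpySlicesDataset({"col1": np.arange(6)}, shuffle=False)
    # Python callables are wrapped into _PythonCallable and run in the process pool;
    # C transforms would stay in the main pipeline.
    data = data.map(operations=[(lambda x: x * 2)], input_columns=["col1"],
                    num_parallel_workers=2, python_multiprocessing=True)
    return [int(row["col1"]) for row in data.create_dict_iterator(output_numpy=True, num_epochs=1)]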
class FilterDataset(Dataset):
"""
The result of applying filter predicate to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be mapped.
predicate (callable): Python callable which returns a boolean value; if it returns False, the element is filtered out.
input_columns (Union[str, list[str]], optional): List of names of the input columns
(default=None, the predicate will be applied to all columns in the dataset).
num_parallel_workers (int, optional): Number of workers to process the dataset
in parallel (default=None).
"""
def __init__(self, input_dataset, predicate, input_columns=None, num_parallel_workers=None):
super().__init__(children=input_dataset, num_parallel_workers=num_parallel_workers)
self.predicate = lambda *args: bool(predicate(*args))
self.input_columns = to_list(input_columns)
def parse(self, children=None):
return cde.FilterNode(children[0], self.predicate, self.input_columns)
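# Illustrative sketch (not part of the original module): FilterDataset is normally produced by the
# public Dataset.filter() API. The predicate and column name below are hypothetical.
def _example_filter_usage():
    import mindspore.dataset as ds
    data = ds.NumpySlicesDataset({"col1": [0, 1, 2, 3, 4, 5]}, shuffle=False)
    # Rows for which the predicate returns False are dropped.
    data = data.filter(predicate=lambda x: x % 2 == 0, input_columns=["col1"])
    return [int(row["col1"]) for row in data.create_dict_iterator(output_numpy=True, num_epochs=1)]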
class RepeatDataset(Dataset):
"""
The result of applying Repeat operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be repeated.
count (int): Number of times the dataset will be repeated (default=-1, repeat indefinitely).
"""
def __init__(self, input_dataset, count):
super().__init__(children=input_dataset)
self.count = replace_none(count, -1)
def parse(self, children=None):
return cde.RepeatNode(children[0], self.count)
class SkipDataset(Dataset):
"""
The result of applying Skip operator to the input Dataset.
Args:
input_dataset (Dataset): Input dataset to have elements skipped.
count (int): Number of elements to be skipped in the dataset.
"""
def __init__(self, input_dataset, count):
super().__init__(input_dataset)
self.count = count
def parse(self, children=None):
return cde.SkipNode(children[0], self.count)
class TakeDataset(Dataset):
"""
The result of applying Take operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to have elements taken from.
count (int): Number of elements to be taken from the dataset.
"""
def __init__(self, input_dataset, count):
super().__init__(children=input_dataset)
self.count = count
def parse(self, children=None):
return cde.TakeNode(children[0], self.count)
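# Illustrative sketch (not part of the original module): Repeat, Skip and Take are normally chained
# through the public Dataset.repeat()/skip()/take() APIs. The values below are hypothetical.
def _example_repeat_skip_take_usage():
    import mindspore.dataset as ds
    data = ds.NumpySlicesDataset({"col1": [1, 2, 3]}, shuffle=False)
    data = data.repeat(2)  # RepeatDataset: 1, 2, 3, 1, 2, 3
    data = data.skip(1)    # SkipDataset:   2, 3, 1, 2, 3
    data = data.take(3)    # TakeDataset:   2, 3, 1
    return [int(row["col1"]) for row in data.create_dict_iterator(output_numpy=True, num_epochs=1)]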
class ZipDataset(Dataset):
"""
The result of applying Zip operator to the input Dataset.
Args:
datasets (tuple): A tuple of datasets to be zipped together.
Raises:
TypeError: If dataset is not an instance of Dataset.
"""
def __init__(self, datasets):
super().__init__(children=datasets)
def parse(self, children=None):
return cde.ZipNode(children)
def is_sync(self):
return any([c.is_sync() for c in self.children])
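# Illustrative sketch (not part of the original module): ZipDataset is normally created via
# mindspore.dataset.zip() or Dataset.zip(). Column names below are hypothetical and must not
# overlap between the zipped datasets.
def _example_zip_usage():
    import mindspore.dataset as ds
    left = ds.NumpySlicesDataset({"col1": [1, 2, 3]}, shuffle=False)
    right = ds.NumpySlicesDataset({"col2": [4, 5, 6]}, shuffle=False)
    zipped = ds.zip((left, right))  # rows are combined positionally
    return [(int(r["col1"]), int(r["col2"]))
            for r in zipped.create_dict_iterator(output_numpy=True, num_epochs=1)]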
class ConcatDataset(Dataset):
"""
The result of applying concat dataset operator to the input Dataset.
Args:
datasets (list): A list of datasets to be concatenated together.
Raises:
TypeError: If dataset is not an instance of Dataset.
ValueError: If there are no samples in one of the datasets.
"""
def __init__(self, datasets):
super().__init__(children=datasets)
for dataset in datasets:
if not isinstance(dataset, Dataset):
raise TypeError("Invalid dataset, expected Dataset object, but got %s!" % type(dataset))
self.datasets = datasets
self._sampler = samplers.SequentialSampler(num_samples=None)
self.children_sizes_ = [c.get_dataset_size() for c in self.children]
child_index = 0
for item in self.children_sizes_:
if item == 0:
raise ValueError("There are no samples in the dataset number %d. Please make sure there are "
"valid samples in the dataset." % child_index)
child_index += 1
# _children_flag_and_nums: a list of (int, int) pairs. The first element of each pair is a flag that
# indicates whether the child dataset is mappable; the second element is the length of that dataset.
self._children_flag_and_nums = []
# _children_start_end_index_: a list of (int, int) pairs. The elements of each pair describe the valid
# sampling range of the child dataset at the corresponding index.
self._children_start_end_index_ = []
for index, child in enumerate(self.children):
tem_list = [-1, -1]
self._children_start_end_index_.append(tem_list)
dataset_len = self.children_sizes_[index]
if isinstance(child, GeneratorDataset) and not hasattr(child.source, "__getitem__"):
dataset_len = 0
self.children_sizes_[index] = 0
if isinstance(child, MappableDataset):
self._children_flag_and_nums.append((0, dataset_len))
else:
self._children_flag_and_nums.append((1, dataset_len))
def parse(self, children=None):
return cde.ConcatNode(children, self._sampler, self._children_flag_and_nums, self._children_start_end_index_)
def use_sampler(self, sampler):
"""
Set the DistributedSampler for the concat dataset
Args:
sampler (Sampler): The sampler to use for the current dataset.
Currently supported: DistributedSampler.
Raises:
TypeError: If the sampler is not an instance of DistributedSampler.
ValueError: If the parameter shuffle of sampler is True.
ValueError: If the parameter num_samples of sampler is not None.
ValueError: If num_shards <= 0.
"""
if not isinstance(sampler, samplers.DistributedSampler):
raise TypeError("The parameter %s of concat must be DistributedSampler!" % sampler)
if sampler.is_shuffled():
raise ValueError("The parameter shuffle of DistributedSampler must be False!")
if sampler.num_shards <= 0:
raise ValueError("The parameter num_shards of DistributedSampler must be positive int!")
if sampler.get_num_samples() is not None:
raise ValueError("The parameter num_samples of DistributedSampler is not support to be set!")
self.dataset_size = None
self._sampler = sampler
cumulative_samples_nums = 0
for index, child in enumerate(self.children):
if hasattr(child, 'sampler') and child.sampler.get_num_samples() is not None:
raise ValueError("The parameter NumSamples of %s is not support to be set!" % child)
if isinstance(child, BatchDataset):
raise TypeError("The parameter %s of concat must not be BatchDataset!" % child)
# if child is mappable and the length is greater than 0
if not self._children_flag_and_nums[index][0] and self._children_flag_and_nums[index][1]:
tem_value = cumulative_samples_nums + self._children_flag_and_nums[index][1]
if not self._children_flag_and_nums[index][1] >= sampler.num_shards:
if tem_value < sampler.num_shards:
self._children_start_end_index_[index][0] = cumulative_samples_nums
self._children_start_end_index_[index][1] = tem_value
else:
self._children_start_end_index_[index][0] = cumulative_samples_nums
self._children_start_end_index_[index][1] = tem_value % sampler.num_shards
tem_sampler = copy.deepcopy(sampler)
tem_sampler.set_offset(cumulative_samples_nums)
child.use_sampler(tem_sampler)
cumulative_samples_nums += self.children_sizes_[index]
cumulative_samples_nums %= sampler.num_shards
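# Illustrative sketch (not part of the original module): ConcatDataset is normally created with the
# "+" operator or Dataset.concat(); use_sampler() above only accepts a non-shuffling
# DistributedSampler without num_samples. The shard count and data below are hypothetical.
def _example_concat_with_distributed_sampler():
    import mindspore.dataset as ds
    part_a = ds.NumpySlicesDataset({"col1": [1, 2, 3]}, shuffle=False)
    part_b = ds.NumpySlicesDataset({"col1": [4, 5, 6]}, shuffle=False)
    merged = part_a + part_b  # equivalent to part_a.concat(part_b)
    merged.use_sampler(ds.DistributedSampler(num_shards=2, shard_id=0, shuffle=False))
    return [int(row["col1"]) for row in merged.create_dict_iterator(output_numpy=True, num_epochs=1)]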
class RenameDataset(Dataset):
"""
The result of applying Rename operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be Renamed.
input_columns (Union[str, list[str]]): List of names of the input columns.
output_columns (Union[str, list[str]]): List of names of the output columns.
"""
def __init__(self, input_dataset, input_columns, output_columns):
super().__init__(children=input_dataset)
self.input_column_names = to_list(input_columns)
self.output_column_names = to_list(output_columns)
def parse(self, children=None):
return cde.RenameNode(children[0], self.input_column_names, self.output_column_names)
def to_list(items):
if items is None:
return []
if isinstance(items, tuple):
return list(items)
if not isinstance(items, list):
return [items]
return items
class ProjectDataset(Dataset):
"""
The result of applying Project operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be Projected.
columns (Union[str, list[str]]): List of names of the columns to project.
"""
def __init__(self, input_dataset, columns):
super().__init__(children=input_dataset)
self.columns = to_list(columns)
def parse(self, children=None):
return cde.ProjectNode(children[0], self.columns)
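# Illustrative sketch (not part of the original module): RenameDataset and ProjectDataset are
# normally created via Dataset.rename() and Dataset.project(). Column names below are hypothetical.
def _example_rename_project_usage():
    import mindspore.dataset as ds
    data = ds.NumpySlicesDataset({"col1": [1, 2], "col2": [3, 4]}, shuffle=False)
    data = data.rename(input_columns=["col1"], output_columns=["feature"])  # RenameDataset
    data = data.project(columns=["feature"])                                # ProjectDataset drops "col2"
    return [int(row["feature"]) for row in data.create_dict_iterator(output_numpy=True, num_epochs=1)]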
class _ToDevice:
"""
Internal class to handle sending data to device.
"""
def __init__(self, dataset, num_epochs):
ir_tree, self.api_tree = dataset.create_ir_tree()
self._runtime_context = cde.PythonRuntimeContext()
self._runtime_context.Init()
self._to_device = cde.ToDevice(num_epochs)
self._to_device.Init(ir_tree)
self._runtime_context.AssignConsumer(self._to_device)
ITERATORS_LIST.append(weakref.ref(self))
_unset_iterator_cleanup()
def send(self):
self._to_device.Send()
def stop_send(self):
"""
Send a stop-send signal to the pipeline; used when the end-of-sequence flag is sent at the epoch end.
"""
self._to_device.StopSend()
def continue_send(self):
"""
Send a continue-send signal to the pipeline; used when the end-of-sequence flag is sent at the epoch end.
"""
self._to_device.ContinueSend()
def get_data_info(self):
"""
Get type and shape of current batch.
"""
return self._to_device.GetDataInfo()
def release(self):
"""
Manually terminate Device Queue instead of relying on out of scope destruction.
"""
if hasattr(self, '_runtime_context') and self._runtime_context:
if hasattr(self, '_to_device') and self._to_device:
self._runtime_context.Terminate()
del self._to_device
del self._runtime_context
def __deepcopy__(self, memodict):
return self
def get_offload_model(self):
"""
Get offload model containing removed offload ops from pipeline.
"""
offload_model = GetOffloadModel(self._to_device)
return offload_model
class TransferDataset(Dataset):
"""
The result of applying TDT operator to the input Dataset.
Args:
input_dataset (Dataset): Input Dataset to be transferred.
send_epoch_end (bool, optional): Whether to send end of sequence to device or not (default=True).
create_data_info_queue (bool, optional): Whether to create queue which stores
types and shapes of data or not (default=False).
Raises:
TypeError: If device_type is empty.
ValueError: If device_type is not 'Ascend', 'GPU' or 'CPU'.
RuntimeError: If dataset is unknown.
"""
def __init__(self, input_dataset, send_epoch_end=True, create_data_info_queue=False):
super().__init__(children=input_dataset)
self.queue_name = str(uuid.uuid1())
self.device_type = context.get_context("device_target") if context else "CPU"
self.device_id = context.get_context("device_id") if context else 0
self._send_epoch_end = replace_none(send_epoch_end, True)
self._create_data_info_queue = create_data_info_queue
self._to_device = None
def parse(self, children=None):
total_batch = 0
if hasattr(self.children[0], "__total_batch__"):
total_batch = self.children[0].__total_batch__
return cde.TransferNode(children[0], self.queue_name, self.device_type, self.device_id, self._send_epoch_end,
total_batch, self._create_data_info_queue)
def create_dict_iterator(self, num_epochs=-1, output_numpy=False):
raise RuntimeError("TransferDataset is not iterable.")
def create_tuple_iterator(self, columns=None, num_epochs=-1, output_numpy=False, do_copy=True):
raise RuntimeError("TransferDataset is not iterable.")
def __iter__(self):
raise RuntimeError("TransferDataset is not iterable.")
def output_shapes(self):
raise RuntimeError("TransferDataset does not support obtaining output_shapes.")
def output_types(self):
raise RuntimeError("TransferDataset does not support obtaining output_types.")
@check_to_device_send
def send(self, num_epochs=-1):
"""
Send to device
"""
if Dataset._noop_mode():
return
if self._to_device is not None:
del self._to_device
self._to_device = _ToDevice(self, num_epochs)
self._to_device.send()
def stop_send(self):
if self._to_device is not None:
self._to_device.stop_send()
def continue_send(self):
if self._to_device is not None:
self._to_device.continue_send()
def get_data_info(self):
"""
Get type and shape of current batch
"""
if self._to_device is not None:
return self._to_device.get_data_info()
raise RuntimeError("Calling get_data_info with bad state.")
def get_offload_model(self):
if self._to_device is not None:
return self._to_device.get_offload_model()
raise RuntimeError("get_offload_model, _to_device is None")
def release(self):
"""
Manually terminate Device Queue instead of relying on out of scope destruction.
"""
if self._to_device is not None:
self._to_device.release()
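# Illustrative sketch (not part of the original module): TransferDataset is normally obtained via
# the device_que() helper rather than constructed directly; the availability of device_que() in a
# given release and a configured device target (Ascend/GPU) are assumptions here.
def _example_transfer_usage():
    import mindspore.dataset as ds
    data = ds.NumpySlicesDataset({"col1": [1, 2, 3]}, shuffle=False).batch(1)
    transfer = data.device_que()   # builds a TransferDataset on top of the pipeline
    transfer.send(num_epochs=1)    # pushes rows to the device queue through _ToDevice
    transfer.stop_send()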
class RangeDataset(MappableDataset):
"""
A source dataset that generates a sequence of numbers over a range.
Args:
start (int): Starting index.
stop (int): Ending index.
step (int): Step size in the range specified by start and stop.
"""
def __init__(self, start, stop, step):
super().__init__()
self.start = start
self.stop = stop
self.step = step
def parse(self, children=None):
raise NotImplementedError("Dataset has to implement parse method.")
def is_shuffled(self):
return False
def is_sharded(self):
return False
def get_dataset_size(self):
if self.dataset_size is None:
self.dataset_size = math.ceil((self.stop - self.start) / self.step)
return self.dataset_size
class FashionMnistDataset(MappableDataset):
"""
A source dataset for reading and parsing the FASHION-MNIST dataset.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test` or `all`. `train` will read from 60,000
train samples, `test` will read from 10,000 test samples, `all` will read from all 70,000 samples.
(default=None, will read all samples)
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> fashion_mnist_dataset_dir = "/path/to/fashion_mnist_dataset_directory"
>>>
>>> # Read 3 samples from FASHIONMNIST dataset
>>> dataset = ds.FashionMnistDataset(dataset_dir=fashion_mnist_dataset_dir, num_samples=3)
>>>
>>> # Note: In FASHIONMNIST dataset, each dictionary has keys "image" and "label"
About Fashion-MNIST dataset:
Fashion-MNIST is a dataset of Zalando's article images—consisting of a training set of 60,000 examples and
a test set of 10,000 examples. Each example is a 28x28 grayscale image, associated with a label from 10 classes.
We intend Fashion-MNIST to serve as a direct drop-in replacement for the original MNIST dataset for benchmarking
machine learning algorithms. It shares the same image size and structure of training and testing splits.
Here is the original Fashion-MNIST dataset structure.
You can unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── fashionmnist_dataset_dir
├── t10k-images-idx3-ubyte
├── t10k-labels-idx1-ubyte
├── train-images-idx3-ubyte
└── train-labels-idx1-ubyte
Citation:
.. code-block::
@online{xiao2017/online,
author = {Han Xiao and Kashif Rasul and Roland Vollgraf},
title = {Fashion-MNIST: a Novel Image Dataset for Benchmarking Machine Learning Algorithms},
date = {2017-08-28},
year = {2017},
eprintclass = {cs.LG},
eprinttype = {arXiv},
eprint = {cs.LG/1708.07747},
}
"""
@check_mnist_cifar_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.FashionMnistNode(self.dataset_dir, self.usage, self.sampler)
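# Illustrative sketch (not part of the original module): a typical FashionMnistDataset pipeline.
# The dataset directory is a hypothetical path laid out as in the docstring above.
def _example_fashion_mnist_pipeline():
    import mindspore.dataset as ds
    fashion_mnist_dir = "/path/to/fashion_mnist_dataset_directory"
    data = ds.FashionMnistDataset(dataset_dir=fashion_mnist_dir, usage="train", num_samples=8)
    data = data.map(operations=[(lambda img: img / 255.0)], input_columns=["image"])  # scale to [0, 1]
    data = data.batch(4)
    for row in data.create_dict_iterator(output_numpy=True, num_epochs=1):
        print(row["image"].shape, row["label"])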
class ImageFolderDataset(MappableDataset):
"""
A source dataset that reads images from a tree of directories.
All images within one folder have the same label.
The generated dataset has two columns: :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
num_samples (int, optional): The number of images to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
extensions (list[str], optional): List of file extensions to be
included in the dataset (default=None).
class_indexing (dict, optional): A str-to-int mapping from folder name to index
(default=None, the folder names will be sorted
alphabetically and each class will be given a
unique index starting from 0).
decode (bool, optional): Decode the images after reading (default=False).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
RuntimeError: If class_indexing is not a dictionary.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- The shape of the image column is [image_size] if decode flag is False, or [H,W,C] otherwise.
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> image_folder_dataset_dir = "/path/to/image_folder_dataset_directory"
>>>
>>> # 1) Read all samples (image files) in image_folder_dataset_dir with 8 threads
>>> dataset = ds.ImageFolderDataset(dataset_dir=image_folder_dataset_dir,
... num_parallel_workers=8)
>>>
>>> # 2) Read all samples (image files) from folder cat and folder dog with label 0 and 1
>>> dataset = ds.ImageFolderDataset(dataset_dir=image_folder_dataset_dir,
... class_indexing={"cat":0, "dog":1})
>>>
>>> # 3) Read all samples (image files) in image_folder_dataset_dir with extensions .JPEG and .png (case sensitive)
>>> dataset = ds.ImageFolderDataset(dataset_dir=image_folder_dataset_dir,
... extensions=[".JPEG", ".png"])
About ImageFolderDataset:
You can construct the following directory structure from your dataset files and read by MindSpore's API.
.. code-block::
.
└── image_folder_dataset_directory
├── class1
│ ├── 000000000001.jpg
│ ├── 000000000002.jpg
│ ├── ...
├── class2
│ ├── 000000000001.jpg
│ ├── 000000000002.jpg
│ ├── ...
├── class3
│ ├── 000000000001.jpg
│ ├── 000000000002.jpg
│ ├── ...
├── classN
├── ...
"""
@check_imagefolderdataset
def __init__(self, dataset_dir, num_samples=None, num_parallel_workers=None, shuffle=None, sampler=None,
extensions=None, class_indexing=None, decode=False, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.extensions = replace_none(extensions, [])
self.class_indexing = replace_none(class_indexing, {})
self.decode = replace_none(decode, False)
def parse(self, children=None):
return cde.ImageFolderNode(self.dataset_dir, self.decode, self.sampler, self.extensions, self.class_indexing)
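# Illustrative sketch (not part of the original module): decoding ImageFolderDataset samples in the
# pipeline with C vision transforms. The directory path and class names below are hypothetical.
def _example_image_folder_pipeline():
    import mindspore.dataset as ds
    import mindspore.dataset.vision.c_transforms as c_vision
    image_folder_dir = "/path/to/image_folder_dataset_directory"
    data = ds.ImageFolderDataset(dataset_dir=image_folder_dir,
                                 class_indexing={"cat": 0, "dog": 1}, shuffle=False)
    # With decode=False (the default) the image column holds raw file bytes,
    # so decode and resize inside the pipeline instead.
    data = data.map(operations=[c_vision.Decode(), c_vision.Resize((224, 224))],
                    input_columns=["image"])
    for row in data.create_dict_iterator(output_numpy=True, num_epochs=1):
        print(row["image"].shape, int(row["label"]))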
class MnistDataset(MappableDataset):
"""
A source dataset for reading and parsing the MNIST dataset.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test` or `all` . `train` will read from 60,000
train samples, `test` will read from 10,000 test samples, `all` will read from all 70,000 samples.
(default=None, will read all samples)
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> mnist_dataset_dir = "/path/to/mnist_dataset_directory"
>>>
>>> # Read 3 samples from MNIST dataset
>>> dataset = ds.MnistDataset(dataset_dir=mnist_dataset_dir, num_samples=3)
>>>
>>> # Note: In mnist_dataset dataset, each dictionary has keys "image" and "label"
About MNIST dataset:
The MNIST database of handwritten digits has a training set of 60,000 examples,
and a test set of 10,000 examples. It is a subset of a larger set available from
NIST. The digits have been size-normalized and centered in a fixed-size image.
Here is the original MNIST dataset structure.
You can unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── mnist_dataset_dir
├── t10k-images-idx3-ubyte
├── t10k-labels-idx1-ubyte
├── train-images-idx3-ubyte
└── train-labels-idx1-ubyte
Citation:
.. code-block::
@article{lecun2010mnist,
title = {MNIST handwritten digit database},
author = {LeCun, Yann and Cortes, Corinna and Burges, CJ},
journal = {ATT Labs [Online]},
volume = {2},
year = {2010},
howpublished = {http://yann.lecun.com/exdb/mnist}
}
"""
@check_mnist_cifar_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.MnistNode(self.dataset_dir, self.usage, self.sampler)
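# Illustrative sketch (not part of the original module): using an explicit sampler with
# MnistDataset; sampler and shuffle are mutually exclusive, as described in the table above.
# The dataset directory is a hypothetical path.
def _example_mnist_sampler_usage():
    import mindspore.dataset as ds
    mnist_dir = "/path/to/mnist_dataset_directory"
    sampler = ds.SequentialSampler(start_index=0, num_samples=16)  # deterministic order
    data = ds.MnistDataset(dataset_dir=mnist_dir, usage="test", sampler=sampler)
    print(data.get_dataset_size())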
class PhotoTourDataset(MappableDataset):
"""
A source dataset for reading and parsing the PhotoTour dataset.
The generated dataset with different usage has different output columns.
If train, the generated dataset has one column :py:obj:`[image]`,
else three columns :py:obj:`[image1, image2, matches]`.
The tensor of column :py:obj:`image`, :py:obj:`image1` and :py:obj:`image2` is of the uint8 type.
The tensor of column :py:obj:`matches` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
name (str): Name of the dataset to load,
should be one of 'notredame', 'yosemite', 'liberty', 'notredame_harris',
'yosemite_harris' or 'liberty_harris'.
usage (str, optional): Usage of the dataset, can be `train` or `test` (default=None, will be set to 'train').
When usage is `train`, number of samples for each `name` is
{'notredame': 468159, 'yosemite': 633587, 'liberty': 450092, 'liberty_harris': 379587,
'yosemite_harris': 450912, 'notredame_harris': 325295}.
When usage is `test`, will read 100,000 samples for testing.
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If dataset_dir does not exist.
ValueError: If usage is not in ["train", "test"].
ValueError: If name is not in ["notredame", "yosemite", "liberty",
"notredame_harris", "yosemite_harris", "liberty_harris"].
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a sampler. `sampler` and `shuffle` are mutually exclusive. The table
below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> # Read 3 samples from PhotoTour dataset.
>>> dataset = ds.PhotoTourDataset(dataset_dir="/path/to/photo_tour_dataset_directory",
... name='liberty', usage='train', num_samples=3)
>>>
>>> # In PhotoTourDataset dataset, if usage is 'train', each dictionary has key "image",
>>> # else has keys "image1" "image2" and "matches".
About PhotoTour dataset:
The data is taken from Photo Tourism reconstructions from Trevi Fountain (Rome), Notre Dame (Paris) and Half
Dome (Yosemite). Each dataset consists of a series of corresponding patches, which are obtained by projecting
3D points from Photo Tourism reconstructions back into the original images.
The dataset consists of 1024 x 1024 bitmap (.bmp) images, each containing a 16 x 16 array of image patches.
Each patch is sampled as 64 x 64 grayscale, with a canonical scale and orientation. For details of how the scale
and orientation is established, please see the paper. An associated metadata file info.txt contains the match
information. Each row of info.txt corresponds to a separate patch, with the patches ordered from left to right and
top to bottom in each bitmap image. The first number on each row of info.txt is the 3D point ID from which that
patch was sampled -- patches with the same 3D point ID are projected from the same 3D point (into different images).
The second number in info.txt corresponds to the image from which the patch was sampled, and is not used at present.
You can unzip the original PhotoTour dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── photo_tour_dataset_directory
├── liberty/
│ ├── info.txt // two columns: 3D_point_ID, unused
│ ├── m50_100000_100000_0.txt // seven columns: patch_ID1, 3D_point_ID1, unused1,
│ │ // patch_ID2, 3D_point_ID2, unused2, unused3
│ ├── patches0000.bmp // 1024*1024 pixels, with 16 * 16 patches.
│ ├── patches0001.bmp
│ ├── ...
├── yosemite/
│ ├── ...
├── notredame/
│ ├── ...
├── liberty_harris/
│ ├── ...
├── yosemite_harris/
│ ├── ...
├── notredame_harris/
│ ├── ...
Citation:
.. code-block::
@INPROCEEDINGS{4269996,
author={Winder, Simon A. J. and Brown, Matthew},
booktitle={2007 IEEE Conference on Computer Vision and Pattern Recognition},
title={Learning Local Image Descriptors},
year={2007},
volume={},
number={},
pages={1-8},
doi={10.1109/CVPR.2007.382971}
}
"""
@check_photo_tour_dataset
def __init__(self, dataset_dir, name, usage=None, num_samples=None, num_parallel_workers=None,
shuffle=None, sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.name = name
self.usage = replace_none(usage, "train")
def parse(self, children=None):
return cde.PhotoTourNode(self.dataset_dir, self.name, self.usage, self.sampler)
class Places365Dataset(MappableDataset):
"""
A source dataset for reading and parsing the Places365 dataset.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train-standard`, `train-challenge` or `val`
(default=None, will be set to 'train-standard').
small (bool, optional): Use 256 * 256 images (True) or high resolution images (False) (default=True).
decode (bool, optional): Decode the images after reading (default=False).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
ValueError: If usage is not in ["train-standard", "train-challenge", "val"].
Note:
- This dataset can take in a sampler. 'sampler' and 'shuffle' are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> place365_dataset_dir = "/path/to/place365_dataset_directory"
>>>
>>> # Read 3 samples from Places365 dataset
>>> dataset = ds.Places365Dataset(dataset_dir=place365_dataset_dir, usage='train-standard',
... small=True, decode=True, num_samples=3)
>>>
>>> # In places365 dataset, each dictionary has keys "image" and "label".
About Places365 dataset:
Convolutional neural networks (CNNs) trained on the Places2 Database can be used for scene recognition as well as
generic deep scene features for visual recognition.
The author releases the data of Places365-Standard and the data of Places365-Challenge to the public.
Places365-Standard is the core set of Places2 Database, which has been used to train the Places365-CNNs. The author
will add other kinds of annotation on the Places365-Standard in the future. Places365-Challenge is the competition
set of Places2 Database, which has 6.2 million extra images compared to the Places365-Standard.
The Places365-Challenge will be used for the Places Challenge 2016.
You can unzip the original Places365 dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
├── categories_places365.txt
├── places365_train-standard.txt
├── places365_train-challenge.txt
├── val_large/
│ ├── Places365_val_00000001.jpg
│ ├── Places365_val_00000002.jpg
│ ├── Places365_val_00000003.jpg
│ ├── ...
├── val_256/
│ ├── ...
├── data_large_standard/
│ ├── ...
├── data_256_standard/
│ ├── ...
├── data_large_challenge/
│ ├── ...
├── data_256_challenge/
│ ├── ...
Citation:
.. code-block::
@article{zhou2017places,
title={Places: A 10 million Image Database for Scene Recognition},
author={Zhou, Bolei and Lapedriza, Agata and Khosla, Aditya and Oliva, Aude and Torralba, Antonio},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
year={2017},
publisher={IEEE}
}
"""
@check_places365_dataset
def __init__(self, dataset_dir, usage=None, small=True, decode=False, num_samples=None, num_parallel_workers=None,
shuffle=None, sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = os.path.abspath(dataset_dir)
self.usage = replace_none(usage, "train-standard")
self.small = small
self.decode = decode
def parse(self, children=None):
return cde.Places365Node(self.dataset_dir, self.usage, self.small, self.decode, self.sampler)
class QMnistDataset(MappableDataset):
"""
A source dataset for reading and parsing the QMNIST dataset.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar when `compat` is True, otherwise a tensor; both are of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test`, `test10k`, `test50k`, `nist`
or `all` (default=None, will read all samples).
compat (bool, optional): Whether the label for each example is the class number (compat=True) or the
full QMNIST information (compat=False) (default=True).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> qmnist_dataset_dir = "/path/to/qmnist_dataset_directory"
>>>
>>> # Read 3 samples from QMNIST train dataset
>>> dataset = ds.QMnistDataset(dataset_dir=qmnist_dataset_dir, num_samples=3)
>>>
>>> # Note: In QMNIST dataset, each dictionary has keys "image" and "label"
About QMNIST dataset:
The QMNIST dataset was generated from the original data found in the NIST Special Database 19 with the goal of
matching the MNIST preprocessing as closely as possible.
Through an iterative process, researchers tried to generate an additional 50k images of MNIST-like data.
They started with a reconstruction process given in the paper and used the Hungarian algorithm to find the best
matches between the original MNIST samples and their reconstructed samples.
Here is the original QMNIST dataset structure.
You can unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── qmnist_dataset_dir
├── qmnist-train-images-idx3-ubyte
├── qmnist-train-labels-idx2-int
├── qmnist-test-images-idx3-ubyte
├── qmnist-test-labels-idx2-int
├── xnist-images-idx3-ubyte
└── xnist-labels-idx2-int
Citation:
.. code-block::
@incollection{qmnist-2019,
title = "Cold Case: The Lost MNIST Digits",
author = "Chhavi Yadav and L\'{e}on Bottou",\
booktitle = {Advances in Neural Information Processing Systems 32},
year = {2019},
publisher = {Curran Associates, Inc.},
}
"""
@check_qmnist_dataset
def __init__(self, dataset_dir, usage=None, compat=True, num_samples=None, num_parallel_workers=None,
shuffle=None, sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
self.compat = compat
def parse(self, children=None):
return cde.QMnistNode(self.dataset_dir, self.usage, self.compat, self.sampler)
class MindDataset(MappableDataset):
"""
A source dataset for reading and parsing MindRecord dataset.
The columns of generated dataset depend on the source MindRecord files.
Args:
dataset_file (Union[str, list[str]]): If dataset_file is a str, it represents the file name of one
component of a mindrecord source; other files with an identical source in the same path will be
found and loaded automatically. If dataset_file is a list, it represents a list of dataset files
to be read directly.
columns_list (list[str], optional): List of columns to be read (default=None).
num_parallel_workers (int, optional): The number of readers (default=None).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=None, performs global shuffle).
If shuffle is False, no shuffling will be performed.
If shuffle is True, the behavior is the same as setting shuffle to Shuffle.GLOBAL.
Otherwise, there are three levels of shuffling:
- Shuffle.GLOBAL: Global shuffle of all rows of data in dataset.
- Shuffle.FILES: Shuffle the file sequence but keep the order of data within each file.
- Shuffle.INFILE: Keep the file sequence the same but shuffle the data within each file.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, 'num_samples' reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, sampler is exclusive
with shuffle and block_reader). Support list: SubsetRandomSampler,
PkSampler, RandomSampler, SequentialSampler, DistributedSampler.
padded_sample (dict, optional): Samples will be appended to the dataset, where
keys are the same as columns_list.
num_padded (int, optional): Number of padding samples. Dataset size
plus num_padded should be divisible by num_shards.
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, all samples).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_files are not valid or do not exist.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> mind_dataset_dir = ["/path/to/mind_dataset_file"] # contains 1 or multiple MindRecord files
>>> dataset = ds.MindDataset(dataset_file=mind_dataset_dir)
"""
def parse(self, children=None):
return cde.MindDataNode(self.dataset_file, self.columns_list, self.sampler, self.new_padded_sample,
self.num_padded, shuffle_to_shuffle_mode(self.shuffle_option))
@check_minddataset
def __init__(self, dataset_file, columns_list=None, num_parallel_workers=None, shuffle=None, num_shards=None,
shard_id=None, sampler=None, padded_sample=None, num_padded=None, num_samples=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle_to_bool(shuffle), num_shards=num_shards, shard_id=shard_id, cache=cache)
if shuffle is not None and not isinstance(shuffle, (bool, Shuffle)):
raise TypeError("shuffle must be of boolean or enum of 'Shuffle' values like 'Shuffle.GLOBAL' or "
"'Shuffle.FILES' or 'Shuffle.INFILE'.")
if num_samples and shuffle in (Shuffle.FILES, Shuffle.INFILE):
raise ValueError("'Shuffle.FILES' or 'Shuffle.INFILE' and 'num_samples' "
"cannot be specified at the same time.")
self.shuffle_option = shuffle
if isinstance(dataset_file, list):
self.load_dataset = False
else:
self.load_dataset = True
self.dataset_file = dataset_file
self.columns_list = replace_none(columns_list, [])
if shuffle is False:
logger.warning("WARN: global shuffle is not used.")
if sampler is not None:
if isinstance(sampler, (
samplers.SubsetRandomSampler, samplers.SubsetSampler, samplers.PKSampler,
samplers.DistributedSampler,
samplers.RandomSampler, samplers.SequentialSampler)) is False:
raise ValueError("The sampler is not supported yet.")
self.padded_sample = padded_sample
self.num_padded = replace_none(num_padded, 0)
self.new_padded_sample = {}
if padded_sample:
for k, v in padded_sample.items():
if isinstance(v, np.ndarray):
self.new_padded_sample[k] = v.tobytes()
else:
self.new_padded_sample[k] = v
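# Illustrative sketch (not part of the original module): selecting a file-level shuffle for
# MindDataset. The MindRecord path and column names below are hypothetical; note that
# Shuffle.FILES/Shuffle.INFILE cannot be combined with num_samples, as enforced in __init__ above.
def _example_minddataset_shuffle_levels():
    import mindspore.dataset as ds
    mindrecord_file = "/path/to/mind_dataset_file"
    data = ds.MindDataset(dataset_file=mindrecord_file,
                          columns_list=["data", "label"],
                          shuffle=ds.Shuffle.FILES)
    for row in data.create_dict_iterator(output_numpy=True, num_epochs=1):
        print(row["label"])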
def _iter_fn(dataset, num_samples):
"""
Generator function wrapper for iterable dataset.
"""
if num_samples is not None and num_samples != 0:
ds_iter = iter(dataset)
for _ in range(num_samples):
try:
val = next(ds_iter)
except StopIteration:
return
# convert output tensors to ndarrays
yield _convert_row(val)
else:
for val in dataset:
# convert output tensors to ndarrays
yield _convert_row(val)
def _generator_fn(generator, num_samples):
"""
Generator function wrapper for generator function dataset.
"""
if num_samples is not None and num_samples != 0:
gen_iter = generator()
for _ in range(num_samples):
try:
val = next(gen_iter)
except StopIteration:
return
yield val
else:
gen_iter = generator()
for val in gen_iter:
yield val
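# Illustrative sketch (not part of the original module): the two wrappers above correspond to two of
# the source kinds GeneratorDataset accepts, a generator function (_generator_fn) and an iterable
# object without __getitem__ (_iter_fn). The column name below is hypothetical.
def _example_generator_sources():
    import numpy as np
    import mindspore.dataset as ds

    def gen_func():                      # generator function -> handled by _generator_fn
        for i in range(3):
            yield (np.array([i]),)

    class IterableSource:                # iterable without __getitem__ -> handled by _iter_fn
        def __iter__(self):
            return iter([(np.array([i]),) for i in range(3)])

    data_a = ds.GeneratorDataset(source=gen_func, column_names=["col1"])
    data_b = ds.GeneratorDataset(source=IterableSource(), column_names=["col1"])
    return (list(data_a.create_tuple_iterator(output_numpy=True, num_epochs=1)),
            list(data_b.create_tuple_iterator(output_numpy=True, num_epochs=1)))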
def _cpp_sampler_fn(sample_ids, dataset):
"""
Generator function wrapper for mappable dataset with cpp sampler.
"""
if not isinstance(sample_ids, np.ndarray):
raise RuntimeError("Sample IDs are not in a numpy array.")
if sample_ids.size == 0:
raise RuntimeError("Sampler passed an empty sample IDs list.")
for i in sample_ids:
val = dataset[i]
# convert output tensors to ndarrays
yield _convert_row(val)
def _cpp_sampler_fn_mp(sample_ids, sample_fn):
"""
Multiprocessing generator function wrapper for mappable dataset with cpp sampler.
"""
if not isinstance(sample_ids, np.ndarray):
raise RuntimeError("Sample IDs are not in a numpy array.")
if sample_ids.size == 0:
raise RuntimeError("Sampler passed an empty sample IDs list.")
return sample_fn.process(sample_ids)
def _fill_worker_indices(workers, indices, idx):
"""
Fill the worker index queues in round-robin order.
"""
num_worker = len(workers)
while idx < len(indices):
try:
workers[idx % num_worker].put(indices[idx])
idx += 1
except queue.Full:
break
return idx
def _check_shm_usage(num_worker, queue_size, max_rowsize, num_queues=1):
"""
Check that sufficient shared memory is available for the shared memory queues
when training in parallel mode.
"""
threshold_ratio = 0.8
if platform.system().lower() not in {"windows", "darwin"}:
shm_estimate_usage = _get_device_num() * num_worker * num_queues * \
(queue_size + 2) * max_rowsize * 1024 * 1024
try:
shm_available = psutil.disk_usage('/dev/shm').free
if shm_estimate_usage >= threshold_ratio * shm_available:
raise RuntimeError(
"Insufficient shared memory available. Required: {}, Available: {}. "
"The required memory can't exceed 80% of the available shared memory. "
"Recommend to set_enable_shared_mem to False, reduce max_rowsize or reduce num_parallel_workers."
.format(shm_estimate_usage, shm_available))
except FileNotFoundError:
raise RuntimeError("Expected /dev/shm to exist.")
def _convert_row(row):
"""
Convert Op return value to numpy
"""
value = []
if isinstance(row, dict):
raise ValueError("Return value in user defined python function should be numpy array, but got dict.")
# convert each column in row into numpy array
for x in row:
if isinstance(x, bytes): # got image bytes from a file
value.append(np.frombuffer(x, np.uint8))
elif isinstance(x, Tensor): # got mindspore.Tensor
value.append(x.asnumpy())
elif isinstance(x, dict):
raise ValueError("Return value in user defined python function should be numpy array, but got dict.")
else:
value.append(np.array(x, copy=False))
return tuple(value)
class SamplerFn:
"""
Master process wrapper for multiprocessing or multithreaded generator functions.
"""
def __init__(self, dataset, num_worker, multi_process, max_rowsize):
self.workers = []
self.num_worker = num_worker
self.multi_process = multi_process
self.need_join = False
self.ppid = os.getpid()
self.pids = []
self.check_interval = 300  # interval, in seconds, for checking the result queue's size
# Event for end of epoch
if multi_process is True:
try:
self.eof = multiprocessing.Event()
except Exception:
raise RuntimeError("Init multiprocessing.Event() failed, This might be caused by insufficient shm,"
+ " and the recommended shm size is at least 5 GB.")
else:
self.eof = threading.Event()
# Create workers
# Get the default queue size and adjust the per-worker queue size when there are many workers
queue_size = get_prefetch_size()
queue_size = min(queue_size, queue_size * 4 // num_worker)
queue_size = max(2, queue_size)
if multi_process and get_enable_shared_mem():
_check_shm_usage(num_worker, queue_size, max_rowsize)
for _ in range(num_worker):
if multi_process is True:
try:
worker = _GeneratorWorkerMp(dataset, self.eof, max_rowsize, queue_size)
except Exception:
raise RuntimeError("Init multiprocessing.Queue() failed, This might be caused by insufficient shm,"
+ " and the recommended shm size is at least 5 GB.")
worker.daemon = True
# When multiple processes fork a subprocess, the lock of the main process is copied to the subprocess,
# which may cause a deadlock. Therefore, the subprocess startup is performed in the initialization
# phase, during which the main process is not locked.
worker.start()
self.pids.append(worker.pid)
self.need_join = True
else:
worker = _GeneratorWorkerMt(dataset, self.eof)
worker.daemon = True
self.workers.append(worker)
if multi_process is True and platform.system().lower() != 'windows':
self.eot = threading.Event()
self.watch_dog = threading.Thread(target=_watch_dog, args=(self.eot, self.pids))
self.watch_dog.daemon = True
self.watch_dog.start()
def process(self, indices):
"""
The main process: start the child processes or threads, fill the index queues,
then fetch the results and yield them.
"""
for w in self.workers:
# Check whether the queue of the subprocess is empty.
if not w.queue_empty():
raise Exception("The queue of the subprocess is not empty.")
# Start all workers
if not w.is_alive():
w.start()
# Fill initial index queues
idx_cursor = 0
idx_cursor = _fill_worker_indices(self.workers, indices, idx_cursor)
# Fetch results
for i in range(len(indices)):
if self.eof.is_set():
self._stop_subprocess()
return
if self.multi_process is True and not psutil.pid_exists(self.workers[i % self.num_worker].pid):
self._stop_subprocess()
return
# Fetch result and put index
try:
# To avoid get timeout from queue, check the res_queue size.
start_time = int(time.time())
wait_count = 1
while self.workers[i % self.num_worker].res_queue.empty():
time.sleep(0.1)
cost_time = int(time.time()) - start_time
if cost_time / self.check_interval >= wait_count:
wait_count += 1
logger.warning("It has been waiting for " + str(cost_time) + "s because the multi "
"thread/process of the generator generates data had been hung by gil lock.")
result = self.workers[i % self.num_worker].get()
if isinstance(result, ExceptionHandler):
result.reraise()
except queue.Empty:
self._stop_subprocess()
raise Exception("Generator worker process timeout.")
except KeyboardInterrupt:
self._stop_subprocess()
raise Exception("Generator worker receives KeyboardInterrupt.")
if self.eof.is_set():
self._stop_subprocess()
return
if idx_cursor < len(indices):
idx_cursor = _fill_worker_indices(self.workers, indices, idx_cursor)
yield _convert_row(result)
def _stop_subprocess(self):
# Only the main process can call join
if self.need_join is True and self.ppid == os.getpid():
self.eof.set()
self.need_join = False
for w in self.workers:
if psutil.pid_exists(w.pid):
w.join()
self._abort_watchdog()
def _abort_watchdog(self):
if hasattr(self, 'eot') and self.eot is not None and not self.eot.is_set():
self.eot.set()
def __del__(self):
self._stop_subprocess()
def _subprocess_handle(eof, signum, frame):
threading.Thread(target=eof.set).start()  # pass the bound method; calling it inline would run set() synchronously
def _generator_worker_loop(dataset, idx_queue, result_queue, eof, is_multiprocessing):
"""
Multithread or multiprocess generator worker process loop.
"""
if is_multiprocessing:
signal.signal(signal.SIGTERM, partial(_subprocess_handle, eof))
while True:
# Fetch index, block
try:
idx = idx_queue.get(timeout=1)
except KeyboardInterrupt:
if is_multiprocessing:
eof.set()
idx_queue.cancel_join_thread()
result_queue.cancel_join_thread()
raise Exception("Generator worker receives KeyboardInterrupt.")
except queue.Empty:
if eof.is_set():
if is_multiprocessing:
idx_queue.cancel_join_thread()
result_queue.cancel_join_thread()
return
# If end-of-file (eof) is not set, continue to get data from idx_queue
continue
if idx is None:
# When the queue goes out of scope in the master process, a None item can be fetched from it.
# Upon receiving None, the worker should check whether eof is set.
if not eof.is_set():
raise Exception("")
return
if eof.is_set():
if is_multiprocessing:
idx_queue.cancel_join_thread()
result_queue.cancel_join_thread()
return
# Fetch data, any exception from __getitem__ will terminate worker and timeout master process
try:
result = dataset[idx]
except Exception:
result = ExceptionHandler(where="in GeneratorDataset worker process")
# Send data, block
while True:
try:
result_queue.put(result, timeout=5)
except KeyboardInterrupt:
if is_multiprocessing:
eof.set()
idx_queue.cancel_join_thread()
result_queue.cancel_join_thread()
raise Exception("Generator worker receives KeyboardInterrupt.")
except queue.Full:
if eof.is_set():
if is_multiprocessing:
idx_queue.cancel_join_thread()
result_queue.cancel_join_thread()
return
# If eof is not set, continue to put data to result_queue
continue
break
del result, idx
class _GeneratorWorkerMt(threading.Thread):
"""
Worker thread for the multi-thread Generator.
"""
def __init__(self, dataset, eof):
self.idx_queue = queue.Queue(16)
self.res_queue = queue.Queue(16)
super().__init__(target=_generator_worker_loop, args=(dataset, self.idx_queue, self.res_queue, eof, False))
def put(self, item):
"""
Put function for worker index queue. Never block. Raise queue.Full on failure.
"""
self.idx_queue.put_nowait(item)
def get(self):
"""
Get function for worker result queue. Block with timeout.
"""
return self.res_queue.get(timeout=30)
def queue_empty(self):
if not self.idx_queue.empty():
logger.warning("idx_queue is not empty")
return False
if not self.res_queue.empty():
logger.warning("res_queue is not empty")
return False
return True
class _GeneratorWorkerMp(multiprocessing.Process):
"""
Worker process for multiprocess Generator.
"""
def __init__(self, dataset, eof, max_rowsize, queue_size):
self.idx_queue = multiprocessing.Queue(queue_size)
if get_enable_shared_mem():
self.res_queue = _SharedQueue(queue_size, max_rowsize=max_rowsize)
else:
self.res_queue = multiprocessing.Queue(queue_size)
self.idx_queue._joincancelled = True # pylint: disable=W0212
self.res_queue._joincancelled = True # pylint: disable=W0212
super().__init__(target=_generator_worker_loop, args=(dataset, self.idx_queue, self.res_queue, eof, True))
def put(self, item):
"""
Put function for worker index queue. Never block. Raise queue.Full on failure.
"""
self.idx_queue.put_nowait(item)
def get(self):
"""
Get function for worker result queue. Block with timeout.
"""
# The timeout was relaxed from 10s to 30s, since the shorter timeout sometimes caused
# "Generator worker process timeout" when running many iterators with an infinite epoch (num_epoch=-1).
return self.res_queue.get(timeout=30)
def queue_empty(self):
if not self.idx_queue.empty():
logger.warning("idx_queue is not empty.")
return False
if not self.res_queue.empty():
logger.warning("res_queue is not empty.")
return False
return True
class GeneratorDataset(MappableDataset):
"""
A source dataset that generates data from Python by invoking the Python data source each epoch.
The column names and column types of the generated dataset depend on the Python data defined by users.
Args:
source (Union[Callable, Iterable, Random Accessible]):
A generator callable object, an iterable Python object or a random accessible Python object.
Callable source is required to return a tuple of NumPy arrays as a row of the dataset on source().next().
Iterable source is required to return a tuple of NumPy arrays as a row of the dataset on
iter(source).next().
Random accessible source is required to return a tuple of NumPy arrays as a row of the dataset on
source[idx].
column_names (Union[str, list[str]], optional): List of column names of the dataset (default=None). Users are
required to provide either column_names or schema.
column_types (list[mindspore.dtype], optional): List of column data types of the dataset (default=None).
If provided, sanity check will be performed on generator output.
schema (Union[Schema, str], optional): Path to the JSON schema file or schema object (default=None). Users are
required to provide either column_names or schema. If both are provided, schema will be used.
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of subprocesses used to fetch the dataset in parallel (default=1).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset. Random accessible input is required.
(default=None, expected order behavior shown in the table).
sampler (Union[Sampler, Iterable], optional): Object used to choose samples from the dataset. Random accessible
input is required (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
Random accessible input is required. When this argument is specified, `num_samples` reflects the maximum
sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This argument must be specified only
when num_shards is also specified. Random accessible input is required.
python_multiprocessing (bool, optional): Parallelize Python operations with multiple worker process. This
option could be beneficial if the Python operation is computational heavy (default=True).
max_rowsize (int, optional): Maximum size of a row in MB that is used for shared memory allocation to copy
data between processes. This is only used if python_multiprocessing is set to True (default=6).
Raises:
RuntimeError: If source raises an exception during execution.
RuntimeError: If len of column_names does not match output len of source.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> import numpy as np
>>>
>>> # 1) Multidimensional generator function as callable input.
>>> def generator_multidimensional():
... for i in range(64):
... yield (np.array([[i, i + 1], [i + 2, i + 3]]),)
>>>
>>> dataset = ds.GeneratorDataset(source=generator_multidimensional, column_names=["multi_dimensional_data"])
>>>
>>> # 2) Multi-column generator function as callable input.
>>> def generator_multi_column():
... for i in range(64):
... yield np.array([i]), np.array([[i, i + 1], [i + 2, i + 3]])
>>>
>>> dataset = ds.GeneratorDataset(source=generator_multi_column, column_names=["col1", "col2"])
>>>
>>> # 3) Iterable dataset as iterable input.
>>> class MyIterable:
... def __init__(self):
... self._index = 0
... self._data = np.random.sample((5, 2))
... self._label = np.random.sample((5, 1))
...
... def __next__(self):
... if self._index >= len(self._data):
... raise StopIteration
... else:
... item = (self._data[self._index], self._label[self._index])
... self._index += 1
... return item
...
... def __iter__(self):
... self._index = 0
... return self
...
... def __len__(self):
... return len(self._data)
>>>
>>> dataset = ds.GeneratorDataset(source=MyIterable(), column_names=["data", "label"])
>>>
>>> # 4) Random accessible dataset as random accessible input.
>>> class MyAccessible:
... def __init__(self):
... self._data = np.random.sample((5, 2))
... self._label = np.random.sample((5, 1))
...
... def __getitem__(self, index):
... return self._data[index], self._label[index]
...
... def __len__(self):
... return len(self._data)
>>>
>>> dataset = ds.GeneratorDataset(source=MyAccessible(), column_names=["data", "label"])
>>>
>>> # list, dict, tuple of Python is also random accessible
>>> dataset = ds.GeneratorDataset(source=[(np.array(0),), (np.array(1),), (np.array(2),)], column_names=["col"])
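>>>
>>> # 5) A minimal sketch: enable multiprocess loading for the random accessible source above; the
>>> # worker count and max_rowsize values are illustrative, not recommendations.
>>> dataset = ds.GeneratorDataset(source=MyAccessible(), column_names=["data", "label"],
...                               num_parallel_workers=4, python_multiprocessing=True, max_rowsize=6)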
"""
@check_generatordataset
def __init__(self, source, column_names=None, column_types=None, schema=None, num_samples=None,
num_parallel_workers=1, shuffle=None, sampler=None, num_shards=None, shard_id=None,
python_multiprocessing=True, max_rowsize=6):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id)
if isinstance(source, builtins.zip):
# Although zip is iterable, it cannot be iterated repeatedly, so convert it to a list.
self.source = [item for item in source]
else:
self.source = source
self.prepared_source = None # source to be sent to C++
self.python_multiprocessing = python_multiprocessing
self.column_names = to_list(column_names)
if column_types is not None:
self.column_types = mstypelist_to_detypelist(column_types)
else:
self.column_types = []
self.schema = schema
if schema is not None:
self.schema = schema
if not isinstance(schema, Schema):
self.schema = Schema(schema)
# Get the dataset size via len() here instead of in parse(), because self.source loses its
# '__len__' attribute after deepcopy.
self.source_len = -1 # unknown
if hasattr(self.source, "__len__"):
self.source_len = len(self.source)
self.max_rowsize = max_rowsize
self.sample_fn = None
def __deepcopy__(self, memodict):
if id(self) in memodict:
return memodict[id(self)]
new_op = self.__safe_deepcopy__(memodict, exclude=("source", "__transfer_dataset__"))
sample_fn = None
if new_op.sampler is not None and hasattr(self.source, "__getitem__"):
# A try/except is needed here because, when the new op is constructed with shared memory enabled,
# an exception is thrown if there is not enough shared memory available.
if self.source_len == -1:
raise RuntimeError("Attempt to construct a random access dataset, '__len__' method is required!")
try:
if new_op.num_parallel_workers > 1:
self.__validate_memory_usage()
sample_fn = SamplerFn(self.source, new_op.num_parallel_workers, self.python_multiprocessing,
self.max_rowsize)
new_op.prepared_source = (lambda sample_ids: _cpp_sampler_fn_mp(sample_ids, sample_fn))
else:
new_op.prepared_source = (lambda sample_ids: _cpp_sampler_fn(sample_ids, self.source))
new_op.sample_fn = sample_fn
except RuntimeError as e:
raise Exception(str(e))
else:
try:
new_op.sampler = None
new_op.sample_fn = sample_fn
new_op.source_len = min(new_op.source_len,
new_op.num_samples) if new_op.num_samples != 0 else new_op.source_len
iter(self.source)
except TypeError:
# Use generator function if input callable
new_op.prepared_source = (lambda: _generator_fn(self.source, new_op.num_samples))
else:
# Use iterator function if input is iterable
# Random accessible input is also iterable
new_op.prepared_source = (lambda: _iter_fn(self.source, new_op.num_samples))
return new_op
def is_shuffled(self):
return self.sampler.is_shuffled()
def is_sharded(self):
return self.sampler.is_sharded()
def parse(self, children=None):
if self.schema is None:
return cde.GeneratorNode(self.prepared_source, self.column_names, self.column_types, self.source_len,
self.sampler, self.num_parallel_workers)
schema = self.schema
if isinstance(schema, Schema):
schema = self.schema.cpp_schema
return cde.GeneratorNode(self.prepared_source, schema, self.source_len, self.sampler,
self.num_parallel_workers)
def __validate_memory_usage(self):
"""
Check memory usage in multiprocessing mode: warn when the estimate exceeds 85% of system memory and raise an error when it exceeds 100%.
"""
if self.python_multiprocessing:
# If num_parallel_workers is too large when python_multiprocessing=True, it may cause an OOM error.
# First determine the effective number of shards.
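# Illustrative example (the numbers are arbitrary): with a 2 GB main process, num_parallel_workers=8
# and 4 shards, the estimate below is process_memory * (num_parallel_workers + 1) * valid_num_shards
# = 2 GB * (8 + 1) * 4 = 72 GB, which would exceed most hosts' memory and trigger the error branch.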
valid_num_shards = 1
if isinstance(self.sampler, samplers.DistributedSampler):
valid_num_shards = self.sampler.num_shards
elif self.num_shards is not None:
valid_num_shards = self.num_shards
# get process memory usage
process = psutil.Process(os.getpid())
process_memory = process.memory_info().rss
sys_memory = psutil.virtual_memory().total
total_memory_maybe_used = process_memory * (self.num_parallel_workers + 1) * valid_num_shards
if total_memory_maybe_used / sys_memory > 0.85:
valid_num_worker = math.floor(sys_memory * 0.85 / valid_num_shards / process_memory - 1)
valid_num_worker = 1 if valid_num_worker <= 0 else valid_num_worker
if total_memory_maybe_used / sys_memory > 1.0:
info = "GeneratorDataset num_parallel_workers: " + str(self.num_parallel_workers) + \
" is too large which maybe cause a lot of memory occupation (>100%) during" \
" multi process running. Therefore, it is recommended to" \
" reduce num_parallel_workers to " + str(valid_num_worker) + " or smaller."
raise RuntimeError(info)
info = "GeneratorDataset num_parallel_workers: " + str(self.num_parallel_workers) + \
" is too large which maybe cause a lot of memory occupation (>85%) during multi " \
"process running. Therefore, it is recommended to reduce num_parallel_workers to " \
+ str(valid_num_worker) + " or smaller."
logger.warning(info)
class TFRecordDataset(SourceDataset):
"""
A source dataset for reading and parsing datasets stored on disk in TFData format.
The columns of generated dataset depend on the source TFRecord files.
Args:
dataset_files (Union[str, list[str]]): String or list of files to be read or glob strings to search for a
pattern of files. The list will be sorted in a lexicographical order.
schema (Union[str, Schema], optional): Path to the JSON schema file or schema object (default=None).
If the schema is not provided, the meta data from the TFData file is considered the schema.
columns_list (list[str], optional): List of columns to be read (default=None, read all columns).
num_samples (int, optional): The number of samples (rows) to be included in the dataset (default=None).
If num_samples is None and numRows(parsed from schema) does not exist, read the full dataset;
If num_samples is None and numRows(parsed from schema) is greater than 0, read numRows rows;
If both num_samples and numRows(parsed from schema) are greater than 0, read num_samples rows.
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
shard_equal_rows (bool, optional): Get equal rows for all shards (default=False). If shard_equal_rows
is False, the number of rows in each shard may be unequal, which may lead to a failure in distributed training.
When the number of samples per TFRecord file is unequal, it is suggested to set this to True.
This argument should only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_files are not valid or do not exist.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Examples:
>>> from mindspore import dtype as mstype
>>>
>>> tfrecord_dataset_dir = ["/path/to/tfrecord_dataset_file"] # contains 1 or multiple TFRecord files
>>> tfrecord_schema_file = "/path/to/tfrecord_schema_file"
>>>
>>> # 1) Get all rows from tfrecord_dataset_dir with no explicit schema.
>>> # The meta-data in the first row will be used as a schema.
>>> dataset = ds.TFRecordDataset(dataset_files=tfrecord_dataset_dir)
>>>
>>> # 2) Get all rows from tfrecord_dataset_dir with user-defined schema.
>>> schema = ds.Schema()
>>> schema.add_column(name='col_1d', de_type=mstype.int64, shape=[2])
>>> dataset = ds.TFRecordDataset(dataset_files=tfrecord_dataset_dir, schema=schema)
>>>
>>> # 3) Get all rows from tfrecord_dataset_dir with schema file.
>>> dataset = ds.TFRecordDataset(dataset_files=tfrecord_dataset_dir, schema=tfrecord_schema_file)
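>>>
>>> # 4) A minimal sketch: read shard 0 of 2 and equalize rows across shards, as suggested when the
>>> # TFRecord files hold unequal sample counts; the shard values are illustrative.
>>> dataset = ds.TFRecordDataset(dataset_files=tfrecord_dataset_dir, num_shards=2, shard_id=0,
...                              shard_equal_rows=True)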
"""
@check_tfrecorddataset
def __init__(self, dataset_files, schema=None, columns_list=None, num_samples=None, num_parallel_workers=None,
shuffle=Shuffle.GLOBAL, num_shards=None, shard_id=None, shard_equal_rows=False, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_files = self._find_files(dataset_files)
self.dataset_files.sort()
self.schema = schema
self.columns_list = replace_none(columns_list, [])
self.shard_equal_rows = replace_none(shard_equal_rows, False)
if self.schema is not None and (self.num_samples is None or self.num_samples == 0):
self.num_samples = Schema.get_num_rows(self.schema)
def parse(self, children=None):
schema = self.schema.cpp_schema if isinstance(self.schema, Schema) else self.schema
return cde.TFRecordNode(self.dataset_files, schema, self.columns_list, self.num_samples, self.shuffle_flag,
self.num_shards, self.shard_id, self.shard_equal_rows)
class ManifestDataset(MappableDataset):
"""
A source dataset for reading images from a Manifest file.
The generated dataset has two columns: :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is of a scalar of uint64 type.
Args:
dataset_file (str): File to be read.
usage (str, optional): Acceptable usages include `train`, `eval` and `inference` (default= `train`).
num_samples (int, optional): The number of images to be included in the dataset.
(default=None, will include all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
class_indexing (dict, optional): A str-to-int mapping from label name to index
(default=None, the folder names will be sorted alphabetically and each
class will be given a unique index starting from 0).
decode (bool, optional): decode the images after reading (default=False).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the max number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_files are not valid or do not exist.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
RuntimeError: If class_indexing is not a dictionary.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- The shape of the image column is [image_size] if decode flag is False, or [H,W,C] otherwise.
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> manifest_dataset_dir = "/path/to/manifest_dataset_file"
>>>
>>> # 1) Read all samples specified in manifest_dataset_dir dataset with 8 threads for training
>>> dataset = ds.ManifestDataset(dataset_file=manifest_dataset_dir, usage="train", num_parallel_workers=8)
>>>
>>> # 2) Read samples (specified in manifest_file.manifest) for shard 0 in a 2-way distributed training setup
>>> dataset = ds.ManifestDataset(dataset_file=manifest_dataset_dir, num_shards=2, shard_id=0)
"""
@check_manifestdataset
def __init__(self, dataset_file, usage="train", num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, class_indexing=None, decode=False, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_file = dataset_file
self.decode = replace_none(decode, False)
self.usage = replace_none(usage, "train")
self.class_indexing = replace_none(class_indexing, {})
def parse(self, children=None):
return cde.ManifestNode(self.dataset_file, self.usage, self.sampler, self.class_indexing, self.decode)
def get_class_indexing(self):
"""
Get the class index.
Returns:
dict, a str-to-int mapping from label name to index.
Examples:
>>> manifest_dataset_dir = "/path/to/manifest_dataset_file"
>>>
>>> dataset = ds.ManifestDataset(dataset_file=manifest_dataset_dir)
>>> class_indexing = dataset.get_class_indexing()
"""
if self.class_indexing is None or not self.class_indexing:
if self._class_indexing is None:
runtime_getter = self._init_tree_getters()
self._class_indexing = runtime_getter[0].GetClassIndexing()
self.class_indexing = {}
for pair in self._class_indexing:
self.class_indexing[pair[0]] = pair[1][0]
return self.class_indexing
class AGNewsDataset(SourceDataset):
"""
A source dataset that reads and parses AG News datasets.
The generated dataset has three columns: :py:obj:`[index, title, description]`.
The tensor of column :py:obj:`index` is of the string type.
The tensor of column :py:obj:`title` is of the string type.
The tensor of column :py:obj:`description` is of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Acceptable usages include `train`, `test` and `all` (default=None, all samples).
num_samples (int, optional): Number of samples (rows) to read (default=None, reads the full dataset).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, 'num_samples' reflects the max sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Examples:
>>> ag_news_dataset_dir = "/path/to/ag_news_dataset_file"
>>> dataset = ds.AGNewsDataset(dataset_dir=ag_news_dataset_dir, usage='all')
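>>>
>>> # A minimal sketch: read only the train split for shard 0 of 2 shards; the shard values are illustrative.
>>> dataset = ds.AGNewsDataset(dataset_dir=ag_news_dataset_dir, usage='train', num_shards=2, shard_id=0)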
About AGNews dataset:
AG is a collection of over 1 million news articles. The news articles were collected
by ComeToMyHead from over 2,000 news sources in over 1 year of activity. ComeToMyHead
is an academic news search engine that has been in operation since July 2004.
The dataset is provided by academics for research purposes such as data mining
(clustering, classification, etc.), information retrieval (ranking, searching, etc.),
xml, data compression, data streaming, and any other non-commercial activities.
AG's news topic classification dataset was constructed by selecting the four largest
classes from the original corpus. Each class contains 30,000 training samples and
1,900 test samples. The total number of training samples in train.csv is 120,000
and the number of test samples in test.csv is 7,600.
You can unzip the dataset files into the following structure and read by MindSpore's API:
.. code-block::
.
└── ag_news_dataset_dir
├── classes.txt
├── train.csv
├── test.csv
└── readme.txt
Citation:
.. code-block::
@misc{zhang2015characterlevel,
title={Character-level Convolutional Networks for Text Classification},
author={Xiang Zhang and Junbo Zhao and Yann LeCun},
year={2015},
eprint={1509.01626},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
@check_ag_news_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None,
num_parallel_workers=None, shuffle=Shuffle.GLOBAL, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.AGNewsNode(self.dataset_dir, self.usage, self.num_samples, self.shuffle_flag, self.num_shards,
self.shard_id)
class Cifar10Dataset(MappableDataset):
"""
A source dataset for reading and parsing Cifar10 dataset.
This API currently only supports parsing CIFAR-10 files in the binary version.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test` or `all` . `train` will read from 50,000
train samples, `test` will read from 10,000 test samples, `all` will read from all 60,000 samples
(default=None, all samples).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> cifar10_dataset_dir = "/path/to/cifar10_dataset_directory"
>>>
>>> # 1) Get all samples from CIFAR10 dataset in sequence
>>> dataset = ds.Cifar10Dataset(dataset_dir=cifar10_dataset_dir, shuffle=False)
>>>
>>> # 2) Randomly select 350 samples from CIFAR10 dataset
>>> dataset = ds.Cifar10Dataset(dataset_dir=cifar10_dataset_dir, num_samples=350, shuffle=True)
>>>
>>> # 3) Get samples from CIFAR10 dataset for shard 0 in a 2-way distributed training
>>> dataset = ds.Cifar10Dataset(dataset_dir=cifar10_dataset_dir, num_shards=2, shard_id=0)
>>>
>>> # In CIFAR10 dataset, each dictionary has keys "image" and "label"
About CIFAR-10 dataset:
The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes,
with 6000 images per class. There are 50000 training images and 10000 test images.
The 10 different classes represent airplanes, cars, birds, cats, deer, dogs, frogs, horses, ships, and trucks.
Here is the original CIFAR-10 dataset structure.
You can unzip the dataset files into the following directory structure and read by MindSpore's API.
.. code-block::
.
└── cifar-10-batches-bin
├── data_batch_1.bin
├── data_batch_2.bin
├── data_batch_3.bin
├── data_batch_4.bin
├── data_batch_5.bin
├── test_batch.bin
├── readme.html
└── batches.meta.txt
Citation:
.. code-block::
@techreport{Krizhevsky09,
author = {Alex Krizhevsky},
title = {Learning multiple layers of features from tiny images},
institution = {},
year = {2009},
howpublished = {http://www.cs.toronto.edu/~kriz/cifar.html}
}
"""
@check_mnist_cifar_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.Cifar10Node(self.dataset_dir, self.usage, self.sampler)
class Cifar100Dataset(MappableDataset):
"""
A source dataset for reading and parsing Cifar100 dataset.
The generated dataset has three columns :py:obj:`[image, coarse_label, fine_label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensors of columns :py:obj:`coarse_label` and :py:obj:`fine_label` are each a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test` or `all` . `train` will read from 50,000
train samples, `test` will read from 10,000 test samples, `all` will read from all 60,000 samples
(default=None, all samples).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, 'num_samples' reflects
the maximum sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> cifar100_dataset_dir = "/path/to/cifar100_dataset_directory"
>>>
>>> # 1) Get all samples from CIFAR100 dataset in sequence
>>> dataset = ds.Cifar100Dataset(dataset_dir=cifar100_dataset_dir, shuffle=False)
>>>
>>> # 2) Randomly select 350 samples from CIFAR100 dataset
>>> dataset = ds.Cifar100Dataset(dataset_dir=cifar100_dataset_dir, num_samples=350, shuffle=True)
>>>
>>> # In CIFAR100 dataset, each dictionary has 3 keys: "image", "fine_label" and "coarse_label"
About CIFAR-100 dataset:
This dataset is just like the CIFAR-10, except it has 100 classes containing 600 images
each. There are 500 training images and 100 testing images per class. The 100 classes in
the CIFAR-100 are grouped into 20 superclasses. Each image comes with a "fine" label (the
class to which it belongs) and a "coarse" label (the superclass to which it belongs).
Here is the original CIFAR-100 dataset structure.
You can unzip the dataset files into the following directory structure and read by MindSpore's API.
.. code-block::
.
└── cifar-100-binary
├── train.bin
├── test.bin
├── fine_label_names.txt
└── coarse_label_names.txt
Citation:
.. code-block::
@techreport{Krizhevsky09,
author = {Alex Krizhevsky},
title = {Learning multiple layers of features from tiny images},
institution = {},
year = {2009},
howpublished = {http://www.cs.toronto.edu/~kriz/cifar.html}
}
"""
@check_mnist_cifar_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.Cifar100Node(self.dataset_dir, self.usage, self.sampler)
class RandomDataset(SourceDataset):
"""
A source dataset that generates random data.
Args:
total_rows (int, optional): Number of samples for the dataset to generate
(default=None, number of samples is random).
schema (Union[str, Schema], optional): Path to the JSON schema file or schema object (default=None).
If the schema is not provided, the random dataset generates a random schema.
columns_list (list[str], optional): List of columns to be read (default=None, read all columns)
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, all samples).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, 'num_samples' reflects
the maximum sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
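Examples:
>>> from mindspore import dtype as mstype
>>>
>>> # A minimal sketch: generate 10 random rows that follow a user-defined schema; the column
>>> # name, type and row count are illustrative.
>>> schema = ds.Schema()
>>> schema.add_column(name='col1', de_type=mstype.int64, shape=[2])
>>> dataset = ds.RandomDataset(total_rows=10, schema=schema)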
"""
@check_random_dataset
def __init__(self, total_rows=None, schema=None, columns_list=None, num_samples=None, num_parallel_workers=None,
cache=None, shuffle=None, num_shards=None, shard_id=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.total_rows = total_rows
if schema is not None:
self.total_rows = replace_none(total_rows, Schema.get_num_rows(schema))
self.schema = schema
self.columns_list = replace_none(columns_list, [])
def parse(self, children=None):
schema = self.schema.cpp_schema if isinstance(self.schema, Schema) else self.schema
return cde.RandomNode(self.total_rows, schema, self.columns_list)
class Schema:
"""
Class to represent a schema of a dataset.
Args:
schema_file (str): Path of the schema file (default=None).
Returns:
Schema object, schema info about dataset.
Raises:
RuntimeError: If schema file failed to load.
Examples:
>>> from mindspore import dtype as mstype
>>>
>>> # Create schema; specify column name, mindspore.dtype and shape of the column
>>> schema = ds.Schema()
>>> schema.add_column(name='col1', de_type=mstype.int64, shape=[2])
"""
@check_schema
def __init__(self, schema_file=None):
self.schema_file = replace_none(schema_file, "")
self.cpp_schema = cde.SchemaObj(self.schema_file)
@check_add_column
def add_column(self, name, de_type, shape=None):
"""
Add new column to the schema.
Args:
name (str): The name of the new column.
de_type (str): Data type of the column.
shape (list[int], optional): Shape of the column
(default=None, [-1] which is an unknown shape of rank 1).
Raises:
ValueError: If column type is unknown.
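Examples:
>>> from mindspore import dtype as mstype
>>>
>>> # A minimal sketch: add an int32 column with the default unknown shape [-1]; the column name is illustrative.
>>> schema = ds.Schema()
>>> schema.add_column(name='label', de_type=mstype.int32)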
"""
if isinstance(de_type, typing.Type):
de_type = mstype_to_detype(de_type)
col_type = str(de_type)
else:
col_type = str(cde.DataType(de_type))
if shape is None:
self.cpp_schema.add_column(name, col_type)
else:
self.cpp_schema.add_column(name, col_type, shape)
def parse_columns(self, columns):
"""
Parse the columns and add it to self.
Args:
columns (Union[dict, list[dict], tuple[dict]]): Dataset attribute information, decoded from schema file.
- list[dict], 'name' and 'type' must be in keys, 'shape' optional.
- dict, columns.keys() as name, columns.values() is dict, and 'type' inside, 'shape' optional.
Raises:
RuntimeError: If failed to parse columns.
RuntimeError: If column's name field is missing.
RuntimeError: If column's type field is missing.
Examples:
>>> schema = Schema()
>>> columns1 = [{'name': 'image', 'type': 'int8', 'shape': [3, 3]},
... {'name': 'label', 'type': 'int8', 'shape': [1]}]
>>> schema.parse_columns(columns1)
>>> columns2 = {'image': {'shape': [3, 3], 'type': 'int8'}, 'label': {'shape': [1], 'type': 'int8'}}
>>> schema.parse_columns(columns2)
"""
self.cpp_schema.parse_columns(json.dumps(columns, indent=2))
def to_json(self):
"""
Get a JSON string of the schema.
Returns:
str, JSON string of the schema.
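Examples:
>>> from mindspore import dtype as mstype
>>>
>>> # A minimal sketch: serialize a small illustrative schema to a JSON string.
>>> schema = ds.Schema()
>>> schema.add_column(name='col1', de_type=mstype.int64, shape=[2])
>>> json_str = schema.to_json()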
"""
return self.cpp_schema.to_json()
def from_json(self, json_obj):
"""
Get schema file from JSON object.
Args:
json_obj (dict): Parsed JSON object.
Raises:
RuntimeError: If there is an unknown item in the object.
RuntimeError: If the dataset type is missing in the object.
RuntimeError: If columns are missing in the object.
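Examples:
>>> import json
>>>
>>> # A minimal sketch: rebuild a schema from a parsed JSON object; `json_str` is assumed to hold
>>> # the output of to_json() from another Schema instance.
>>> schema = ds.Schema()
>>> schema.from_json(json.loads(json_str))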
"""
self.cpp_schema.from_string(json.dumps(json_obj, indent=2))
def __str__(self):
return self.to_json()
@staticmethod
def get_num_rows(schema):
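# Returns the number of rows (numRows) recorded in the schema; accepts either a Schema object
# or a schema file path that the Schema constructor can load.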
schema_obj = schema
if not isinstance(schema_obj, Schema):
schema_obj = Schema(schema_obj)
return schema_obj.cpp_schema.get_num_rows()
class USPSDataset(SourceDataset):
"""
A source dataset for reading and parsing the USPS dataset.
The generated dataset has two columns: :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is of a scalar of uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be "train", "test" or "all". "train" will read from 7,291
train samples, "test" will read from 2,007 test samples, "all" will read from all 9,298 samples.
(default=None, will read all samples)
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the max sample number per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir is not valid or does not exist or does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If usage is invalid.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Examples:
>>> usps_dataset_dir = "/path/to/usps_dataset_directory"
>>>
>>> # Read 3 samples from USPS dataset
>>> dataset = ds.USPSDataset(dataset_dir=usps_dataset_dir, num_samples=3)
>>>
>>> # Note: In USPS dataset, each dictionary has keys "image" and "label"
About USPS dataset:
USPS is a digit dataset automatically scanned from envelopes by the U.S. Postal Service
containing a total of 9,298 16×16 pixel grayscale samples.
The images are centered, normalized and show a broad range of font styles.
Here is the original USPS dataset structure.
You can download and unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── usps_dataset_dir
├── usps
├── usps.t
Citation:
.. code-block::
@article{hull1994database,
title={A database for handwritten text recognition research},
author={Hull, Jonathan J.},
journal={IEEE Transactions on pattern analysis and machine intelligence},
volume={16},
number={5},
pages={550--554},
year={1994},
publisher={IEEE}
}
"""
@check_usps_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=Shuffle.GLOBAL,
num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.USPSNode(self.dataset_dir, self.usage, self.num_samples, self.shuffle_flag, self.num_shards,
self.shard_id)
class VOCDataset(MappableDataset):
"""
A source dataset for reading and parsing VOC dataset.
The generated dataset with different task setting has different output columns:
- task = :py:obj:`Detection`, output columns: :py:obj:`[image, dtype=uint8]`, :py:obj:`[bbox, dtype=float32]`, \
:py:obj:`[label, dtype=uint32]`, :py:obj:`[difficult, dtype=uint32]`, :py:obj:`[truncate, dtype=uint32]`.
- task = :py:obj:`Segmentation`, output columns: :py:obj:`[image, dtype=uint8]`, :py:obj:`[target,dtype=uint8]`.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
task (str, optional): Set the task type of reading voc data, now only support `Segmentation` or `Detection`
(default= `Segmentation`).
usage (str, optional): Set the task type of ImageSets (default= `train`). If task is `Segmentation`, the image and
annotation list will be loaded from ./ImageSets/Segmentation/usage + ".txt"; if task is `Detection`, the image and
annotation list will be loaded from ./ImageSets/Main/usage + ".txt"; if task and usage are not set, the image and
annotation list will be loaded from ./ImageSets/Segmentation/train.txt by default.
class_indexing (dict, optional): A str-to-int mapping from label name to index, only valid in
`Detection` task (default=None, the folder names will be sorted alphabetically and each
class will be given a unique index starting from 0).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
decode (bool, optional): Decode the images after reading (default=False).
sampler (Sampler, optional): Object used to choose samples from the dataset
(default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
extra_metadata (bool, optional): Flag to add extra meta-data to row. If True, an additional column named
:py:obj:`[_meta-filename, dtype=string]` will be output at the end (default=False).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If the XML of Annotations is in an invalid format.
RuntimeError: If the XML of Annotations lacks the `object` field.
RuntimeError: If the XML of Annotations lacks the `bndbox` field.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If task is not 'Segmentation' or 'Detection'.
ValueError: If task is 'Segmentation' but class_indexing is not None.
ValueError: If the txt file related to the mode does not exist.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- Column '[_meta-filename, dtype=string]' won't be output unless an explicit rename dataset op
is added to remove the prefix('_meta-').
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> voc_dataset_dir = "/path/to/voc_dataset_directory"
>>>
>>> # 1) Read VOC data for segmentation training
>>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir, task="Segmentation", usage="train")
>>>
>>> # 2) Read VOC data for detection training
>>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir, task="Detection", usage="train")
>>>
>>> # 3) Read all VOC dataset samples in voc_dataset_dir with 8 threads in random order
>>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir, task="Detection", usage="train",
... num_parallel_workers=8)
>>>
>>> # 4) Read then decode all VOC dataset samples in voc_dataset_dir in sequence
>>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir, task="Detection", usage="train",
... decode=True, shuffle=False)
>>>
>>> # In VOC dataset, if task='Segmentation', each dictionary has keys "image" and "target"
>>> # In VOC dataset, if task='Detection', each dictionary has keys "image" and "annotation"
About VOC dataset.
The PASCAL Visual Object Classes (VOC) challenge is a benchmark in visual
object category recognition and detection, providing the vision and machine
learning communities with a standard dataset of images and annotation, and
standard evaluation procedures.
You can unzip the original VOC-2012 dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── voc2012_dataset_dir
├── Annotations
│ ├── 2007_000027.xml
│ ├── 2007_000032.xml
│ ├── ...
├── ImageSets
│ ├── Action
│ ├── Layout
│ ├── Main
│ └── Segmentation
├── JPEGImages
│ ├── 2007_000027.jpg
│ ├── 2007_000032.jpg
│ ├── ...
├── SegmentationClass
│ ├── 2007_000032.png
│ ├── 2007_000033.png
│ ├── ...
└── SegmentationObject
├── 2007_000032.png
├── 2007_000033.png
├── ...
Citation:
.. code-block::
@article{Everingham10,
author = {Everingham, M. and Van~Gool, L. and Williams, C. K. I. and Winn, J. and Zisserman, A.},
title = {The Pascal Visual Object Classes (VOC) Challenge},
journal = {International Journal of Computer Vision},
volume = {88},
year = {2012},
number = {2},
month = {jun},
pages = {303--338},
biburl = {http://host.robots.ox.ac.uk/pascal/VOC/pubs/everingham10.html#bibtex},
howpublished = {http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html}
}
"""
@check_vocdataset
def __init__(self, dataset_dir, task="Segmentation", usage="train", class_indexing=None, num_samples=None,
num_parallel_workers=None, shuffle=None, decode=False, sampler=None, num_shards=None, shard_id=None,
cache=None, extra_metadata=False):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.task = replace_none(task, "Segmentation")
self.usage = replace_none(usage, "train")
self.class_indexing = replace_none(class_indexing, {})
self.decode = replace_none(decode, False)
self.extra_metadata = extra_metadata
def parse(self, children=None):
return cde.VOCNode(self.dataset_dir, self.task, self.usage, self.class_indexing, self.decode, self.sampler,
self.extra_metadata)
def get_class_indexing(self):
"""
Get the class index.
Returns:
dict, a str-to-int mapping from label name to index.
Examples:
>>> voc_dataset_dir = "/path/to/voc_dataset_directory"
>>>
>>> dataset = ds.VOCDataset(dataset_dir=voc_dataset_dir)
>>> class_indexing = dataset.get_class_indexing()
"""
if self.task != "Detection":
raise NotImplementedError("Only 'Detection' support get_class_indexing.")
if self.class_indexing is None or not self.class_indexing:
if self._class_indexing is None:
runtime_getter = self._init_tree_getters()
self._class_indexing = runtime_getter[0].GetClassIndexing()
self.class_indexing = {}
for pair in self._class_indexing:
self.class_indexing[pair[0]] = pair[1][0]
return self.class_indexing
class CocoDataset(MappableDataset):
"""
A source dataset for reading and parsing COCO dataset.
CocoDataset supports four kinds of tasks, which are Object Detection, Keypoint Detection, Stuff Segmentation and
Panoptic Segmentation of 2017 Train/Val/Test dataset.
The generated dataset with different task setting has different output columns:
- task = :py:obj:`Detection`, output columns: :py:obj:`[image, dtype=uint8]`, :py:obj:`[bbox, dtype=float32]`, \
:py:obj:`[category_id, dtype=uint32]`, :py:obj:`[iscrowd, dtype=uint32]`.
- task = :py:obj:`Stuff`, output columns: :py:obj:`[image, dtype=uint8]`, :py:obj:`[segmentation,dtype=float32]`, \
:py:obj:`[iscrowd,dtype=uint32]`.
- task = :py:obj:`Keypoint`, output columns: :py:obj:`[image, dtype=uint8]`, \
:py:obj:`[keypoints, dtype=float32]`, :py:obj:`[num_keypoints, dtype=uint32]`.
- task = :py:obj:`Panoptic`, output columns: :py:obj:`[image, dtype=uint8]`, :py:obj:`[bbox, dtype=float32]`, \
:py:obj:`[category_id, dtype=uint32]`, :py:obj:`[iscrowd, dtype=uint32]`, :py:obj:`[area, dtype=uint32]`.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
annotation_file (str): Path to the annotation JSON file.
task (str, optional): Set the task type for reading COCO data. Supported task types:
`Detection`, `Stuff`, `Panoptic` and `Keypoint` (default= `Detection`).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the configuration file).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
decode (bool, optional): Decode the images after reading (default=False).
sampler (Sampler, optional): Object used to choose samples from the dataset
(default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum sample number per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
extra_metadata (bool, optional): Flag to add extra meta-data to row. If True, an additional column will be
output at the end :py:obj:`[_meta-filename, dtype=string]` (default=False).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
RuntimeError: If parsing the JSON file failed.
ValueError: If task is not in [`Detection`, `Stuff`, `Panoptic`, `Keypoint`].
ValueError: If annotation_file does not exist.
ValueError: If dataset_dir does not exist.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- Column '[_meta-filename, dtype=string]' won't be output unless an explicit rename dataset op is added
to remove the prefix('_meta-').
- CocoDataset doesn't support PKSampler.
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> coco_dataset_dir = "/path/to/coco_dataset_directory/images"
>>> coco_annotation_file = "/path/to/coco_dataset_directory/annotation_file"
>>>
>>> # 1) Read COCO data for Detection task
>>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,
... annotation_file=coco_annotation_file,
... task='Detection')
>>>
>>> # 2) Read COCO data for Stuff task
>>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,
... annotation_file=coco_annotation_file,
... task='Stuff')
>>>
>>> # 3) Read COCO data for Panoptic task
>>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,
... annotation_file=coco_annotation_file,
... task='Panoptic')
>>>
>>> # 4) Read COCO data for Keypoint task
>>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,
... annotation_file=coco_annotation_file,
... task='Keypoint')
>>>
>>> # In COCO dataset, each dictionary has keys "image" and "annotation"
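>>> # Illustrative sketch: read Detection data together with the extra meta-data column
>>> # described in the Note above; renaming strips the '_meta-' prefix so the column is output
>>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,
...                          annotation_file=coco_annotation_file,
...                          task='Detection',
...                          extra_metadata=True)
>>> dataset = dataset.rename(input_columns=["_meta-filename"], output_columns=["filename"])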
About COCO dataset:
COCO (Microsoft Common Objects in Context) is a large-scale object detection, segmentation, and captioning dataset
with several features: Object segmentation, Recognition in context, Superpixel stuff segmentation,
330K images (>200K labeled), 1.5 million object instances, 80 object categories, 91 stuff categories,
5 captions per image, 250,000 people with keypoints. In contrast to the popular ImageNet dataset, COCO has fewer
categories but more instances per category.
You can unzip the original COCO-2017 dataset files into this directory structure and read them with MindSpore's API.
.. code-block::
.
└── coco_dataset_directory
├── train2017
│ ├── 000000000009.jpg
│ ├── 000000000025.jpg
│ ├── ...
├── test2017
│ ├── 000000000001.jpg
│ ├── 000000058136.jpg
│ ├── ...
├── val2017
│ ├── 000000000139.jpg
│ ├── 000000057027.jpg
│ ├── ...
└── annotations
├── captions_train2017.json
├── captions_val2017.json
├── instances_train2017.json
├── instances_val2017.json
├── person_keypoints_train2017.json
└── person_keypoints_val2017.json
Citation:
.. code-block::
@article{DBLP:journals/corr/LinMBHPRDZ14,
author = {Tsung{-}Yi Lin and Michael Maire and Serge J. Belongie and
Lubomir D. Bourdev and Ross B. Girshick and James Hays and
Pietro Perona and Deva Ramanan and Piotr Doll{\'{a}}r and C. Lawrence Zitnick},
title = {Microsoft {COCO:} Common Objects in Context},
journal = {CoRR},
volume = {abs/1405.0312},
year = {2014},
url = {http://arxiv.org/abs/1405.0312},
archivePrefix = {arXiv},
eprint = {1405.0312},
timestamp = {Mon, 13 Aug 2018 16:48:13 +0200},
biburl = {https://dblp.org/rec/journals/corr/LinMBHPRDZ14.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
@check_cocodataset
def __init__(self, dataset_dir, annotation_file, task="Detection", num_samples=None, num_parallel_workers=None,
shuffle=None, decode=False, sampler=None, num_shards=None, shard_id=None, cache=None,
extra_metadata=False):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.annotation_file = annotation_file
self.task = replace_none(task, "Detection")
self.decode = replace_none(decode, False)
self.extra_metadata = extra_metadata
def parse(self, children=None):
return cde.CocoNode(self.dataset_dir, self.annotation_file, self.task, self.decode, self.sampler,
self.extra_metadata)
def get_class_indexing(self):
"""
Get the class index.
Returns:
dict, a str-to-list<int> mapping from label name to index.
Examples:
>>> coco_dataset_dir = "/path/to/coco_dataset_directory/images"
>>> coco_annotation_file = "/path/to/coco_dataset_directory/annotation_file"
>>>
>>> # Read COCO data for Detection task
>>> dataset = ds.CocoDataset(dataset_dir=coco_dataset_dir,
... annotation_file=coco_annotation_file,
... task='Detection')
>>>
>>> class_indexing = dataset.get_class_indexing()
"""
if self.task not in {"Detection", "Panoptic"}:
raise NotImplementedError("Only 'Detection' and 'Panoptic' support get_class_indexing.")
if self._class_indexing is None:
runtime_getter = self._init_tree_getters()
self._class_indexing = dict(runtime_getter[0].GetClassIndexing())
return self._class_indexing
class CelebADataset(MappableDataset):
"""
A source dataset for reading and parsing CelebA dataset.
Currently it only supports reading `list_attr_celeba.txt`, which contains the attribute annotations of the dataset.
The generated dataset has two columns: :py:obj:`[image, attr]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`attr` is of the uint32 type and one hot encoded.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
num_parallel_workers (int, optional): Number of workers to read the data (default=None, will use value set in
the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None).
usage (str, optional): Specify the `train`, `valid`, `test` part or `all` parts of dataset
(default= `all`, will read all samples).
sampler (Sampler, optional): Object used to choose samples from the dataset (default=None).
decode (bool, optional): Decode the images after reading (default=False).
extensions (list[str], optional): List of file extensions to be included in the dataset (default=None).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will include all images).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> celeba_dataset_dir = "/path/to/celeba_dataset_directory"
>>>
>>> # Read 5 samples from CelebA dataset
>>> dataset = ds.CelebADataset(dataset_dir=celeba_dataset_dir, usage='train', num_samples=5)
>>>
>>> # Note: In celeba dataset, each data dictionary has keys "image" and "attr"
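>>> # Illustrative sketch: read the 'valid' split and decode the images while reading
>>> dataset = ds.CelebADataset(dataset_dir=celeba_dataset_dir, usage='valid', decode=True)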
About CelebA dataset:
CelebFaces Attributes Dataset (CelebA) is a large-scale face attributes dataset
with more than 200K celebrity images, each with 40 attribute annotations.
The images in this dataset cover large pose variations and background clutter.
CelebA has large diversities, large quantities, and rich annotations, including
* 10,177 identities,
* 202,599 face images, and
* 5 landmark locations and 40 binary attribute annotations per image.
The dataset can be employed as the training and test sets for the following computer
vision tasks: face attribute recognition, face detection, landmark (or facial part)
localization, and face editing & synthesis.
Original CelebA dataset structure:
.. code-block::
.
└── CelebA
├── README.md
├── Img
│ ├── img_celeba.7z
│ ├── img_align_celeba_png.7z
│ └── img_align_celeba.zip
├── Eval
│ └── list_eval_partition.txt
└── Anno
├── list_landmarks_celeba.txt
├── list_landmarks_align_celeba.txt
├── list_bbox_celeba.txt
├── list_attr_celeba.txt
└── identity_CelebA.txt
You can unzip the dataset files into the following structure and read them with MindSpore's API.
.. code-block::
.
└── celeba_dataset_directory
├── list_attr_celeba.txt
├── 000001.jpg
├── 000002.jpg
├── 000003.jpg
├── ...
Citation:
.. code-block::
@article{DBLP:journals/corr/LiuLWT14,
author = {Ziwei Liu and Ping Luo and Xiaogang Wang and Xiaoou Tang},
title = {Deep Learning Face Attributes in the Wild},
journal = {CoRR},
volume = {abs/1411.7766},
year = {2014},
url = {http://arxiv.org/abs/1411.7766},
archivePrefix = {arXiv},
eprint = {1411.7766},
timestamp = {Tue, 10 Dec 2019 15:37:26 +0100},
biburl = {https://dblp.org/rec/journals/corr/LiuLWT14.bib},
bibsource = {dblp computer science bibliography, https://dblp.org},
howpublished = {http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html}
}
"""
@check_celebadataset
def __init__(self, dataset_dir, num_parallel_workers=None, shuffle=None, usage='all', sampler=None, decode=False,
extensions=None, num_samples=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.decode = replace_none(decode, False)
self.extensions = replace_none(extensions, [])
self.usage = replace_none(usage, "all")
def parse(self, children=None):
if self.usage != "all":
dataset_dir = os.path.realpath(self.dataset_dir)
partition_file = os.path.join(dataset_dir, "list_eval_partition.txt")
if os.path.exists(partition_file) is False:
raise RuntimeError("Partition file can not be found when usage is not 'all'.")
return cde.CelebANode(self.dataset_dir, self.usage, self.sampler, self.decode, self.extensions)
class CLUEDataset(SourceDataset):
"""
A source dataset that reads and parses CLUE datasets.
Supported CLUE classification tasks: `AFQMC`, `TNEWS`, `IFLYTEK`, `CMNLI`, `WSC` and `CSL`.
The generated dataset with different task setting has different output columns:
- task = :py:obj:`AFQMC`
- usage = :py:obj:`train`, output columns: :py:obj:`[sentence1, dtype=string]`, \
:py:obj:`[sentence2, dtype=string]`, :py:obj:`[label, dtype=string]`.
- usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=uint8]`, \
:py:obj:`[sentence1, dtype=string]`, :py:obj:`[sentence2, dtype=string]`.
- usage = :py:obj:`eval`, output columns: :py:obj:`[sentence1, dtype=string]`, \
:py:obj:`[sentence2, dtype=string]`, :py:obj:`[label, dtype=string]`.
- task = :py:obj:`TNEWS`
- usage = :py:obj:`train`, output columns: :py:obj:`[label, dtype=string]`, \
:py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`, :py:obj:`[keywords, dtype=string]`.
- usage = :py:obj:`test`, output columns: :py:obj:`[label, dtype=string]`, \
:py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`, :py:obj:`[keywords, dtype=string]`.
- usage = :py:obj:`eval`, output columns: :py:obj:`[label, dtype=string]`, \
:py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`, :py:obj:`[keywords, dtype=string]`.
- task = :py:obj:`IFLYTEK`
- usage = :py:obj:`train`, output columns: :py:obj:`[label, dtype=string]`, \
:py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`.
- usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=string]`, \
:py:obj:`[sentence, dtype=string]`.
- usage = :py:obj:`eval`, output columns: :py:obj:`[label, dtype=string]`, \
:py:obj:`[label_des, dtype=string]`, :py:obj:`[sentence, dtype=string]`.
- task = :py:obj:`CMNLI`
- usage = :py:obj:`train`, output columns: :py:obj:`[sentence1, dtype=string]`, \
:py:obj:`[sentence2, dtype=string]`, :py:obj:`[label, dtype=string]`.
- usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=uint8]`, \
:py:obj:`[sentence1, dtype=string]`, :py:obj:`[sentence2, dtype=string]`.
- usage = :py:obj:`eval`, output columns: :py:obj:`[sentence1, dtype=string]`, \
:py:obj:`[sentence2, dtype=string]`, :py:obj:`[label, dtype=string]`.
- task = :py:obj:`WSC`
- usage = :py:obj:`train`, output columns: :py:obj:`[span1_index, dtype=uint8]`, \
:py:obj:`[span2_index, dtype=uint8]`, :py:obj:`[span1_text, dtype=string]`, \
:py:obj:`[span2_text, dtype=string]`, :py:obj:`[idx, dtype=uint8]`, \
:py:obj:`[text, dtype=string]`, :py:obj:`[label, dtype=string]`.
- usage = :py:obj:`test`, output columns: :py:obj:`[span1_index, dtype=uint8]`, \
:py:obj:`[span2_index, dtype=uint8]`, :py:obj:`[span1_text, dtype=string]`, \
:py:obj:`[span2_text, dtype=string]`, :py:obj:`[idx, dtype=uint8]`, :py:obj:`[text, dtype=string]`.
- usage = :py:obj:`eval`, output columns: :py:obj:`[span1_index, dtype=uint8]`, \
:py:obj:`[span2_index, dtype=uint8]`, :py:obj:`[span1_text, dtype=string]`, \
:py:obj:`[span2_text, dtype=string]`, :py:obj:`[idx, dtype=uint8]`, \
:py:obj:`[text, dtype=string]`, :py:obj:`[label, dtype=string]`.
- task = :py:obj:`CSL`
- usage = :py:obj:`train`, output columns: :py:obj:`[id, dtype=uint8]`, \
:py:obj:`[abst, dtype=string]`, :py:obj:`[keyword, dtype=string]`, :py:obj:`[label, dtype=string]`.
- usage = :py:obj:`test`, output columns: :py:obj:`[id, dtype=uint8]`, \
:py:obj:`[abst, dtype=string]`, :py:obj:`[keyword, dtype=string]`.
- usage = :py:obj:`eval`, output columns: :py:obj:`[id, dtype=uint8]`, \
:py:obj:`[abst, dtype=string]`, :py:obj:`[keyword, dtype=string]`, :py:obj:`[label, dtype=string]`.
Args:
dataset_files (Union[str, list[str]]): String or list of files to be read or glob strings to search for
a pattern of files. The list will be sorted in a lexicographical order.
task (str, optional): The kind of task, one of `AFQMC`, `TNEWS`, `IFLYTEK`, `CMNLI`, `WSC` and `CSL`.
(default=AFQMC).
usage (str, optional): Specify the `train`, `test` or `eval` part of dataset (default="train").
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, will include all samples).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL;
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_files are not valid or do not exist.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
Examples:
>>> clue_dataset_dir = ["/path/to/clue_dataset_file"] # contains 1 or multiple clue files
>>> dataset = ds.CLUEDataset(dataset_files=clue_dataset_dir, task='AFQMC', usage='train')
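>>> # Illustrative sketch (file path is a placeholder): read the test split of the TNEWS task
>>> tnews_dataset_files = ["/path/to/tnews_dataset_file"]
>>> dataset = ds.CLUEDataset(dataset_files=tnews_dataset_files, task='TNEWS', usage='test')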
About CLUE dataset:
CLUE is a Chinese Language Understanding Evaluation benchmark. It contains multiple
tasks, including single-sentence classification, sentence pair classification, and machine
reading comprehension.
You can unzip the dataset files into the following structure and read them with MindSpore's API,
for example the afqmc dataset:
.. code-block::
.
└── afqmc_public
├── train.json
├── test.json
└── dev.json
Citation:
.. code-block::
@article{CLUEbenchmark,
title = {CLUE: A Chinese Language Understanding Evaluation Benchmark},
author = {Liang Xu, Xuanwei Zhang, Lu Li, Hai Hu, Chenjie Cao, Weitang Liu, Junyi Li, Yudong Li,
Kai Sun, Yechen Xu, Yiming Cui, Cong Yu, Qianqian Dong, Yin Tian, Dian Yu, Bo Shi, Jun Zeng,
Rongzhao Wang, Weijian Xie, Yanting Li, Yina Patterson, Zuoyu Tian, Yiwen Zhang, He Zhou,
Shaoweihua Liu, Qipeng Zhao, Cong Yue, Xinrui Zhang, Zhengliang Yang, Zhenzhong Lan},
journal = {arXiv preprint arXiv:2004.05986},
year = {2020},
howpublished = {https://github.com/CLUEbenchmark/CLUE}
}
"""
@check_cluedataset
def __init__(self, dataset_files, task='AFQMC', usage='train', num_samples=None, num_parallel_workers=None,
shuffle=Shuffle.GLOBAL, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_files = self._find_files(dataset_files)
self.usage = replace_none(usage, 'train')
self.task = replace_none(task, 'AFQMC')
def parse(self, children=None):
return cde.CLUENode(self.dataset_files, self.task, self.usage, self.num_samples, self.shuffle_flag,
self.num_shards, self.shard_id)
class CSVDataset(SourceDataset):
"""
A source dataset that reads and parses comma-separated values (CSV) datasets.
The columns of generated dataset depend on the source CSV files.
Args:
dataset_files (Union[str, list[str]]): String or list of files to be read or glob strings to search
for a pattern of files. The list will be sorted in a lexicographical order.
field_delim (str, optional): A string that indicates the char delimiter to separate fields (default=',').
column_defaults (list, optional): List of default values for the CSV field (default=None). Each item
in the list must be a value of a valid type (float, int, or string). If this is not provided, all
columns are treated as string type.
column_names (list[str], optional): List of column names of the dataset (default=None). If this
is not provided, the column names are inferred from the first row of the CSV file.
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, will include all samples).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL;
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_files are not valid or do not exist.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
Examples:
>>> csv_dataset_dir = ["/path/to/csv_dataset_file"] # contains 1 or multiple csv files
>>> dataset = ds.CSVDataset(dataset_files=csv_dataset_dir, column_names=['col1', 'col2', 'col3', 'col4'])
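>>> # Illustrative sketch (column names and defaults are assumptions): semicolon-delimited
>>> # files where the per-column default values also determine the column types
>>> dataset = ds.CSVDataset(dataset_files=csv_dataset_dir, field_delim=';',
...                         column_defaults=[0, 0.0, "", ""],
...                         column_names=['col1', 'col2', 'col3', 'col4'])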
"""
@check_csvdataset
def __init__(self, dataset_files, field_delim=',', column_defaults=None, column_names=None, num_samples=None,
num_parallel_workers=None, shuffle=Shuffle.GLOBAL, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_files = self._find_files(dataset_files)
self.dataset_files.sort()
self.field_delim = replace_none(field_delim, ',')
self.column_defaults = replace_none(column_defaults, [])
self.column_names = replace_none(column_names, [])
def parse(self, children=None):
return cde.CSVNode(self.dataset_files, self.field_delim, self.column_defaults, self.column_names,
self.num_samples, self.shuffle_flag, self.num_shards, self.shard_id)
class SBUDataset(MappableDataset):
"""
A source dataset for reading and parsing the SBU dataset.
The generated dataset has two columns :py:obj:`[image, caption]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`caption` is of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
decode (bool, optional): Decode the images after reading (default=False).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a sampler. 'sampler' and 'shuffle' are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter 'sampler'
- Parameter 'shuffle'
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> sbu_dataset_dir = "/path/to/sbu_dataset_directory"
>>> # Read 3 samples from SBU dataset
>>> dataset = ds.SBUDataset(dataset_dir=sbu_dataset_dir, num_samples=3)
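>>> # Illustrative sketch: decode the images while reading and keep the order deterministic
>>> dataset = ds.SBUDataset(dataset_dir=sbu_dataset_dir, decode=True, shuffle=False)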
About SBU dataset:
SBU dataset is a large captioned photo collection.
It contains one million images with associated visually relevant captions.
You should manually download the images using the official download.m script, replacing 'urls{i}(24, end)' with
'urls{i}(24:1:end)', and keep the directory structure as below.
.. code-block::
.
└─ dataset_dir
├── SBU_captioned_photo_dataset_captions.txt
├── SBU_captioned_photo_dataset_urls.txt
└── sbu_images
├── m_3326_3596303505_3ce4c20529.jpg
├── ......
└── m_2522_4182181099_c3c23ab1cc.jpg
Citation:
.. code-block::
@inproceedings{Ordonez:2011:im2text,
Author = {Vicente Ordonez and Girish Kulkarni and Tamara L. Berg},
Title = {Im2Text: Describing Images Using 1 Million Captioned Photographs},
Booktitle = {Neural Information Processing Systems ({NIPS})},
Year = {2011},
}
"""
@check_sbu_dataset
def __init__(self, dataset_dir, num_samples=None, num_parallel_workers=None, shuffle=None, decode=False,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.decode = replace_none(decode, False)
def parse(self, children=None):
return cde.SBUNode(self.dataset_dir, self.decode, self.sampler)
class _Flowers102Dataset:
"""
Mainly for loading the Flowers102 dataset; returns one row at a time.
"""
def __init__(self, dataset_dir, task, usage, decode):
self.dataset_dir = os.path.realpath(dataset_dir)
self.task = task
self.usage = usage
self.decode = decode
if self.task == "Classification":
self.column_names = ["image", "label"]
else:
self.column_names = ["image", "segmentation", "label"]
labels_path = os.path.join(self.dataset_dir, "imagelabels.mat")
setid_path = os.path.join(self.dataset_dir, "setid.mat")
# minus one to transform 1~102 to 0 ~ 101
self.labels = (loadmat(labels_path)["labels"][0] - 1).astype(np.uint32)
self.setid = loadmat(setid_path)
if self.usage == 'train':
self.indices = self.setid["trnid"][0].tolist()
elif self.usage == 'test':
self.indices = self.setid["tstid"][0].tolist()
elif self.usage == 'valid':
self.indices = self.setid["valid"][0].tolist()
elif self.usage == 'all':
self.indices = self.setid["trnid"][0].tolist()
self.indices += self.setid["tstid"][0].tolist()
self.indices += self.setid["valid"][0].tolist()
else:
raise ValueError("Input usage is not within the valid set of ['train', 'valid', 'test', 'all'].")
def __getitem__(self, index):
# range: 1 ~ 8189
image_path = os.path.join(self.dataset_dir, "jpg", "image_" + str(self.indices[index]).zfill(5) + ".jpg")
if not os.path.exists(image_path):
raise RuntimeError("Can not find image file: " + image_path)
if self.decode is True:
image = np.asarray(Image.open(image_path).convert("RGB"))
else:
image = np.fromfile(image_path, dtype=np.uint8)
label = self.labels[self.indices[index] - 1]
if self.task == "Segmentation":
segmentation_path = \
os.path.join(self.dataset_dir, "segmim", "segmim_" + str(self.indices[index]).zfill(5) + ".jpg")
if not os.path.exists(segmentation_path):
raise RuntimeError("Can not find segmentation file: " + segmentation_path)
if self.decode is True:
segmentation = np.asarray(Image.open(segmentation_path).convert("RGB"))
else:
segmentation = np.fromfile(segmentation_path, dtype=np.uint8)
return image, segmentation, label
return image, label
def __len__(self):
return len(self.indices)
class Flowers102Dataset(GeneratorDataset):
"""
A source dataset for reading and parsing Flowers102 dataset.
The generated dataset has two columns :py:obj:`[image, label]` or three :py:obj:`[image, segmentation, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`segmentation` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar or a tensor of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
task (str): Specify the 'Classification' or 'Segmentation' task (default='Classification').
usage (str): Specify the 'train', 'valid', 'test' part or 'all' parts of dataset
(default='all', will read all samples).
num_samples (int, optional): The number of samples to be included in the dataset (default=None, all images).
num_parallel_workers (int, optional): Number of subprocesses used to fetch the dataset in parallel (default=1).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset. Random accessible input is required.
(default=None, expected order behavior shown in the table).
decode (bool, optional): Whether or not to decode the images and segmentations after reading (default=False).
sampler (Union[Sampler, Iterable], optional): Object used to choose samples from the dataset. Random accessible
input is required (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
Random accessible input is required. When this argument is specified, 'num_samples' reflects the max
number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This argument must be specified only
when num_shards is also specified. Random accessible input is required.
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a sampler. 'sampler' and 'shuffle' are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter 'sampler'
- Parameter 'shuffle'
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> flowers102_dataset_dir = "/path/to/flowers102_dataset_directory"
>>> dataset = ds.Flowers102Dataset(dataset_dir=flowers102_dataset_dir,
... task="Classification",
... usage="all",
... decode=True)
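>>> # Illustrative sketch: read the 'train' split for the Segmentation task, which adds
>>> # the segmentation column described above
>>> dataset = ds.Flowers102Dataset(dataset_dir=flowers102_dataset_dir,
...                                task="Segmentation",
...                                usage="train")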
About Flowers102 dataset:
Flowers102 dataset consists of 102 flower categories.
The flowers commonly occur in the United Kingdom.
Each class consists of between 40 and 258 images.
Here is the original Flowers102 dataset structure.
You can unzip the dataset files into this directory structure and read them with MindSpore's API.
.. code-block::
.
└── flowers102_dataset_dir
├── imagelabels.mat
├── setid.mat
├── jpg
│ ├── image_00001.jpg
│ ├── image_00002.jpg
│ ├── ...
├── segmim
│ ├── segmim_00001.jpg
│ ├── segmim_00002.jpg
│ ├── ...
Citation:
.. code-block::
@InProceedings{Nilsback08,
author = "Maria-Elena Nilsback and Andrew Zisserman",
title = "Automated Flower Classification over a Large Number of Classes",
booktitle = "Indian Conference on Computer Vision, Graphics and Image Processing",
month = "Dec",
year = "2008",
}
"""
@check_flowers102dataset
def __init__(self, dataset_dir, task="Classification", usage="all", num_samples=None, num_parallel_workers=1,
shuffle=None, decode=False, sampler=None, num_shards=None, shard_id=None):
self.dataset_dir = os.path.realpath(dataset_dir)
self.task = replace_none(task, "Classification")
self.usage = replace_none(usage, "all")
self.decode = replace_none(decode, False)
dataset = _Flowers102Dataset(self.dataset_dir, self.task, self.usage, self.decode)
super().__init__(dataset, column_names=dataset.column_names, num_samples=num_samples,
num_parallel_workers=num_parallel_workers, shuffle=shuffle, sampler=sampler,
num_shards=num_shards, shard_id=shard_id)
def get_class_indexing(self):
"""
Get the class index.
Returns:
dict, a str-to-int mapping from label name to index.
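Examples:
>>> # Illustrative sketch mirroring the other dataset examples; the path is a placeholder
>>> flowers102_dataset_dir = "/path/to/flowers102_dataset_directory"
>>> dataset = ds.Flowers102Dataset(dataset_dir=flowers102_dataset_dir,
...                                task="Classification", usage="all")
>>> class_indexing = dataset.get_class_indexing()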
"""
class_names = [
"pink primrose", "hard-leaved pocket orchid", "canterbury bells",
"sweet pea", "english marigold", "tiger lily", "moon orchid",
"bird of paradise", "monkshood", "globe thistle", "snapdragon",
"colt's foot", "king protea", "spear thistle", "yellow iris",
"globe-flower", "purple coneflower", "peruvian lily", "balloon flower",
"giant white arum lily", "fire lily", "pincushion flower", "fritillary",
"red ginger", "grape hyacinth", "corn poppy", "prince of wales feathers",
"stemless gentian", "artichoke", "sweet william", "carnation",
"garden phlox", "love in the mist", "mexican aster", "alpine sea holly",
"ruby-lipped cattleya", "cape flower", "great masterwort", "siam tulip",
"lenten rose", "barbeton daisy", "daffodil", "sword lily", "poinsettia",
"bolero deep blue", "wallflower", "marigold", "buttercup", "oxeye daisy",
"common dandelion", "petunia", "wild pansy", "primula", "sunflower",
"pelargonium", "bishop of llandaff", "gaura", "geranium", "orange dahlia",
"pink-yellow dahlia?", "cautleya spicata", "japanese anemone",
"black-eyed susan", "silverbush", "californian poppy", "osteospermum",
"spring crocus", "bearded iris", "windflower", "tree poppy", "gazania",
"azalea", "water lily", "rose", "thorn apple", "morning glory",
"passion flower", "lotus", "toad lily", "anthurium", "frangipani",
"clematis", "hibiscus", "columbine", "desert-rose", "tree mallow",
"magnolia", "cyclamen", "watercress", "canna lily", "hippeastrum",
"bee balm", "ball moss", "foxglove", "bougainvillea", "camellia", "mallow",
"mexican petunia", "bromelia", "blanket flower", "trumpet creeper",
"blackberry lily"
]
class_dict = {}
for i, class_name in enumerate(class_names):
class_dict[class_name] = i
return class_dict
class LJSpeechDataset(MappableDataset):
"""
A source dataset for reading and parsing LJSpeech dataset.
The generated dataset has four columns :py:obj:`[waveform, sample_rate, transcription, normalized_transcript]`.
The tensor of column :py:obj:`waveform` is a tensor of the float32 type.
The tensor of column :py:obj:`sample_rate` is a scalar of the int32 type.
The tensor of column :py:obj:`transcription` is a scalar of the string type.
The tensor of column :py:obj:`normalized_transcript` is a scalar of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
num_samples (int, optional): The number of audios to be included in the dataset
(default=None, all audios).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> lj_speech_dataset_dir = "/path/to/lj_speech_dataset_directory"
>>>
>>> # 1) Get all samples from LJSPEECH dataset in sequence
>>> dataset = ds.LJSpeechDataset(dataset_dir=lj_speech_dataset_dir, shuffle=False)
>>>
>>> # 2) Randomly select 350 samples from LJSPEECH dataset
>>> dataset = ds.LJSpeechDataset(dataset_dir=lj_speech_dataset_dir, num_samples=350, shuffle=True)
>>>
>>> # 3) Get samples from LJSPEECH dataset for shard 0 in a 2-way distributed training
>>> dataset = ds.LJSpeechDataset(dataset_dir=lj_speech_dataset_dir, num_shards=2, shard_id=0)
>>>
>>> # In LJSPEECH dataset, each dictionary has keys "waveform", "sample_rate", "transcription"
>>> # and "normalized_transcript"
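>>> # Illustrative sketch: pick samples with an explicit sampler instead of shuffle
>>> # (assumes SequentialSampler is available as ds.SequentialSampler)
>>> dataset = ds.LJSpeechDataset(dataset_dir=lj_speech_dataset_dir,
...                              sampler=ds.SequentialSampler(num_samples=100))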
About LJSPEECH dataset:
This is a public domain speech dataset consisting of 13,100 short audio clips of a single speaker
reading passages from 7 non-fiction books. A transcription is provided for each clip.
Clips vary in length from 1 to 10 seconds and have a total length of approximately 24 hours.
The texts were published between 1884 and 1964, and are in the public domain.
The audio was recorded in 2016-17 by the LibriVox project and is also in the public domain.
Here is the original LJSPEECH dataset structure.
You can unzip the dataset files into the following directory structure and read them with MindSpore's API.
.. code-block::
.
└── LJSpeech-1.1
├── README
├── metadata.csv
└── wavs
├── LJ001-0001.wav
├── LJ001-0002.wav
├── LJ001-0003.wav
├── LJ001-0004.wav
├── LJ001-0005.wav
├── LJ001-0006.wav
├── LJ001-0007.wav
├── LJ001-0008.wav
...
├── LJ050-0277.wav
└── LJ050-0278.wav
Citation:
.. code-block::
@misc{lj_speech17,
author = {Keith Ito and Linda Johnson},
title = {The LJ Speech Dataset},
howpublished = {url{https://keithito.com/LJ-Speech-Dataset}},
year = 2017
}
"""
@check_lj_speech_dataset
def __init__(self, dataset_dir, num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
def parse(self, children=None):
return cde.LJSpeechNode(self.dataset_dir, self.sampler)
class TextFileDataset(SourceDataset):
"""
A source dataset that reads and parses datasets stored on disk in text format.
The generated dataset has one column :py:obj:`[text]` with type string.
Args:
dataset_files (Union[str, list[str]]): String or list of files to be read or glob strings to search for a
pattern of files. The list will be sorted in a lexicographical order.
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, will include all samples).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL;
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_files are not valid or do not exist.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
Examples:
>>> text_file_dataset_dir = ["/path/to/text_file_dataset_file"] # contains 1 or multiple text files
>>> dataset = ds.TextFileDataset(dataset_files=text_file_dataset_dir)
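>>> # Illustrative sketch: shuffle at file level only, keeping the order inside each file
>>> dataset = ds.TextFileDataset(dataset_files=text_file_dataset_dir, shuffle=ds.Shuffle.FILES)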
"""
@check_textfiledataset
def __init__(self, dataset_files, num_samples=None, num_parallel_workers=None, shuffle=Shuffle.GLOBAL,
num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_files = self._find_files(dataset_files)
self.dataset_files.sort()
def parse(self, children=None):
return cde.TextFileNode(self.dataset_files, self.num_samples, self.shuffle_flag, self.num_shards,
self.shard_id)
class _NumpySlicesDataset:
"""
Mainly for handling several formats of Python data; returns one row at a time.
"""
def __init__(self, data, column_list=None):
self.column_list = None
# Convert dict data into tuple
if isinstance(data, dict):
data = self.process_dict(data)
if isinstance(data, tuple):
self.data = ()
data_len = len(data)
for i in range(data_len):
self.data = self.data + (np.array(data[i]),)
else:
self.data = (np.array(data),)
# check whether the data length in each column is equal
data_len = [len(data_item) for data_item in self.data]
if data_len[1:] != data_len[:-1]:
raise ValueError("Data length in each column is not equal.")
# Init column_name
if column_list is not None:
self.column_list = column_list
elif self.column_list is None:
self.column_list = []
column_num = len(self.data)
for i in range(column_num):
self.column_list.append("column_" + str(i))
def __getitem__(self, index):
data_row = [d[index, ...] for d in self.data]
data_res = tuple(data_row)
return data_res
def __len__(self):
return len(self.data[0])
def process_dict(self, input_data):
"""
Convert dict-like data into tuple format; a pandas-like dict (whose values have a `values` attribute) is first converted into a general dict.
"""
# Convert a pandas-like dict (values have a "values" attribute) into a general dict
data_keys = list(input_data.keys())
data_col = input_data[data_keys[0]]
if hasattr(data_col, "values"):
new_dict = {}
for key in data_keys:
item1 = input_data.pop(key)
new_dict[key] = item1.values
input_data = new_dict
# Convert the data in dict into tuple
data = ()
keys = list(input_data.keys())
self.column_list = keys
for key in keys:
value = input_data[key]
data = data + (list(value),)
return data
class NumpySlicesDataset(GeneratorDataset):
"""
Creates a dataset with given data slices, mainly for loading Python data into dataset.
The column names and column types of generated dataset depend on Python data defined by users.
Args:
data (Union[list, tuple, dict]): Input of given data. Supported data types include: list, tuple, dict and other
NumPy formats. Input data will be sliced along the first dimension to generate rows; if the input is a
list, each row will have one column, otherwise each row tends to have multiple columns. Loading large data
this way is not recommended because the data is loaded into memory.
column_names (list[str], optional): List of column names of the dataset (default=None). If column_names is not
provided, the output column names will be named as the keys of dict when the input data is a dict,
otherwise they will be named like column_0, column_1 ...
num_samples (int, optional): The number of samples to be included in the dataset (default=None, all samples).
num_parallel_workers (int, optional): Number of subprocesses used to fetch the dataset in parallel (default=1).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset. Random accessible input is required.
(default=None, expected order behavior shown in the table).
sampler (Union[Sampler, Iterable], optional): Object used to choose samples from the dataset. Random accessible
input is required (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
Random accessible input is required. When this argument is specified, `num_samples` reflects the max
number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This argument must be specified only
when num_shards is also specified. Random accessible input is required.
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Raises:
RuntimeError: If the length of column_names does not match the output length of data.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Examples:
>>> # 1) Input data can be a list
>>> data = [1, 2, 3]
>>> dataset = ds.NumpySlicesDataset(data=data, column_names=["column_1"])
>>>
>>> # 2) Input data can be a dictionary, and column_names will be its keys
>>> data = {"a": [1, 2], "b": [3, 4]}
>>> dataset = ds.NumpySlicesDataset(data=data)
>>>
>>> # 3) Input data can be a tuple of lists (or NumPy arrays), each tuple element refers to data in each column
>>> data = ([1, 2], [3, 4], [5, 6])
>>> dataset = ds.NumpySlicesDataset(data=data, column_names=["column_1", "column_2", "column_3"])
>>>
>>> # 4) Load data from CSV file
>>> import pandas as pd
>>> df = pd.read_csv(filepath_or_buffer=csv_dataset_dir[0])
>>> dataset = ds.NumpySlicesDataset(data=dict(df), shuffle=False)
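>>> # 5) Illustrative sketch: slice the same kind of data across 2 shards (shard 0 shown)
>>> data = ([1, 2, 3, 4], [5, 6, 7, 8])
>>> dataset = ds.NumpySlicesDataset(data=data, column_names=["col_1", "col_2"],
...                                 num_shards=2, shard_id=0)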
"""
@check_numpyslicesdataset
def __init__(self, data, column_names=None, num_samples=None, num_parallel_workers=1, shuffle=None, sampler=None,
num_shards=None, shard_id=None):
dataset = _NumpySlicesDataset(data, column_names)
super().__init__(dataset, column_names=dataset.column_list, num_samples=num_samples,
num_parallel_workers=num_parallel_workers, shuffle=shuffle, sampler=sampler,
num_shards=num_shards, shard_id=shard_id)
class _PaddedDataset:
"""
Mainly for combining filler samples provided by users into a dataset.
Args:
padded_samples (list(dict)): Data provided by user to be added to the initial Dataset.
"""
def __init__(self, padded_samples):
self.column_names = list(padded_samples[0].keys())
self.padded_samples = padded_samples
def __getitem__(self, item):
return (self.padded_samples[item][key] for key in self.column_names)
def __len__(self):
return len(self.padded_samples)
class PaddedDataset(GeneratorDataset):
"""
Creates a dataset with filler data provided by the user. Mainly used to append filler samples to the original
dataset and assign them to the corresponding shard.
Args:
padded_samples (list(dict)): Samples provided by user.
Raises:
TypeError: If padded_samples is not an instance of list.
TypeError: If the element of padded_samples is not an instance of dict.
ValueError: If the padded_samples is empty.
Examples:
>>> import numpy as np
>>> data = [{'image': np.zeros(1, np.uint8)}, {'image': np.zeros(2, np.uint8)}]
>>> dataset = ds.PaddedDataset(padded_samples=data)
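>>> # Illustrative sketch (assumed workflow, names hypothetical): append the padded samples
>>> # to an existing dataset so the row count divides evenly across shards, e.g.
>>> #   combined = image_dataset + ds.PaddedDataset(padded_samples=data)
>>> #   combined.use_sampler(ds.DistributedSampler(num_shards=2, shard_id=0, shuffle=False))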
"""
@check_paddeddataset
def __init__(self, padded_samples):
dataset = _PaddedDataset(padded_samples)
super().__init__(dataset, column_names=dataset.column_names, num_shards=None, shard_id=None, shuffle=False)
self._dataset_size = len(dataset.padded_samples)
self.padded_samples = padded_samples
class EMnistDataset(MappableDataset):
"""
A source dataset for reading and parsing the EMNIST dataset.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
name (str): Name of splits for this dataset, can be "byclass", "bymerge", "balanced", "letters", "digits"
or "mnist".
usage (str, optional): Usage of this dataset, can be "train", "test" or "all".
(default=None, will read all samples).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> emnist_dataset_dir = "/path/to/emnist_dataset_directory"
>>>
>>> # Read 3 samples from EMNIST dataset
>>> dataset = ds.EMnistDataset(dataset_dir=emnist_dataset_dir, name="mnist", num_samples=3)
>>>
>>> # Note: In EMNIST dataset, each dictionary has keys "image" and "label"
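>>> # Illustrative sketch: read the train split of the 'letters' subset
>>> dataset = ds.EMnistDataset(dataset_dir=emnist_dataset_dir, name="letters", usage="train")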
About EMNIST dataset:
The EMNIST dataset is a set of handwritten character digits derived from the NIST Special
Database 19 and converted to a 28x28 pixel image format and dataset structure that directly
matches the MNIST dataset. Further information on the dataset contents and conversion process
can be found in the paper available at https://arxiv.org/abs/1702.05373v1.
The numbers of characters and classes of each split of EMNIST are as follows:
By Class: 814,255 characters and 62 unbalanced classes.
By Merge: 814,255 characters and 47 unbalanced classes.
Balanced: 131,600 characters and 47 balanced classes.
Letters: 145,600 characters and 26 balanced classes.
Digits: 280,000 characters and 10 balanced classes.
MNIST: 70,000 characters and 10 balanced classes.
Here is the original EMNIST dataset structure.
You can unzip the dataset files into this directory structure and read them with MindSpore's API.
.. code-block::
.
└── mnist_dataset_dir
├── emnist-mnist-train-images-idx3-ubyte
├── emnist-mnist-train-labels-idx1-ubyte
├── emnist-mnist-test-images-idx3-ubyte
├── emnist-mnist-test-labels-idx1-ubyte
├── ...
Citation:
.. code-block::
@article{cohen_afshar_tapson_schaik_2017,
title = {EMNIST: Extending MNIST to handwritten letters},
DOI = {10.1109/ijcnn.2017.7966217},
journal = {2017 International Joint Conference on Neural Networks (IJCNN)},
author = {Cohen, Gregory and Afshar, Saeed and Tapson, Jonathan and Schaik, Andre Van},
year = {2017},
howpublished = {https://www.westernsydney.edu.au/icns/reproducible_research/
publication_support_materials/emnist}
}
"""
@check_emnist_dataset
def __init__(self, dataset_dir, name, usage=None, num_samples=None, num_parallel_workers=None,
shuffle=None, sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.name = name
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.EMnistNode(self.dataset_dir, self.name, self.usage, self.sampler)
class FakeImageDataset(MappableDataset):
"""
A source dataset for generating fake images.
The generated dataset has two columns :py:obj:`[image, label]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`label` is a scalar of the uint32 type.
Args:
num_images (int, optional): Number of images to generate in the dataset (default=1000).
image_size (tuple, optional): Size of the fake image (default=(224, 224, 3)).
num_classes (int, optional): Number of classes in the dataset (default=10).
base_seed (int, optional): Offsets the index-based random seed used to generate each image (default=0).
num_samples (int, optional): The number of images to be included in the dataset
(default=None, will read all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This
argument can only be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a sampler. 'sampler' and 'shuffle' are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle'
:widths: 25 25 50
:header-rows: 1
* - Parameter 'sampler'
- Parameter 'shuffle'
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> # Read 3 samples from FakeImage dataset
>>> dataset = ds.FakeImageDataset(num_images=1000, image_size=(224,224,3),
... num_classes=10, base_seed=0, num_samples=3)
>>>
>>> # Note: In FakeImage dataset, each dictionary has keys "image" and "label"
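>>> # Illustrative sketch: generate a small deterministic fake dataset and take shard 0 of 2
>>> dataset = ds.FakeImageDataset(num_images=100, image_size=(32, 32, 3), num_classes=2,
...                               base_seed=0, num_shards=2, shard_id=0)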
"""
@check_fake_image_dataset
def __init__(self, num_images=1000, image_size=(224, 224, 3), num_classes=10, base_seed=0, num_samples=None,
num_parallel_workers=None, shuffle=None, sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.num_images = num_images
self.image_size = image_size
self.num_classes = num_classes
self.base_seed = base_seed
def parse(self, children=None):
return cde.FakeImageNode(self.num_images, self.image_size, self.num_classes, self.base_seed, self.sampler)
class FlickrDataset(MappableDataset):
"""
A source dataset for reading and parsing the Flickr8k and Flickr30k datasets.
The generated dataset has two columns :py:obj:`[image, annotation]`.
The tensor of column :py:obj:`image` is of the uint8 type.
The tensor of column :py:obj:`annotation` is a tensor which contains 5 annotation strings,
such as ["a", "b", "c", "d", "e"].
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
annotation_file (str): Path to the file that contains the annotations.
num_samples (int, optional): The number of images to be included in the dataset.
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
decode (bool, optional): Decode the images after reading (default=False).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
            the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir is not valid or does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
        ValueError: If dataset_dir does not exist.
        ValueError: If annotation_file does not exist.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> flickr_dataset_dir = "/path/to/flickr_dataset_directory"
>>> annotation_file = "/path/to/flickr_annotation_file"
>>>
>>> # 1) Get all samples from FLICKR dataset in sequence
>>> dataset = ds.FlickrDataset(dataset_dir=flickr_dataset_dir,
... annotation_file=annotation_file,
... shuffle=False)
>>>
>>> # 2) Randomly select 350 samples from FLICKR dataset
>>> dataset = ds.FlickrDataset(dataset_dir=flickr_dataset_dir,
... annotation_file=annotation_file,
... num_samples=350,
... shuffle=True)
>>>
>>> # 3) Get samples from FLICKR dataset for shard 0 in a 2-way distributed training
>>> dataset = ds.FlickrDataset(dataset_dir=flickr_dataset_dir,
... annotation_file=annotation_file,
... num_shards=2,
... shard_id=0)
>>>
>>> # In FLICKR dataset, each dictionary has keys "image" and "annotation"
About Flickr8k dataset:
    The Flickr8k dataset consists of 8092 colour images. There are 40460 annotations in Flickr8k.token.txt;
    each image has 5 annotations.
You can unzip the dataset files into the following directory structure and read by MindSpore's API.
.. code-block::
.
└── Flickr8k
├── Flickr8k_Dataset
│ ├── 1000268201_693b08cb0e.jpg
│ ├── 1001773457_577c3a7d70.jpg
│ ├── ...
└── Flickr8k.token.txt
Citation:
.. code-block::
@article{DBLP:journals/jair/HodoshYH13,
author = {Micah Hodosh and Peter Young and Julia Hockenmaier},
title = {Framing Image Description as a Ranking Task: Data, Models and Evaluation Metrics},
journal = {J. Artif. Intell. Res.},
volume = {47},
pages = {853--899},
year = {2013},
url = {https://doi.org/10.1613/jair.3994},
doi = {10.1613/jair.3994},
timestamp = {Mon, 21 Jan 2019 15:01:17 +0100},
biburl = {https://dblp.org/rec/journals/jair/HodoshYH13.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
About Flickr30k dataset:
    The Flickr30k dataset consists of 31783 colour images. There are 158915 annotations in
    results_20130124.token; each image has 5 annotations.
You can unzip the dataset files into the following directory structure and read by MindSpore's API.
.. code-block::
.
└── Flickr30k
├── flickr30k-images
│ ├── 1000092795.jpg
│ ├── 10002456.jpg
│ ├── ...
└── results_20130124.token
    Citation:
    .. code-block::
@article{DBLP:journals/tacl/YoungLHH14,
author = {Peter Young and Alice Lai and Micah Hodosh and Julia Hockenmaier},
title = {From image descriptions to visual denotations: New similarity metrics
for semantic inference over event descriptions},
journal = {Trans. Assoc. Comput. Linguistics},
volume = {2},
pages = {67--78},
year = {2014},
url = {https://tacl2013.cs.columbia.edu/ojs/index.php/tacl/article/view/229},
timestamp = {Wed, 17 Feb 2021 21:55:25 +0100},
biburl = {https://dblp.org/rec/journals/tacl/YoungLHH14.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
@check_flickr_dataset
def __init__(self, dataset_dir, annotation_file, num_samples=None, num_parallel_workers=None, shuffle=None,
decode=None, sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.annotation_file = annotation_file
self.decode = replace_none(decode, False)
def parse(self, children=None):
return cde.FlickrNode(self.dataset_dir, self.annotation_file, self.decode, self.sampler)
class SBDataset(GeneratorDataset):
"""
A source dataset for reading and parsing Semantic Boundaries Dataset.
The generated dataset has two columns: :py:obj:`[image, task]`.
The tensor of column :py:obj:`image` is of the uint8 type.
    The tensor of column :py:obj:`task` contains 20 images of the uint8 type if `task` is `Boundaries`;
    otherwise it contains 1 image of the uint8 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
task (str, optional): Acceptable tasks include `Boundaries` or `Segmentation` (default= `Boundaries`).
usage (str, optional): Acceptable usages include `train`, `val`, `train_noval` and `all` (default= `all`).
num_samples (int, optional): The number of images to be included in the dataset.
(default=None, all images).
        num_parallel_workers (int, optional): Number of workers to read the data
            (default=1).
        shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
            order behavior shown in the table).
        decode (bool, optional): Decode the images after reading (default=None, equivalent to False).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
            the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
Raises:
RuntimeError: If dataset_dir is not valid or does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
        ValueError: If dataset_dir does not exist.
ValueError: If task is not in [`Boundaries`, `Segmentation`].
ValueError: If usage is not in [`train`, `val`, `train_noval`, `all`].
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a sampler. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> sb_dataset_dir = "/path/to/sb_dataset_directory"
>>>
>>> # 1) Get all samples from Semantic Boundaries Dataset in sequence
>>> dataset = ds.SBDataset(dataset_dir=sb_dataset_dir, shuffle=False)
>>>
>>> # 2) Randomly select 350 samples from Semantic Boundaries Dataset
>>> dataset = ds.SBDataset(dataset_dir=sb_dataset_dir, num_samples=350, shuffle=True)
>>>
>>> # 3) Get samples from Semantic Boundaries Dataset for shard 0 in a 2-way distributed training
>>> dataset = ds.SBDataset(dataset_dir=sb_dataset_dir, num_shards=2, shard_id=0)
>>>
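        >>> # 4) A sketch of reading category-level segmentation masks instead of boundaries,
        >>> # using the `task` and `usage` values documented above
        >>> dataset = ds.SBDataset(dataset_dir=sb_dataset_dir, task="Segmentation", usage="val")
        >>>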
>>> # In Semantic Boundaries Dataset, each dictionary has keys "image" and "task"
About Semantic Boundaries Dataset:
    The Semantic Boundaries Dataset consists of 11355 colour images. There are 8498 image names in train.txt,
    2857 image names in val.txt and 5623 image names in train_noval.txt. The cls/ directory contains
    category-level Segmentation and Boundaries results, and the inst/ directory contains instance-level
    Segmentation and Boundaries results.
You can unzip the dataset files into the following structure and read by MindSpore's API:
.. code-block::
.
└── benchmark_RELEASE
├── dataset
├── img
│ ├── 2008_000002.jpg
│ ├── 2008_000003.jpg
│ ├── ...
├── cls
│ ├── 2008_000002.mat
│ ├── 2008_000003.mat
│ ├── ...
├── inst
│ ├── 2008_000002.mat
│ ├── 2008_000003.mat
│ ├── ...
├── train.txt
└── val.txt
    Citation:
    .. code-block::
@InProceedings{BharathICCV2011,
author = "Bharath Hariharan and Pablo Arbelaez and Lubomir Bourdev and
Subhransu Maji and Jitendra Malik",
title = "Semantic Contours from Inverse Detectors",
booktitle = "International Conference on Computer Vision (ICCV)",
year = "2011",
"""
@check_sb_dataset
def __init__(self, dataset_dir, task='Boundaries', usage='all', num_samples=None, num_parallel_workers=1,
shuffle=None, decode=None, sampler=None, num_shards=None, shard_id=None):
dataset = _SBDataset(dataset_dir, task, usage, decode)
super().__init__(dataset, column_names=dataset.column_list, num_samples=num_samples,
num_parallel_workers=num_parallel_workers, shuffle=shuffle, sampler=sampler,
num_shards=num_shards, shard_id=shard_id)
class _SBDataset:
"""
Dealing with the data file with .mat extension, and return one row in tuple (image, task) each time.
"""
def __init__(self, dataset_dir, task, usage, decode):
self.column_list = ['image', 'task']
self.task = task
self.images_path = os.path.join(dataset_dir, 'img')
self.cls_path = os.path.join(dataset_dir, 'cls')
self._loadmat = loadmat
self.categories = 20
self.decode = replace_none(decode, False)
if usage == "all":
image_names = []
for item in ["train", "val"]:
usage_path = os.path.join(dataset_dir, item + '.txt')
if not os.path.exists(usage_path):
raise FileNotFoundError("SBDataset: {0} not found".format(usage_path))
with open(usage_path, 'r') as f:
image_names += [x.strip() for x in f.readlines()]
else:
usage_path = os.path.join(dataset_dir, usage + '.txt')
if not os.path.exists(usage_path):
raise FileNotFoundError("SBDataset: {0} not found".format(usage_path))
with open(usage_path, 'r') as f:
image_names = [x.strip() for x in f.readlines()]
self.images = [os.path.join(self.images_path, i + ".jpg") for i in image_names]
self.clss = [os.path.join(self.cls_path, i + ".mat") for i in image_names]
if len(self.images) != len(self.clss):
raise ValueError("SBDataset: images count not equal to cls count")
self._get_data = self._get_boundaries_data if self.task == "Boundaries" else self._get_segmentation_data
self._get_item = self._get_decode_item if self.decode else self._get_undecode_item
def _get_boundaries_data(self, mat_path):
mat_data = self._loadmat(mat_path)
return np.concatenate([np.expand_dims(mat_data['GTcls'][0][self.task][0][i][0].toarray(), axis=0)
for i in range(self.categories)], axis=0)
def _get_segmentation_data(self, mat_path):
mat_data = self._loadmat(mat_path)
return Image.fromarray(mat_data['GTcls'][0][self.task][0])
def _get_decode_item(self, idx):
return Image.open(self.images[idx]).convert('RGB'), self._get_data(self.clss[idx])
def _get_undecode_item(self, idx):
return np.fromfile(self.images[idx], dtype=np.uint8), self._get_data(self.clss[idx])
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
return self._get_item(idx)
class DeserializedDataset(Dataset):
def __init__(self, input_obj):
super().__init__()
self.input_obj = input_obj
def parse(self, children=None):
if isinstance(self.input_obj, dict):
json_str = json.dumps(self.input_obj)
return cde.Dataset.from_json_string(json_str)
return cde.Dataset.from_json_file(self.input_obj)
class CityscapesDataset(MappableDataset):
"""
A source dataset for reading and parsing Cityscapes dataset.
The generated dataset has two columns :py:obj:`[image, task]`.
The tensor of column :py:obj:`image` is of the uint8 type.
    The tensor of column :py:obj:`task` is of the uint8 type if `task` is not 'polygon'; otherwise it is
    a string tensor containing serialized JSON.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str): Acceptable usages include `train`, `test`, `val` or `all` if quality_mode is `fine`
otherwise `train`, `train_extra`, `val` or `all` (default= `train`).
quality_mode (str): Acceptable quality_modes include `fine` or `coarse` (default= `fine`).
task (str): Acceptable tasks include `instance`, `semantic`, `polygon` or `color` (default= `instance`).
num_samples (int, optional): The number of images to be included in the dataset.
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
decode (bool, optional): Decode the images after reading (default=False).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
            the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir is invalid or does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
        ValueError: If dataset_dir does not exist.
ValueError: If task is invalid.
ValueError: If quality_mode is invalid.
ValueError: If usage is invalid.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> cityscapes_dataset_dir = "/path/to/cityscapes_dataset_directory"
>>>
>>> # 1) Get all samples from Cityscapes dataset in sequence
>>> dataset = ds.CityscapesDataset(dataset_dir=cityscapes_dataset_dir, task="instance", quality_mode="fine",
... usage="train", shuffle=False, num_parallel_workers=1)
>>>
>>> # 2) Randomly select 350 samples from Cityscapes dataset
>>> dataset = ds.CityscapesDataset(dataset_dir=cityscapes_dataset_dir, num_samples=350, shuffle=True,
... num_parallel_workers=1)
>>>
>>> # 3) Get samples from Cityscapes dataset for shard 0 in a 2-way distributed training
>>> dataset = ds.CityscapesDataset(dataset_dir=cityscapes_dataset_dir, num_shards=2, shard_id=0,
... num_parallel_workers=1)
>>>
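        >>> # 4) A sketch of reading polygon annotations, which the docstring above notes are
        >>> # returned as serialized JSON strings
        >>> dataset = ds.CityscapesDataset(dataset_dir=cityscapes_dataset_dir, task="polygon",
        ...                                quality_mode="fine", usage="train", num_parallel_workers=1)
        >>>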
>>> # In Cityscapes dataset, each dictionary has keys "image" and "task"
About Cityscapes dataset:
The Cityscapes dataset consists of 5000 colour images with high quality dense pixel annotations and
19998 colour images with coarser polygonal annotations in 50 cities. There are 30 classes in this
dataset and the polygonal annotations include dense semantic segmentation and instance segmentation
for vehicle and people.
You can unzip the dataset files into the following directory structure and read by MindSpore's API.
Taking the quality_mode of `fine` as an example.
.. code-block::
.
└── Cityscapes
├── leftImg8bit
| ├── train
| | ├── aachen
| | | ├── aachen_000000_000019_leftImg8bit.png
| | | ├── aachen_000001_000019_leftImg8bit.png
| | | ├── ...
| | ├── bochum
| | | ├── ...
| | ├── ...
| ├── test
| | ├── ...
| ├── val
| | ├── ...
└── gtFine
├── train
| ├── aachen
| | ├── aachen_000000_000019_gtFine_color.png
| | ├── aachen_000000_000019_gtFine_instanceIds.png
| | ├── aachen_000000_000019_gtFine_labelIds.png
| | ├── aachen_000000_000019_gtFine_polygons.json
| | ├── aachen_000001_000019_gtFine_color.png
| | ├── aachen_000001_000019_gtFine_instanceIds.png
| | ├── aachen_000001_000019_gtFine_labelIds.png
| | ├── aachen_000001_000019_gtFine_polygons.json
| | ├── ...
| ├── bochum
| | ├── ...
| ├── ...
├── test
| ├── ...
└── val
├── ...
Citation:
.. code-block::
@inproceedings{Cordts2016Cityscapes,
title = {The Cityscapes Dataset for Semantic Urban Scene Understanding},
author = {Cordts, Marius and Omran, Mohamed and Ramos, Sebastian and Rehfeld, Timo and Enzweiler,
Markus and Benenson, Rodrigo and Franke, Uwe and Roth, Stefan and Schiele, Bernt},
booktitle = {Proc. of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
year = {2016}
}
"""
@check_cityscapes_dataset
def __init__(self, dataset_dir, usage="train", quality_mode="fine", task="instance", num_samples=None,
num_parallel_workers=None, shuffle=None, decode=None, sampler=None, num_shards=None,
shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.task = task
self.quality_mode = quality_mode
self.usage = usage
self.decode = replace_none(decode, False)
def parse(self, children=None):
return cde.CityscapesNode(self.dataset_dir, self.usage, self.quality_mode, self.task, self.decode, self.sampler)
class DBpediaDataset(SourceDataset):
"""
A source dataset that reads and parses the DBpedia dataset.
The generated dataset has three columns :py:obj:`[class, title, content]`.
The tensor of column :py:obj:`class` is of the string type.
The tensor of column :py:obj:`title` is of the string type.
The tensor of column :py:obj:`content` is of the string type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str, optional): Usage of this dataset, can be `train`, `test` or `all`.
`train` will read from 560,000 train samples,
`test` will read from 70,000 test samples,
`all` will read from all 630,000 samples (default=None, all samples).
num_samples (int, optional): The number of samples to be included in the dataset
(default=None, will include all text).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (Union[bool, Shuffle level], optional): Perform reshuffling of the data every epoch
(default=Shuffle.GLOBAL).
If shuffle is False, no shuffling will be performed;
If shuffle is True, the behavior is the same as setting shuffle to be Shuffle.GLOBAL;
Otherwise, there are two levels of shuffling:
- Shuffle.GLOBAL: Shuffle both the files and samples.
- Shuffle.FILES: Shuffle files only.
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
            When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Examples:
>>> dbpedia_dataset_dir = "/path/to/dbpedia_dataset_directory"
>>>
>>> # 1) Read 3 samples from DBpedia dataset
>>> dataset = ds.DBpediaDataset(dataset_dir=dbpedia_dataset_dir, num_samples=3)
>>>
>>> # 2) Read train samples from DBpedia dataset
>>> dataset = ds.DBpediaDataset(dataset_dir=dbpedia_dataset_dir, usage="train")
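        >>>
        >>> # 3) A sketch of file-level shuffling only, using the Shuffle levels described in
        >>> # the `shuffle` parameter above (assumes the ds.Shuffle enum is available)
        >>> dataset = ds.DBpediaDataset(dataset_dir=dbpedia_dataset_dir, usage="all",
        ...                             shuffle=ds.Shuffle.FILES)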
About DBpedia dataset:
    The DBpedia dataset consists of 630,000 text samples in 14 classes: 560,000 samples in train.csv
    and 70,000 samples in test.csv.
    The 14 classes are Company, EducationalInstitution, Artist, Athlete, OfficeHolder,
    MeanOfTransportation, Building, NaturalPlace, Village, Animal, Plant, Album, Film, WrittenWork.
    Here is the original DBpedia dataset structure.
    You can unzip the dataset files into this directory structure and read them with MindSpore's API.
.. code-block::
.
└── dbpedia_dataset_dir
├── train.csv
├── test.csv
├── classes.txt
└── readme.txt
    Citation:
    .. code-block::
@article{DBpedia,
title = {DBPedia Ontology Classification Dataset},
author = {Jens Lehmann, Robert Isele, Max Jakob, Anja Jentzsch, Dimitris Kontokostas,
Pablo N. Mendes, Sebastian Hellmann, Mohamed Morsey, Patrick van Kleef,
Sören Auer, Christian Bizer},
year = {2015},
howpublished = {http://dbpedia.org}
}
"""
@check_dbpedia_dataset
def __init__(self, dataset_dir, usage=None, num_samples=None, num_parallel_workers=None, shuffle=Shuffle.GLOBAL,
num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, num_samples=num_samples, shuffle=shuffle,
num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = replace_none(usage, "all")
def parse(self, children=None):
return cde.DBpediaNode(self.dataset_dir, self.usage, self.num_samples, self.shuffle_flag, self.num_shards,
self.shard_id)
class DIV2KDataset(MappableDataset):
"""
A source dataset for reading and parsing DIV2KDataset dataset.
The generated dataset has two columns :py:obj:`[hr_image, lr_image]`.
The tensor of column :py:obj:`hr_image` is of the uint8 type.
The tensor of column :py:obj:`lr_image` is of the uint8 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
usage (str): Acceptable usages include `train`, `valid` or `all` (default= `train`).
downgrade (str): Acceptable downgrades include `bicubic`, `unknown`, `mild`, `difficult` or
`wild` (default= `bicubic`).
scale (int): Acceptable scales include 2, 3, 4 or 8 (default=2).
When `downgrade` is `bicubic`, scale can be 2, 3, 4, 8.
When `downgrade` is `unknown`, scale can only be 2, 3, 4.
When `downgrade` is `mild`, `difficult` or `wild`, scale can only be 4.
num_samples (int, optional): The number of images to be included in the dataset.
(default=None, all images).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, number set in the config).
shuffle (bool, optional): Whether to perform shuffle on the dataset (default=None, expected
order behavior shown in the table).
decode (bool, optional): Decode the images after reading (default=False).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided
into (default=None). When this argument is specified, `num_samples` reflects
            the maximum number of samples per shard.
shard_id (int, optional): The shard ID within num_shards (default=None). This
argument can only be specified when num_shards is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir is invalid or does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
        ValueError: If dataset_dir does not exist.
ValueError: If usage is invalid.
ValueError: If downgrade is invalid.
ValueError: If scale is invalid.
        ValueError: If scale is 8 and downgrade is not `bicubic`.
        ValueError: If downgrade is in [`mild`, `difficult`, `wild`] and scale is not 4.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> div2k_dataset_dir = "/path/to/div2k_dataset_directory"
>>>
>>> # 1) Get all samples from DIV2K dataset in sequence
>>> dataset = ds.DIV2KDataset(dataset_dir=div2k_dataset_dir, usage="train", scale=2, downgrade="bicubic",
... shuffle=False)
>>>
>>> # 2) Randomly select 350 samples from DIV2K dataset
>>> dataset = ds.DIV2KDataset(dataset_dir=div2k_dataset_dir, usage="train", scale=2, downgrade="bicubic",
... num_samples=350, shuffle=True)
>>>
>>> # 3) Get samples from DIV2K dataset for shard 0 in a 2-way distributed training
>>> dataset = ds.DIV2KDataset(dataset_dir=div2k_dataset_dir, usage="train", scale=2, downgrade="bicubic",
... num_shards=2, shard_id=0)
>>>
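        >>> # 4) A sketch of the `mild` downgrade, for which only scale=4 is valid
        >>> # (see the scale/downgrade constraints above)
        >>> dataset = ds.DIV2KDataset(dataset_dir=div2k_dataset_dir, usage="train", scale=4,
        ...                           downgrade="mild")
        >>>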
>>> # In DIV2K dataset, each dictionary has keys "hr_image" and "lr_image"
About DIV2K dataset:
The DIV2K dataset consists of 1000 2K resolution images, among which 800 images are for training, 100 images
    are for validation and 100 images are for testing. NTIRE 2017 and NTIRE 2018 include only the training
    and validation sets.
You can unzip the dataset files into the following directory structure and read by MindSpore's API.
Take the training set as an example.
.. code-block::
.
└── DIV2K
├── DIV2K_train_HR
| ├── 0001.png
| ├── 0002.png
| ├── ...
├── DIV2K_train_LR_bicubic
| ├── X2
| | ├── 0001x2.png
| | ├── 0002x2.png
| | ├── ...
| ├── X3
| | ├── 0001x3.png
| | ├── 0002x3.png
| | ├── ...
| └── X4
| ├── 0001x4.png
| ├── 0002x4.png
| ├── ...
├── DIV2K_train_LR_unknown
| ├── X2
| | ├── 0001x2.png
| | ├── 0002x2.png
| | ├── ...
| ├── X3
| | ├── 0001x3.png
| | ├── 0002x3.png
| | ├── ...
| └── X4
| ├── 0001x4.png
| ├── 0002x4.png
| ├── ...
├── DIV2K_train_LR_mild
| ├── 0001x4m.png
| ├── 0002x4m.png
| ├── ...
├── DIV2K_train_LR_difficult
| ├── 0001x4d.png
| ├── 0002x4d.png
| ├── ...
├── DIV2K_train_LR_wild
| ├── 0001x4w.png
| ├── 0002x4w.png
| ├── ...
└── DIV2K_train_LR_x8
├── 0001x8.png
├── 0002x8.png
├── ...
Citation:
.. code-block::
@InProceedings{Agustsson_2017_CVPR_Workshops,
author = {Agustsson, Eirikur and Timofte, Radu},
title = {NTIRE 2017 Challenge on Single Image Super-Resolution: Dataset and Study},
booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops},
url = "http://www.vision.ee.ethz.ch/~timofter/publications/Agustsson-CVPRW-2017.pdf",
month = {July},
year = {2017}
}
"""
@check_div2k_dataset
def __init__(self, dataset_dir, usage="train", downgrade="bicubic", scale=2, num_samples=None,
num_parallel_workers=None, shuffle=None, decode=None, sampler=None, num_shards=None,
shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
self.usage = usage
self.scale = scale
self.downgrade = downgrade
self.decode = replace_none(decode, False)
def parse(self, children=None):
return cde.DIV2KNode(self.dataset_dir, self.usage, self.downgrade, self.scale, self.decode, self.sampler)
class YesNoDataset(MappableDataset):
"""
A source dataset for reading and parsing the YesNo dataset.
The generated dataset has three columns :py:obj:`[waveform, sample_rate, labels]`.
The tensor of column :py:obj:`waveform` is a vector of the float32 type.
The tensor of column :py:obj:`sample_rate` is a scalar of the int32 type.
The tensor of column :py:obj:`labels` is a scalar of the int32 type.
Args:
dataset_dir (str): Path to the root directory that contains the dataset.
        num_samples (int, optional): The number of samples to be included in the dataset
            (default=None, will read all samples).
num_parallel_workers (int, optional): Number of workers to read the data
(default=None, will use value set in the config).
shuffle (bool, optional): Whether or not to perform shuffle on the dataset
(default=None, expected order behavior shown in the table).
sampler (Sampler, optional): Object used to choose samples from the
dataset (default=None, expected order behavior shown in the table).
num_shards (int, optional): Number of shards that the dataset will be divided into (default=None).
            When this argument is specified, `num_samples` reflects the maximum number of samples per shard.
shard_id (int, optional): The shard ID within `num_shards` (default=None). This argument can only
be specified when `num_shards` is also specified.
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing
(default=None, which means no cache is used).
Raises:
RuntimeError: If dataset_dir does not contain data files.
RuntimeError: If num_parallel_workers exceeds the max thread numbers.
RuntimeError: If sampler and shuffle are specified at the same time.
RuntimeError: If sampler and sharding are specified at the same time.
RuntimeError: If num_shards is specified but shard_id is None.
RuntimeError: If shard_id is specified but num_shards is None.
ValueError: If shard_id is invalid (< 0 or >= num_shards).
Note:
- This dataset can take in a `sampler`. `sampler` and `shuffle` are mutually exclusive.
The table below shows what input arguments are allowed and their expected behavior.
.. list-table:: Expected Order Behavior of Using `sampler` and `shuffle`
:widths: 25 25 50
:header-rows: 1
* - Parameter `sampler`
- Parameter `shuffle`
- Expected Order Behavior
* - None
- None
- random order
* - None
- True
- random order
* - None
- False
- sequential order
* - Sampler object
- None
- order defined by sampler
* - Sampler object
- True
- not allowed
* - Sampler object
- False
- not allowed
Examples:
>>> yes_no_dataset_dir = "/path/to/yes_no_dataset_directory"
>>>
>>> # Read 3 samples from YesNo dataset
>>> dataset = ds.YesNoDataset(dataset_dir=yes_no_dataset_dir, num_samples=3)
>>>
        >>> # Note: In YesNo dataset, each dictionary has keys "waveform", "sample_rate", "labels"
About YesNo dataset:
Yesno is an audio dataset consisting of 60 recordings of one individual saying yes or no in Hebrew; each
recording is eight words long. It was created for the Kaldi audio project by an author who wishes to
remain anonymous.
Here is the original YesNo dataset structure.
You can unzip the dataset files into this directory structure and read by MindSpore's API.
.. code-block::
.
└── yes_no_dataset_dir
├── 1_1_0_0_1_1_0_0.wav
├── 1_0_0_0_1_1_0_0.wav
├── 1_1_0_0_1_1_0_0.wav
└──....
Citation:
.. code-block::
@NetworkResource{Kaldi_audio_project,
author = {anonymous},
url = "http://wwww.openslr.org/1/"
}
"""
@check_yes_no_dataset
def __init__(self, dataset_dir, num_samples=None, num_parallel_workers=None, shuffle=None,
sampler=None, num_shards=None, shard_id=None, cache=None):
super().__init__(num_parallel_workers=num_parallel_workers, sampler=sampler, num_samples=num_samples,
shuffle=shuffle, num_shards=num_shards, shard_id=shard_id, cache=cache)
self.dataset_dir = dataset_dir
def parse(self, children=None):
return cde.YesNoNode(self.dataset_dir, self.sampler)
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
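# Reverse the byte order within each 32-bit word of in_buf.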
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
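# Reverse the order of the 32-bit words in in_buf (the bytes inside each word are kept).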
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
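		# The nonce is the last 4 bytes of the 80-byte block header, i.e. hex characters
		# 152..160 of the getwork 'data' string, so splice it in at that offset.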
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 6452
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
sporf.py
|
#!/usr/bin/env python3
# Copyright (c) 2019, Richard Hughes All rights reserved.
# Released under the BSD license. Please see LICENSE.md for more information.
import sys
import os
import argparse
import glob
import threading
import subprocess
import urllib.parse
from queue import Empty, Queue
from datetime import datetime
# Define command line arguments
parms=argparse.ArgumentParser()
parms.add_argument("-c", "--cmd", type=str, required=True, help="Command to execute")
parms.add_argument("-f", "--file", type=str, required=True, help="Specify input file")
parms.add_argument("-n", "--num_processes", type=int, required=False, default="32", help="Number of concurrent processes")
parms.add_argument("-p", "--path", type=str, required=False, default=".", help="Specify location of input file")
parms.add_argument("-t", "--test_only", required=False, action="store_true", help="Do not execute commands")
args = vars(parms.parse_args())
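# Example (hypothetical values): with --cmd 'echo {NOW} {0} {URL:1}' and the input line
# 'alpha|a b/c', the command built below would be roughly:
#   echo 20190101120000 alpha a%20b%2Fc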
# Globals
cmdqueue=Queue()
# Main processing
def main(args):
# Get current date and time
timestamp = datetime.now()
# Open file of parameters to populate command template
lines = open(args['path'] + "/" + args['file'], "r")
    # Split input into parameters to build command
for line in lines:
line=line.rstrip()
parmlist=line.split("|")
# Build command to execute
cmdline=args['cmd']
for idx in range(len(parmlist)):
cmdline=cmdline.replace("{NOW}", timestamp.strftime('%Y%m%d%H%M%S'))
cmdline=cmdline.replace("{"+str(idx)+"}", parmlist[idx])
cmdline=cmdline.replace("{URL:"+str(idx)+"}", urllib.parse.quote(parmlist[idx]).replace("/","%2F"))
# Append command to list
cmdqueue.put(cmdline)
# Process
process(args['num_processes'])
# Process command queue
def process(num_processes):
# Create new threads
threadlist=[]
for threadnum in range(num_processes):
threadlist.append(threading.Thread(target=thread_function))
threadlist[threadnum].start()
# Join threads
for threadnum in range(num_processes):
threadlist[threadnum].join()
# Thread function
def thread_function():
    while True:
        # get_nowait() with exception handling avoids the race between checking
        # emptiness and fetching an item when several workers drain the queue
        try:
            c = cmdqueue.get_nowait()
        except Empty:
            break
        if args['test_only']:
            print(c)
        else:
            # Use subprocess.call if Python version < 3.5
            rc = 0
            if sys.version_info < (3, 5):
                rc = subprocess.call(c, shell=True)
            else:
                p = subprocess.run(c, shell=True)
                rc = p.returncode
            if rc != 0:
                print("Return Code: " + str(rc) + " - " + c, file=sys.stderr)
if __name__ == '__main__':
# Execute main method
main(args)
|
test_dialect_detection.py
|
# -*- coding: utf-8 -*-
"""
Integration tests for dialect detection.
Author: G.J.J. van den Burg
"""
import argparse
import chardet
import clevercsv
import gzip
import json
import multiprocessing
import os
import termcolor
import warnings
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
SOURCE_DIR = os.path.join(THIS_DIR, "data")
TEST_FILES = os.path.join(SOURCE_DIR, "files")
TEST_DIALECTS = os.path.join(SOURCE_DIR, "dialects")
LOG_SUCCESS = os.path.join(THIS_DIR, "success.log")
LOG_ERROR = os.path.join(THIS_DIR, "error.log")
LOG_FAILED = os.path.join(THIS_DIR, "failed.log")
LOG_METHOD = os.path.join(THIS_DIR, "method.log")
LOG_SUCCESS_PARTIAL = os.path.join(THIS_DIR, "success_partial.log")
LOG_ERROR_PARTIAL = os.path.join(THIS_DIR, "error_partial.log")
LOG_FAILED_PARTIAL = os.path.join(THIS_DIR, "failed_partial.log")
LOG_METHOD_PARTIAL = os.path.join(THIS_DIR, "method_partial.log")
TIMEOUT = 5 * 60
N_BYTES_PARTIAL = 10000
def log_result(name, kind, verbose, partial):
table = {
"error": (LOG_ERROR, LOG_ERROR_PARTIAL, "yellow"),
"success": (LOG_SUCCESS, LOG_SUCCESS_PARTIAL, "green"),
"failure": (LOG_FAILED, LOG_FAILED_PARTIAL, "red"),
}
outfull, outpartial, color = table.get(kind)
fname = outpartial if partial else outfull
with open(fname, "a") as fp:
fp.write(name + "\n")
if verbose:
termcolor.cprint(name, color=color)
def log_method(name, method, partial):
fname = LOG_METHOD_PARTIAL if partial else LOG_METHOD
with open(fname, "a") as fp:
fp.write(f"{name},{method}\n")
def worker(args, return_dict, **kwargs):
det = clevercsv.Detector()
filename, encoding, partial = args
return_dict["error"] = False
return_dict["dialect"] = None
return_dict["method"] = None
with gzip.open(filename, "rt", newline="", encoding=encoding) as fp:
data = fp.read(N_BYTES_PARTIAL) if partial else fp.read()
try:
return_dict["dialect"] = det.detect(data, **kwargs)
return_dict["method"] = det.method_
except clevercsv.Error:
return_dict["error"] = True
def run_with_timeout(args, kwargs, limit):
manager = multiprocessing.Manager()
return_dict = manager.dict()
p = multiprocessing.Process(
target=worker, args=(args, return_dict), kwargs=kwargs
)
p.start()
p.join(limit)
if p.is_alive():
p.terminate()
return None, True, None
return return_dict["dialect"], return_dict["error"], return_dict["method"]
def run_test(name, gz_filename, annotation, verbose=1, partial=False):
if "encoding" in annotation:
enc = annotation["encoding"]
else:
with gzip.open(gz_filename, "rb") as fid:
enc = chardet.detect(fid.read())["encoding"]
true_dialect = annotation["dialect"]
dialect, error, method = run_with_timeout(
(gz_filename, enc, partial), {"verbose": verbose > 1}, TIMEOUT
)
if error:
return log_result(name, "error", verbose, partial)
if dialect is None:
log_result(name, "failure", verbose, partial)
elif dialect.delimiter != true_dialect["delimiter"]:
log_result(name, "failure", verbose, partial)
elif dialect.quotechar != true_dialect["quotechar"]:
log_result(name, "failure", verbose, partial)
elif dialect.escapechar != true_dialect["escapechar"]:
log_result(name, "failure", verbose, partial)
else:
log_result(name, "success", verbose, partial)
log_method(name, method, partial)
def load_test_cases():
cases = []
for f in sorted(os.listdir(TEST_FILES)):
base = f[: -len(".csv.gz")]
dialect_file = os.path.join(TEST_DIALECTS, base + ".json")
if not os.path.exists(dialect_file):
continue
filename = os.path.join(TEST_FILES, f)
with open(dialect_file, "r") as fid:
annotation = json.load(fid)
if not annotation["filename"] == f[: -len(".gz")]:
warnings.warn(
"filename doesn't match! Input file: %s\nDialect file: %s"
% (filename, dialect_file)
)
continue
if annotation["status"] == "skip":
continue
cases.append((base, filename, annotation))
return cases
def clear_output_files(partial):
files = {
True: [
LOG_SUCCESS_PARTIAL,
LOG_FAILED_PARTIAL,
LOG_ERROR_PARTIAL,
LOG_METHOD_PARTIAL,
],
False: [LOG_SUCCESS, LOG_FAILED, LOG_ERROR, LOG_METHOD],
}
delete = lambda f: os.unlink(f) if os.path.exists(f) else None
any(map(delete, files[partial]))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--partial",
help="Run test with partial file data",
action="store_true",
)
parser.add_argument("-v", "--verbose", help="Be verbose", action="count")
return parser.parse_args()
def main():
args = parse_args()
clear_output_files(args.partial)
cases = load_test_cases()
for name, gz_filename, annotation in cases:
run_test(
name,
gz_filename,
annotation,
verbose=args.verbose,
partial=args.partial,
)
if __name__ == "__main__":
main()
|
backsec.py
|
import os
import stat
import hashlib
import ConfigParser
import sqlite3
import shutil
import time
import datetime
import gzip
import json
import sys
import time
import datetime
import threading
import argparse
# RSA
from Crypto.PublicKey import RSA
# AES
from hashlib import md5
from Crypto.Cipher import AES
from Crypto import Random
# Logs
import logging
# NOT USED NOW
# import base64
# import codecs
# import binascii
#VSS windows
if os.name == "nt":
#vss
import win32com.client
import ctypes
class BackSecClient:
'''
config = {
        'crypt': True,        -> encryption yes or no
        'type' : 'local',     -> type can be: local, direct, reverse
        'compresion': True,   -> gzip compression yes or no
}
'''
config = {
'crypt': True,
'type': 'local',
'compresion': True,
'passwd': '',
}
    paths = []  # paths to back up
privatekey = ""
publickey = ""
logger = None
def __init__(self, config={}, paths=[]):
if config == {} or paths == []:
self.loadConfig()
else:
self.config = config
self.paths = paths
def loadConfig(self):
config = ConfigParser.ConfigParser()
try:
app_path = self.pathParser(os.path.dirname(os.path.abspath(__file__)))
except NameError: # We are the main py2exe script, not a module
app_path = self.pathParser(os.path.dirname(os.path.abspath(sys.argv[0])))
if not os.path.exists('{0}client.conf'.format(app_path)):
app_path = self.pathParser(os.path.dirname(os.path.abspath(sys.argv[0])))
config.read('{0}client.conf'.format(app_path))
try:
self.config['type'] = str(config.get('GENERAL', 'type'))
if self.config['type'] == "local":
self.paths = str(config.get('GENERAL', 'paths')).split(",")
self.config['crypt'] = eval(config.get('GENERAL', 'crypt'))
self.config['destination'] = config.get('GENERAL', 'destination')
self.config['compresion'] = eval(config.get('GENERAL', 'compresion'))
self.config['logs'] = eval(config.get('GENERAL', 'logs'))
self.setInitialConfig()
if self.config['crypt']:
self.config['passwd'] = config.get('GENERAL', 'passwd')
self.generateKeys()
self.loadKeys()
self.config['full'] = str(config.get('POLICY', 'full'))
self.config['full'] = self.config['full'].split(",")
self.config['incremental'] = str(config.get('POLICY', 'incremental'))
self.config['incremental'] = self.config['incremental'].split(",")
if self.config['logs']:
self.setLogsOn(app_path)
except Exception as e:
sys.stdout.write("[!] An error happened loading the configuration\n")
sys.stdout.write(str(e))
def setLogsOn(self, app_path):
logging.basicConfig(filename='{0}backsec-log.log'.format(app_path),
format='%(asctime)s,%(msecs)d-%(name)s-%(levelname)s %(message)s',
datefmt='%H:%M:%S_%d-%m-%Y',
level=logging.DEBUG)
self.logger = logging.getLogger('backsec')
self.logger.setLevel(logging.DEBUG)
def writeLine(self, texto, logtype="info", output="std"):
texto1 = texto
if texto[len(texto) - 1] != "\n":
texto1 = texto + "\n"
if output == "err":
sys.stderr.write(texto1)
elif output==None:
pass
else:
sys.stdout.write(texto1)
if self.config['logs']:
if self.logger == None:
try:
app_path = self.pathParser(os.path.dirname(
os.path.abspath(__file__)))
except NameError: # We are the main py2exe script, not a module
app_path = self.pathParser(os.path.dirname(
os.path.abspath(sys.argv[0])))
if not os.path.exists('{0}client.conf'.format(app_path)):
app_path = self.pathParser(os.path.dirname(os.path.abspath(sys.argv[0])))
self.setLogsOn(app_path)
if logtype == "warn":
self.logger.warning(texto)
elif logtype == "error":
self.logger.error(texto)
elif logtype == "crit":
self.logger.critical(texto)
elif logtype == "debug":
self.logger.debug(texto)
else:
self.logger.info(texto)
def md5(self, fname):
try:
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
except:
return "No-md5"
def getFileInfo(self, filename, osname="linux"):
try:
fileinfo = os.stat(filename)
if osname == "linux":
modtime = fileinfo.st_mtime
elif osname == "windows":
modtime = 0
return {
'fsze': fileinfo.st_size,
# 'accesstime': fileinfo.st_atime,
# 'creationtime': fileinfo.st_ctime,
'mtime': modtime,
'uid': fileinfo.st_uid,
'gid': fileinfo.st_gid,
'mode': fileinfo.st_mode,
}
except Exception as e:
# print e
return {
'fsze': 0,
# 'accesstime': 0,
# 'creationtime': 0,
'mtime': 0,
'uid': 0,
'gid': 0,
'mode': 0,
}
def getFileList(self, path=""):
filetree = []
filetree.append({"ori-path": path})
ext = ""
self.writeLine("[-] Searching on {0}".format(path))
lastdirpath = ""
for dirpath, dirname, files in os.walk(path):
for directorio in dirname:
atributes = self.getFileInfo(dirpath)
atributes['type'] = "d" # directory
filetree.append({'path': self.pathParser(dirpath) + \
directorio, 'attr': atributes})
for filea in files:
if dirpath != lastdirpath:
atributes = self.getFileInfo(dirpath)
atributes['type'] = "d" # directory
filetree.append({'path': dirpath, 'attr': atributes})
if filea.endswith(ext):
filenpath = os.path.join(dirpath, filea)
fileinfo = self.getFileInfo(filenpath)
fileinfo['type'] = "f" # file
filetree.append({'path': os.path.join(
self.pathParser(dirpath), filea), 'attr': fileinfo})
lastdirpath = dirpath
return filetree
def getFileListAllPaths(self):
filetree = []
for path in self.paths:
tmpresult = self.getFileList(path)
for fileres in tmpresult:
filetree.append(fileres)
return filetree
def pathParser(self, path, endp=True):
path = path.replace("\\c:\\", "\\")
if ("\\" in path) or os.name == "nt":
path = path.replace("/", "\\")
if not endp or path.endswith("\\"):
return path
else:
return path + "\\"
if "/" in path:
path = path.replace("\\", "/")
if not endp or path.endswith("/"):
return path
else:
return path + "/"
def getLastDir(self, path):
lastdir = ""
arraytmp = []
if "\\" in path:
arraytmp = path.split("\\")
elif "/" in path:
arraytmp = path.split("/")
for directory in arraytmp:
if directory == "":
arraytmp.remove("")
lastdir = arraytmp[len(arraytmp) - 1]
return lastdir
def setInitialConfig(self):
self.config['destination'] = self.pathParser(self.config['destination'])
if self.config['type'] == "local":
self.config['localdb'] = self.pathParser(self.config['destination'] + "local.db",endp=False)
if not os.path.exists(self.config['destination']):
try:
os.makedirs(self.config['destination'])
except:
sys.stderr.write("The application cannot create the directory " + self.config['destination'])
if not os.path.exists(self.config['localdb']):
shutil.copy("localtpl.sql", self.config['localdb'])
def loadKeys(self):
if os.path.exists("privatekey.pem") and os.path.exists("publickey.pem"):
fpriv = open("privatekey.pem", 'r')
fpub = open("publickey.pem", 'r')
self.privatekey = fpriv.read()
self.publickey = fpub.read()
fpriv.close()
fpub.close()
else:
self.writeLine("[!] An error happened charging the keys")
def loadIndexFile(self, indexpath):
try:
f = gzip.open(indexpath, 'rb')
data = f.read()
f.close()
data = data.replace("\\","\\\\")
return json.loads(data)
# except ValueError as error:
except OSError as error:
# except Exception as error:
self.writeLine("[!] couldn't load index file '{0}'".format(indexpath),
logtype="error")
return []
def writeIndexFile(self, indexfile, indexcontent):
try:
indexcontent = str(indexcontent)
indexcontent = indexcontent.replace(", u\"", ", \"")
indexcontent = indexcontent.replace("{u\"", "{\"")
indexcontent = indexcontent.replace(": u\"", ": \"")
indexcontent = indexcontent.replace("L, \"gid\"", ", \"gid\"")
indexcontent = indexcontent.replace("\\\\", "\\")
findex = gzip.open(indexfile, 'wb')
findex.write(indexcontent)
findex.close()
return True
except:
return False
    # compare the previous index with the new one (only for new indexes)
def compareLastIndex(self, lastindex, actualindex, islastafile=True, isactualafile=True):
res = []
if islastafile:
lastindexarr = self.loadIndexFile(lastindex)
else:
lastindexarr = lastindex
if isactualafile:
actualindexarr = self.loadIndexFile(actualindex)
else:
actualindexarr = actualindex
for indexolditem in lastindexarr:
if indexolditem.keys()[0] != "ori-path" and indexolditem.keys()[0] == "path":
found = False
for i in actualindexarr:
if i.keys()[0] != "ori-path" and i['path'] == indexolditem['path']:
found = True
if indexolditem.keys()[1] == "attr" and i['attr'] != indexolditem['attr']:
# print indexolditem['path']
res.append(i)
if not found:
deleted = indexolditem
deleted['attr'] = {"status": "del"}
res.append(deleted)
        # add new files
for indexnewitem in actualindexarr:
if indexnewitem.keys()[0] != "ori-path" and indexnewitem.keys()[0] == "path":
found = False
for i in lastindexarr:
if i.keys()[0] != "ori-path" and i['path'] == indexnewitem['path']:
found = True
if not found:
res.append(indexnewitem)
        # end
return res
    # The original path is read from lastindexarr and actualindexarr and each path is
    # checked for existence now; if it no longer exists, an annotation is stored marking
    # it as deleted. Between one index and the next, the mode, the size and the
    # user/group and mode (permissions) are compared for changes.
    # Returns the index of the incremental backup
def compareLastIndexRestore(self, lastindexes, actualindex, islindexarray=True):
res = []
if islindexarray == False:
lastindexes = [lastindexes]
actualindexarr = self.loadIndexFile(actualindex)
for item in actualindexarr:
found = False
for i in res:
if i['path'] == item['path']:
found = True
if item.keys()[0] != 'ori-path' and not found:
ifrom = actualindex.replace("/index.gz", "/files")
ifrom = ifrom.replace("\index.gz", "\\files")
item['from'] = ifrom
res.append(item)
for lastindex in lastindexes:
lastindexarr = self.loadIndexFile(lastindex)
for item in lastindexarr:
if 'ori-path' not in item.keys():
found = False
for i in res:
if i['path'] == item['path']:
found = True
if not found:
ifrom = lastindex.replace("/index.gz", "/files")
ifrom = ifrom.replace("\index.gz", "\\files")
item['from'] = ifrom
res.append(item)
return res
def generateKeys(self):
if not os.path.exists("privatekey.pem") or not os.path.exists("publickey.pem"):
new_key = RSA.generate(4096) # generate RSA key that 4096 bits long
# Export the Key in PEM format, the PEM extension contains ASCII encoding
public_key = new_key.publickey().exportKey("PEM")
private_key = new_key.exportKey("PEM")
try:
fprivkey = open("privatekey.pem", 'w')
fpubkey = open("publickey.pem", 'w')
fpubkey.write(public_key)
fprivkey.write(private_key)
finally:
fpubkey.close()
fprivkey.close()
self.writeLine(public_key, logtype="info", output="std")
self.writeLine(private_key, logtype="info", output="std")
else:
self.writeLine("[-] The keys exists")
def encryptRSA(self, text, publickey):
encryptor = RSA.importKey(publickey)
global encriptedData
# b64 = codecs.encode(binascii.b2a_base64(text),"base64")
# encriptedData=encryptor.encrypt(b64[0:len(b64)-1], 0)
encriptedData = encryptor.encrypt(text, 0)
return encriptedData[0]
def decryptRSA(self, text, privatekey):
decryptor = RSA.importKey(privatekey)
# dec = base64.b64decode( decryptor.decrypt(text) + "==" )
dec = decryptor.decrypt(text)
return dec
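    # AES-256-CBC file encryption: a random 8-byte salt is written after the literal
    # 'Salted__' marker and the final block is padded PKCS#7-style, resembling the
    # legacy salted format produced by `openssl enc`.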
def encryptAES256(self, in_file, out_file, password, key_length=32):
bs = AES.block_size
salt = Random.new().read(bs - len('Salted__'))
key, iv = self.derive_key_and_iv(password, salt, key_length, bs)
cipher = AES.new(key, AES.MODE_CBC, iv)
out_file.write('Salted__' + salt)
finished = False
while not finished:
chunk = in_file.read(1024 * bs)
if len(chunk) == 0 or len(chunk) % bs != 0:
padding_length = (bs - len(chunk) % bs) or bs
chunk += padding_length * chr(padding_length)
finished = True
out_file.write(cipher.encrypt(chunk))
def decryptAES256(self, in_file, out_file, password, key_length=32):
bs = AES.block_size
salt = in_file.read(bs)[len('Salted__'):]
key, iv = self.derive_key_and_iv(password, salt, key_length, bs)
cipher = AES.new(key, AES.MODE_CBC, iv)
next_chunk = ''
finished = False
while not finished:
chunk, next_chunk = next_chunk, cipher.decrypt(in_file.read(1024 * bs))
if len(next_chunk) == 0:
padding_length = ord(chunk[-1])
chunk = chunk[:-padding_length]
finished = True
out_file.write(chunk)
def derive_key_and_iv(self, password, salt, key_length, iv_length):
d = d_i = ''
while len(d) < key_length + iv_length:
d_i = md5(d_i + password + salt).digest()
d += d_i
return d[:key_length], d[key_length:key_length + iv_length]
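    # Note: derive_key_and_iv above mirrors OpenSSL's legacy EVP_BytesToKey scheme
    # (MD5, a single iteration), and encryptAES256 writes the classic
    # "Salted__" + 8-byte-salt header. Assuming compression is disabled (otherwise
    # gunzip the file first), an encrypted backup file should be decryptable
    # outside this tool with something like:
    #   openssl enc -d -aes-256-cbc -md md5 -in backup.enc -out backup.plain -k <passwd>
    # (sketch only; verify against your OpenSSL version before relying on it)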
def copyFileGzip(self, source, destination):
fsource = open(source, "rb")
if self.config['compresion']:
fdest = gzip.open(destination, 'wb')
else:
fdest = open(destination, 'wb')
try:
if self.config['crypt']:
self.encryptAES256(fsource, fdest, self.config['passwd'])
else:
                byte = fsource.read(128)
                while byte:
                    fdest.write(byte)
                    byte = fsource.read(128)
finally:
fsource.close()
fdest.close()
def restoreFileGzip(self, source, destination):
fdest = open(destination, "wb")
if self.config['compresion']:
fsource = gzip.open(source, 'rb')
else:
fsource = open(source, 'rb')
try:
if self.config['crypt']:
self.decryptAES256(fsource, fdest, self.config['passwd'])
else:
                byte = fsource.read(128)
                while byte:
                    fdest.write(byte)
                    byte = fsource.read(128)
finally:
fsource.close()
fdest.close()
def vssDelete(self,id):
wcd = win32com.client.Dispatch("WbemScripting.SWbemLocator")
        wmi = wcd.ConnectServer(".", "root\\cimv2")
obj = wmi.ExecQuery(
'SELECT * FROM Win32_ShadowCopy WHERE ID="{0}"'.format(id)
)
obj[0].Delete_()
def findVSS(self,id=""):
wcd = win32com.client.Dispatch("WbemScripting.SWbemLocator")
        wmi = wcd.ConnectServer(".", "root\\cimv2")
if id != "":
obj = wmi.ExecQuery("SELECT * FROM win32_ShadowCopy WHERE id='{0}'".format(id))
return [x.DeviceObject for x in obj]
else:
return []
def getVssList(self):
res = []
if os.name == "nt":
wcd=win32com.client.Dispatch("WbemScripting.SWbemLocator")
            wmi=wcd.ConnectServer(".","root\\cimv2")
obj=wmi.ExecQuery("SELECT * FROM win32_ShadowCopy")
res = [x.DeviceObject for x in obj]
return res
def vssCreate(self,unidad="c:\\"):
if os.name == "nt":
wmi=win32com.client.GetObject("winmgmts:\\\\.\\root\\cimv2:Win32_ShadowCopy")
createmethod = wmi.Methods_("Create")
createparams = createmethod.InParameters
createparams.Properties_[1].value=unidad
results = wmi.ExecMethod_("Create",createparams)
return results.Properties_[1].value
return []
    def createLink(self, link, destino):  # create a symbolic link (mklink)
flags = {'directory':1,'file':0}
##res = ctypes.windll.kernel32.CreateSymbolicLinkW(link, destino, flags['directory'])
##return res
csl = ctypes.windll.kernel32.CreateSymbolicLinkW
csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
csl.restype = ctypes.c_ubyte
res = csl(link, destino, flags['directory'])
#csl("c:\\shadow_C", "\\\\?\\GLOBALROOT\\Device\\HarddiskVolumeShadowCopy5\\", 1)
return res
def doVss(self,path):
res = [path, []]
if os.name == "nt":
shadowlink = "shadow{0}".format(int( time.time() ))
volunit = path[0:3]
shadowlinkpath = "{0}{1}".format(volunit,shadowlink)
vssid = self.vssCreate(volunit)
self.createLink(shadowlinkpath,"{0}\\".format(self.findVSS(id=vssid)[0]))
newpath = "{0}{1}{2}".format(volunit,shadowlink,path.replace(volunit,"\\"))
res = [newpath,[vssid,shadowlinkpath]]
return res
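    # Note: on Windows, doVss above snapshots the drive letter of `path` via VSS,
    # exposes the snapshot through a temporary symlink (e.g. C:\shadow<timestamp>),
    # and returns [path-inside-the-snapshot, [shadow-copy-id, symlink-path]] so the
    # caller can copy from a consistent view and later call removeVssAndUmount.
    # On non-Windows systems it simply returns [path, []].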
def removeVssAndUmount(self,vssid,mountpoint):
if os.name == "nt":
os.rmdir(mountpoint)
self.vssDelete(vssid)
def doFullBackup(self):
for path in self.paths:
vssres = self.doVss(path)
timestamp = int(time.time())
query = "INSERT INTO backups VALUES(null," + str(timestamp) + \
",'full','" + path + "','started')"
dbres = self.setDataToDB(self.config['localdb'], query)
if dbres:
self.writeLine("[-] Full backup '{0}' started".format(path))
else:
self.writeLine("[-] Full backup '{0}' Failed".format(path),
logtype="error")
continue
if path == "/":
lastdir = self.getLastDir("/_ROOT_DIR_") + "/full_" + str(timestamp)
else:
lastdir = self.getLastDir(path) + "/full_" + str(timestamp)
tmpdestination = self.pathParser(self.config['destination'] + lastdir)
os.makedirs(tmpdestination + "files")
bckindex = self.getFileList(path)
self.writeIndexFile(tmpdestination + "index.gz", str(bckindex).replace("'", "\""))
for item in bckindex:
try:
                    if item['path'] != "":  # make sure this is not the ori-path entry
destpath = item['path'].replace(path, tmpdestination + "files/")
destpath = self.pathParser(destpath, endp=False)
                        # switch the path to the VSS snapshot path
item['path'] = item['path'].replace(path,vssres[0])
                        # if it is a directory, create it at the destination
if item['attr']['type'] == 'd':
try:
os.makedirs(destpath)
                            except OSError as e:
self.writeLine("\t[!] {0}".format(e),
logtype="error")
else:
                            # if it is a file, copy it with gzip:
try:
self.copyFileGzip(item['path'], destpath)
except Exception as e:
self.writeLine("\t[!] {0}".format(e),
logtype="error")
except KeyError as e:
self.writeLine("\t[!] {0}".format(e),
logtype="error")
query = "UPDATE backups set status='completed' where datetime=" + \
str(timestamp) + " and type='full' and path='" + path + "'" + \
" and status='started'"
dbres = self.setDataToDB(self.config['localdb'], query)
if dbres:
self.writeLine("[-] Full backup '{0}' Completed".format(path))
else:
self.writeLine("[-] Full backup '{0}' Failed".format(path))
continue
if len(vssres[1]) > 0:
self.removeVssAndUmount(vssres[1][0], vssres[1][1])
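    # On-disk layout produced by doFullBackup / doIncrementalBackup (as written above):
    #   <destination>/<path-derived-dir>/full_<timestamp>/index.gz   - gzipped index
    #   <destination>/<path-derived-dir>/full_<timestamp>/files/...  - copied (optionally gzipped/encrypted) files
    # Incremental runs use an incremental_<timestamp> directory and store only the
    # entries that compareLastIndex reports as changed, new, or deleted.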
def doIncrementalBackup(self):
for path in self.paths:
vssres = self.doVss(path)
timestamp = int(time.time())
query = "INSERT INTO backups VALUES(null," + str(timestamp) + \
",'incremental','" + path + "','started')"
dbres = self.setDataToDB(self.config['localdb'], query)
if dbres:
self.writeLine("[-] Incremental backup '{0}' started".format(path))
else:
self.writeLine("[-] Incremental backup '{0}' Failed".format(path))
continue
if path == "/":
lastdir = self.getLastDir("/_ROOT_DIR_") + "/incremental_" + str(timestamp)
else:
lastdir = self.getLastDir(path) + "/incremental_" + str(timestamp)
tmpdestination = self.pathParser(self.config['destination'] + lastdir)
os.makedirs(tmpdestination + "files")
# Get last full backup index
backups = self.getBackups(path, bcktype='full')
lastfulldir = self.getLastDir(path) + "/full_" + str(backups[len(backups) - 1][1]) + "/"
lastindex = self.config['destination'] + "/" + lastfulldir + "index.gz"
# fin
tmpindex = self.getFileList(path)
bckindex = self.compareLastIndex(lastindex, tmpindex, isactualafile=False)
self.writeIndexFile(tmpdestination + "index.gz", str(bckindex).replace("'", "\""))
for item in bckindex:
try:
                    if item['path'] != "":  # make sure this is not the ori-path entry
destpath = item['path'].replace(path, tmpdestination + "files/")
destpath = self.pathParser(destpath, endp=False)
                        # switch the path to the VSS snapshot path
item['path'] = item['path'].replace(path, vssres[0])
                        # if it is a directory, create it at the destination
if item['attr']['type'] == 'd':
try:
os.makedirs(destpath)
except OSError as e:
self.writeLine("\t[!] {0}".format(e),
logtype="error")
else:
                            # if it is a file, copy it with gzip:
try:
self.copyFileGzip(item['path'], destpath)
except Exception as e:
err = "\t[!] " + str(e)
self.writeLine(err, logtype="error")
except KeyError as e:
self.writeLine("\t[!] {0}".format(e),
logtype="error")
query = "UPDATE backups set status='completed' where datetime=" + \
                str(timestamp) + " and type='incremental' and path='" + path + "'" + \
" and status='started'"
dbres = self.setDataToDB(self.config['localdb'], query)
if dbres:
self.writeLine("[-] Incremental backup '{0}' Completed".format(path))
else:
self.writeLine("[-] Incremental backup '{0}' Failed".format(path))
continue
if len(vssres[1]) > 0:
self.removeVssAndUmount(vssres[1][0], vssres[1][1])
def restoreFull(self, backupidarr, pathdest, pathsource="/"):
for backupid in backupidarr:
query = "SELECT * FROM backups WHERE id={0}".format(backupid)
dbres = self.getDataFromDB(self.config['localdb'], query)
timestamp = dbres[0][1]
path = dbres[0][3]
if dbres != []:
self.writeLine("[-] Full backup '{0}' restoring on {1} started\n".format(path, pathdest))
else:
self.writeLine("[-] Full backup restore '{0}' Failed\n".format(path))
continue
if path == "/" or path == "c:\\":
lastdir = self.getLastDir("/_ROOT_DIR_") + "/full_" + str(timestamp)
else:
lastdir = self.getLastDir(path) + "/full_" + str(timestamp)
tmpsource = self.pathParser(self.config['destination'] + lastdir)
bckindex = self.loadIndexFile(tmpsource + "index.gz")
destbase = self.pathParser(pathdest)
for item in bckindex:
try:
                    if item['path'] != "":  # make sure this is not the ori-path entry
sourcepath = self.pathParser(item['path'].replace(path,
tmpsource + "files/"), endp=False)
sourcepath = sourcepath.replace("//", "/")
sourcepath = sourcepath.replace("\\\\", "\\")
letravol = ":\\"
if item['path'][1:3] == ":\\":
letravol = item['path'][0:3]
destpath = destbase + item['path'].replace(letravol,"\\")
destpath = destpath.replace("//", "/")
destpath = destpath.replace("\\\\", "\\")
destpath = self.pathParser(destpath, endp=False)
                        # if it is a directory, create it at the destination
if item['attr']['type'] == 'd':
try:
os.makedirs(destpath)
os.chmod(destpath, item['attr']['mode'])
try:
os.chown(destpath,
item['attr']['uid'],
item['attr']['gid'])
except:
pass
except OSError:
pass
except Exception as e:
self.writeLine("\t[!] {0}".format(str(e)),
logtype="error")
else:
                            # if it is a file, copy it with gzip:
try:
self.restoreFileGzip(sourcepath, destpath)
os.chmod(destpath, item['attr']['mode'])
try:
os.chown(destpath,
item['attr']['uid'],
item['attr']['gid'])
except:
pass
except Exception as e:
self.writeLine("\t[!] {0}".format(str(e)),
logtype="error")
except KeyError:
pass
if dbres:
self.writeLine("[-] Full backup '{0}' Restored".format(path))
else:
self.writeLine("[-] Full backup '{0}' restore Failed".format(path))
continue
    # TODO adjust file-type errors and review the copyfiles exception handling
def restoreIncremental(self, backupid, pathdest, pathsource="/"):
backupdata = self.getBackupIncremental(backupid[0])
destbase = self.pathParser(pathdest)
indexesfilesarr = []
fusionedindex = []
if len(backupdata) > 0:
lastindex = backupdata[0]
backupdata.remove(lastindex)
lastbckindexfile = self.getIndexFilePath(lastindex)
path = lastindex[3]
self.writeLine("[-] Incremental backup '{0}' restoring on {1} started".format(path, pathdest))
for data in backupdata:
indexesfilesarr.append(self.getIndexFilePath(data))
fusionedindex = self.compareLastIndexRestore(indexesfilesarr, lastbckindexfile)
####
for item in fusionedindex:
try:
                    if "status" not in item and item['path'] != "":  # make sure this is not the ori-path entry
tmpsource = item['from']
sourcepath = item['path'].replace(path, tmpsource)
sourcepath = sourcepath.replace("//", "/")
sourcepath = sourcepath.replace("\\\\", "\\")
letravol = ":\\"
if item['path'][1:3] == ":\\":
letravol = item['path'][0:3]
destpath = destbase + item['path'].replace(letravol, "\\")
destpath = destpath.replace("//", "/")
destpath = destpath.replace("\\\\", "\\")
# print destpath
                        # if it is a directory, create it at the destination
if item['attr']['type'] == 'd':
try:
os.makedirs(destpath)
os.chmod(destpath, item['attr']['mode'])
os.chown(destpath,
item['attr']['uid'],
item['attr']['gid'])
except OSError:
pass
except AttributeError:
pass
else:
                            # if it is a file, copy it with gzip:
try:
self.restoreFileGzip(sourcepath, destpath)
os.chmod(destpath, item['attr']['mode'])
os.chown(destpath,
item['attr']['uid'],
item['attr']['gid'])
except AttributeError:
pass
except Exception as e:
self.writeLine("\t[!] " + str(e), logtype="error")
except KeyError:
pass
self.writeLine("[-] Incremental backup '{0}' Restored".format(path))
else:
self.writeLine("[!] Restore failed", logtype="error")
exit()
        # 1- query the DB for the incremental backup id -> getBackupIncremental(self,path,bckincrementalid)
        # 2- query the DB for the data of the previous full backup -> getBackupIncremental
        # 3- compare the indexes (a restore-specific index comparison method is
        #    needed so that files missing from the second index are not treated as
        #    deleted, while entries with status:del are) -> compareLastIndexRestore
        # 4- restore the most recent version of each file, avoiding restoring
        #    files deleted in the incremental backups
pass
# TODO restorefile
def restoreOnceFileOfBackup(self, backupid, filepath, pathdest):
query = "SELECT * FROM backups WHERE id={0}".format(backupid)
dbres = self.getDataFromDB(self.config['localdb'], query)
timestamp = dbres[0][1]
path = dbres[0][3]
if dbres != []:
self.writeLine("[-] Full backup '{0}' restoring on {1} started\n".format(path, pathdest))
else:
self.writeLine("[-] Full backup restore '{0}' Failed\n".format(path))
if path == "/" or path == "c:\\":
lastdir = self.getLastDir("/_ROOT_DIR_") + "/full_" + str(timestamp)
else:
lastdir = self.getLastDir(path) + "/full_" + str(timestamp)
tmpsource = self.pathParser(self.config['destination'] + lastdir)
sourcepath = self.pathParser(filepath.replace(path,
tmpsource + "files/"), endp=False)
destbase = self.pathParser(pathdest)
destpath = destbase + filepath
destpath = destpath.replace("//", "/")
destpath = destpath.replace("\\\\", "\\")
destpath = self.pathParser(destpath, endp=False)
        self.restoreFileGzip(sourcepath, destpath)
    def getIndexFilePath(self, data):  # data is a row from the backups DB table
path = data[3]
if path == "/" or path == "c:\\":
root = self.getLastDir("/_ROOT_DIR_")
lastdir = "{0}/{1}_{2}".format(root, data[2], data[1])
else:
pathparsed = self.getLastDir(path)
lastdir = "{0}/{1}_{2}".format(pathparsed, data[2], data[1])
tmpsource = self.pathParser(self.config['destination'] + lastdir) + "index.gz"
return tmpsource
def setDataToDB(self, filename, query):
try:
con = sqlite3.connect(filename)
cursor = con.cursor()
cursor.execute(query)
con.commit()
con.close()
return True
except Exception:
return False
def getDataFromDB(self, filename, query):
con = sqlite3.connect(filename)
cursor = con.cursor()
cursor.execute(query)
res = cursor.fetchall()
return res
def getBackups(self, path, bcktype='full'):
if bcktype == "all":
bcktype = "%"
query = "SELECT * FROM backups WHERE path='" + path + "' AND type like '" + \
bcktype + "' ORDER BY id"
bcklist = self.getDataFromDB(self.config['localdb'], query)
return bcklist
def getBackupIncremental(self, bckincrementalid, path=""):
if path != "":
query = "SELECT * FROM backups WHERE path='{0}' AND type='full'" + \
" AND id<{1} ORDER BY id DESC LIMIT 1"
query = query.format(path, bckincrementalid)
else:
query = "SELECT * FROM backups WHERE type='full' AND id<{0} ORDER BY id DESC LIMIT 1"
query = query.format(bckincrementalid)
bckres = []
bcklist = self.getDataFromDB(self.config['localdb'], query)
if len(bcklist) > 0:
query = "SELECT * FROM backups WHERE path='{0}' AND type='incremental'" + \
" AND id<={1} AND id>={2} ORDER BY id DESC LIMIT 1"
query = query.format(bcklist[0][3], bckincrementalid, bcklist[0][0])
bcklisttmp = self.getDataFromDB(self.config['localdb'], query)
for incremental in bcklisttmp:
bckres.append(incremental)
bckres.append(bcklist[0])
return bckres
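    # Note: getBackupIncremental returns the selected incremental backup row first
    # (when one exists) followed by the preceding full backup row; restoreIncremental
    # uses the first entry as the index to restore and the remaining rows as older
    # indexes from which unchanged files are pulled.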
def getPaths(self):
return self.paths
def runBackups(self):
        self.writeLine("[-] Checking whether it is time to do full backups")
for full in self.config['full']:
datetmp = full.split(" ")
weekday = datetime.datetime.now().strftime("%a").lower()
            monthday = datetime.datetime.now().strftime("%d").lower()
            nowtime = datetime.datetime.now().strftime("%H:%M").lower()
            if (datetmp[0] == weekday or datetmp[0] == monthday) and datetmp[1] == nowtime:
                t1 = threading.Thread(target=self.doFullBackup)
t1.start()
        self.writeLine("[-] Checking whether it is time to do incremental backups\n")
for incr in self.config['incremental']:
datetmp = incr.split(" ")
weekday = datetime.datetime.now().strftime("%a").lower()
            monthday = datetime.datetime.now().strftime("%d").lower()
            nowtime = datetime.datetime.now().strftime("%H:%M").lower()
            if (datetmp[0] == weekday or datetmp[0] == monthday) and datetmp[1] == nowtime:
                t1 = threading.Thread(target=self.doIncrementalBackup)
t1.start()
# >>> print "Or like this: " ,datetime.datetime.now().strftime("%a %y-%m-%d-%H-%M")
# Or like this: Wed 17-11-08-02-44
def launchDaemon(self):
timetowait = 50
while True:
self.runBackups()
self.writeLine("[-] Waiting {0} seconds to recheck".format(timetowait))
time.sleep(timetowait)
def main():
bsc = BackSecClient()
try:
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help='commands')
        # daemon mode
dparser = subparsers.add_parser('daemon', help='Activate daemon mode')
dparser.add_argument("daemon", action='store_true',
default=True, help="Activate daemon mode")
# Run policy once
rpparser = subparsers.add_parser('runpolicy', help='Run backup policy one time')
rpparser.add_argument('runpolicy', action='store_true', default=True,
help='Run backup policy one time')
rpparser.add_argument('--btype', '-BT', action='store', default='full',
choices=('incremental', 'full'),
help='Backup type (full or incremental)')
        # list managed directories
ldparser = subparsers.add_parser('listdirs', help='List backup directories')
ldparser.add_argument('listdirs', action='store_true', default=True,
help='List backup directories')
# List backups
lbparser = subparsers.add_parser('listbackups', help='List backups')
lbparser.add_argument('listbackups', action='store_true', default=True,
help='List backups')
lbparser.add_argument('directory', action='store', help='Backup source directory')
lbparser.add_argument('--btype', '-BT', default='all', action='store', dest='btype',
choices=('all', 'incremental', 'full'),
                              help='Select backup type to find (full, incremental or all)')
# restore backups
        # TODO select the backup type from the database instead of having to specify it
rbparser = subparsers.add_parser('restore', help='Restore backups')
rbparser.add_argument('restore', action='store_true', default=True,
help='Restore backups')
rbparser.add_argument('backupid', action='store', help='Backup id')
rbparser.add_argument('destination', action='store', help='Restore destination directory')
rbparser.add_argument('--btype', '-BT', default='all', action='store', dest='btype',
choices=('incremental', 'full'),
                              help='Select backup type to find (full, incremental)')
# TODO restorefile
# restore once file
        # TODO select the backup type from the database instead of having to specify it
rbparser = subparsers.add_parser('restorefile', help='Restore once file')
rbparser.add_argument('restorefile', action='store_true', default=True,
help='Restore once file')
rbparser.add_argument('backupid', action='store', help='Backup id')
rbparser.add_argument('filepath', action='store', help='File path on the system')
rbparser.add_argument('destination', action='store', help='Restore destination directory')
rbparser.add_argument('--btype', '-BT', default='all', action='store', dest='btype',
choices=('incremental', 'full'),
                              help='Select backup type to find (full, incremental)')
args = parser.parse_args()
if 'daemon' in args and args.daemon:
bsc.launchDaemon()
elif 'listdirs' in args and args.listdirs:
bsc.writeLine("[-] List the directories saved:\n")
for direc in bsc.getPaths():
bsc.writeLine("\t{0}\n".format(direc))
elif 'listbackups' in args and args.listbackups:
bsc.writeLine("[-] List the backups saved:\n")
counter = 0
for direc in bsc.getBackups(args.directory, bcktype=args.btype):
counter += 1
datet = datetime.datetime.fromtimestamp(int(direc[1])).strftime('%Y-%m-%d %H:%M:%S')
bsc.writeLine("\t-{0}. {1} {2} {3} {4} (id: {5})\n".format(counter,
direc[3], direc[2], datet, direc[4],
direc[0]))
elif 'runpolicy' in args and args.runpolicy:
bsc.writeLine("[-] Running policy {0} one time:\n".format(args.btype))
if args.btype == "full":
bsc.doFullBackup()
elif args.btype == "incremental":
bsc.doIncrementalBackup()
elif 'restore' in args and args.restore:
bsc.writeLine("[-] Running restore {0} one time:\n".format(args.btype))
if args.btype == "full":
bsc.restoreFull([args.backupid], args.destination)
elif args.btype == "incremental":
bsc.restoreIncremental([args.backupid], args.destination)
# TODO restorefile
elif 'restorefile' in args and args.restorefile:
bsc.writeLine("[-] Running restore {0} one time:\n".format(args.btype))
bsc.restoreOnceFileOfBackup(args.backupid, args.filepath, args.destination)
except Exception as e:
        bsc.writeLine("[!] An error occurred {0}".format(e), logtype="error")
except KeyboardInterrupt:
bsc.writeLine("[-] You have chosen exit\n")
#
if __name__ == "__main__":
main()
# TODO Make the restore run as full or incremental depending on the backup, not on the user's choice
# TODO Replace raw write calls with self.writeLine
# TODO Allow restoring a single file
|
socket_server.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable = line-too-long
"""
@File: socket_server.py
~~~~~~~~~~~
:copyright: (c) 2020 by the Niyuhang.
:license: BSD, see LICENSE for more details.
"""
import os
import logging
import re
from socket import *
from multiprocessing import Process
def main():
    # Open a socket to act as the server,
    # using the default parameters:
    # address family IPv4 (family=AF_INET),
    # type=SOCK_STREAM
server_socket = socket()
    # Set SO_REUSEADDR so the address is released for reuse right after the four-way close
server_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    # Bind the listening address (host, port)
    logging.info("Server started")
server_socket.bind(("0.0.0.0", 8080))
    server_socket.listen(128)  # accept a backlog of up to 128 pending connections
while True:
        # Wait for an incoming connection.
        # accept() returns a socket for the peer together with the peer's address;
        # that socket is then used to exchange data with the client.
socket_client, client_addr = server_socket.accept()
        # The child process gets a copy of the parent's variables (copy-on-write:
        # they only diverge once one side modifies them). Because the child owns its
        # own copy of socket_client, the parent must close its copy of the socket.
this_process = Process(target=handle_request, args=(socket_client,))
this_process.start()
socket_client.close()
def handle_request(socket_client):
"""
    Handle a client socket connection.
:param socket_client:
:return:
"""
    logging.info("Handling a client request")
    recv_data = socket_client.recv(1024).decode("utf-8")  # 1024 is the maximum number of bytes to receive in one call
request_header_lines = recv_data.splitlines()
# for line in request_header_lines:
# print(line)
if not request_header_lines:
file_name = "index.html"
else:
file_name = get_the_file_name(request_header_lines[0]) or "index.html"
try:
        # Send data back to the browser.
        # Build the response headers.
        response_headers = "HTTP/1.1 200 OK\r\n"  # 200 means the resource was found
        response_headers += "\r\n"  # blank line separating the headers from the body
        # Build the response body
response_body = find_static_file_data(file_name)
except:
        response_headers = "HTTP/1.1 404 NOT FOUND\r\n"  # 404: resource not found
        response_headers += "\r\n"  # blank line separating the headers from the body
response_body = ""
    # Combine the headers and body into the full response
response = response_headers + response_body
socket_client.send(response.encode())
socket_client.close()
def get_the_file_name(path):
"""
    Extract the path after the route's "/" and before the next whitespace.
    :param path: e.g. "GET / HTTP/1.0"
:return:
"""
pattern = re.compile(r"(?:[^/]+)/([^?\s]*)")
res = pattern.match(path)
if res:
return res.groups()[0]
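# Example behaviour of get_the_file_name (illustrative only):
#   get_the_file_name("GET /index.html HTTP/1.1")  -> "index.html"
#   get_the_file_name("GET / HTTP/1.0")            -> ""   (caller falls back to index.html)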
def find_static_file_data(file_name: str):
try:
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "htmls", file_name)
with open(path, "r") as f:
return f.read()
except Exception as e:
raise ValueError(e)
if __name__ == '__main__':
main()
|
check_mongodb.bak.py
|
#!/usr/bin/env python
#coding:utf-8
import os
import sys
import string
import time
import datetime
import MySQLdb
import pymongo
import bson
import logging
import logging.config
logging.config.fileConfig("etc/logger.ini")
logger = logging.getLogger("wlblazers")
path='./include'
sys.path.insert(0,path)
import functions as func
from multiprocessing import Process
def check_mongodb(host,port,user,passwd,server_id,tags):
try:
connect = pymongo.Connection(host,int(port))
db = connect['admin']
db.authenticate(user,passwd)
serverStatus=connect.admin.command(bson.son.SON([('serverStatus', 1), ('repl', 2)]))
time.sleep(1)
serverStatus_2=connect.admin.command(bson.son.SON([('serverStatus', 1), ('repl', 2)]))
connect = 1
ok = int(serverStatus['ok'])
version = serverStatus['version']
uptime = serverStatus['uptime']
connections_current = serverStatus['connections']['current']
connections_available = serverStatus['connections']['available']
globalLock_activeClients = serverStatus['globalLock']['activeClients']['total']
globalLock_currentQueue = serverStatus['globalLock']['currentQueue']['total']
indexCounters_accesses = serverStatus['indexCounters']['accesses']
indexCounters_hits = serverStatus['indexCounters']['hits']
indexCounters_misses = serverStatus['indexCounters']['misses']
indexCounters_resets = serverStatus['indexCounters']['resets']
indexCounters_missRatio = serverStatus['indexCounters']['missRatio']
#cursors_totalOpen = serverStatus['cursors']['totalOpen']
#cursors_timeOut = serverStatus['cursors']['timeOut']
dur_commits = serverStatus['dur']['commits']
dur_journaledMB = serverStatus['dur']['journaledMB']
dur_writeToDataFilesMB = serverStatus['dur']['writeToDataFilesMB']
dur_compression = serverStatus['dur']['compression']
dur_commitsInWriteLock = serverStatus['dur']['commitsInWriteLock']
dur_earlyCommits = serverStatus['dur']['earlyCommits']
dur_timeMs_dt = serverStatus['dur']['timeMs']['dt']
dur_timeMs_prepLogBuffer = serverStatus['dur']['timeMs']['prepLogBuffer']
dur_timeMs_writeToJournal = serverStatus['dur']['timeMs']['writeToJournal']
dur_timeMs_writeToDataFiles = serverStatus['dur']['timeMs']['writeToDataFiles']
dur_timeMs_remapPrivateView = serverStatus['dur']['timeMs']['remapPrivateView']
mem_bits = serverStatus['mem']['bits']
mem_resident = serverStatus['mem']['resident']
mem_virtual = serverStatus['mem']['virtual']
mem_supported = serverStatus['mem']['supported']
mem_mapped = serverStatus['mem']['mapped']
mem_mappedWithJournal = serverStatus['mem']['mappedWithJournal']
network_bytesIn_persecond = int(serverStatus_2['network']['bytesIn']) - int(serverStatus['network']['bytesIn'])
network_bytesOut_persecond = int(serverStatus_2['network']['bytesOut']) - int(serverStatus['network']['bytesOut'])
network_numRequests_persecond = int(serverStatus_2['network']['numRequests']) - int(serverStatus['network']['numRequests'])
opcounters_insert_persecond = int(serverStatus_2['opcounters']['insert']) - int(serverStatus['opcounters']['insert'])
opcounters_query_persecond = int(serverStatus_2['opcounters']['query']) - int(serverStatus['opcounters']['query'])
opcounters_update_persecond = int(serverStatus_2['opcounters']['update']) - int(serverStatus['opcounters']['update'])
opcounters_delete_persecond = int(serverStatus_2['opcounters']['delete']) - int(serverStatus['opcounters']['delete'])
opcounters_command_persecond = int(serverStatus_2['opcounters']['command']) - int(serverStatus['opcounters']['command'])
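        # The *_persecond values above are approximate per-second rates: the
        # difference between two serverStatus samples taken one second apart.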
#replset
try:
repl=serverStatus['repl']
setName=repl['setName']
replset=1
if repl['secondary']== True:
repl_role='secondary'
repl_role_new='s'
else:
repl_role='master'
repl_role_new='m'
except:
replset=0
repl_role='master'
repl_role_new='m'
pass
##################### insert data to mysql server#############################
sql = "insert into mongodb_status(server_id,host,port,tags,connect,replset,repl_role,ok,uptime,version,connections_current,connections_available,globalLock_currentQueue,globalLock_activeClients,indexCounters_accesses,indexCounters_hits,indexCounters_misses,indexCounters_resets,indexCounters_missRatio,dur_commits,dur_journaledMB,dur_writeToDataFilesMB,dur_compression,dur_commitsInWriteLock,dur_earlyCommits,dur_timeMs_dt,dur_timeMs_prepLogBuffer,dur_timeMs_writeToJournal,dur_timeMs_writeToDataFiles,dur_timeMs_remapPrivateView,mem_bits,mem_resident,mem_virtual,mem_supported,mem_mapped,mem_mappedWithJournal,network_bytesIn_persecond,network_bytesOut_persecond,network_numRequests_persecond,opcounters_insert_persecond,opcounters_query_persecond,opcounters_update_persecond,opcounters_delete_persecond,opcounters_command_persecond) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);"
param = (server_id,host,port,tags,connect,replset,repl_role,ok,uptime,version,connections_current,connections_available,globalLock_currentQueue,globalLock_activeClients,indexCounters_accesses,indexCounters_hits,indexCounters_misses,indexCounters_resets,indexCounters_missRatio,dur_commits,dur_journaledMB,dur_writeToDataFilesMB,dur_compression,dur_commitsInWriteLock,dur_earlyCommits,dur_timeMs_dt,dur_timeMs_prepLogBuffer,dur_timeMs_writeToJournal,dur_timeMs_writeToDataFiles,dur_timeMs_remapPrivateView,mem_bits,mem_resident,mem_virtual,mem_supported,mem_mapped,mem_mappedWithJournal,network_bytesIn_persecond,network_bytesOut_persecond,network_numRequests_persecond,opcounters_insert_persecond,opcounters_query_persecond,opcounters_update_persecond,opcounters_delete_persecond,opcounters_command_persecond)
func.mysql_exec(sql,param)
role='m'
func.update_db_status_init(repl_role_new,version,host,port,tags)
except Exception, e:
logger_msg="check mongodb %s:%s : %s" %(host,port,e)
logger.warning(logger_msg)
try:
connect=0
sql="insert into mongodb_status(server_id,host,port,tags,connect) values(%s,%s,%s,%s,%s)"
param=(server_id,host,port,tags,connect)
func.mysql_exec(sql,param)
except Exception, e:
logger.error(e)
sys.exit(1)
finally:
sys.exit(1)
finally:
func.check_db_status(server_id,host,port,tags,'mongodb')
sys.exit(1)
def main():
func.mysql_exec("insert into mongodb_status_his SELECT *,LEFT(REPLACE(REPLACE(REPLACE(create_time,'-',''),' ',''),':',''),12) from mongodb_status;",'')
func.mysql_exec('delete from mongodb_status;','')
#get mongodb servers list
servers = func.mysql_query('select id,host,port,username,password,tags from db_cfg_mongodb where is_delete=0 and monitor=1;')
logger.info("check mongodb controller started.")
if servers:
plist = []
for row in servers:
server_id=row[0]
host=row[1]
port=row[2]
username=row[3]
password=row[4]
tags=row[5]
p = Process(target = check_mongodb, args = (host,port,username,password,server_id,tags))
plist.append(p)
p.start()
for p in plist:
p.join()
else:
logger.warning("check mongodb: not found any servers")
logger.info("check mongodb controller finished.")
if __name__=='__main__':
main()
|
server.py
|
from arduinoToPi import *
from piToDb import *
import os
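# Note: Queue and Thread used below are assumed to be re-exported by the star
# imports above (i.e. queue.Queue / threading.Thread on Python 3); import them
# explicitly here if arduinoToPi / piToDb do not provide them.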
def main():
print("Starting pi server...")
print("Current directory: " + str(os.getcwd()))
master_queue = Queue()
# Note: we need a comma after the queue to indicate that we want a tuple,
# rather than just an expression with parentheses around it.
arduino_thread = Thread(target=arduino_main, args=(master_queue,))
db_thread = Thread(target=database_main, args=(master_queue,))
arduino_thread.start()
db_thread.start()
arduino_thread.join()
db_thread.join()
if __name__ == "__main__":
main()
|
handlers.py
|
# Copyright 2001-2015 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2015 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import logging, socket, os, pickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME
import queue
try:
import threading
except ImportError: #pragma: no cover
threading = None
#
# Some constants...
#
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
"""
Base class for handlers that rotate log files at a certain point.
Not meant to be instantiated directly. Instead, use RotatingFileHandler
or TimedRotatingFileHandler.
"""
def __init__(self, filename, mode, encoding=None, delay=False):
"""
Use the specified filename for streamed logging
"""
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.mode = mode
self.encoding = encoding
self.namer = None
self.rotator = None
def emit(self, record):
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
try:
if self.shouldRollover(record):
self.doRollover()
logging.FileHandler.emit(self, record)
except Exception:
self.handleError(record)
def rotation_filename(self, default_name):
"""
Modify the filename of a log file when rotating.
This is provided so that a custom filename can be provided.
The default implementation calls the 'namer' attribute of the
handler, if it's callable, passing the default name to
it. If the attribute isn't callable (the default is None), the name
is returned unchanged.
:param default_name: The default name for the log file.
"""
if not callable(self.namer):
result = default_name
else:
result = self.namer(default_name)
return result
def rotate(self, source, dest):
"""
When rotating, rotate the current log.
The default implementation calls the 'rotator' attribute of the
handler, if it's callable, passing the source and dest arguments to
it. If the attribute isn't callable (the default is None), the source
is simply renamed to the destination.
:param source: The source filename. This is normally the base
filename, e.g. 'test.log'
:param dest: The destination filename. This is normally
what the source is rotated to, e.g. 'test.log.1'.
"""
if not callable(self.rotator):
# Issue 18940: A file may not have been created if delay is True.
if os.path.exists(source):
os.rename(source, dest)
else:
self.rotator(source, dest)
class RotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a set of files, which switches from one file
to the next when the current file reaches a certain size.
"""
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
"""
# If rotation/rollover is wanted, it doesn't make sense to use another
# mode. If for example 'w' were specified, then if there were multiple
# runs of the calling application, the logs from previous runs would be
# lost if the 'w' is respected, because the log file would be truncated
# on each run.
if maxBytes > 0:
mode = 'a'
BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
self.maxBytes = maxBytes
self.backupCount = backupCount
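    # Usage sketch (not part of the original module): rotate at ~1 MB, keep 5 backups
    #   handler = RotatingFileHandler("app.log", maxBytes=1000000, backupCount=5)
    #   logging.getLogger().addHandler(handler)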
def doRollover(self):
"""
Do a rollover, as described in __init__().
"""
if self.stream:
self.stream.close()
self.stream = None
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
i + 1))
if os.path.exists(sfn):
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.rotation_filename(self.baseFilename + ".1")
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if not self.delay:
self.stream = self._open()
def shouldRollover(self, record):
"""
Determine if rollover should occur.
Basically, see if the supplied record would cause the file to exceed
the size limit we have.
"""
if self.stream is None: # delay was set...
self.stream = self._open()
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
return 1
return 0
class TimedRotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a file, rotating the log file at certain timed
intervals.
If backupCount is > 0, when rollover is done, no more than backupCount
files are kept - the oldest ones are deleted.
"""
def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False, atTime=None):
BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
self.when = when.upper()
self.backupCount = backupCount
self.utc = utc
self.atTime = atTime
# Calculate the real rollover interval, which is just the number of
# seconds between rollovers. Also set the filename suffix used when
# a rollover occurs. Current 'when' events supported:
# S - Seconds
# M - Minutes
# H - Hours
# D - Days
# midnight - roll over at midnight
# W{0-6} - roll over on a certain day; 0 - Monday
#
# Case of the 'when' specifier is not important; lower or upper case
# will work.
if self.when == 'S':
self.interval = 1 # one second
self.suffix = "%Y-%m-%d_%H-%M-%S"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'M':
self.interval = 60 # one minute
self.suffix = "%Y-%m-%d_%H-%M"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'H':
self.interval = 60 * 60 # one hour
self.suffix = "%Y-%m-%d_%H"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
elif self.when == 'D' or self.when == 'MIDNIGHT':
self.interval = 60 * 60 * 24 # one day
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
elif self.when.startswith('W'):
self.interval = 60 * 60 * 24 * 7 # one week
if len(self.when) != 2:
raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
if self.when[1] < '0' or self.when[1] > '6':
raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
self.dayOfWeek = int(self.when[1])
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
else:
raise ValueError("Invalid rollover interval specified: %s" % self.when)
self.extMatch = re.compile(self.extMatch, re.ASCII)
self.interval = self.interval * interval # multiply by units requested
if os.path.exists(filename):
t = os.stat(filename)[ST_MTIME]
else:
t = int(time.time())
self.rolloverAt = self.computeRollover(t)
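    # Usage sketch (not part of the original module): rotate daily at midnight,
    # keeping one week of logs
    #   handler = TimedRotatingFileHandler("app.log", when="midnight", backupCount=7)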
def computeRollover(self, currentTime):
"""
Work out the rollover time based on the specified time.
"""
result = currentTime + self.interval
# If we are rolling over at midnight or weekly, then the interval is already known.
# What we need to figure out is WHEN the next interval is. In other words,
# if you are rolling over at midnight, then your base interval is 1 day,
# but you want to start that one day clock at midnight, not now. So, we
# have to fudge the rolloverAt value in order to trigger the first rollover
# at the right time. After that, the regular interval will take care of
# the rest. Note that this code doesn't care about leap seconds. :)
if self.when == 'MIDNIGHT' or self.when.startswith('W'):
# This could be done with less code, but I wanted it to be clear
if self.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
currentDay = t[6]
# r is the number of seconds left between now and the next rotation
if self.atTime is None:
rotate_ts = _MIDNIGHT
else:
rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
self.atTime.second)
r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
currentSecond)
if r < 0:
# Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it is now 14:15), rotation is
# tomorrow.
r += _MIDNIGHT
currentDay = (currentDay + 1) % 7
result = currentTime + r
# If we are rolling over on a certain day, add in the number of days until
# the next rollover, but offset by 1 since we just calculated the time
# until the next day starts. There are three cases:
# Case 1) The day to rollover is today; in this case, do nothing
# Case 2) The day to rollover is further in the interval (i.e., today is
# day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
# next rollover is simply 6 - 2 - 1, or 3.
# Case 3) The day to rollover is behind us in the interval (i.e., today
# is day 5 (Saturday) and rollover is on day 3 (Thursday).
# Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
# number of days left in the current week (1) plus the number
# of days in the next week until the rollover day (3).
# The calculations described in 2) and 3) above need to have a day added.
# This is because the above time calculation takes us to midnight on this
# day, i.e. the start of the next day.
if self.when.startswith('W'):
day = currentDay # 0 is Monday
if day != self.dayOfWeek:
if day < self.dayOfWeek:
daysToWait = self.dayOfWeek - day
else:
daysToWait = 6 - day + self.dayOfWeek + 1
newRolloverAt = result + (daysToWait * (60 * 60 * 24))
if not self.utc:
dstNow = t[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
result = newRolloverAt
return result
def shouldRollover(self, record):
"""
Determine if rollover should occur.
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same
"""
t = int(time.time())
if t >= self.rolloverAt:
return 1
return 0
def getFilesToDelete(self):
"""
Determine the files to delete when rolling over.
More specific than the earlier method, which just used glob.glob().
"""
dirName, baseName = os.path.split(self.baseFilename)
fileNames = os.listdir(dirName)
result = []
prefix = baseName + "."
plen = len(prefix)
for fileName in fileNames:
if fileName[:plen] == prefix:
suffix = fileName[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dirName, fileName))
result.sort()
if len(result) < self.backupCount:
result = []
else:
result = result[:len(result) - self.backupCount]
return result
def doRollover(self):
"""
do a rollover; in this case, a date/time stamp is appended to the filename
when the rollover happens. However, you want the file to be named for the
start of the interval, not the current time. If there is a backup count,
then we have to get a list of matching filenames, sort them and remove
the one with the oldest suffix.
"""
if self.stream:
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
currentTime = int(time.time())
dstNow = time.localtime(currentTime)[-1]
t = self.rolloverAt - self.interval
if self.utc:
timeTuple = time.gmtime(t)
else:
timeTuple = time.localtime(t)
dstThen = timeTuple[-1]
if dstNow != dstThen:
if dstNow:
addend = 3600
else:
addend = -3600
timeTuple = time.localtime(t + addend)
dfn = self.rotation_filename(self.baseFilename + "." +
time.strftime(self.suffix, timeTuple))
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if self.backupCount > 0:
for s in self.getFilesToDelete():
os.remove(s)
if not self.delay:
self.stream = self._open()
newRolloverAt = self.computeRollover(currentTime)
while newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
#If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
self.rolloverAt = newRolloverAt
class WatchedFileHandler(logging.FileHandler):
"""
A handler for logging to a file, which watches the file
to see if it has changed while in use. This can happen because of
usage of programs such as newsyslog and logrotate which perform
log file rotation. This handler, intended for use under Unix,
watches the file to see if it has changed since the last emit.
(A file has changed if its device or inode have changed.)
If it has changed, the old file stream is closed, and the file
opened to get a new stream.
This handler is not appropriate for use under Windows, because
under Windows open files cannot be moved or renamed - logging
opens the files with exclusive locks - and so there is no need
for such a handler. Furthermore, ST_INO is not supported under
Windows; stat always returns zero for this value.
This handler is based on a suggestion and patch by Chad J.
Schroeder.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.dev, self.ino = -1, -1
self._statstream()
def _statstream(self):
if self.stream:
sres = os.fstat(self.stream.fileno())
self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
def emit(self, record):
"""
Emit a record.
First check if the underlying file has changed, and if it
has, close the old stream and reopen the file to get the
current stream.
"""
# Reduce the chance of race conditions by stat'ing by path only
# once and then fstat'ing our new fd if we opened a new log stream.
# See issue #14632: Thanks to John Mulligan for the problem report
# and patch.
try:
# stat the file by path, checking for existence
sres = os.stat(self.baseFilename)
except FileNotFoundError:
sres = None
# compare file system stat with that of our stream file handle
if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
if self.stream is not None:
# we have an open file handle, clean it up
self.stream.flush()
self.stream.close()
self.stream = None # See Issue #21742: _open () might fail.
# open a new file handle and get new stat info from that fd
self.stream = self._open()
self._statstream()
logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
"""
A handler class which writes logging records, in pickle format, to
a streaming socket. The socket is kept open across logging calls.
If the peer resets it, an attempt is made to reconnect on the next call.
The pickle which is sent is that of the LogRecord's attribute dictionary
(__dict__), so that the receiver does not need to have the logging module
installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
When the attribute *closeOnError* is set to True - if a socket error
occurs, the socket is silently closed and then reopened on the next
logging call.
"""
logging.Handler.__init__(self)
self.host = host
self.port = port
if port is None:
self.address = host
else:
self.address = (host, port)
self.sock = None
self.closeOnError = False
self.retryTime = None
#
# Exponential backoff parameters.
#
self.retryStart = 1.0
self.retryMax = 30.0
self.retryFactor = 2.0
def makeSocket(self, timeout=1):
"""
A factory method which allows subclasses to define the precise
type of socket they want.
"""
if self.port is not None:
result = socket.create_connection(self.address, timeout=timeout)
else:
result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
result.settimeout(timeout)
try:
result.connect(self.address)
except OSError:
result.close() # Issue 19182
raise
return result
def createSocket(self):
"""
Try to create a socket, using an exponential backoff with
a max retry time. Thanks to Robert Olson for the original patch
(SF #815911) which has been slightly refactored.
"""
now = time.time()
# Either retryTime is None, in which case this
# is the first time back after a disconnect, or
# we've waited long enough.
if self.retryTime is None:
attempt = True
else:
attempt = (now >= self.retryTime)
if attempt:
try:
self.sock = self.makeSocket()
self.retryTime = None # next time, no delay before trying
except OSError:
#Creation failed, so set the retry time and return.
if self.retryTime is None:
self.retryPeriod = self.retryStart
else:
self.retryPeriod = self.retryPeriod * self.retryFactor
if self.retryPeriod > self.retryMax:
self.retryPeriod = self.retryMax
self.retryTime = now + self.retryPeriod
def send(self, s):
"""
Send a pickled string to the socket.
This function allows for partial sends which can happen when the
network is busy.
"""
if self.sock is None:
self.createSocket()
#self.sock can be None either because we haven't reached the retry
#time yet, or because we have reached the retry time and retried,
#but are still unable to connect.
if self.sock:
try:
self.sock.sendall(s)
except OSError: #pragma: no cover
self.sock.close()
self.sock = None # so we can call createSocket next time
def makePickle(self, record):
"""
Pickles the record in binary format with a length prefix, and
returns it ready for transmission across the socket.
"""
ei = record.exc_info
if ei:
# just to get traceback text into record.exc_text ...
dummy = self.format(record)
# See issue #14436: If msg or args are objects, they may not be
# available on the receiving end. So we convert the msg % args
# to a string, save it as msg and zap the args.
d = dict(record.__dict__)
d['msg'] = record.getMessage()
d['args'] = None
d['exc_info'] = None
# Issue #25685: delete 'message' if present: redundant with 'msg'
d.pop('message', None)
s = pickle.dumps(d, 1)
slen = struct.pack(">L", len(s))
return slen + s
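    # Wire format produced by makePickle: a 4-byte big-endian length prefix followed
    # by the pickled LogRecord dict. A receiver sketch (assumption, not part of this
    # module) would read 4 bytes, struct.unpack(">L", ...) them, read that many bytes,
    # pickle.loads() the payload and rebuild the record with logging.makeLogRecord().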
def handleError(self, record):
"""
Handle an error during logging.
An error has occurred during logging. Most likely cause -
connection lost. Close the socket so that we can retry on the
next event.
"""
if self.closeOnError and self.sock:
self.sock.close()
self.sock = None #try to reconnect next time
else:
logging.Handler.handleError(self, record)
def emit(self, record):
"""
Emit a record.
Pickles the record and writes it to the socket in binary format.
If there is an error with the socket, silently drop the packet.
If there was a problem with the socket, re-establishes the
socket.
"""
try:
s = self.makePickle(record)
self.send(s)
except Exception:
self.handleError(record)
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
sock = self.sock
if sock:
self.sock = None
sock.close()
logging.Handler.close(self)
finally:
self.release()
class DatagramHandler(SocketHandler):
"""
A handler class which writes logging records, in pickle format, to
a datagram socket. The pickle which is sent is that of the LogRecord's
attribute dictionary (__dict__), so that the receiver does not need to
have the logging module installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
"""
SocketHandler.__init__(self, host, port)
self.closeOnError = False
def makeSocket(self):
"""
The factory method of SocketHandler is here overridden to create
a UDP socket (SOCK_DGRAM).
"""
if self.port is None:
family = socket.AF_UNIX
else:
family = socket.AF_INET
s = socket.socket(family, socket.SOCK_DGRAM)
return s
def send(self, s):
"""
Send a pickled string to a socket.
This function no longer allows for partial sends which can happen
when the network is busy - UDP does not guarantee delivery and
can deliver packets out of sequence.
"""
if self.sock is None:
self.createSocket()
self.sock.sendto(s, self.address)
class SysLogHandler(logging.Handler):
"""
A handler class which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
# from <linux/sys/syslog.h>:
# ======================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where
# the bottom 3 bits are the priority (0-7) and the top 28 bits are the
# facility (0-big number). Both the priorities and the facilities map
# roughly one-to-one to strings in the syslogd(8) source code. This
# mapping is included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"critical": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"ftp": LOG_FTP,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"security": LOG_AUTH, # DEPRECATED
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
#The map below appears to be trivially lowercasing the key. However,
#there's more to it than meets the eye - in some locales, lowercasing
#gives unexpected results. See SF #1524081: in the Turkish locale,
#"INFO".lower() != "info"
priority_map = {
"DEBUG" : "debug",
"INFO" : "info",
"WARNING" : "warning",
"ERROR" : "error",
"CRITICAL" : "critical"
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
facility=LOG_USER, socktype=None):
"""
Initialize a handler.
If address is specified as a string, a UNIX socket is used. To log to a
local syslogd, "SysLogHandler(address="/dev/log")" can be used.
If facility is not specified, LOG_USER is used. If socktype is
specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
socket type will be used. For Unix sockets, you can also specify a
socktype of None, in which case socket.SOCK_DGRAM will be used, falling
back to socket.SOCK_STREAM.
"""
logging.Handler.__init__(self)
self.address = address
self.facility = facility
self.socktype = socktype
if isinstance(address, str):
self.unixsocket = True
self._connect_unixsocket(address)
else:
self.unixsocket = False
if socktype is None:
socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_INET, socktype)
if socktype == socket.SOCK_STREAM:
self.socket.connect(address)
self.socktype = socktype
self.formatter = None
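    # Usage sketch (not part of the original module):
    #   SysLogHandler(address="/dev/log")                               # local syslogd via Unix socket
    #   SysLogHandler(address=("syslog.example.org", SYSLOG_UDP_PORT))  # remote UDP syslog
    # "syslog.example.org" is a placeholder hostname.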
def _connect_unixsocket(self, address):
use_socktype = self.socktype
if use_socktype is None:
use_socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except OSError:
self.socket.close()
if self.socktype is not None:
# user didn't specify falling back, so fail
raise
use_socktype = socket.SOCK_STREAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except OSError:
self.socket.close()
raise
def encodePriority(self, facility, priority):
"""
Encode the facility and priority. You can pass in strings or
integers - if strings are passed, the facility_names and
priority_names mapping dictionaries are used to convert them to
integers.
"""
if isinstance(facility, str):
facility = self.facility_names[facility]
if isinstance(priority, str):
priority = self.priority_names[priority]
return (facility << 3) | priority
    def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
self.socket.close()
logging.Handler.close(self)
finally:
self.release()
def mapPriority(self, levelName):
"""
Map a logging level name to a key in the priority_names map.
This is useful in two scenarios: when custom levels are being
used, and in the case where you can't do a straightforward
mapping by lowercasing the logging level name because of locale-
specific issues (see SF #1524081).
"""
return self.priority_map.get(levelName, "warning")
ident = '' # prepended to all messages
append_nul = True # some old syslog daemons expect a NUL terminator
def emit(self, record):
"""
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
"""
try:
msg = self.format(record)
if self.ident:
msg = self.ident + msg
if self.append_nul:
msg += '\000'
# We need to convert record level to lowercase, maybe this will
# change in the future.
prio = '<%d>' % self.encodePriority(self.facility,
self.mapPriority(record.levelname))
prio = prio.encode('utf-8')
# Message is a string. Convert to bytes as required by RFC 5424
msg = msg.encode('utf-8')
msg = prio + msg
if self.unixsocket:
try:
self.socket.send(msg)
except OSError:
self.socket.close()
self._connect_unixsocket(self.address)
self.socket.send(msg)
elif self.socktype == socket.SOCK_DGRAM:
self.socket.sendto(msg, self.address)
else:
self.socket.sendall(msg)
except Exception:
self.handleError(record)
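# A minimal usage sketch (not part of the handler implementation; the
# '/dev/log' path assumes a local Unix-domain syslog socket, which is common
# on Linux but not universal):
#
#     import logging
#     from logging.handlers import SysLogHandler
#
#     logger = logging.getLogger("myapp")
#     handler = SysLogHandler(address="/dev/log", facility=SysLogHandler.LOG_DAEMON)
#     handler.ident = "myapp: "   # prepended to every message (see `ident` above)
#     logger.addHandler(handler)
#     logger.warning("disk usage is high")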
class SMTPHandler(logging.Handler):
"""
A handler class which sends an SMTP email for each logging event.
"""
def __init__(self, mailhost, fromaddr, toaddrs, subject,
credentials=None, secure=None, timeout=5.0):
"""
Initialize the handler.
Initialize the instance with the from and to addresses and subject
line of the email. To specify a non-standard SMTP port, use the
(host, port) tuple format for the mailhost argument. To specify
authentication credentials, supply a (username, password) tuple
for the credentials argument. To specify the use of a secure
protocol (TLS), pass in a tuple for the secure argument. This will
only be used when authentication credentials are supplied. The tuple
will be either an empty tuple, or a single-value tuple with the name
of a keyfile, or a 2-value tuple with the names of the keyfile and
certificate file. (This tuple is passed to the `starttls` method).
        A timeout in seconds can be specified for the SMTP connection (the
        default is 5 seconds).
"""
logging.Handler.__init__(self)
if isinstance(mailhost, (list, tuple)):
self.mailhost, self.mailport = mailhost
else:
self.mailhost, self.mailport = mailhost, None
if isinstance(credentials, (list, tuple)):
self.username, self.password = credentials
else:
self.username = None
self.fromaddr = fromaddr
if isinstance(toaddrs, str):
toaddrs = [toaddrs]
self.toaddrs = toaddrs
self.subject = subject
self.secure = secure
self.timeout = timeout
def getSubject(self, record):
"""
Determine the subject for the email.
If you want to specify a subject line which is record-dependent,
override this method.
"""
return self.subject
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
try:
import smtplib
from email.message import EmailMessage
import email.utils
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
msg = EmailMessage()
msg['From'] = self.fromaddr
msg['To'] = ','.join(self.toaddrs)
msg['Subject'] = self.getSubject(record)
msg['Date'] = email.utils.localtime()
msg.set_content(self.format(record))
if self.username:
if self.secure is not None:
smtp.ehlo()
smtp.starttls(*self.secure)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.send_message(msg)
smtp.quit()
except Exception:
self.handleError(record)
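# A minimal usage sketch (illustrative only; the host, port and addresses are
# placeholders): email every ERROR-or-worse record to an operations address.
#
#     import logging
#     from logging.handlers import SMTPHandler
#
#     handler = SMTPHandler(
#         mailhost=("mail.example.com", 587),
#         fromaddr="app@example.com",
#         toaddrs=["ops@example.com"],
#         subject="Application error",
#         credentials=("user", "password"),
#         secure=(),  # empty tuple: plain starttls() without key/cert files
#     )
#     handler.setLevel(logging.ERROR)
#     logging.getLogger("myapp").addHandler(handler)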
class NTEventLogHandler(logging.Handler):
"""
A handler class which sends events to the NT Event Log. Adds a
registry entry for the specified application name. If no dllname is
provided, win32service.pyd (which contains some basic message
placeholders) is used. Note that use of these placeholders will make
your event logs big, as the entire message source is held in the log.
If you want slimmer logs, you have to pass in the name of your own DLL
which contains the message definitions you want to use in the event log.
"""
def __init__(self, appname, dllname=None, logtype="Application"):
logging.Handler.__init__(self)
try:
import win32evtlogutil, win32evtlog
self.appname = appname
self._welu = win32evtlogutil
if not dllname:
dllname = os.path.split(self._welu.__file__)
dllname = os.path.split(dllname[0])
dllname = os.path.join(dllname[0], r'win32service.pyd')
self.dllname = dllname
self.logtype = logtype
self._welu.AddSourceToRegistry(appname, dllname, logtype)
self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
self.typemap = {
logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
}
except ImportError:
print("The Python Win32 extensions for NT (service, event "\
"logging) appear not to be available.")
self._welu = None
def getMessageID(self, record):
"""
Return the message ID for the event record. If you are using your
own messages, you could do this by having the msg passed to the
logger being an ID rather than a formatting string. Then, in here,
you could use a dictionary lookup to get the message ID. This
version returns 1, which is the base message ID in win32service.pyd.
"""
return 1
def getEventCategory(self, record):
"""
Return the event category for the record.
Override this if you want to specify your own categories. This version
returns 0.
"""
return 0
def getEventType(self, record):
"""
Return the event type for the record.
Override this if you want to specify your own types. This version does
a mapping using the handler's typemap attribute, which is set up in
__init__() to a dictionary which contains mappings for DEBUG, INFO,
WARNING, ERROR and CRITICAL. If you are using your own levels you will
either need to override this method or place a suitable dictionary in
the handler's typemap attribute.
"""
return self.typemap.get(record.levelno, self.deftype)
def emit(self, record):
"""
Emit a record.
Determine the message ID, event category and event type. Then
log the message in the NT event log.
"""
if self._welu:
try:
id = self.getMessageID(record)
cat = self.getEventCategory(record)
type = self.getEventType(record)
msg = self.format(record)
self._welu.ReportEvent(self.appname, id, cat, type, [msg])
except Exception:
self.handleError(record)
def close(self):
"""
Clean up this handler.
You can remove the application name from the registry as a
source of event log entries. However, if you do this, you will
not be able to see the events as you intended in the Event Log
Viewer - it needs to be able to access the registry to get the
DLL name.
"""
#self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
logging.Handler.close(self)
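# A minimal usage sketch (Windows only; requires the pywin32 extensions to be
# installed, otherwise __init__() only prints a notice and emit() is a no-op):
#
#     import logging
#     from logging.handlers import NTEventLogHandler
#
#     logger = logging.getLogger("myapp")
#     logger.addHandler(NTEventLogHandler("MyApplication"))
#     logger.error("something went wrong")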
class HTTPHandler(logging.Handler):
"""
A class which sends records to a Web server, using either GET or
POST semantics.
"""
def __init__(self, host, url, method="GET", secure=False, credentials=None,
context=None):
"""
Initialize the instance with the host, the request URL, and the method
("GET" or "POST")
"""
logging.Handler.__init__(self)
method = method.upper()
if method not in ["GET", "POST"]:
raise ValueError("method must be GET or POST")
if not secure and context is not None:
raise ValueError("context parameter only makes sense "
"with secure=True")
self.host = host
self.url = url
self.method = method
self.secure = secure
self.credentials = credentials
self.context = context
def mapLogRecord(self, record):
"""
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Override this method in a subclass
        if you need a different mapping.
Contributed by Franz Glasner.
"""
return record.__dict__
def emit(self, record):
"""
Emit a record.
Send the record to the Web server as a percent-encoded dictionary
"""
try:
import http.client, urllib.parse
host = self.host
if self.secure:
h = http.client.HTTPSConnection(host, context=self.context)
else:
h = http.client.HTTPConnection(host)
url = self.url
data = urllib.parse.urlencode(self.mapLogRecord(record))
if self.method == "GET":
if (url.find('?') >= 0):
sep = '&'
else:
sep = '?'
url = url + "%c%s" % (sep, data)
h.putrequest(self.method, url)
# support multiple hosts on one IP address...
# need to strip optional :port from host, if present
i = host.find(":")
if i >= 0:
host = host[:i]
h.putheader("Host", host)
if self.method == "POST":
h.putheader("Content-type",
"application/x-www-form-urlencoded")
h.putheader("Content-length", str(len(data)))
if self.credentials:
import base64
s = ('%s:%s' % self.credentials).encode('utf-8')
s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
h.putheader('Authorization', s)
h.endheaders()
if self.method == "POST":
h.send(data.encode('utf-8'))
h.getresponse() #can't do anything with the result
except Exception:
self.handleError(record)
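# A minimal usage sketch (the host and URL below are placeholders): POST each
# record's attribute dict to a web endpoint.
#
#     import logging
#     from logging.handlers import HTTPHandler
#
#     handler = HTTPHandler("logs.example.com:8080", "/log", method="POST")
#     logging.getLogger("myapp").addHandler(handler)
#     logging.getLogger("myapp").info("request handled")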
class BufferingHandler(logging.Handler):
"""
    A handler class which buffers logging records in memory. Whenever a
    record is added to the buffer, a check is made to see if the buffer
    should be flushed. If it should, then flush() is expected to do what's
    needed.
"""
def __init__(self, capacity):
"""
Initialize the handler with the buffer size.
"""
logging.Handler.__init__(self)
self.capacity = capacity
self.buffer = []
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
return (len(self.buffer) >= self.capacity)
def emit(self, record):
"""
Emit a record.
Append the record. If shouldFlush() tells us to, call flush() to process
the buffer.
"""
self.buffer.append(record)
if self.shouldFlush(record):
self.flush()
def flush(self):
"""
Override to implement custom flushing behaviour.
This version just zaps the buffer to empty.
"""
self.acquire()
try:
self.buffer = []
finally:
self.release()
def close(self):
"""
Close the handler.
This version just flushes and chains to the parent class' close().
"""
try:
self.flush()
finally:
logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
"""
A handler class which buffers logging records in memory, periodically
flushing them to a target handler. Flushing occurs whenever the buffer
is full, or when an event of a certain severity or greater is seen.
"""
def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
"""
Initialize the handler with the buffer size, the level at which
flushing should occur and an optional target.
Note that without a target being set either here or via setTarget(),
a MemoryHandler is no use to anyone!
"""
BufferingHandler.__init__(self, capacity)
self.flushLevel = flushLevel
self.target = target
def shouldFlush(self, record):
"""
Check for buffer full or a record at the flushLevel or higher.
"""
return (len(self.buffer) >= self.capacity) or \
(record.levelno >= self.flushLevel)
def setTarget(self, target):
"""
Set the target handler for this handler.
"""
self.target = target
def flush(self):
"""
For a MemoryHandler, flushing means just sending the buffered
records to the target, if there is one. Override if you want
different behaviour.
The record buffer is also cleared by this operation.
"""
self.acquire()
try:
if self.target:
for record in self.buffer:
self.target.handle(record)
self.buffer = []
finally:
self.release()
def close(self):
"""
Flush, set the target to None and lose the buffer.
"""
try:
self.flush()
finally:
self.acquire()
try:
self.target = None
BufferingHandler.close(self)
finally:
self.release()
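# A minimal usage sketch: buffer records in memory and only push them to the
# target handler when an ERROR is seen or the buffer fills up (the file name
# below is a placeholder).
#
#     import logging
#     from logging.handlers import MemoryHandler
#
#     target = logging.FileHandler("app.log")
#     buffered = MemoryHandler(capacity=100, flushLevel=logging.ERROR, target=target)
#     logging.getLogger("myapp").addHandler(buffered)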
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
    This code is new in Python 3.2, but this class can be copied into
    user code for use with earlier Python versions.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses put_nowait. You may want to override
this method if you want to use blocking, timeouts or custom queue
implementations.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also puts the message into
# record.message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info attribute, as it's no longer needed and, if not None,
# will typically not be pickleable.
self.format(record)
record.msg = record.message
record.args = None
record.exc_info = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
"""
try:
self.enqueue(self.prepare(record))
except Exception:
self.handleError(record)
if threading:
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
"""
_sentinel = None
def __init__(self, queue, *handlers, respect_handler_level=False):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._stop = threading.Event()
self._thread = None
self.respect_handler_level = respect_handler_level
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses get. You may want to override this method
if you want to use timeouts or work with custom queue implementations.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
            self._thread = t = threading.Thread(target=self._monitor)
            t.daemon = True
            t.start()
        def prepare(self, record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
if not self.respect_handler_level:
process = True
else:
process = record.levelno >= handler.level
if process:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
            while not self._stop.is_set():
try:
record = self.dequeue(True)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
pass
# There might still be records in the queue.
while True:
try:
record = self.dequeue(False)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def enqueue_sentinel(self):
"""
This is used to enqueue the sentinel record.
The base implementation uses put_nowait. You may want to override this
method if you want to use timeouts or work with custom queue
implementations.
"""
self.queue.put_nowait(self._sentinel)
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self._stop.set()
self.enqueue_sentinel()
self._thread.join()
self._thread = None
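# A minimal usage sketch: decouple logging I/O from producer threads by pushing
# records through a queue and letting a QueueListener thread do the actual
# handling.
#
#     import logging, queue
#     from logging.handlers import QueueHandler, QueueListener
#
#     log_queue = queue.Queue(-1)
#     listener = QueueListener(log_queue, logging.StreamHandler())
#     listener.start()
#     logging.getLogger("myapp").addHandler(QueueHandler(log_queue))
#     logging.getLogger("myapp").info("hello from a worker thread")
#     listener.stop()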
|
test_tune_restore.py
|
# coding: utf-8
import signal
from collections import Counter
import os
import shutil
import tempfile
import time
import unittest
import skopt
import numpy as np
from hyperopt import hp
from nevergrad.optimization import optimizerlib
from zoopt import ValueType
from hebo.design_space.design_space import DesignSpace as HEBODesignSpace
import ray
from ray import tune
from ray.test_utils import recursive_fnmatch
from ray.rllib import _register_all
from ray.tune.callback import Callback
from ray.tune.suggest.basic_variant import BasicVariantGenerator
from ray.tune.suggest import ConcurrencyLimiter, Searcher
from ray.tune.suggest.hyperopt import HyperOptSearch
from ray.tune.suggest.dragonfly import DragonflySearch
from ray.tune.suggest.bayesopt import BayesOptSearch
from ray.tune.suggest.flaml import CFO
from ray.tune.suggest.skopt import SkOptSearch
from ray.tune.suggest.nevergrad import NevergradSearch
from ray.tune.suggest.optuna import OptunaSearch, param as ot_param
from ray.tune.suggest.sigopt import SigOptSearch
from ray.tune.suggest.zoopt import ZOOptSearch
from ray.tune.suggest.hebo import HEBOSearch
from ray.tune.utils import validate_save_restore
from ray.tune.utils._mock_trainable import MyTrainableClass
class TuneRestoreTest(unittest.TestCase):
def setUp(self):
ray.init(num_cpus=1, num_gpus=0, local_mode=True)
tmpdir = tempfile.mkdtemp()
test_name = "TuneRestoreTest"
tune.run(
"PG",
name=test_name,
stop={"training_iteration": 1},
checkpoint_freq=1,
local_dir=tmpdir,
config={
"env": "CartPole-v0",
"framework": "tf",
},
)
logdir = os.path.expanduser(os.path.join(tmpdir, test_name))
self.logdir = logdir
self.checkpoint_path = recursive_fnmatch(logdir, "checkpoint-1")[0]
def tearDown(self):
shutil.rmtree(self.logdir)
ray.shutdown()
_register_all()
def testTuneRestore(self):
self.assertTrue(os.path.isfile(self.checkpoint_path))
tune.run(
"PG",
name="TuneRestoreTest",
stop={"training_iteration": 2}, # train one more iteration.
checkpoint_freq=1,
restore=self.checkpoint_path, # Restore the checkpoint
config={
"env": "CartPole-v0",
"framework": "tf",
},
)
def testPostRestoreCheckpointExistence(self):
"""Tests that checkpoint restored from is not deleted post-restore."""
self.assertTrue(os.path.isfile(self.checkpoint_path))
tune.run(
"PG",
name="TuneRestoreTest",
stop={"training_iteration": 2},
checkpoint_freq=1,
keep_checkpoints_num=1,
restore=self.checkpoint_path,
config={
"env": "CartPole-v0",
"framework": "tf",
},
)
self.assertTrue(os.path.isfile(self.checkpoint_path))
class TuneInterruptionTest(unittest.TestCase):
def setUp(self) -> None:
# Wait up to five seconds for placement groups when starting a trial
os.environ["TUNE_PLACEMENT_GROUP_WAIT_S"] = "5"
# Block for results even when placement groups are pending
os.environ["TUNE_TRIAL_STARTUP_GRACE_PERIOD"] = "0"
def testExperimentInterrupted(self):
import multiprocessing
trainer_semaphore = multiprocessing.Semaphore()
driver_semaphore = multiprocessing.Semaphore()
class SteppingCallback(Callback):
def on_step_end(self, iteration, trials, **info):
driver_semaphore.release() # Driver should continue
trainer_semaphore.acquire() # Wait until released
def _run(local_dir):
def _train(config):
for i in range(7):
tune.report(val=i)
tune.run(
_train,
local_dir=local_dir,
name="interrupt",
callbacks=[SteppingCallback()])
local_dir = tempfile.mkdtemp()
process = multiprocessing.Process(target=_run, args=(local_dir, ))
process.daemon = False
process.start()
exp_dir = os.path.join(local_dir, "interrupt")
# Skip first five steps
for i in range(5):
driver_semaphore.acquire() # Wait for callback
trainer_semaphore.release() # Continue training
driver_semaphore.acquire()
experiment_state_file = None
for file in os.listdir(exp_dir):
if file.startswith("experiment_state"):
experiment_state_file = os.path.join(exp_dir, file)
break
self.assertTrue(experiment_state_file)
last_mtime = os.path.getmtime(experiment_state_file)
# Now send kill signal
os.kill(process.pid, signal.SIGINT)
# Release trainer. It should handle the signal and try to
# checkpoint the experiment
trainer_semaphore.release()
time.sleep(2) # Wait for checkpoint
new_mtime = os.path.getmtime(experiment_state_file)
self.assertNotEqual(last_mtime, new_mtime)
shutil.rmtree(local_dir)
class TuneFailResumeGridTest(unittest.TestCase):
class FailureInjectorCallback(Callback):
"""Adds random failure injection to the TrialExecutor."""
def __init__(self, steps=20):
self._step = 0
self.steps = steps
def on_trial_start(self, trials, **info):
self._step += 1
if self._step >= self.steps:
print(f"Failing after step {self._step} with "
f"{len(trials)} trials")
raise RuntimeError
class CheckStateCallback(Callback):
"""Checks state for the experiment initialization."""
def __init__(self, expected_trials=20):
self.expected_trials = expected_trials
self._checked = False
def on_step_begin(self, iteration, trials, **kwargs):
if not self._checked:
assert len(trials) == self.expected_trials
self._checked = True
def setUp(self):
self.logdir = tempfile.mkdtemp()
os.environ["TUNE_GLOBAL_CHECKPOINT_S"] = "0"
# Wait up to 1.5 seconds for placement groups when starting a trial
os.environ["TUNE_PLACEMENT_GROUP_WAIT_S"] = "1.5"
# Block for results even when placement groups are pending
os.environ["TUNE_TRIAL_STARTUP_GRACE_PERIOD"] = "0"
# Change back to local_mode=True after this is resolved:
# https://github.com/ray-project/ray/issues/13932
ray.init(local_mode=False, num_cpus=2)
from ray.tune import register_trainable
register_trainable("trainable", MyTrainableClass)
def tearDown(self):
os.environ.pop("TUNE_GLOBAL_CHECKPOINT_S")
shutil.rmtree(self.logdir)
ray.shutdown()
def testFailResumeGridSearch(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
config = dict(
num_samples=3,
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1)
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[self.FailureInjectorCallback()],
**config)
analysis = tune.run(
"trainable",
resume=True,
callbacks=[self.CheckStateCallback()],
**config)
assert len(analysis.trials) == 27
test_counter = Counter([t.config["test"] for t in analysis.trials])
assert all(v == 9 for v in test_counter.values())
test2_counter = Counter([t.config["test2"] for t in analysis.trials])
assert all(v == 9 for v in test2_counter.values())
def testFailResumeWithPreset(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
search_alg = BasicVariantGenerator(points_to_evaluate=[{
"test": -1,
"test2": -1
}, {
"test": -1
}, {
"test2": -1
}])
config = dict(
num_samples=3 + 3, # 3 preset, 3 samples
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1)
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[self.FailureInjectorCallback(5)],
search_alg=search_alg,
**config)
analysis = tune.run(
"trainable",
resume=True,
callbacks=[self.CheckStateCallback(expected_trials=5)],
search_alg=search_alg,
**config)
assert len(analysis.trials) == 34
test_counter = Counter([t.config["test"] for t in analysis.trials])
assert test_counter.pop(-1) == 4
assert all(v == 10 for v in test_counter.values())
test2_counter = Counter([t.config["test2"] for t in analysis.trials])
assert test2_counter.pop(-1) == 4
assert all(v == 10 for v in test2_counter.values())
def testFailResumeAfterPreset(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
search_alg = BasicVariantGenerator(points_to_evaluate=[{
"test": -1,
"test2": -1
}, {
"test": -1
}, {
"test2": -1
}])
config = dict(
num_samples=3 + 3, # 3 preset, 3 samples
fail_fast=True,
config={
"test": tune.grid_search([1, 2, 3]),
"test2": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1)
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[self.FailureInjectorCallback(15)],
search_alg=search_alg,
**config)
analysis = tune.run(
"trainable",
resume=True,
callbacks=[self.CheckStateCallback(expected_trials=15)],
search_alg=search_alg,
**config)
assert len(analysis.trials) == 34
test_counter = Counter([t.config["test"] for t in analysis.trials])
assert test_counter.pop(-1) == 4
assert all(v == 10 for v in test_counter.values())
test2_counter = Counter([t.config["test2"] for t in analysis.trials])
assert test2_counter.pop(-1) == 4
assert all(v == 10 for v in test2_counter.values())
def testMultiExperimentFail(self):
os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"
experiments = []
for i in range(3):
experiments.append(
tune.Experiment(
run=MyTrainableClass,
name="trainable",
num_samples=2,
config={
"test": tune.grid_search([1, 2, 3]),
},
stop={"training_iteration": 1},
local_dir=self.logdir))
with self.assertRaises(RuntimeError):
tune.run(
experiments,
callbacks=[self.FailureInjectorCallback(10)],
fail_fast=True)
analysis = tune.run(
experiments,
resume=True,
callbacks=[self.CheckStateCallback(expected_trials=10)],
fail_fast=True)
assert len(analysis.trials) == 18
def testWarningLargeGrid(self):
config = dict(
num_samples=3,
fail_fast=True,
config={
"test": tune.grid_search(list(range(20))),
"test2": tune.grid_search(list(range(20))),
"test3": tune.grid_search(list(range(20))),
"test4": tune.grid_search(list(range(20))),
"test5": tune.grid_search(list(range(20))),
},
stop={"training_iteration": 2},
local_dir=self.logdir,
verbose=1)
with self.assertWarnsRegex(UserWarning,
"exceeds the serialization threshold"):
with self.assertRaises(RuntimeError):
tune.run(
"trainable",
callbacks=[self.FailureInjectorCallback(10)],
**config)
class TuneExampleTest(unittest.TestCase):
def setUp(self):
ray.init(num_cpus=2)
def tearDown(self):
ray.shutdown()
_register_all()
def testPBTKeras(self):
from ray.tune.examples.pbt_tune_cifar10_with_keras import Cifar10Model
from tensorflow.python.keras.datasets import cifar10
cifar10.load_data()
validate_save_restore(Cifar10Model)
validate_save_restore(Cifar10Model, use_object_store=True)
def testPyTorchMNIST(self):
from ray.tune.examples.mnist_pytorch_trainable import TrainMNIST
from torchvision import datasets
datasets.MNIST("~/data", train=True, download=True)
validate_save_restore(TrainMNIST)
validate_save_restore(TrainMNIST, use_object_store=True)
def testHyperbandExample(self):
from ray.tune.examples.hyperband_example import MyTrainableClass
validate_save_restore(MyTrainableClass)
validate_save_restore(MyTrainableClass, use_object_store=True)
def testAsyncHyperbandExample(self):
from ray.tune.utils.mock import MyTrainableClass
validate_save_restore(MyTrainableClass)
validate_save_restore(MyTrainableClass, use_object_store=True)
class AutoInitTest(unittest.TestCase):
def testTuneRestore(self):
self.assertFalse(ray.is_initialized())
tune.run("__fake", name="TestAutoInit", stop={"training_iteration": 1})
self.assertTrue(ray.is_initialized())
def tearDown(self):
ray.shutdown()
_register_all()
class AbstractWarmStartTest:
def setUp(self):
ray.init(num_cpus=1, local_mode=True)
self.tmpdir = tempfile.mkdtemp()
self.experiment_name = "results"
def tearDown(self):
shutil.rmtree(self.tmpdir)
ray.shutdown()
_register_all()
def set_basic_conf(self):
raise NotImplementedError()
def run_part_from_scratch(self):
np.random.seed(162)
search_alg, cost = self.set_basic_conf()
search_alg = ConcurrencyLimiter(search_alg, 1)
results_exp_1 = tune.run(
cost,
num_samples=5,
search_alg=search_alg,
verbose=0,
name=self.experiment_name,
local_dir=self.tmpdir)
checkpoint_path = os.path.join(self.tmpdir, "warmStartTest.pkl")
search_alg.save(checkpoint_path)
return results_exp_1, np.random.get_state(), checkpoint_path
def run_from_experiment_restore(self, random_state):
search_alg, cost = self.set_basic_conf()
search_alg = ConcurrencyLimiter(search_alg, 1)
search_alg.restore_from_dir(
os.path.join(self.tmpdir, self.experiment_name))
results = tune.run(
cost,
num_samples=5,
search_alg=search_alg,
verbose=0,
name=self.experiment_name,
local_dir=self.tmpdir)
return results
def run_explicit_restore(self, random_state, checkpoint_path):
np.random.set_state(random_state)
search_alg2, cost = self.set_basic_conf()
search_alg2 = ConcurrencyLimiter(search_alg2, 1)
search_alg2.restore(checkpoint_path)
return tune.run(cost, num_samples=5, search_alg=search_alg2, verbose=0)
def run_full(self):
np.random.seed(162)
search_alg3, cost = self.set_basic_conf()
search_alg3 = ConcurrencyLimiter(search_alg3, 1)
return tune.run(
cost, num_samples=10, search_alg=search_alg3, verbose=0)
def testWarmStart(self):
results_exp_1, r_state, checkpoint_path = self.run_part_from_scratch()
results_exp_2 = self.run_explicit_restore(r_state, checkpoint_path)
results_exp_3 = self.run_full()
trials_1_config = [trial.config for trial in results_exp_1.trials]
trials_2_config = [trial.config for trial in results_exp_2.trials]
trials_3_config = [trial.config for trial in results_exp_3.trials]
self.assertEqual(trials_1_config + trials_2_config, trials_3_config)
def testRestore(self):
results_exp_1, r_state, checkpoint_path = self.run_part_from_scratch()
results_exp_2 = self.run_from_experiment_restore(r_state)
results_exp_3 = self.run_full()
trials_1_config = [trial.config for trial in results_exp_1.trials]
trials_2_config = [trial.config for trial in results_exp_2.trials]
trials_3_config = [trial.config for trial in results_exp_3.trials]
self.assertEqual(trials_1_config + trials_2_config, trials_3_config)
class HyperoptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
space = {
"x": hp.uniform("x", 0, 10),
"y": hp.uniform("y", -10, 10),
"z": hp.uniform("z", -10, 0)
}
def cost(space, reporter):
loss = space["x"]**2 + space["y"]**2 + space["z"]**2
reporter(loss=loss)
search_alg = HyperOptSearch(
space,
metric="loss",
mode="min",
random_state_seed=5,
n_initial_points=1,
max_concurrent=1000 # Here to avoid breaking back-compat.
)
return search_alg, cost
class BayesoptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self, analysis=None):
space = {"width": (0, 20), "height": (-100, 100)}
def cost(space, reporter):
reporter(loss=(space["height"] - 14)**2 - abs(space["width"] - 3))
search_alg = BayesOptSearch(
space, metric="loss", mode="min", analysis=analysis)
return search_alg, cost
def testBootStrapAnalysis(self):
analysis = self.run_full()
search_alg3, cost = self.set_basic_conf(analysis)
search_alg3 = ConcurrencyLimiter(search_alg3, 1)
tune.run(cost, num_samples=10, search_alg=search_alg3, verbose=0)
class CFOWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
space = {
"height": tune.uniform(-100, 100),
"width": tune.randint(0, 100),
}
def cost(param, reporter):
reporter(loss=(param["height"] - 14)**2 - abs(param["width"] - 3))
search_alg = CFO(
space=space,
metric="loss",
mode="min",
seed=20,
)
return search_alg, cost
class SkoptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
optimizer = skopt.Optimizer([(0, 20), (-100, 100)])
previously_run_params = [[10, 0], [15, -20]]
known_rewards = [-189, -1144]
def cost(space, reporter):
reporter(loss=(space["height"]**2 + space["width"]**2))
search_alg = SkOptSearch(
optimizer,
["width", "height"],
metric="loss",
mode="min",
max_concurrent=1000, # Here to avoid breaking back-compat.
points_to_evaluate=previously_run_params,
evaluated_rewards=known_rewards)
return search_alg, cost
class NevergradWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
instrumentation = 2
parameter_names = ["height", "width"]
optimizer = optimizerlib.OnePlusOne(instrumentation)
def cost(space, reporter):
reporter(loss=(space["height"] - 14)**2 - abs(space["width"] - 3))
search_alg = NevergradSearch(
optimizer,
parameter_names,
metric="loss",
mode="min",
max_concurrent=1000, # Here to avoid breaking back-compat.
)
return search_alg, cost
class OptunaWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
from optuna.samplers import TPESampler
space = [
ot_param.suggest_uniform("width", 0, 20),
ot_param.suggest_uniform("height", -100, 100)
]
def cost(space, reporter):
reporter(loss=(space["height"] - 14)**2 - abs(space["width"] - 3))
search_alg = OptunaSearch(
space, sampler=TPESampler(seed=10), metric="loss", mode="min")
return search_alg, cost
class DragonflyWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
from dragonfly.opt.gp_bandit import EuclideanGPBandit
from dragonfly.exd.experiment_caller import EuclideanFunctionCaller
from dragonfly import load_config
def cost(space, reporter):
height, width = space["point"]
reporter(loss=(height - 14)**2 - abs(width - 3))
domain_vars = [{
"name": "height",
"type": "float",
"min": -10,
"max": 10
}, {
"name": "width",
"type": "float",
"min": 0,
"max": 20
}]
domain_config = load_config({"domain": domain_vars})
func_caller = EuclideanFunctionCaller(
None, domain_config.domain.list_of_domains[0])
optimizer = EuclideanGPBandit(func_caller, ask_tell_mode=True)
search_alg = DragonflySearch(
optimizer,
metric="loss",
mode="min",
max_concurrent=1000, # Here to avoid breaking back-compat.
)
return search_alg, cost
@unittest.skip("Skip because this doesn't seem to work.")
def testWarmStart(self):
pass
@unittest.skip("Skip because this doesn't seem to work.")
def testRestore(self):
pass
class SigOptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
space = [
{
"name": "width",
"type": "int",
"bounds": {
"min": 0,
"max": 20
},
},
{
"name": "height",
"type": "int",
"bounds": {
"min": -100,
"max": 100
},
},
]
def cost(space, reporter):
reporter(loss=(space["height"] - 14)**2 - abs(space["width"] - 3))
# Unfortunately, SigOpt doesn't allow setting of random state. Thus,
# we always end up with different suggestions, which is unsuitable
# for the warm start test. Here we make do with points_to_evaluate,
# and ensure that state is preserved over checkpoints and restarts.
points = [
{
"width": 5,
"height": 20
},
{
"width": 10,
"height": -20
},
{
"width": 15,
"height": 30
},
{
"width": 5,
"height": -30
},
{
"width": 10,
"height": 40
},
{
"width": 15,
"height": -40
},
{
"width": 5,
"height": 50
},
{
"width": 10,
"height": -50
},
{
"width": 15,
"height": 60
},
{
"width": 12,
"height": -60
},
]
search_alg = SigOptSearch(
space,
name="SigOpt Example Experiment",
max_concurrent=1,
metric="loss",
mode="min",
points_to_evaluate=points)
return search_alg, cost
def testWarmStart(self):
if "SIGOPT_KEY" not in os.environ:
self.skipTest("No SigOpt API key found in environment.")
return
super().testWarmStart()
def testRestore(self):
if "SIGOPT_KEY" not in os.environ:
self.skipTest("No SigOpt API key found in environment.")
return
super().testRestore()
class ZOOptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
dim_dict = {
"height": (ValueType.CONTINUOUS, [-100, 100], 1e-2),
"width": (ValueType.DISCRETE, [0, 20], False)
}
def cost(param, reporter):
reporter(loss=(param["height"] - 14)**2 - abs(param["width"] - 3))
search_alg = ZOOptSearch(
algo="Asracos", # only support ASRacos currently
budget=200,
dim_dict=dim_dict,
metric="loss",
mode="min")
return search_alg, cost
class HEBOWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
def set_basic_conf(self):
space_config = [
{
"name": "width",
"type": "num",
"lb": 0,
"ub": 20
},
{
"name": "height",
"type": "num",
"lb": -100,
"ub": 100
},
]
space = HEBODesignSpace().parse(space_config)
def cost(param, reporter):
reporter(loss=(param["height"] - 14)**2 - abs(param["width"] - 3))
search_alg = HEBOSearch(
space=space, metric="loss", mode="min", random_state_seed=5)
return search_alg, cost
class SearcherTest(unittest.TestCase):
class MockSearcher(Searcher):
def __init__(self, data):
self.data = data
def save(self, path):
with open(path, "w") as f:
f.write(self.data)
def restore(self, path):
with open(path, "r") as f:
self.data = f.read()
def testSaveRestoreDir(self):
tmpdir = tempfile.mkdtemp()
original_data = "hello-its-me"
searcher = self.MockSearcher(original_data)
searcher.save_to_dir(tmpdir)
searcher_2 = self.MockSearcher("no-its-not-me")
searcher_2.restore_from_dir(tmpdir)
assert searcher_2.data == original_data
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__] + sys.argv[1:]))
|
runtests.py
|
#!/usr/bin/env python
from __future__ import print_function
import atexit
import base64
import os
import sys
import re
import gc
import heapq
import locale
import shutil
import time
import unittest
import doctest
import operator
import subprocess
import tempfile
import traceback
import warnings
import zlib
import glob
from contextlib import contextmanager
from collections import defaultdict
try:
import platform
IS_PYPY = platform.python_implementation() == 'PyPy'
IS_CPYTHON = platform.python_implementation() == 'CPython'
except (ImportError, AttributeError):
IS_CPYTHON = True
IS_PYPY = False
IS_PY2 = sys.version_info[0] < 3
from io import open as io_open
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # doesn't accept 'str' in Py2
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import threading
except ImportError: # No threads, no problems
threading = None
try:
from unittest import SkipTest
except ImportError:
class SkipTest(Exception): # don't raise, only provided to allow except-ing it!
pass
def skip_test(reason):
sys.stderr.write("Skipping test: %s\n" % reason)
else:
def skip_test(reason):
raise SkipTest(reason)
try:
basestring
except NameError:
basestring = str
WITH_CYTHON = True
from distutils.command.build_ext import build_ext as _build_ext
from distutils import sysconfig
_to_clean = []
@atexit.register
def _cleanup_files():
"""
This is only used on Cygwin to clean up shared libraries that are unsafe
to delete while the test suite is running.
"""
for filename in _to_clean:
if os.path.isdir(filename):
shutil.rmtree(filename, ignore_errors=True)
else:
try:
os.remove(filename)
except OSError:
pass
def get_distutils_distro(_cache=[]):
if _cache:
return _cache[0]
    # late import to accommodate the setuptools override
from distutils.dist import Distribution
distutils_distro = Distribution()
if sys.platform == 'win32':
# TODO: Figure out why this hackery (see https://thread.gmane.org/gmane.comp.python.cython.devel/8280/).
config_files = distutils_distro.find_config_files()
try:
config_files.remove('setup.cfg')
except ValueError:
pass
distutils_distro.parse_config_files(config_files)
cfgfiles = distutils_distro.find_config_files()
try:
cfgfiles.remove('setup.cfg')
except ValueError:
pass
distutils_distro.parse_config_files(cfgfiles)
_cache.append(distutils_distro)
return distutils_distro
EXT_DEP_MODULES = {
'tag:numpy': 'numpy',
'tag:pythran': 'pythran',
'tag:setuptools': 'setuptools.sandbox',
'tag:asyncio': 'asyncio',
'tag:pstats': 'pstats',
'tag:posix': 'posix',
'tag:array': 'array',
'tag:coverage': 'Cython.Coverage',
'Coverage': 'Cython.Coverage',
'tag:ipython': 'IPython.testing.globalipapp',
'tag:jedi': 'jedi_BROKEN_AND_DISABLED',
'tag:test.support': 'test.support', # support module for CPython unit tests
}
def patch_inspect_isfunction():
import inspect
orig_isfunction = inspect.isfunction
def isfunction(obj):
return orig_isfunction(obj) or type(obj).__name__ == 'cython_function_or_method'
isfunction._orig_isfunction = orig_isfunction
inspect.isfunction = isfunction
def unpatch_inspect_isfunction():
import inspect
try:
orig_isfunction = inspect.isfunction._orig_isfunction
except AttributeError:
pass
else:
inspect.isfunction = orig_isfunction
def def_to_cdef(source):
'''
Converts the module-level def methods into cdef methods, i.e.
@decorator
def foo([args]):
"""
[tests]
"""
[body]
becomes
def foo([args]):
"""
[tests]
"""
return foo_c([args])
cdef foo_c([args]):
[body]
'''
output = []
skip = False
def_node = re.compile(r'def (\w+)\(([^()*]*)\):').match
lines = iter(source.split('\n'))
for line in lines:
if not line.strip():
output.append(line)
continue
if skip:
if line[0] != ' ':
skip = False
else:
continue
if line[0] == '@':
skip = True
continue
m = def_node(line)
if m:
name = m.group(1)
args = m.group(2)
if args:
args_no_types = ", ".join(arg.split()[-1] for arg in args.split(','))
else:
args_no_types = ""
output.append("def %s(%s):" % (name, args_no_types))
line = next(lines)
if '"""' in line:
has_docstring = True
output.append(line)
for line in lines:
output.append(line)
if '"""' in line:
break
else:
has_docstring = False
output.append(" return %s_c(%s)" % (name, args_no_types))
output.append('')
output.append("cdef %s_c(%s):" % (name, args))
if not has_docstring:
output.append(line)
else:
output.append(line)
return '\n'.join(output)
def exclude_extension_in_pyver(*versions):
def check(ext):
return EXCLUDE_EXT if sys.version_info[:2] in versions else ext
return check
def exclude_extension_on_platform(*platforms):
def check(ext):
return EXCLUDE_EXT if sys.platform in platforms else ext
return check
def update_linetrace_extension(ext):
ext.define_macros.append(('CYTHON_TRACE', 1))
return ext
def update_numpy_extension(ext, set_api17_macro=True):
import numpy
from numpy.distutils.misc_util import get_info
ext.include_dirs.append(numpy.get_include())
if set_api17_macro and getattr(numpy, '__version__', '') not in ('1.19.0', '1.19.1'):
ext.define_macros.append(('NPY_NO_DEPRECATED_API', 'NPY_1_7_API_VERSION'))
# We need the npymath library for numpy.math.
# This is typically a static-only library.
for attr, value in get_info('npymath').items():
getattr(ext, attr).extend(value)
def update_gdb_extension(ext, _has_gdb=[None]):
# We should probably also check for Python support.
if not include_debugger:
_has_gdb[0] = False
if _has_gdb[0] is None:
try:
subprocess.check_call(["gdb", "--version"])
except (IOError, subprocess.CalledProcessError):
_has_gdb[0] = False
else:
_has_gdb[0] = True
if not _has_gdb[0]:
return EXCLUDE_EXT
return ext
def update_openmp_extension(ext):
ext.openmp = True
language = ext.language
if sys.platform == 'win32' and sys.version_info[:2] == (3,4):
# OpenMP tests fail in appveyor in Py3.4 -> just ignore them, EoL of Py3.4 is early 2019...
return EXCLUDE_EXT
if language == 'cpp':
flags = OPENMP_CPP_COMPILER_FLAGS
else:
flags = OPENMP_C_COMPILER_FLAGS
if flags:
compile_flags, link_flags = flags
ext.extra_compile_args.extend(compile_flags.split())
ext.extra_link_args.extend(link_flags.split())
return ext
elif sys.platform == 'win32':
return ext
return EXCLUDE_EXT
def update_cpp11_extension(ext):
"""
update cpp11 extensions that will run on versions of gcc >4.8
"""
gcc_version = get_gcc_version(ext.language)
if gcc_version:
compiler_version = gcc_version.group(1)
if float(compiler_version) > 4.8:
ext.extra_compile_args.append("-std=c++11")
return ext
clang_version = get_clang_version(ext.language)
if clang_version:
ext.extra_compile_args.append("-std=c++11")
if sys.platform == "darwin":
ext.extra_compile_args.append("-stdlib=libc++")
ext.extra_compile_args.append("-mmacosx-version-min=10.7")
return ext
return EXCLUDE_EXT
def get_cc_version(language):
"""
finds gcc version using Popen
"""
if language == 'cpp':
cc = sysconfig.get_config_var('CXX')
else:
cc = sysconfig.get_config_var('CC')
if not cc:
from distutils import ccompiler
cc = ccompiler.get_default_compiler()
if not cc:
return ''
# For some reason, cc can be e.g. 'gcc -pthread'
cc = cc.split()[0]
# Force english output
env = os.environ.copy()
env['LC_MESSAGES'] = 'C'
try:
p = subprocess.Popen([cc, "-v"], stderr=subprocess.PIPE, env=env)
except EnvironmentError:
# Be compatible with Python 3
warnings.warn("Unable to find the %s compiler: %s: %s" %
(language, os.strerror(sys.exc_info()[1].errno), cc))
return ''
_, output = p.communicate()
return output.decode(locale.getpreferredencoding() or 'ASCII', 'replace')
def get_gcc_version(language):
matcher = re.compile(r"gcc version (\d+\.\d+)").search
return matcher(get_cc_version(language))
def get_clang_version(language):
matcher = re.compile(r"clang(?:-|\s+version\s+)(\d+\.\d+)").search
return matcher(get_cc_version(language))
def get_openmp_compiler_flags(language):
"""
As of gcc 4.2, it supports OpenMP 2.5. Gcc 4.4 implements 3.0. We don't
(currently) check for other compilers.
returns a two-tuple of (CFLAGS, LDFLAGS) to build the OpenMP extension
"""
gcc_version = get_gcc_version(language)
if not gcc_version:
if sys.platform == 'win32':
return '/openmp', ''
else:
return None # not gcc - FIXME: do something about other compilers
# gcc defines "__int128_t", assume that at least all 64 bit architectures have it
global COMPILER_HAS_INT128
COMPILER_HAS_INT128 = getattr(sys, 'maxsize', getattr(sys, 'maxint', 0)) > 2**60
    compiler_version = gcc_version.group(1)
    # Compare version components numerically; a plain string comparison would
    # sort e.g. "10.1" before "4.2".
    if compiler_version and [int(v) for v in compiler_version.split('.')] >= [4, 2]:
        return '-fopenmp', '-fopenmp'
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
pass
COMPILER = None
COMPILER_HAS_INT128 = False
OPENMP_C_COMPILER_FLAGS = get_openmp_compiler_flags('c')
OPENMP_CPP_COMPILER_FLAGS = get_openmp_compiler_flags('cpp')
# Return this from the EXT_EXTRAS matcher callback to exclude the extension
EXCLUDE_EXT = object()
EXT_EXTRAS = {
'tag:numpy' : update_numpy_extension,
'tag:openmp': update_openmp_extension,
'tag:gdb': update_gdb_extension,
'tag:cpp11': update_cpp11_extension,
'tag:trace' : update_linetrace_extension,
'tag:bytesformat': exclude_extension_in_pyver((3, 3), (3, 4)), # no %-bytes formatting
'tag:no-macos': exclude_extension_on_platform('darwin'),
'tag:py3only': exclude_extension_in_pyver((2, 7)),
}
# TODO: use tags
VER_DEP_MODULES = {
# tests are excluded if 'CurrentPythonVersion OP VersionTuple', i.e.
# (2,4) : (operator.lt, ...) excludes ... when PyVer < 2.4.x
# The next line should start (3,); but this is a dictionary, so
# we can only have one (3,) key. Since 2.7 is supposed to be the
# last 2.x release, things would have to change drastically for this
# to be unsafe...
(2,999): (operator.lt, lambda x: x in ['run.special_methods_T561_py3',
'run.test_raisefrom',
'run.different_package_names',
'run.unicode_imports', # encoding problems on appveyor in Py2
'run.reimport_failure', # reimports don't do anything in Py2
]),
(3,): (operator.ge, lambda x: x in ['run.non_future_division',
'compile.extsetslice',
'compile.extdelslice',
'run.special_methods_T561_py2',
]),
(3,3) : (operator.lt, lambda x: x in ['build.package_compilation',
'build.cythonize_pep420_namespace',
'run.yield_from_py33',
'pyximport.pyximport_namespace',
'run.qualname',
]),
(3,4): (operator.lt, lambda x: x in ['run.py34_signature',
'run.test_unicode', # taken from Py3.7, difficult to backport
]),
(3,4,999): (operator.gt, lambda x: x in ['run.initial_file_path',
]),
(3,5): (operator.lt, lambda x: x in ['run.py35_pep492_interop',
'run.py35_asyncio_async_def',
'run.mod__spec__',
'run.pep526_variable_annotations', # typing module
'run.test_exceptions', # copied from Py3.7+
'run.time_pxd', # _PyTime_GetSystemClock doesn't exist in 3.4
]),
}
INCLUDE_DIRS = [ d for d in os.getenv('INCLUDE', '').split(os.pathsep) if d ]
CFLAGS = os.getenv('CFLAGS', '').split()
CCACHE = os.getenv('CYTHON_RUNTESTS_CCACHE', '').split()
TEST_SUPPORT_DIR = 'testsupport'
BACKENDS = ['c', 'cpp']
UTF8_BOM_BYTES = r'\xef\xbb\xbf'.encode('ISO-8859-1').decode('unicode_escape')
def memoize(f):
uncomputed = object()
f._cache = {}
def func(*args):
res = f._cache.get(args, uncomputed)
if res is uncomputed:
res = f._cache[args] = f(*args)
return res
return func
@memoize
def parse_tags(filepath):
tags = defaultdict(list)
parse_tag = re.compile(r'#\s*(\w+)\s*:(.*)$').match
with io_open(filepath, encoding='ISO-8859-1', errors='ignore') as f:
for line in f:
# ignore BOM-like bytes and whitespace
line = line.lstrip(UTF8_BOM_BYTES).strip()
if not line:
if tags:
break # assume all tags are in one block
else:
continue
if line[0] != '#':
break
parsed = parse_tag(line)
if parsed:
tag, values = parsed.groups()
if tag in ('coding', 'encoding'):
continue
if tag == 'tags':
tag = 'tag'
print("WARNING: test tags use the 'tag' directive, not 'tags' (%s)" % filepath)
if tag not in ('mode', 'tag', 'ticket', 'cython', 'distutils', 'preparse'):
print("WARNING: unknown test directive '%s' found (%s)" % (tag, filepath))
values = values.split(',')
tags[tag].extend(filter(None, [value.strip() for value in values]))
elif tags:
break # assume all tags are in one block
return tags
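# For reference, a hypothetical test-file header that parse_tags() picks up
# looks like this (the directive names are the ones accepted above):
#
#     # mode: run
#     # tag: numpy, openmp
#     # distutils: language = c++
#
# which yields {'mode': ['run'], 'tag': ['numpy', 'openmp'],
# 'distutils': ['language = c++']}.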
list_unchanging_dir = memoize(lambda x: os.listdir(x)) # needs lambda to set function attribute
@memoize
def _list_pyregr_data_files(test_directory):
is_data_file = re.compile('(?:[.](txt|pem|db|html)|^bad.*[.]py)$').search
return ['__init__.py'] + [
filename for filename in list_unchanging_dir(test_directory)
if is_data_file(filename)]
def import_ext(module_name, file_path=None):
if file_path:
import imp
return imp.load_dynamic(module_name, file_path)
else:
try:
from importlib import invalidate_caches
except ImportError:
pass
else:
invalidate_caches()
return __import__(module_name, globals(), locals(), ['*'])
class build_ext(_build_ext):
def build_extension(self, ext):
try:
try: # Py2.7+ & Py3.2+
compiler_obj = self.compiler_obj
except AttributeError:
compiler_obj = self.compiler
if ext.language == 'c++':
compiler_obj.compiler_so.remove('-Wstrict-prototypes')
if CCACHE:
compiler_obj.compiler_so = CCACHE + compiler_obj.compiler_so
if getattr(ext, 'openmp', None) and compiler_obj.compiler_type == 'msvc':
ext.extra_compile_args.append('/openmp')
except Exception:
pass
_build_ext.build_extension(self, ext)
class ErrorWriter(object):
match_error = re.compile(r'(warning:)?(?:.*:)?\s*([-0-9]+)\s*:\s*([-0-9]+)\s*:\s*(.*)').match
def __init__(self, encoding=None):
self.output = []
self.encoding = encoding
def write(self, value):
if self.encoding:
value = value.encode('ISO-8859-1').decode(self.encoding)
self.output.append(value)
def _collect(self):
s = ''.join(self.output)
results = {'errors': [], 'warnings': []}
for line in s.splitlines():
match = self.match_error(line)
if match:
is_warning, line, column, message = match.groups()
results['warnings' if is_warning else 'errors'].append((int(line), int(column), message.strip()))
return [["%d:%d: %s" % values for values in sorted(results[key])] for key in ('errors', 'warnings')]
def geterrors(self):
return self._collect()[0]
def getwarnings(self):
return self._collect()[1]
def getall(self):
return self._collect()
def close(self):
pass # ignore, only to match file-like interface
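# For reference, the compiler message lines that ErrorWriter's match_error
# pattern understands look roughly like this (illustrative values):
#
#     errors.pyx:12:4: Expected an identifier
#     warning: errors.pyx:20:8: Unused entry 'foo'
#
# geterrors() would then return ["12:4: Expected an identifier"] and
# getwarnings() would return ["20:8: Unused entry 'foo'"].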
class Stats(object):
def __init__(self, top_n=8):
self.top_n = top_n
self.test_counts = defaultdict(int)
self.test_times = defaultdict(float)
self.top_tests = defaultdict(list)
def add_time(self, name, language, metric, t):
self.test_counts[metric] += 1
self.test_times[metric] += t
top = self.top_tests[metric]
push = heapq.heappushpop if len(top) >= self.top_n else heapq.heappush
# min-heap => pop smallest/shortest until longest times remain
push(top, (t, name, language))
@contextmanager
def time(self, name, language, metric):
t = time.time()
yield
t = time.time() - t
self.add_time(name, language, metric, t)
def update(self, stats):
# type: (Stats) -> None
for metric, t in stats.test_times.items():
self.test_times[metric] += t
self.test_counts[metric] += stats.test_counts[metric]
top = self.top_tests[metric]
for entry in stats.top_tests[metric]:
push = heapq.heappushpop if len(top) >= self.top_n else heapq.heappush
push(top, entry)
def print_stats(self, out=sys.stderr):
if not self.test_times:
return
lines = ['Times:\n']
for metric, t in sorted(self.test_times.items(), key=operator.itemgetter(1), reverse=True):
count = self.test_counts[metric]
top = self.top_tests[metric]
lines.append("%-12s: %8.2f sec (%4d, %6.3f / run) - slowest: %s\n" % (
metric, t, count, t / count,
', '.join("'{2}:{1}' ({0:.2f}s)".format(*item) for item in heapq.nlargest(self.top_n, top))))
out.write(''.join(lines))
class TestBuilder(object):
def __init__(self, rootdir, workdir, selectors, exclude_selectors, options,
with_pyregr, languages, test_bugs, language_level,
common_utility_dir, pythran_dir=None,
default_mode='run', stats=None,
add_embedded_test=False):
self.rootdir = rootdir
self.workdir = workdir
self.selectors = selectors
self.exclude_selectors = exclude_selectors
self.annotate = options.annotate_source
self.cleanup_workdir = options.cleanup_workdir
self.cleanup_sharedlibs = options.cleanup_sharedlibs
self.cleanup_failures = options.cleanup_failures
self.with_pyregr = with_pyregr
self.cython_only = options.cython_only
self.doctest_selector = re.compile(options.only_pattern).search if options.only_pattern else None
self.languages = languages
self.test_bugs = test_bugs
self.fork = options.fork
self.language_level = language_level
self.test_determinism = options.test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
self.default_mode = default_mode
self.stats = stats
self.add_embedded_test = add_embedded_test
self.capture = options.capture
def build_suite(self):
suite = unittest.TestSuite()
filenames = os.listdir(self.rootdir)
filenames.sort()
# TODO: parallelise I/O with a thread pool for the different directories once we drop Py2 support
for filename in filenames:
path = os.path.join(self.rootdir, filename)
if os.path.isdir(path) and filename != TEST_SUPPORT_DIR:
if filename == 'pyregr' and not self.with_pyregr:
continue
if filename == 'broken' and not self.test_bugs:
continue
suite.addTest(
self.handle_directory(path, filename))
if (sys.platform not in ['win32'] and self.add_embedded_test
# the embedding test is currently broken in Py3.8+, except on Linux.
and (sys.version_info < (3, 8) or sys.platform != 'darwin')):
# Non-Windows makefile.
if [1 for selector in self.selectors if selector("embedded")] \
and not [1 for selector in self.exclude_selectors if selector("embedded")]:
suite.addTest(unittest.makeSuite(EmbedTest))
return suite
def handle_directory(self, path, context):
workdir = os.path.join(self.workdir, context)
if not os.path.exists(workdir):
os.makedirs(workdir)
suite = unittest.TestSuite()
filenames = list_unchanging_dir(path)
filenames.sort()
for filename in filenames:
filepath = os.path.join(path, filename)
module, ext = os.path.splitext(filename)
if ext not in ('.py', '.pyx', '.srctree'):
continue
if filename.startswith('.'):
continue # certain emacs backup files
if context == 'pyregr':
tags = defaultdict(list)
else:
tags = parse_tags(filepath)
fqmodule = "%s.%s" % (context, module)
if not [ 1 for match in self.selectors
if match(fqmodule, tags) ]:
continue
if self.exclude_selectors:
if [1 for match in self.exclude_selectors
if match(fqmodule, tags)]:
continue
mode = self.default_mode
if tags['mode']:
mode = tags['mode'][0]
elif context == 'pyregr':
mode = 'pyregr'
if ext == '.srctree':
if 'cpp' not in tags['tag'] or 'cpp' in self.languages:
suite.addTest(EndToEndTest(filepath, workdir,
self.cleanup_workdir, stats=self.stats,
capture=self.capture))
continue
# Choose the test suite.
if mode == 'pyregr':
if not filename.startswith('test_'):
continue
test_class = CythonPyregrTestCase
elif mode == 'run':
if module.startswith("test_"):
test_class = CythonUnitTestCase
else:
test_class = CythonRunTestCase
elif mode in ['compile', 'error']:
test_class = CythonCompileTestCase
else:
raise KeyError('Invalid test mode: ' + mode)
for test in self.build_tests(test_class, path, workdir,
module, mode == 'error', tags):
suite.addTest(test)
if mode == 'run' and ext == '.py' and not self.cython_only and not filename.startswith('test_'):
# additionally test file in real Python
min_py_ver = [
(int(pyver.group(1)), int(pyver.group(2)))
for pyver in map(re.compile(r'pure([0-9]+)[.]([0-9]+)').match, tags['tag'])
if pyver
]
if not min_py_ver or any(sys.version_info >= min_ver for min_ver in min_py_ver):
suite.addTest(PureDoctestTestCase(module, os.path.join(path, filename), tags, stats=self.stats))
return suite
def build_tests(self, test_class, path, workdir, module, expect_errors, tags):
warning_errors = 'werror' in tags['tag']
expect_warnings = 'warnings' in tags['tag']
if expect_errors:
if skip_c(tags) and 'cpp' in self.languages:
languages = ['cpp']
else:
languages = self.languages[:1]
else:
languages = self.languages
if 'c' in languages and skip_c(tags):
languages = list(languages)
languages.remove('c')
if 'cpp' in languages and 'no-cpp' in tags['tag']:
languages = list(languages)
languages.remove('cpp')
if not languages:
return []
language_levels = [2, 3] if 'all_language_levels' in tags['tag'] else [None]
pythran_dir = self.pythran_dir
if 'pythran' in tags['tag'] and not pythran_dir and 'cpp' in languages:
import pythran.config
try:
pythran_ext = pythran.config.make_extension(python=True)
except TypeError: # old pythran version syntax
pythran_ext = pythran.config.make_extension()
pythran_dir = pythran_ext['include_dirs'][0]
preparse_list = tags.get('preparse', ['id'])
tests = [ self.build_test(test_class, path, workdir, module, tags, language, language_level,
expect_errors, expect_warnings, warning_errors, preparse,
pythran_dir if language == "cpp" else None)
for language in languages
for preparse in preparse_list
for language_level in language_levels
]
return tests
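    # Illustrative expansion (hypothetical tags): with languages ['c', 'cpp'],
    # the default preparse list ['id'] and the 'all_language_levels' tag,
    # build_tests() yields 2 languages * 1 preparse * 2 levels = 4 test instances.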
def build_test(self, test_class, path, workdir, module, tags, language, language_level,
expect_errors, expect_warnings, warning_errors, preparse, pythran_dir):
language_workdir = os.path.join(workdir, language)
if not os.path.exists(language_workdir):
os.makedirs(language_workdir)
workdir = os.path.join(language_workdir, module)
if preparse != 'id':
workdir += '_%s' % (preparse,)
if language_level:
workdir += '_cy%d' % (language_level,)
return test_class(path, workdir, module, tags,
language=language,
preparse=preparse,
expect_errors=expect_errors,
expect_warnings=expect_warnings,
annotate=self.annotate,
cleanup_workdir=self.cleanup_workdir,
cleanup_sharedlibs=self.cleanup_sharedlibs,
cleanup_failures=self.cleanup_failures,
cython_only=self.cython_only,
doctest_selector=self.doctest_selector,
fork=self.fork,
language_level=language_level or self.language_level,
warning_errors=warning_errors,
test_determinism=self.test_determinism,
common_utility_dir=self.common_utility_dir,
pythran_dir=pythran_dir,
stats=self.stats)
def skip_c(tags):
if 'cpp' in tags['tag']:
return True
# We don't want to create a distutils key in the
# dictionary so we check before looping.
if 'distutils' in tags:
for option in tags['distutils']:
splitted = option.split('=')
if len(splitted) == 2:
argument, value = splitted
if argument.strip() == 'language' and value.strip() == 'c++':
return True
return False
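# Illustrative behaviour of skip_c() (hypothetical tags dicts in the shape
# produced by parse_tags()):
#   skip_c({'tag': ['cpp'], 'distutils': []})              -> True
#   skip_c({'tag': [], 'distutils': ['language = c++']})   -> True
#   skip_c({'tag': [], 'distutils': ['define_macros=X']})  -> False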
def filter_stderr(stderr_bytes):
"""
Filter annoying warnings from output.
"""
if b"Command line warning D9025" in stderr_bytes:
        # MSVC: cl : Command line warning D9025 : overriding '/Ox' with '/Od'
stderr_bytes = b'\n'.join(
line for line in stderr_bytes.splitlines()
if b"Command line warning D9025" not in line)
return stderr_bytes
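# Sketch of filter_stderr() in action (hypothetical MSVC output):
#   filter_stderr(b"foo.c\ncl : Command line warning D9025 : overriding '/Ox' with '/Od'\ndone")
#   -> b"foo.c\ndone"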
class CythonCompileTestCase(unittest.TestCase):
def __init__(self, test_directory, workdir, module, tags, language='c', preparse='id',
expect_errors=False, expect_warnings=False, annotate=False, cleanup_workdir=True,
cleanup_sharedlibs=True, cleanup_failures=True, cython_only=False, doctest_selector=None,
fork=True, language_level=2, warning_errors=False,
test_determinism=False,
common_utility_dir=None, pythran_dir=None, stats=None):
self.test_directory = test_directory
self.tags = tags
self.workdir = workdir
self.module = module
self.language = language
self.preparse = preparse
self.name = module if self.preparse == "id" else "%s_%s" % (module, preparse)
self.expect_errors = expect_errors
self.expect_warnings = expect_warnings
self.annotate = annotate
self.cleanup_workdir = cleanup_workdir
self.cleanup_sharedlibs = cleanup_sharedlibs
self.cleanup_failures = cleanup_failures
self.cython_only = cython_only
self.doctest_selector = doctest_selector
self.fork = fork
self.language_level = language_level
self.warning_errors = warning_errors
self.test_determinism = test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
self.stats = stats
unittest.TestCase.__init__(self)
def shortDescription(self):
return "compiling (%s%s%s) %s" % (
self.language,
"/cy2" if self.language_level == 2 else "/cy3" if self.language_level == 3 else "",
"/pythran" if self.pythran_dir is not None else "",
self.description_name()
)
def description_name(self):
return self.name
def setUp(self):
from Cython.Compiler import Options
self._saved_options = [
(name, getattr(Options, name))
for name in (
'warning_errors',
'clear_to_none',
'error_on_unknown_names',
'error_on_uninitialized',
# 'cache_builtins', # not currently supported due to incorrect global caching
)
]
self._saved_default_directives = list(Options.get_directive_defaults().items())
Options.warning_errors = self.warning_errors
if sys.version_info >= (3, 4):
Options._directive_defaults['autotestdict'] = False
if not os.path.exists(self.workdir):
os.makedirs(self.workdir)
if self.workdir not in sys.path:
sys.path.insert(0, self.workdir)
def tearDown(self):
from Cython.Compiler import Options
for name, value in self._saved_options:
setattr(Options, name, value)
Options._directive_defaults = dict(self._saved_default_directives)
unpatch_inspect_isfunction()
try:
sys.path.remove(self.workdir)
except ValueError:
pass
try:
del sys.modules[self.module]
except KeyError:
pass
cleanup = self.cleanup_failures or self.success
cleanup_c_files = WITH_CYTHON and self.cleanup_workdir and cleanup
cleanup_lib_files = self.cleanup_sharedlibs and cleanup
is_cygwin = sys.platform == 'cygwin'
if os.path.exists(self.workdir):
if cleanup_c_files and cleanup_lib_files and not is_cygwin:
shutil.rmtree(self.workdir, ignore_errors=True)
else:
for rmfile in os.listdir(self.workdir):
ext = os.path.splitext(rmfile)[1]
if not cleanup_c_files:
# Keep C, C++ files, header files, preprocessed sources
# and assembly sources (typically the .i and .s files
# are intentionally generated when -save-temps is given)
if ext in (".c", ".cpp", ".h", ".i", ".ii", ".s"):
continue
if ext == ".html" and rmfile.startswith(self.module):
continue
is_shared_obj = ext in (".so", ".dll")
if not cleanup_lib_files and is_shared_obj:
continue
try:
rmfile = os.path.join(self.workdir, rmfile)
if os.path.isdir(rmfile):
shutil.rmtree(rmfile, ignore_errors=True)
elif is_cygwin and is_shared_obj:
# Delete later
_to_clean.append(rmfile)
else:
os.remove(rmfile)
except IOError:
pass
if cleanup_c_files and cleanup_lib_files and is_cygwin:
# Finally, remove the work dir itself
_to_clean.append(self.workdir)
if cleanup_c_files and os.path.exists(self.workdir + '-again'):
shutil.rmtree(self.workdir + '-again', ignore_errors=True)
def runTest(self):
self.success = False
self.runCompileTest()
self.success = True
def runCompileTest(self):
return self.compile(
self.test_directory, self.module, self.workdir,
self.test_directory, self.expect_errors, self.expect_warnings, self.annotate)
def find_module_source_file(self, source_file):
if not os.path.exists(source_file):
source_file = source_file[:-1]
return source_file
def build_target_filename(self, module_name):
target = '%s.%s' % (module_name, self.language)
return target
def related_files(self, test_directory, module_name):
is_related = re.compile('%s_.*[.].*' % module_name).match
return [filename for filename in list_unchanging_dir(test_directory)
if is_related(filename)]
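    # related_files() above matches e.g. "foo_helper.pxd" or "foo_data.txt" for a
    # module named "foo" (illustrative names), so support files placed next to a
    # test are copied into its work directory.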
def copy_files(self, test_directory, target_directory, file_list):
if self.preparse and self.preparse != 'id':
preparse_func = globals()[self.preparse]
def copy(src, dest):
with open(src) as fin:
with open(dest, 'w') as fout:
fout.write(preparse_func(fin.read()))
else:
# use symlink on Unix, copy on Windows
try:
copy = os.symlink
except AttributeError:
copy = shutil.copy
join = os.path.join
for filename in file_list:
file_path = join(test_directory, filename)
if os.path.exists(file_path):
copy(file_path, join(target_directory, filename))
def source_files(self, workdir, module_name, file_list):
return ([self.build_target_filename(module_name)] +
[filename for filename in file_list
if not os.path.isfile(os.path.join(workdir, filename))])
def split_source_and_output(self, test_directory, module, workdir):
source_file = self.find_module_source_file(os.path.join(test_directory, module) + '.pyx')
from Cython.Utils import detect_opened_file_encoding
with io_open(source_file, 'rb') as f:
# encoding is passed to ErrorWriter but not used on the source
# since it is sometimes deliberately wrong
encoding = detect_opened_file_encoding(f, default=None)
with io_open(source_file, 'r', encoding='ISO-8859-1') as source_and_output:
error_writer = warnings_writer = None
out = io_open(os.path.join(workdir, module + os.path.splitext(source_file)[1]),
'w', encoding='ISO-8859-1')
try:
for line in source_and_output:
if line.startswith("_ERRORS"):
out.close()
out = error_writer = ErrorWriter(encoding=encoding)
elif line.startswith("_WARNINGS"):
out.close()
out = warnings_writer = ErrorWriter(encoding=encoding)
else:
out.write(line)
finally:
out.close()
return (error_writer.geterrors() if error_writer else [],
warnings_writer.geterrors() if warnings_writer else [])
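    # Sketch of the layout split_source_and_output() expects (illustrative): the
    # .pyx source is followed by an "_ERRORS" block and optionally a "_WARNINGS"
    # block, each listing expected "line:column: message" entries that are later
    # compared against the compiler output.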
def run_cython(self, test_directory, module, targetdir, incdir, annotate,
extra_compile_options=None):
include_dirs = INCLUDE_DIRS + [os.path.join(test_directory, '..', TEST_SUPPORT_DIR)]
if incdir:
include_dirs.append(incdir)
if self.preparse == 'id':
source = self.find_module_source_file(
os.path.join(test_directory, module + '.pyx'))
else:
self.copy_files(test_directory, targetdir, [module + '.pyx'])
source = os.path.join(targetdir, module + '.pyx')
target = os.path.join(targetdir, self.build_target_filename(module))
if extra_compile_options is None:
extra_compile_options = {}
if 'allow_unknown_names' in self.tags['tag']:
from Cython.Compiler import Options
Options.error_on_unknown_names = False
try:
CompilationOptions
except NameError:
from Cython.Compiler.Options import CompilationOptions
from Cython.Compiler.Main import compile as cython_compile
from Cython.Compiler.Options import default_options
common_utility_include_dir = self.common_utility_dir
options = CompilationOptions(
default_options,
include_path = include_dirs,
output_file = target,
annotate = annotate,
use_listing_file = False,
cplus = self.language == 'cpp',
np_pythran = self.pythran_dir is not None,
language_level = self.language_level,
generate_pxi = False,
evaluate_tree_assertions = True,
common_utility_include_dir = common_utility_include_dir,
**extra_compile_options
)
cython_compile(source, options=options,
full_module_name=module)
def run_distutils(self, test_directory, module, workdir, incdir,
extra_extension_args=None):
cwd = os.getcwd()
os.chdir(workdir)
try:
build_extension = build_ext(get_distutils_distro())
build_extension.include_dirs = INCLUDE_DIRS[:]
if incdir:
build_extension.include_dirs.append(incdir)
build_extension.finalize_options()
if COMPILER:
build_extension.compiler = COMPILER
ext_compile_flags = CFLAGS[:]
if build_extension.compiler == 'mingw32':
ext_compile_flags.append('-Wno-format')
if extra_extension_args is None:
extra_extension_args = {}
related_files = self.related_files(test_directory, module)
self.copy_files(test_directory, workdir, related_files)
from distutils.core import Extension
extension = Extension(
module,
sources=self.source_files(workdir, module, related_files),
extra_compile_args=ext_compile_flags,
**extra_extension_args
)
if self.language == 'cpp':
# Set the language now as the fixer might need it
extension.language = 'c++'
if 'distutils' in self.tags:
from Cython.Build.Dependencies import DistutilsInfo
from Cython.Utils import open_source_file
pyx_path = os.path.join(self.test_directory, self.module + ".pyx")
with open_source_file(pyx_path) as f:
DistutilsInfo(f).apply(extension)
if self.pythran_dir:
from Cython.Build.Dependencies import update_pythran_extension
update_pythran_extension(extension)
# Compile with -DCYTHON_CLINE_IN_TRACEBACK=1 unless we have
# the "traceback" tag
if 'traceback' not in self.tags['tag']:
extension.define_macros.append(("CYTHON_CLINE_IN_TRACEBACK", 1))
for matcher, fixer in list(EXT_EXTRAS.items()):
if isinstance(matcher, str):
# lazy init
del EXT_EXTRAS[matcher]
matcher = string_selector(matcher)
EXT_EXTRAS[matcher] = fixer
if matcher(module, self.tags):
newext = fixer(extension)
if newext is EXCLUDE_EXT:
return skip_test("Test '%s' excluded due to tags '%s'" % (
self.name, ', '.join(self.tags.get('tag', ''))))
extension = newext or extension
if self.language == 'cpp':
extension.language = 'c++'
if IS_PY2:
workdir = str(workdir) # work around type check in distutils that disallows unicode strings
build_extension.extensions = [extension]
build_extension.build_temp = workdir
build_extension.build_lib = workdir
build_extension.run()
finally:
os.chdir(cwd)
try:
get_ext_fullpath = build_extension.get_ext_fullpath
except AttributeError:
def get_ext_fullpath(ext_name, self=build_extension):
# copied from distutils.command.build_ext (missing in Py2.[45])
fullname = self.get_ext_fullname(ext_name)
modpath = fullname.split('.')
filename = self.get_ext_filename(modpath[-1])
if not self.inplace:
filename = os.path.join(*modpath[:-1]+[filename])
return os.path.join(self.build_lib, filename)
package = '.'.join(modpath[0:-1])
build_py = self.get_finalized_command('build_py')
package_dir = os.path.abspath(build_py.get_package_dir(package))
return os.path.join(package_dir, filename)
return get_ext_fullpath(module)
def compile(self, test_directory, module, workdir, incdir,
expect_errors, expect_warnings, annotate):
expected_errors = expected_warnings = errors = warnings = ()
if expect_errors or expect_warnings:
expected_errors, expected_warnings = self.split_source_and_output(
test_directory, module, workdir)
test_directory = workdir
if WITH_CYTHON:
old_stderr = sys.stderr
try:
sys.stderr = ErrorWriter()
with self.stats.time(self.name, self.language, 'cython'):
self.run_cython(test_directory, module, workdir, incdir, annotate)
errors, warnings = sys.stderr.getall()
finally:
sys.stderr = old_stderr
if self.test_determinism and not expect_errors:
workdir2 = workdir + '-again'
os.mkdir(workdir2)
self.run_cython(test_directory, module, workdir2, incdir, annotate)
diffs = []
for file in os.listdir(workdir2):
if (open(os.path.join(workdir, file)).read()
!= open(os.path.join(workdir2, file)).read()):
diffs.append(file)
os.system('diff -u %s/%s %s/%s > %s/%s.diff' % (
workdir, file,
workdir2, file,
workdir2, file))
if diffs:
self.fail('Nondeterministic file generation: %s' % ', '.join(diffs))
tostderr = sys.__stderr__.write
if expected_warnings or (expect_warnings and warnings):
self._match_output(expected_warnings, warnings, tostderr)
if 'cerror' in self.tags['tag']:
if errors:
tostderr("\n=== Expected C compile error ===\n")
tostderr("\n=== Got Cython errors: ===\n")
tostderr('\n'.join(errors))
tostderr('\n\n')
raise RuntimeError('should have generated extension code')
elif errors or expected_errors:
self._match_output(expected_errors, errors, tostderr)
return None
so_path = None
if not self.cython_only:
from Cython.Utils import captured_fd, print_bytes
from distutils.errors import CompileError, LinkError
show_output = True
get_stderr = get_stdout = None
try:
with captured_fd(1) as get_stdout:
with captured_fd(2) as get_stderr:
with self.stats.time(self.name, self.language, 'compile-%s' % self.language):
so_path = self.run_distutils(test_directory, module, workdir, incdir)
except Exception as exc:
if ('cerror' in self.tags['tag'] and
((get_stderr and get_stderr()) or
isinstance(exc, (CompileError, LinkError)))):
show_output = False # expected C compiler failure
else:
raise
else:
if 'cerror' in self.tags['tag']:
raise RuntimeError('should have failed C compile')
finally:
if show_output:
stdout = get_stdout and get_stdout().strip()
stderr = get_stderr and filter_stderr(get_stderr()).strip()
if so_path and not stderr:
# normal success case => ignore non-error compiler output
stdout = None
if stdout:
print_bytes(
stdout, header_text="\n=== C/C++ compiler output: =========\n",
end=None, file=sys.__stderr__)
if stderr:
print_bytes(
stderr, header_text="\n=== C/C++ compiler error output: ===\n",
end=None, file=sys.__stderr__)
if stdout or stderr:
tostderr("\n====================================\n")
return so_path
def _match_output(self, expected_output, actual_output, write):
try:
for expected, actual in zip(expected_output, actual_output):
self.assertEqual(expected, actual)
if len(actual_output) < len(expected_output):
expected = expected_output[len(actual_output)]
self.assertEqual(expected, None)
elif len(actual_output) > len(expected_output):
unexpected = actual_output[len(expected_output)]
self.assertEqual(None, unexpected)
except AssertionError:
write("\n=== Expected: ===\n")
write('\n'.join(expected_output))
write("\n\n=== Got: ===\n")
write('\n'.join(actual_output))
write('\n\n')
raise
class CythonRunTestCase(CythonCompileTestCase):
def setUp(self):
CythonCompileTestCase.setUp(self)
from Cython.Compiler import Options
Options.clear_to_none = False
def description_name(self):
return self.name if self.cython_only else "and running %s" % self.name
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
result.startTest(self)
try:
self.setUp()
try:
self.success = False
ext_so_path = self.runCompileTest()
failures, errors, skipped = len(result.failures), len(result.errors), len(result.skipped)
if not self.cython_only and ext_so_path is not None:
self.run_tests(result, ext_so_path)
if failures == len(result.failures) and errors == len(result.errors):
# No new errors...
self.success = True
finally:
check_thread_termination()
except SkipTest as exc:
result.addSkip(self, str(exc))
result.stopTest(self)
except Exception:
result.addError(self, sys.exc_info())
result.stopTest(self)
try:
self.tearDown()
except Exception:
pass
def run_tests(self, result, ext_so_path):
self.run_doctests(self.module, result, ext_so_path)
def run_doctests(self, module_or_name, result, ext_so_path):
def run_test(result):
if isinstance(module_or_name, basestring):
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(module_or_name, ext_so_path)
else:
module = module_or_name
tests = doctest.DocTestSuite(module)
if self.doctest_selector is not None:
tests._tests[:] = [test for test in tests._tests if self.doctest_selector(test.id())]
with self.stats.time(self.name, self.language, 'run'):
tests.run(result)
run_forked_test(result, run_test, self.shortDescription(), self.fork)
def run_forked_test(result, run_func, test_name, fork=True):
if not fork or sys.version_info[0] >= 3 or not hasattr(os, 'fork'):
run_func(result)
sys.stdout.flush()
sys.stderr.flush()
gc.collect()
return
# fork to make sure we do not keep the tested module loaded
result_handle, result_file = tempfile.mkstemp()
os.close(result_handle)
child_id = os.fork()
if not child_id:
result_code = 0
try:
try:
tests = partial_result = None
try:
partial_result = PartialTestResult(result)
run_func(partial_result)
sys.stdout.flush()
sys.stderr.flush()
gc.collect()
except Exception:
result_code = 1
if partial_result is not None:
if tests is None:
# importing failed, try to fake a test class
tests = _FakeClass(
failureException=sys.exc_info()[1],
_shortDescription=test_name,
module_name=None)
partial_result.addError(tests, sys.exc_info())
if partial_result is not None:
with open(result_file, 'wb') as output:
pickle.dump(partial_result.data(), output)
except:
traceback.print_exc()
finally:
try: sys.stderr.flush()
except: pass
try: sys.stdout.flush()
except: pass
os._exit(result_code)
try:
cid, result_code = os.waitpid(child_id, 0)
module_name = test_name.split()[-1]
# os.waitpid returns the child's result code in the
# upper byte of result_code, and the signal it was
# killed by in the lower byte
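        # e.g. a child that calls os._exit(1) shows up here as 0x0100 (exit code 1
        # after the shift below), while a child killed by SIGSEGV leaves 11 in the
        # low byte and triggers the "killed by signal" error.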
if result_code & 255:
raise Exception(
"Tests in module '%s' were unexpectedly killed by signal %d, see test output for details." % (
module_name, result_code & 255))
result_code >>= 8
if result_code in (0,1):
try:
with open(result_file, 'rb') as f:
PartialTestResult.join_results(result, pickle.load(f))
except Exception:
raise Exception(
"Failed to load test result from test in module '%s' after exit status %d,"
" see test output for details." % (module_name, result_code))
if result_code:
raise Exception(
"Tests in module '%s' exited with status %d, see test output for details." % (
module_name, result_code))
finally:
try:
os.unlink(result_file)
except:
pass
class PureDoctestTestCase(unittest.TestCase):
def __init__(self, module_name, module_path, tags, stats=None):
self.tags = tags
self.module_name = self.name = module_name
self.module_path = module_path
self.stats = stats
unittest.TestCase.__init__(self, 'run')
def shortDescription(self):
return "running pure doctests in %s" % self.module_name
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
loaded_module_name = 'pure_doctest__' + self.module_name
result.startTest(self)
try:
self.setUp()
import imp
with self.stats.time(self.name, 'py', 'pyimport'):
m = imp.load_source(loaded_module_name, self.module_path)
try:
with self.stats.time(self.name, 'py', 'pyrun'):
doctest.DocTestSuite(m).run(result)
finally:
del m
if loaded_module_name in sys.modules:
del sys.modules[loaded_module_name]
check_thread_termination()
except Exception:
result.addError(self, sys.exc_info())
result.stopTest(self)
try:
self.tearDown()
except Exception:
pass
if 'mypy' in self.tags['tag']:
try:
from mypy import api as mypy_api
except ImportError:
pass
else:
with self.stats.time(self.name, 'py', 'mypy'):
mypy_result = mypy_api.run([
self.module_path,
'--ignore-missing-imports',
'--follow-imports', 'skip',
])
if mypy_result[2]:
self.fail(mypy_result[0])
is_private_field = re.compile('^_[^_]').match
class _FakeClass(object):
def __init__(self, **kwargs):
self._shortDescription = kwargs.get('module_name')
self.__dict__.update(kwargs)
def shortDescription(self):
return self._shortDescription
try: # Py2.7+ and Py3.2+
from unittest.runner import _TextTestResult
except ImportError:
from unittest import _TextTestResult
class PartialTestResult(_TextTestResult):
def __init__(self, base_result):
_TextTestResult.__init__(
self, self._StringIO(), True,
base_result.dots + base_result.showAll*2)
def strip_error_results(self, results):
for test_case, error in results:
for attr_name in filter(is_private_field, dir(test_case)):
if attr_name == '_dt_test':
test_case._dt_test = _FakeClass(
name=test_case._dt_test.name)
elif attr_name != '_shortDescription':
setattr(test_case, attr_name, None)
def data(self):
self.strip_error_results(self.failures)
self.strip_error_results(self.errors)
return (self.failures, self.errors, self.skipped, self.testsRun,
self.stream.getvalue())
def join_results(result, data):
"""Static method for merging the result back into the main
result object.
"""
failures, errors, skipped, tests_run, output = data
if output:
result.stream.write(output)
result.errors.extend(errors)
result.skipped.extend(skipped)
result.failures.extend(failures)
result.testsRun += tests_run
join_results = staticmethod(join_results)
class _StringIO(StringIO):
def writeln(self, line):
self.write("%s\n" % line)
class CythonUnitTestCase(CythonRunTestCase):
def shortDescription(self):
return "compiling (%s) tests in %s" % (self.language, self.description_name())
def run_tests(self, result, ext_so_path):
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(self.module, ext_so_path)
tests = unittest.defaultTestLoader.loadTestsFromModule(module)
with self.stats.time(self.name, self.language, 'run'):
tests.run(result)
class CythonPyregrTestCase(CythonRunTestCase):
def setUp(self):
CythonRunTestCase.setUp(self)
from Cython.Compiler import Options
Options.error_on_unknown_names = False
Options.error_on_uninitialized = False
Options._directive_defaults.update(dict(
binding=True, always_allow_keywords=True,
set_initial_path="SOURCEFILE"))
patch_inspect_isfunction()
def related_files(self, test_directory, module_name):
return _list_pyregr_data_files(test_directory)
def _run_unittest(self, result, *classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
with self.stats.time(self.name, self.language, 'run'):
suite.run(result)
def _run_doctest(self, result, module):
self.run_doctests(module, result, None)
def run_tests(self, result, ext_so_path):
try:
from test import support
except ImportError: # Python2.x
from test import test_support as support
def run_test(result):
def run_unittest(*classes):
return self._run_unittest(result, *classes)
def run_doctest(module, verbosity=None):
return self._run_doctest(result, module)
backup = (support.run_unittest, support.run_doctest)
support.run_unittest = run_unittest
support.run_doctest = run_doctest
try:
try:
sys.stdout.flush() # helps in case of crashes
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(self.module, ext_so_path)
sys.stdout.flush() # helps in case of crashes
if hasattr(module, 'test_main'):
# help 'doctest.DocFileTest' find the module path through frame inspection
fake_caller_module_globals = {
'module': module,
'__name__': module.__name__,
}
call_tests = eval(
'lambda: module.test_main()',
fake_caller_module_globals, fake_caller_module_globals)
call_tests()
sys.stdout.flush() # helps in case of crashes
except (unittest.SkipTest, support.ResourceDenied):
result.addSkip(self, 'ok')
finally:
support.run_unittest, support.run_doctest = backup
run_forked_test(result, run_test, self.shortDescription(), self.fork)
class TestCodeFormat(unittest.TestCase):
def __init__(self, cython_dir):
self.cython_dir = cython_dir
unittest.TestCase.__init__(self)
def runTest(self):
import pycodestyle
config_file = os.path.join(self.cython_dir, "setup.cfg")
if not os.path.exists(config_file):
config_file = os.path.join(os.path.dirname(__file__), "setup.cfg")
paths = []
for codedir in ['Cython', 'Demos', 'docs', 'pyximport', 'tests']:
paths += glob.glob(os.path.join(self.cython_dir, codedir + "/**/*.py"), recursive=True)
style = pycodestyle.StyleGuide(config_file=config_file)
print("") # Fix the first line of the report.
result = style.check_files(paths)
self.assertEqual(result.total_errors, 0, "Found code style errors.")
include_debugger = IS_CPYTHON
def collect_unittests(path, module_prefix, suite, selectors, exclude_selectors):
def file_matches(filename):
return filename.startswith("Test") and filename.endswith(".py")
def package_matches(dirname):
return dirname == "Tests"
loader = unittest.TestLoader()
if include_debugger:
skipped_dirs = []
else:
skipped_dirs = ['Cython' + os.path.sep + 'Debugger' + os.path.sep]
for dirpath, dirnames, filenames in os.walk(path):
if dirpath != path and "__init__.py" not in filenames:
skipped_dirs.append(dirpath + os.path.sep)
continue
skip = False
for dir in skipped_dirs:
if dirpath.startswith(dir):
skip = True
if skip:
continue
parentname = os.path.split(dirpath)[-1]
if package_matches(parentname):
for f in filenames:
if file_matches(f):
filepath = os.path.join(dirpath, f)[:-len(".py")]
modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
if not any(1 for match in selectors if match(modulename)):
continue
if any(1 for match in exclude_selectors if match(modulename)):
continue
module = __import__(modulename)
for x in modulename.split('.')[1:]:
module = getattr(module, x)
suite.addTests([loader.loadTestsFromModule(module)])
def collect_doctests(path, module_prefix, suite, selectors, exclude_selectors):
def package_matches(dirname):
if dirname == 'Debugger' and not include_debugger:
return False
return dirname not in ("Mac", "Distutils", "Plex", "Tempita")
def file_matches(filename):
filename, ext = os.path.splitext(filename)
excludelist = ['libcython', 'libpython', 'test_libcython_in_gdb',
'TestLibCython']
return (ext == '.py' and not
'~' in filename and not
'#' in filename and not
filename.startswith('.') and not
filename in excludelist)
import doctest
for dirpath, dirnames, filenames in os.walk(path):
for dir in list(dirnames):
if not package_matches(dir):
dirnames.remove(dir)
for f in filenames:
if file_matches(f):
if not f.endswith('.py'): continue
filepath = os.path.join(dirpath, f)
if os.path.getsize(filepath) == 0: continue
filepath = filepath[:-len(".py")]
modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
if not [ 1 for match in selectors if match(modulename) ]:
continue
if [ 1 for match in exclude_selectors if match(modulename) ]:
continue
if 'in_gdb' in modulename:
# These should only be imported from gdb.
continue
module = __import__(modulename)
for x in modulename.split('.')[1:]:
module = getattr(module, x)
if hasattr(module, "__doc__") or hasattr(module, "__test__"):
try:
suite.addTest(doctest.DocTestSuite(module))
except ValueError: # no tests
pass
class EndToEndTest(unittest.TestCase):
"""
This is a test of build/*.srctree files, where srctree defines a full
directory structure and its header gives a list of commands to run.
"""
cython_root = os.path.dirname(os.path.abspath(__file__))
def __init__(self, treefile, workdir, cleanup_workdir=True, stats=None, capture=True):
self.name = os.path.splitext(os.path.basename(treefile))[0]
self.treefile = treefile
self.workdir = os.path.join(workdir, self.name)
self.cleanup_workdir = cleanup_workdir
self.stats = stats
self.capture = capture
cython_syspath = [self.cython_root]
for path in sys.path:
if path.startswith(self.cython_root) and path not in cython_syspath:
# Py3 installation and refnanny build prepend their
# fixed paths to sys.path => prefer that over the
# generic one (cython_root itself goes last)
cython_syspath.append(path)
self.cython_syspath = os.pathsep.join(cython_syspath[::-1])
unittest.TestCase.__init__(self)
def shortDescription(self):
return "End-to-end %s" % self.name
def setUp(self):
from Cython.TestUtils import unpack_source_tree
_, self.commands = unpack_source_tree(self.treefile, self.workdir, self.cython_root)
self.old_dir = os.getcwd()
os.chdir(self.workdir)
def tearDown(self):
if self.cleanup_workdir:
for trial in range(5):
try:
shutil.rmtree(self.workdir)
except OSError:
time.sleep(0.1)
else:
break
os.chdir(self.old_dir)
def _try_decode(self, content):
try:
return content.decode()
except UnicodeDecodeError:
return content.decode('iso-8859-1')
def runTest(self):
self.success = False
old_path = os.environ.get('PYTHONPATH')
env = dict(os.environ)
new_path = self.cython_syspath
if old_path:
new_path = new_path + os.pathsep + self.workdir + os.pathsep + old_path
env['PYTHONPATH'] = new_path
if not env.get("PYTHONIOENCODING"):
env["PYTHONIOENCODING"] = sys.stdout.encoding or sys.getdefaultencoding()
cmd = []
out = []
err = []
for command_no, command in enumerate(self.commands, 1):
with self.stats.time('%s(%d)' % (self.name, command_no), 'c',
'etoe-build' if 'setup.py' in command else 'etoe-run'):
if self.capture:
p = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
_out, _err = p.communicate()
res = p.returncode
else:
p = subprocess.call(command, env=env)
_out, _err = b'', b''
res = p
cmd.append(command)
out.append(_out)
err.append(_err)
if res == 0 and b'REFNANNY: ' in _out:
res = -1
if res != 0:
for c, o, e in zip(cmd, out, err):
sys.stderr.write("%s\n%s\n%s\n\n" % (
c, self._try_decode(o), self._try_decode(e)))
self.assertEqual(0, res, "non-zero exit status")
self.success = True
# TODO: Support cython_freeze needed here as well.
# TODO: Windows support.
class EmbedTest(unittest.TestCase):
working_dir = "Demos/embed"
def setUp(self):
self.old_dir = os.getcwd()
os.chdir(self.working_dir)
os.system(
"make PYTHON='%s' clean > /dev/null" % sys.executable)
def tearDown(self):
try:
os.system(
"make PYTHON='%s' clean > /dev/null" % sys.executable)
except:
pass
os.chdir(self.old_dir)
def test_embed(self):
libname = sysconfig.get_config_var('LIBRARY')
libdir = sysconfig.get_config_var('LIBDIR')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
libdir = os.path.join(os.path.dirname(sys.executable), '..', 'lib')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
libdir = os.path.join(libdir, 'python%d.%d' % sys.version_info[:2], 'config')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
# report the error for the original directory
libdir = sysconfig.get_config_var('LIBDIR')
cython = os.path.abspath(os.path.join('..', '..', 'cython.py'))
try:
subprocess.check_output([
"make",
"PYTHON='%s'" % sys.executable,
"CYTHON='%s'" % cython,
"LIBDIR1='%s'" % libdir,
"paths", "test",
])
except subprocess.CalledProcessError as err:
print(err.output.decode())
raise
self.assertTrue(True) # :)
def load_listfile(filename):
    # just re-use the FileListExcluder implementation
fle = FileListExcluder(filename)
return list(fle.excludes)
class MissingDependencyExcluder(object):
def __init__(self, deps):
# deps: { matcher func : module name }
self.exclude_matchers = []
for matcher, module_name in deps.items():
try:
module = __import__(module_name)
except ImportError:
self.exclude_matchers.append(string_selector(matcher))
print("Test dependency not found: '%s'" % module_name)
else:
version = self.find_dep_version(module_name, module)
print("Test dependency found: '%s' version %s" % (module_name, version))
self.tests_missing_deps = []
def find_dep_version(self, name, module):
try:
version = module.__version__
except AttributeError:
stdlib_dir = os.path.dirname(shutil.__file__) + os.sep
module_path = getattr(module, '__file__', stdlib_dir) # no __file__? => builtin stdlib module
if module_path.startswith(stdlib_dir):
# stdlib module
version = sys.version.partition(' ')[0]
elif '.' in name:
# incrementally look for a parent package with version
name = name.rpartition('.')[0]
return self.find_dep_version(name, __import__(name))
else:
version = '?.?'
return version
def __call__(self, testname, tags=None):
for matcher in self.exclude_matchers:
if matcher(testname, tags):
self.tests_missing_deps.append(testname)
return True
return False
class VersionDependencyExcluder(object):
def __init__(self, deps):
# deps: { version : matcher func }
from sys import version_info
self.exclude_matchers = []
for ver, (compare, matcher) in deps.items():
if compare(version_info, ver):
self.exclude_matchers.append(matcher)
self.tests_missing_deps = []
def __call__(self, testname, tags=None):
for matcher in self.exclude_matchers:
if matcher(testname):
self.tests_missing_deps.append(testname)
return True
return False
class FileListExcluder(object):
def __init__(self, list_file, verbose=False):
self.verbose = verbose
self.excludes = {}
self._list_file = os.path.relpath(list_file)
with open(list_file) as f:
for line in f:
line = line.strip()
if line and line[0] != '#':
self.excludes[line.split()[0]] = True
def __call__(self, testname, tags=None):
exclude = any(string_selector(ex)(testname) for ex in self.excludes)
if exclude and self.verbose:
print("Excluding %s because it's listed in %s"
% (testname, self._list_file))
return exclude
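# Sketch of the exclude-list file format read by FileListExcluder (hypothetical
# content): blank lines and lines starting with '#' are ignored, and only the
# first whitespace-separated token of each remaining line is used, e.g.
#   run.some_broken_test
#   ticket:123   trailing text on the line is ignored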
class TagsSelector(object):
def __init__(self, tag, value):
self.tag = tag
self.value = value
def __call__(self, testname, tags=None):
if tags is None:
return False
else:
return self.value in tags[self.tag]
class RegExSelector(object):
def __init__(self, pattern_string):
try:
self.regex_matches = re.compile(pattern_string, re.I|re.U).search
except re.error:
print('Invalid pattern: %r' % pattern_string)
raise
def __call__(self, testname, tags=None):
return self.regex_matches(testname)
def string_selector(s):
if ':' in s:
return TagsSelector(*s.split(':', 1))
else:
return RegExSelector(s)
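# Illustrative examples of string_selector() (hypothetical selector strings):
#   string_selector("ticket:123")   -> TagsSelector("ticket", "123")
#   string_selector("run.buffers")  -> RegExSelector("run.buffers")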
class ShardExcludeSelector(object):
    # This is an exclude selector so it can override the (include) selectors.
    # It may not provide a uniform distribution (in time or count), but it is a
    # deterministic partition of the tests, which is what matters here.
# Random seed to improve the hash distribution.
_seed = base64.b64decode(b'2ged1EtsGz/GkisJr22UcLeP6n9XIaA5Vby2wM49Wvg=')
def __init__(self, shard_num, shard_count):
self.shard_num = shard_num
self.shard_count = shard_count
def __call__(self, testname, tags=None, _hash=zlib.crc32, _is_py2=IS_PY2):
# Cannot use simple hash() here as shard processes might use different hash seeds.
# CRC32 is fast and simple, but might return negative values in Py2.
hashval = _hash(self._seed + testname) & 0x7fffffff if _is_py2 else _hash(self._seed + testname.encode())
return hashval % self.shard_count != self.shard_num
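# Illustrative use (hypothetical shard setup): with shard_count=3, every test
# name hashes into exactly one shard, so three runs with shard_num 0, 1 and 2
# together cover the whole suite, e.g.
#   ShardExcludeSelector(1, 3)("run.test_foo")  # True unless the hash maps to shard 1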
class PendingThreadsError(RuntimeError):
pass
threads_seen = []
def check_thread_termination(ignore_seen=True):
if threading is None: # no threading enabled in CPython
return
current = threading.current_thread()
blocking_threads = []
for t in threading.enumerate():
if not t.is_alive() or t == current or t.name == 'time_stamper':
continue
t.join(timeout=2)
if t.is_alive():
if not ignore_seen:
blocking_threads.append(t)
continue
for seen in threads_seen:
if t is seen:
break
else:
threads_seen.append(t)
blocking_threads.append(t)
if not blocking_threads:
return
sys.stderr.write("warning: left-over threads found after running test:\n")
for t in blocking_threads:
sys.stderr.write('...%s\n' % repr(t))
raise PendingThreadsError("left-over threads found after running test")
def subprocess_output(cmd):
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p.communicate()[0].decode('UTF-8')
except OSError:
return ''
def get_version():
from Cython.Compiler.Version import version as cython_version
full_version = cython_version
top = os.path.dirname(os.path.abspath(__file__))
if os.path.exists(os.path.join(top, '.git')):
old_dir = os.getcwd()
try:
os.chdir(top)
head_commit = subprocess_output(['git', 'rev-parse', 'HEAD']).strip()
version_commit = subprocess_output(['git', 'rev-parse', cython_version]).strip()
diff = subprocess_output(['git', 'diff', '--stat']).strip()
if head_commit != version_commit:
full_version += " " + head_commit
if diff:
full_version += ' + uncommitted changes'
finally:
os.chdir(old_dir)
return full_version
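# Typical results of get_version() (illustrative): just the release version for
# a clean tagged checkout, or "<version> <head-sha1> + uncommitted changes" when
# HEAD differs from the version tag and the working tree has local changes.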
_orig_stdout, _orig_stderr = sys.stdout, sys.stderr
def flush_and_terminate(status):
try:
_orig_stdout.flush()
_orig_stderr.flush()
finally:
os._exit(status)
def main():
global DISTDIR, WITH_CYTHON
# Set an environment variable to the top directory
os.environ['CYTHON_PROJECT_DIR'] = os.path.abspath(os.path.dirname(__file__))
DISTDIR = os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]))
from Cython.Compiler import DebugFlags
args = []
for arg in sys.argv[1:]:
if arg.startswith('--debug') and arg[2:].replace('-', '_') in dir(DebugFlags):
setattr(DebugFlags, arg[2:].replace('-', '_'), True)
else:
args.append(arg)
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--no-cleanup", dest="cleanup_workdir",
action="store_false", default=True,
help="do not delete the generated C files (allows passing --no-cython on next run)")
parser.add_option("--no-cleanup-sharedlibs", dest="cleanup_sharedlibs",
action="store_false", default=True,
help="do not delete the generated shared library files (allows manual module experimentation)")
parser.add_option("--no-cleanup-failures", dest="cleanup_failures",
action="store_false", default=True,
help="enable --no-cleanup and --no-cleanup-sharedlibs for failed tests only")
parser.add_option("--no-cython", dest="with_cython",
action="store_false", default=True,
help="do not run the Cython compiler, only the C compiler")
parser.add_option("--compiler", dest="compiler", default=None,
help="C compiler type")
backend_list = ','.join(BACKENDS)
parser.add_option("--backends", dest="backends", default=backend_list,
help="select backends to test (default: %s)" % backend_list)
parser.add_option("--no-c", dest="use_c",
action="store_false", default=True,
help="do not test C compilation backend")
parser.add_option("--no-cpp", dest="use_cpp",
action="store_false", default=True,
help="do not test C++ compilation backend")
parser.add_option("--no-unit", dest="unittests",
action="store_false", default=True,
help="do not run the unit tests")
parser.add_option("--no-doctest", dest="doctests",
action="store_false", default=True,
help="do not run the doctests")
parser.add_option("--no-file", dest="filetests",
action="store_false", default=True,
help="do not run the file based tests")
parser.add_option("--no-pyregr", dest="pyregr",
action="store_false", default=True,
help="do not run the regression tests of CPython in tests/pyregr/")
parser.add_option("--no-examples", dest="examples",
action="store_false", default=True,
help="Do not run the documentation tests in the examples directory.")
parser.add_option("--no-code-style", dest="code_style",
action="store_false", default=True,
help="Do not run the code style (PEP8) checks.")
parser.add_option("--cython-only", dest="cython_only",
action="store_true", default=False,
help="only compile pyx to c, do not run C compiler or run the tests")
parser.add_option("--no-refnanny", dest="with_refnanny",
action="store_false", default=True,
help="do not regression test reference counting")
parser.add_option("--no-fork", dest="fork",
action="store_false", default=True,
help="do not fork to run tests")
parser.add_option("--sys-pyregr", dest="system_pyregr",
action="store_true", default=False,
help="run the regression tests of the CPython installation")
parser.add_option("-x", "--exclude", dest="exclude",
action="append", metavar="PATTERN",
help="exclude tests matching the PATTERN")
parser.add_option("--listfile", dest="listfile",
action="append",
help="specify a file containing a list of tests to run")
parser.add_option("-j", "--shard_count", dest="shard_count", metavar="N",
type=int, default=1,
help="shard this run into several parallel runs")
parser.add_option("--shard_num", dest="shard_num", metavar="K",
type=int, default=-1,
help="test only this single shard")
parser.add_option("--profile", dest="profile",
action="store_true", default=False,
help="enable profiling of the tests")
parser.add_option("-C", "--coverage", dest="coverage",
action="store_true", default=False,
help="collect source coverage data for the Compiler")
parser.add_option("--coverage-xml", dest="coverage_xml",
action="store_true", default=False,
help="collect source coverage data for the Compiler in XML format")
parser.add_option("--coverage-html", dest="coverage_html",
action="store_true", default=False,
help="collect source coverage data for the Compiler in HTML format")
parser.add_option("-A", "--annotate", dest="annotate_source",
action="store_true", default=True,
help="generate annotated HTML versions of the test source files")
parser.add_option("--no-annotate", dest="annotate_source",
action="store_false",
help="do not generate annotated HTML versions of the test source files")
parser.add_option("-v", "--verbose", dest="verbosity",
action="count", default=0,
help="display test progress, pass twice to print test names")
parser.add_option("-T", "--ticket", dest="tickets",
action="append",
help="a bug ticket number to run the respective test in 'tests/*'")
parser.add_option("-k", dest="only_pattern",
help="a regex pattern for selecting doctests and test functions in the test modules")
parser.add_option("-3", dest="language_level",
action="store_const", const=3, default=2,
help="set language level to Python 3 (useful for running the CPython regression tests)'")
parser.add_option("--xml-output", dest="xml_output_dir", metavar="DIR",
help="write test results in XML to directory DIR")
parser.add_option("--exit-ok", dest="exit_ok", default=False,
action="store_true",
help="exit without error code even on test failures")
parser.add_option("--failfast", dest="failfast", default=False,
action="store_true",
help="stop on first failure or error")
parser.add_option("--root-dir", dest="root_dir", default=os.path.join(DISTDIR, 'tests'),
help=("Directory to look for the file based "
"tests (the ones which are deactivated with '--no-file'."))
parser.add_option("--examples-dir", dest="examples_dir",
default=os.path.join(DISTDIR, 'docs', 'examples'),
help="Directory to look for documentation example tests")
parser.add_option("--work-dir", dest="work_dir", default=os.path.join(os.getcwd(), 'TEST_TMP'),
help="working directory")
parser.add_option("--cython-dir", dest="cython_dir", default=os.getcwd(),
help="Cython installation directory (default: use local source version)")
parser.add_option("--debug", dest="for_debugging", default=False, action="store_true",
help="configure for easier use with a debugger (e.g. gdb)")
parser.add_option("--pyximport-py", dest="pyximport_py", default=False, action="store_true",
help="use pyximport to automatically compile imported .pyx and .py files")
parser.add_option("--watermark", dest="watermark", default=None,
help="deterministic generated by string")
parser.add_option("--use_common_utility_dir", default=False, action="store_true")
parser.add_option("--use_formal_grammar", default=False, action="store_true")
parser.add_option("--test_determinism", default=False, action="store_true",
help="test whether Cython's output is deterministic")
parser.add_option("--pythran-dir", dest="pythran_dir", default=None,
help="specify Pythran include directory. This will run the C++ tests using Pythran backend for Numpy")
parser.add_option("--no-capture", dest="capture", default=True, action="store_false",
help="do not capture stdout, stderr in srctree tests. Makes pdb.set_trace interactive")
parser.add_option("--limited-api", dest="limited_api", default=False, action="store_true",
help="Compiles Cython using CPython's LIMITED_API")
options, cmd_args = parser.parse_args(args)
if options.with_cython and sys.version_info[0] >= 3:
sys.path.insert(0, options.cython_dir)
    # The code style check needs recursive glob ("**"), which requires Python 3.5+,
    # and is skipped when specific tests are selected on the command line.
if sys.version_info < (3, 5) or cmd_args:
options.code_style = False
WITH_CYTHON = options.with_cython
coverage = None
if options.coverage or options.coverage_xml or options.coverage_html:
if not WITH_CYTHON:
options.coverage = options.coverage_xml = options.coverage_html = False
elif options.shard_num == -1:
print("Enabling coverage analysis")
from coverage import coverage as _coverage
coverage = _coverage(branch=True)
coverage.erase()
coverage.start()
if options.xml_output_dir:
shutil.rmtree(options.xml_output_dir, ignore_errors=True)
if options.listfile:
for listfile in options.listfile:
cmd_args.extend(load_listfile(listfile))
if options.capture and not options.for_debugging:
keep_alive_interval = 10
else:
keep_alive_interval = None
if options.shard_count > 1 and options.shard_num == -1:
if "PYTHONIOENCODING" not in os.environ:
# Make sure subprocesses can print() Unicode text.
os.environ["PYTHONIOENCODING"] = sys.stdout.encoding or sys.getdefaultencoding()
import multiprocessing
pool = multiprocessing.Pool(options.shard_count)
tasks = [(options, cmd_args, shard_num) for shard_num in range(options.shard_count)]
error_shards = []
failure_outputs = []
# NOTE: create process pool before time stamper thread to avoid forking issues.
total_time = time.time()
stats = Stats()
with time_stamper_thread(interval=keep_alive_interval):
for shard_num, shard_stats, return_code, failure_output in pool.imap_unordered(runtests_callback, tasks):
if return_code != 0:
error_shards.append(shard_num)
failure_outputs.append(failure_output)
sys.stderr.write("FAILED (%s/%s)\n" % (shard_num, options.shard_count))
sys.stderr.write("ALL DONE (%s/%s)\n" % (shard_num, options.shard_count))
stats.update(shard_stats)
pool.close()
pool.join()
total_time = time.time() - total_time
sys.stderr.write("Sharded tests run in %d seconds (%.1f minutes)\n" % (round(total_time), total_time / 60.))
if error_shards:
sys.stderr.write("Errors found in shards %s\n" % ", ".join([str(e) for e in error_shards]))
for failure_output in zip(error_shards, failure_outputs):
sys.stderr.write("\nErrors from shard %s:\n%s" % failure_output)
return_code = 1
else:
return_code = 0
else:
with time_stamper_thread(interval=keep_alive_interval):
_, stats, return_code, _ = runtests(options, cmd_args, coverage)
if coverage:
if options.shard_count > 1 and options.shard_num == -1:
coverage.combine()
coverage.stop()
stats.print_stats(sys.stderr)
if coverage:
save_coverage(coverage, options)
sys.stderr.write("ALL DONE\n")
sys.stderr.flush()
try:
check_thread_termination(ignore_seen=False)
except PendingThreadsError:
# normal program exit won't kill the threads, do it the hard way here
flush_and_terminate(return_code)
else:
sys.exit(return_code)
@contextmanager
def time_stamper_thread(interval=10):
"""
Print regular time stamps into the build logs to find slow tests.
@param interval: time interval in seconds
"""
if not interval or interval < 0:
# Do nothing
yield
return
try:
_xrange = xrange
except NameError:
_xrange = range
import threading
import datetime
from time import sleep
interval = _xrange(interval * 4)
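    # The stamper loop below sleeps 0.25s per step, so "interval * 4" steps add
    # up to roughly `interval` seconds between stamps while the stop flag is
    # still checked four times per second.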
now = datetime.datetime.now
stop = False
# We capture stderr in some places.
# => make sure we write to the real (original) stderr of the test runner.
stderr = os.dup(2)
def write(s):
os.write(stderr, s if type(s) is bytes else s.encode('ascii'))
def time_stamper():
while True:
for _ in interval:
if stop:
return
sleep(1./4)
write('\n#### %s\n' % now())
thread = threading.Thread(target=time_stamper, name='time_stamper')
thread.setDaemon(True) # Py2 ...
thread.start()
try:
yield
finally:
stop = True
thread.join()
os.close(stderr)
def configure_cython(options):
global CompilationOptions, pyrex_default_options, cython_compile
from Cython.Compiler.Options import \
CompilationOptions, \
default_options as pyrex_default_options
from Cython.Compiler.Options import _directive_defaults as directive_defaults
from Cython.Compiler import Errors
Errors.LEVEL = 0 # show all warnings
from Cython.Compiler import Options
Options.generate_cleanup_code = 3 # complete cleanup code
from Cython.Compiler import DebugFlags
DebugFlags.debug_temp_code_comments = 1
pyrex_default_options['formal_grammar'] = options.use_formal_grammar
if options.profile:
directive_defaults['profile'] = True
if options.watermark:
import Cython.Compiler.Version
Cython.Compiler.Version.watermark = options.watermark
def save_coverage(coverage, options):
if options.coverage:
coverage.report(show_missing=0)
if options.coverage_xml:
coverage.xml_report(outfile="coverage-report.xml")
if options.coverage_html:
coverage.html_report(directory="coverage-report-html")
def runtests_callback(args):
options, cmd_args, shard_num = args
options.shard_num = shard_num
return runtests(options, cmd_args)
def runtests(options, cmd_args, coverage=None):
# faulthandler should be able to provide a limited traceback
# in the event of a segmentation fault. Hopefully better than Travis
    # just running on until it hits the timeout. Only available on Python 3.3+.
try:
import faulthandler
except ImportError:
pass # OK - not essential
else:
faulthandler.enable()
if sys.platform == "win32" and sys.version_info < (3, 6):
# enable Unicode console output, if possible
try:
import win_unicode_console
except ImportError:
pass
else:
win_unicode_console.enable()
WITH_CYTHON = options.with_cython
ROOTDIR = os.path.abspath(options.root_dir)
WORKDIR = os.path.abspath(options.work_dir)
if WITH_CYTHON:
configure_cython(options)
xml_output_dir = options.xml_output_dir
if options.shard_num > -1:
WORKDIR = os.path.join(WORKDIR, str(options.shard_num))
if xml_output_dir:
xml_output_dir = os.path.join(xml_output_dir, 'shard-%03d' % options.shard_num)
# RUN ALL TESTS!
UNITTEST_MODULE = "Cython"
UNITTEST_ROOT = os.path.join(os.path.dirname(__file__), UNITTEST_MODULE)
if WITH_CYTHON:
if os.path.exists(WORKDIR):
for path in os.listdir(WORKDIR):
if path in ("support", "Cy3"): continue
shutil.rmtree(os.path.join(WORKDIR, path), ignore_errors=True)
if not os.path.exists(WORKDIR):
os.makedirs(WORKDIR)
if options.shard_num <= 0:
sys.stderr.write("Python %s\n" % sys.version)
sys.stderr.write("\n")
if WITH_CYTHON:
sys.stderr.write("Running tests against Cython %s\n" % get_version())
else:
sys.stderr.write("Running tests without Cython.\n")
if options.for_debugging:
options.cleanup_workdir = False
options.cleanup_sharedlibs = False
options.fork = False
if WITH_CYTHON and include_debugger:
from Cython.Compiler.Options import default_options as compiler_default_options
compiler_default_options['gdb_debug'] = True
compiler_default_options['output_dir'] = os.getcwd()
if IS_PYPY:
if options.with_refnanny:
sys.stderr.write("Disabling refnanny in PyPy\n")
options.with_refnanny = False
if options.with_refnanny:
from pyximport.pyxbuild import pyx_to_dll
libpath = pyx_to_dll(os.path.join("Cython", "Runtime", "refnanny.pyx"),
build_in_temp=True,
pyxbuild_dir=os.path.join(WORKDIR, "support"))
sys.path.insert(0, os.path.split(libpath)[0])
CFLAGS.append("-DCYTHON_REFNANNY=1")
if options.limited_api:
CFLAGS.append("-DCYTHON_LIMITED_API=1")
CFLAGS.append('-Wno-unused-function')
if xml_output_dir and options.fork:
        # XML test output and forked test runs don't currently work together
sys.stderr.write("Disabling forked testing to support XML test output\n")
options.fork = False
if WITH_CYTHON:
sys.stderr.write("Using Cython language level %d.\n" % options.language_level)
test_bugs = False
if options.tickets:
for ticket_number in options.tickets:
test_bugs = True
cmd_args.append('ticket:%s' % ticket_number)
if not test_bugs:
for selector in cmd_args:
if selector.startswith('bugs'):
test_bugs = True
selectors = [ string_selector(r) for r in cmd_args ]
verbose_excludes = selectors or options.verbosity >= 2
if not selectors:
selectors = [ lambda x, tags=None: True ]
# Check which external modules are not present and exclude tests
    # which depend on them (by prefix)
missing_dep_excluder = MissingDependencyExcluder(EXT_DEP_MODULES)
version_dep_excluder = VersionDependencyExcluder(VER_DEP_MODULES)
exclude_selectors = [missing_dep_excluder, version_dep_excluder] # want to print msg at exit
try:
import IPython.core.release
if list(IPython.core.release._ver) < [1, 0, 0]:
raise ImportError
except (ImportError, AttributeError, TypeError):
exclude_selectors.append(RegExSelector('IPython'))
try:
raise ImportError("Jedi typer is currently broken, see GH#1845")
import jedi
if not ([0, 9] <= list(map(int, re.findall('[0-9]+', jedi.__version__ or '0')))):
raise ImportError
except (ImportError, AttributeError, TypeError):
exclude_selectors.append(RegExSelector('Jedi'))
if options.exclude:
exclude_selectors += [ string_selector(r) for r in options.exclude ]
if not COMPILER_HAS_INT128 or not IS_CPYTHON:
exclude_selectors += [RegExSelector('int128')]
if options.shard_num > -1:
exclude_selectors.append(ShardExcludeSelector(options.shard_num, options.shard_count))
if not test_bugs:
bug_files = [
('bugs.txt', True),
('pypy_bugs.txt', IS_PYPY),
('pypy2_bugs.txt', IS_PYPY and IS_PY2),
('pypy_crash_bugs.txt', IS_PYPY),
('pypy_implementation_detail_bugs.txt', IS_PYPY),
('limited_api_bugs.txt', options.limited_api),
('windows_bugs.txt', sys.platform == 'win32'),
('cygwin_bugs.txt', sys.platform == 'cygwin')
]
exclude_selectors += [
FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
verbose=verbose_excludes)
for bugs_file_name, condition in bug_files if condition
]
global COMPILER
if options.compiler:
COMPILER = options.compiler
selected_backends = [ name.strip() for name in options.backends.split(',') if name.strip() ]
backends = []
for backend in selected_backends:
if backend == 'c' and not options.use_c:
continue
elif backend == 'cpp' and not options.use_cpp:
continue
elif backend not in BACKENDS:
sys.stderr.write("Unknown backend requested: '%s' not one of [%s]\n" % (
backend, ','.join(BACKENDS)))
sys.exit(1)
backends.append(backend)
if options.shard_num <= 0:
sys.stderr.write("Backends: %s\n" % ','.join(backends))
languages = backends
if 'TRAVIS' in os.environ and sys.platform == 'darwin' and 'cpp' in languages:
bugs_file_name = 'travis_macos_cpp_bugs.txt'
exclude_selectors += [
FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
verbose=verbose_excludes)
]
if options.use_common_utility_dir:
common_utility_dir = os.path.join(WORKDIR, 'utility_code')
if not os.path.exists(common_utility_dir):
os.makedirs(common_utility_dir)
else:
common_utility_dir = None
sys.stderr.write("\n")
test_suite = unittest.TestSuite()
stats = Stats()
if options.unittests:
collect_unittests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
if options.doctests:
collect_doctests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
if options.filetests and languages:
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
options, options.pyregr, languages, test_bugs,
options.language_level, common_utility_dir,
options.pythran_dir, add_embedded_test=True, stats=stats)
test_suite.addTest(filetests.build_suite())
if options.examples and languages:
examples_workdir = os.path.join(WORKDIR, 'examples')
for subdirectory in glob.glob(os.path.join(options.examples_dir, "*/")):
filetests = TestBuilder(subdirectory, examples_workdir, selectors, exclude_selectors,
options, options.pyregr, languages, test_bugs,
options.language_level, common_utility_dir,
options.pythran_dir,
default_mode='compile', stats=stats)
test_suite.addTest(filetests.build_suite())
if options.system_pyregr and languages:
sys_pyregr_dir = os.path.join(sys.prefix, 'lib', 'python'+sys.version[:3], 'test')
if not os.path.isdir(sys_pyregr_dir):
sys_pyregr_dir = os.path.join(os.path.dirname(sys.executable), 'Lib', 'test') # source build
if os.path.isdir(sys_pyregr_dir):
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
options, True, languages, test_bugs,
sys.version_info[0], common_utility_dir, stats=stats)
sys.stderr.write("Including CPython regression tests in %s\n" % sys_pyregr_dir)
test_suite.addTest(filetests.handle_directory(sys_pyregr_dir, 'pyregr'))
if options.code_style and options.shard_num <= 0:
try:
import pycodestyle
except ImportError:
# Hack to make the exclusion visible.
missing_dep_excluder.tests_missing_deps.append('TestCodeFormat')
else:
test_suite.addTest(TestCodeFormat(options.cython_dir))
if xml_output_dir:
from Cython.Tests.xmlrunner import XMLTestRunner
if not os.path.exists(xml_output_dir):
try:
os.makedirs(xml_output_dir)
except OSError:
pass # concurrency issue?
test_runner = XMLTestRunner(output=xml_output_dir,
verbose=options.verbosity > 0)
if options.failfast:
sys.stderr.write("--failfast not supported with XML runner\n")
else:
text_runner_options = {}
if options.failfast:
text_runner_options['failfast'] = True
test_runner = unittest.TextTestRunner(verbosity=options.verbosity, **text_runner_options)
if options.pyximport_py:
from pyximport import pyximport
pyximport.install(pyimport=True, build_dir=os.path.join(WORKDIR, '_pyximport'),
load_py_module_on_import_failure=True, inplace=True)
try:
gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
except AttributeError:
pass # not available on PyPy
result = test_runner.run(test_suite)
if common_utility_dir and options.shard_num < 0 and options.cleanup_workdir:
shutil.rmtree(common_utility_dir)
if missing_dep_excluder.tests_missing_deps:
sys.stderr.write("Following tests excluded because of missing dependencies on your system:\n")
for test in missing_dep_excluder.tests_missing_deps:
sys.stderr.write(" %s\n" % test)
if options.with_refnanny:
import refnanny
sys.stderr.write("\n".join([repr(x) for x in refnanny.reflog]))
result_code = 0 if options.exit_ok else not result.wasSuccessful()
if xml_output_dir:
failure_output = ""
else:
failure_output = "".join(collect_failure_output(result))
return options.shard_num, stats, result_code, failure_output
def collect_failure_output(result):
"""Extract test error/failure output from a TextTestResult."""
failure_output = []
for flavour, errors in (("ERROR", result.errors), ("FAIL", result.failures)):
for test, err in errors:
failure_output.append("%s\n%s: %s\n%s\n%s\n" % (
result.separator1,
flavour, result.getDescription(test),
result.separator2,
err))
return failure_output
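# Illustrative sketch (not part of the original runner): collect_failure_output()
# only relies on the standard unittest TextTestResult API, where .errors and
# .failures are lists of (test, formatted_traceback) pairs. A hypothetical,
# self-contained demonstration of that contract:
def _demo_collect_failure_output():
    """Run a deliberately failing test and format it via collect_failure_output()."""
    import os
    import unittest
    class _Demo(unittest.TestCase):
        def test_fails(self):
            self.fail("expected demo failure")
    with open(os.devnull, "w") as stream:
        runner = unittest.TextTestRunner(stream=stream, verbosity=0)
        result = runner.run(
            unittest.defaultTestLoader.loadTestsFromTestCase(_Demo))
    return "".join(collect_failure_output(result))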
if __name__ == '__main__':
try:
main()
except Exception:
traceback.print_exc()
try:
check_thread_termination(ignore_seen=False)
except PendingThreadsError:
# normal program exit won't kill the threads, do it the hard way here
flush_and_terminate(1)
sys.exit(1)
# =========================== basic_ops.py ===========================
import json
from random import choice, randint
from threading import Thread
from BucketLib.BucketOperations import BucketHelper
from BucketLib.bucket import Bucket
from Cb_constants import constants, CbServer, DocLoading
from basetestcase import ClusterSetup
from cb_tools.cbepctl import Cbepctl
from cb_tools.cbstats import Cbstats
from cb_tools.mc_stat import McStat
from couchbase_helper.documentgenerator import doc_generator
from couchbase_helper.durability_helper import DurabilityHelper
from error_simulation.cb_error import CouchbaseError
from mc_bin_client import MemcachedClient, MemcachedError
from remote.remote_util import RemoteMachineShellConnection
from sdk_client3 import SDKClient
from sdk_exceptions import SDKException
from table_view import TableView
"""
Capture basic get, set operations, also the meta operations.
This is based on some 4.1.1 test which had separate
bugs with incr and delete with meta and I didn't see an obvious home for them.
This is small now but we will reactively add things
These may be parameterized by:
- full and value eviction
- DGM and non-DGM
"""
class basic_ops(ClusterSetup):
def setUp(self):
super(basic_ops, self).setUp()
self.create_bucket()
self.doc_ops = self.input.param("doc_ops", "").split(";")
self.observe_test = self.input.param("observe_test", False)
        # Scope/collection names can be the defaults or random ones created for the test
self.scope_name = self.input.param("scope", CbServer.default_scope)
self.collection_name = self.input.param("collection",
CbServer.default_collection)
# Create Scope/Collection with random names if not equal to default
if self.scope_name != CbServer.default_scope:
self.scope_name = self.bucket_util.get_random_name()
self.bucket_util.create_scope(self.cluster.master,
self.bucket_util.buckets[0],
{"name": self.scope_name})
if self.collection_name != CbServer.default_collection:
self.collection_name = self.bucket_util.get_random_name()
self.bucket_util.create_collection(self.cluster.master,
self.bucket_util.buckets[0],
self.scope_name,
{"name": self.collection_name,
"num_items": self.num_items})
self.log.info("Using scope::collection - '%s::%s'"
% (self.scope_name, self.collection_name))
# Update required num_items under default collection
self.bucket_util.buckets[0] \
.scopes[self.scope_name] \
.collections[self.collection_name] \
.num_items = self.num_items
self.durability_helper = DurabilityHelper(
self.log, len(self.cluster.nodes_in_cluster),
durability=self.durability_level,
replicate_to=self.replicate_to,
persist_to=self.persist_to)
# Create sdk_clients for pool
if self.sdk_client_pool:
self.log.info("Creating SDK client pool")
self.sdk_client_pool.create_clients(
self.bucket_util.buckets[0],
[self.cluster.master],
req_clients=self.sdk_pool_capacity,
compression_settings=self.sdk_compression)
self.bucket_util.print_bucket_stats()
self.log.info("==========Finished Basic_ops base setup========")
def tearDown(self):
super(basic_ops, self).tearDown()
def do_basic_ops(self):
KEY_NAME = 'key1'
KEY_NAME2 = 'key2'
self.log.info('Starting basic ops')
default_bucket = self.bucket_util.get_all_buckets()[0]
sdk_client = SDKClient([self.cluster.master],
default_bucket,
compression_settings=self.sdk_compression)
# mcd = client.memcached(KEY_NAME)
# MB-17231 - incr with full eviction
rc = sdk_client.incr(KEY_NAME, delta=1)
self.log.info('rc for incr: {0}'.format(rc))
# MB-17289 del with meta
rc = sdk_client.set(KEY_NAME, 0, 0,
json.dumps({'value': 'value2'}))
self.log.info('set is: {0}'.format(rc))
# cas = rc[1]
# wait for it to persist
persisted = 0
while persisted == 0:
opaque, rep_time, persist_time, persisted, cas = \
sdk_client.observe(KEY_NAME)
try:
rc = sdk_client.evict_key(KEY_NAME)
except MemcachedError as exp:
self.fail("Exception with evict meta - {0}".format(exp))
        CAS = 0xabcd
        # 'mcd' was undefined here in the original test; del_with_meta needs the
        # binary memcached client (same pattern as do_setWithMeta_twice below).
        mcd = MemcachedClient(self.cluster.master.ip,
                              constants.memcached_port)
        mcd.sasl_auth_plain(self.cluster.master.rest_username,
                            self.cluster.master.rest_password)
        mcd.bucket_select(default_bucket.name)
        try:
            # key, exp, flags, seqno, cas
            rc = mcd.del_with_meta(KEY_NAME2, 0, 0, 2, CAS)
except MemcachedError as exp:
self.fail("Exception with del_with meta - {0}".format(exp))
# Reproduce test case for MB-28078
def do_setWithMeta_twice(self):
mc = MemcachedClient(self.cluster.master.ip,
constants.memcached_port)
mc.sasl_auth_plain(self.cluster.master.rest_username,
self.cluster.master.rest_password)
mc.bucket_select('default')
try:
mc.setWithMeta('1', '{"Hello":"World"}', 3600, 0, 1,
0x1512a3186faa0000)
except MemcachedError as error:
self.log.info("<MemcachedError #%d ``%s''>"
% (error.status, error.message))
self.fail("Error on First setWithMeta()")
stats = mc.stats()
self.log.info('curr_items: {0} and curr_temp_items:{1}'
.format(stats['curr_items'], stats['curr_temp_items']))
self.sleep(5, "Wait before checking the stats")
stats = mc.stats()
self.log.info('curr_items: {0} and curr_temp_items:{1}'
.format(stats['curr_items'], stats['curr_temp_items']))
try:
mc.setWithMeta('1', '{"Hello":"World"}', 3600, 0, 1,
0x1512a3186faa0000)
except MemcachedError as error:
stats = mc.stats()
self.log.info('After 2nd setWithMeta(), curr_items: {} '
'and curr_temp_items: {}'
.format(stats['curr_items'],
stats['curr_temp_items']))
if int(stats['curr_temp_items']) == 1:
self.fail("Error on second setWithMeta(), "
"expected curr_temp_items to be 0")
else:
self.log.info("<MemcachedError #%d ``%s''>"
% (error.status, error.message))
def generate_docs_bigdata(self, docs_per_day, start=0,
document_size=1024000):
return doc_generator(self.key, start, docs_per_day,
key_size=self.key_size,
doc_size=document_size,
doc_type=self.doc_type,
target_vbucket=self.target_vbucket,
vbuckets=self.cluster_util.vbuckets,
randomize_doc_size=self.randomize_doc_size,
randomize_value=self.randomize_value)
def test_doc_size(self):
def check_durability_failures():
self.log.error(task.sdk_acked_curd_failed.keys())
self.log.error(task.sdk_exception_crud_succeed.keys())
self.assertTrue(
len(task.sdk_acked_curd_failed) == 0,
"Durability failed for docs: %s" % task.sdk_acked_curd_failed.keys())
            self.assertTrue(
                len(task.sdk_exception_crud_succeed) == 0,
                "CRUDs succeeded despite SDK exceptions for docs: %s"
                % task.sdk_exception_crud_succeed.keys())
"""
Basic tests for document CRUD operations using JSON docs
"""
doc_op = self.input.param("doc_op", None)
def_bucket = self.bucket_util.buckets[0]
ignore_exceptions = list()
retry_exceptions = list()
supported_d_levels = self.bucket_util.get_supported_durability_levels()
# Stat validation reference variables
verification_dict = dict()
verification_dict["ops_create"] = 0
verification_dict["ops_update"] = 0
verification_dict["ops_delete"] = 0
verification_dict["rollback_item_count"] = 0
verification_dict["sync_write_aborted_count"] = 0
verification_dict["sync_write_committed_count"] = 0
if self.target_vbucket and type(self.target_vbucket) is not list:
self.target_vbucket = [self.target_vbucket]
self.log.info("Creating doc_generator..")
# Load basic docs into bucket
doc_create = doc_generator(
self.key, 0, self.num_items, key_size=self.key_size,
doc_size=self.doc_size,
doc_type=self.doc_type, target_vbucket=self.target_vbucket,
vbuckets=self.cluster_util.vbuckets,
randomize_doc_size=self.randomize_doc_size,
randomize_value=self.randomize_value)
self.log.info("Loading {0} docs into the bucket: {1}"
.format(self.num_items, def_bucket))
task = self.task.async_load_gen_docs(
self.cluster, def_bucket, doc_create,
DocLoading.Bucket.DocOps.CREATE, 0,
batch_size=self.batch_size,
process_concurrency=self.process_concurrency,
replicate_to=self.replicate_to, persist_to=self.persist_to,
durability=self.durability_level,
compression=self.sdk_compression,
timeout_secs=self.sdk_timeout,
ryow=self.ryow,
check_persistence=self.check_persistence,
scope=self.scope_name,
collection=self.collection_name,
sdk_client_pool=self.sdk_client_pool)
self.task.jython_task_manager.get_task_result(task)
if self.ryow:
check_durability_failures()
# Retry doc_exception code
self.log.info("Validating failed doc's (if any) exceptions")
doc_op_info_dict = dict()
doc_op_info_dict[task] = self.bucket_util.get_doc_op_info_dict(
def_bucket, DocLoading.Bucket.DocOps.CREATE,
exp=0, replicate_to=self.replicate_to,
persist_to=self.persist_to, durability=self.durability_level,
timeout=self.sdk_timeout, time_unit="seconds",
ignore_exceptions=ignore_exceptions,
retry_exceptions=retry_exceptions)
self.bucket_util.verify_doc_op_task_exceptions(doc_op_info_dict,
self.cluster,
self.sdk_client_pool)
if len(doc_op_info_dict[task]["unwanted"]["fail"].keys()) != 0:
self.fail("Failures in retry doc CRUDs: {0}"
.format(doc_op_info_dict[task]["unwanted"]["fail"]))
self.log.info("Wait for ep_all_items_remaining to become '0'")
self.bucket_util._wait_for_stats_all_buckets()
# Update ref_val
verification_dict["ops_create"] += \
self.num_items - len(task.fail.keys())
# Validate vbucket stats
if self.durability_level in supported_d_levels:
verification_dict["sync_write_committed_count"] += self.num_items
failed = self.durability_helper.verify_vbucket_details_stats(
def_bucket, self.cluster_util.get_kv_nodes(),
vbuckets=self.cluster_util.vbuckets,
expected_val=verification_dict)
if failed:
self.fail("Cbstat vbucket-details verification failed")
# Verify initial doc load count
self.log.info("Validating doc_count in buckets")
self.bucket_util.validate_doc_count_as_per_collections(def_bucket)
self.log.info("Creating doc_generator for doc_op")
num_item_start_for_crud = int(self.num_items / 2)
doc_update = doc_generator(
self.key, 0, num_item_start_for_crud,
key_size=self.key_size,
doc_size=self.doc_size, doc_type=self.doc_type,
target_vbucket=self.target_vbucket,
vbuckets=self.cluster_util.vbuckets,
mutate=1,
randomize_doc_size=self.randomize_doc_size,
randomize_value=self.randomize_value)
if self.target_vbucket:
mutation_doc_count = len(doc_update.doc_keys)
else:
mutation_doc_count = (doc_update.end - doc_update.start
+ len(task.fail.keys()))
if doc_op == DocLoading.Bucket.DocOps.UPDATE:
self.log.info("Performing 'update' mutation over the docs")
task = self.task.async_load_gen_docs(
self.cluster, def_bucket, doc_update,
DocLoading.Bucket.DocOps.UPDATE, 0,
batch_size=self.batch_size,
process_concurrency=self.process_concurrency,
replicate_to=self.replicate_to, persist_to=self.persist_to,
durability=self.durability_level,
compression=self.sdk_compression,
timeout_secs=self.sdk_timeout,
ryow=self.ryow,
check_persistence=self.check_persistence,
scope=self.scope_name,
collection=self.collection_name,
sdk_client_pool=self.sdk_client_pool)
self.task.jython_task_manager.get_task_result(task)
verification_dict["ops_update"] += mutation_doc_count
if self.durability_level in supported_d_levels:
verification_dict["sync_write_committed_count"] \
+= mutation_doc_count
if self.ryow:
check_durability_failures()
# Read all the values to validate update operation
task = self.task.async_validate_docs(
self.cluster, def_bucket,
doc_update, DocLoading.Bucket.DocOps.UPDATE, 0,
batch_size=self.batch_size,
process_concurrency=self.process_concurrency,
scope=self.scope_name,
collection=self.collection_name,
sdk_client_pool=self.sdk_client_pool)
self.task.jython_task_manager.get_task_result(task)
elif doc_op == DocLoading.Bucket.DocOps.DELETE:
self.log.info("Performing 'delete' mutation over the docs")
task = self.task.async_load_gen_docs(
self.cluster, def_bucket, doc_update,
DocLoading.Bucket.DocOps.DELETE, 0,
batch_size=self.batch_size,
process_concurrency=self.process_concurrency,
replicate_to=self.replicate_to, persist_to=self.persist_to,
durability=self.durability_level,
compression=self.sdk_compression,
timeout_secs=self.sdk_timeout,
ryow=self.ryow, check_persistence=self.check_persistence,
scope=self.scope_name,
collection=self.collection_name,
sdk_client_pool=self.sdk_client_pool)
self.task.jython_task_manager.get_task_result(task)
if self.collection_name is None:
target_scope = CbServer.default_scope
target_collection = CbServer.default_collection
else:
target_scope = self.scope_name
target_collection = self.collection_name
def_bucket \
.scopes[target_scope] \
.collections[target_collection] \
.num_items -= (self.num_items - num_item_start_for_crud)
verification_dict["ops_delete"] += mutation_doc_count
if self.durability_level in supported_d_levels:
verification_dict["sync_write_committed_count"] \
+= mutation_doc_count
if self.ryow:
check_durability_failures()
# Read all the values to validate delete operation
task = self.task.async_validate_docs(
self.cluster, def_bucket, doc_update,
DocLoading.Bucket.DocOps.DELETE, 0,
batch_size=self.batch_size,
process_concurrency=self.process_concurrency,
sdk_client_pool=self.sdk_client_pool)
self.task.jython_task_manager.get_task_result(task)
else:
self.log.warning("Unsupported doc_operation")
self.log.info("Wait for ep_all_items_remaining to become '0'")
self.bucket_util._wait_for_stats_all_buckets()
failed = self.durability_helper.verify_vbucket_details_stats(
def_bucket, self.cluster_util.get_kv_nodes(),
vbuckets=self.cluster_util.vbuckets,
expected_val=verification_dict)
if failed:
self.fail("Cbstat vbucket-details verification failed")
self.log.info("Validating doc_count")
self.bucket_util.validate_doc_count_as_per_collections(def_bucket)
def test_large_doc_size(self):
        # Bucket size = 256MB; the test starts failing once the bucket fills to
        # ~236MB. With a document size of 2MB only ~221 docs fit, so load 250
        # docs with doc size >= 1MB. See MB-29333.
self.doc_size *= 1024*1024
gens_load = self.generate_docs_bigdata(
docs_per_day=self.num_items, document_size=self.doc_size)
for bucket in self.bucket_util.buckets:
task = self.task.async_load_gen_docs(
self.cluster, bucket, gens_load,
DocLoading.Bucket.DocOps.CREATE, 0,
batch_size=10, process_concurrency=8,
replicate_to=self.replicate_to, persist_to=self.persist_to,
durability=self.durability_level,
compression=self.sdk_compression,
timeout_secs=self.sdk_timeout,
sdk_client_pool=self.sdk_client_pool)
self.task.jython_task_manager.get_task_result(task)
        # Check that all the documents (250) are loaded within the default timeout
self.bucket_util.verify_stats_all_buckets(self.num_items)
def test_large_doc_20MB(self):
# test reproducer for MB-29258,
# Load a doc which is greater than 20MB
# with compression enabled and check if it fails
# check with compression_mode as active, passive and off
val_error = SDKException.ValueTooLargeException
gens_load = self.generate_docs_bigdata(
docs_per_day=1, document_size=(self.doc_size * 1024000))
for bucket in self.bucket_util.buckets:
task = self.task.async_load_gen_docs(
self.cluster, bucket, gens_load,
DocLoading.Bucket.DocOps.CREATE, 0,
batch_size=10, process_concurrency=8,
replicate_to=self.replicate_to, persist_to=self.persist_to,
durability=self.durability_level,
compression=self.sdk_compression,
timeout_secs=self.sdk_timeout,
sdk_client_pool=self.sdk_client_pool)
self.task.jython_task_manager.get_task_result(task)
if self.doc_size > 20:
if len(task.fail.keys()) == 0:
self.log_failure("No failures during large doc insert")
for doc_id, doc_result in task.fail.items():
if val_error not in str(doc_result["error"]):
self.log_failure("Invalid exception for key %s: %s"
% (doc_id, doc_result))
else:
if len(task.fail.keys()) != 0:
self.log_failure("Failures during large doc insert")
for bucket in self.bucket_util.buckets:
if self.doc_size > 20:
# failed with error "Data Too Big" when document size > 20MB
self.bucket_util.verify_stats_all_buckets(0)
else:
self.bucket_util.verify_stats_all_buckets(1)
gens_update = self.generate_docs_bigdata(
docs_per_day=1, document_size=(21 * 1024000))
task = self.task.async_load_gen_docs(
self.cluster, bucket, gens_update,
DocLoading.Bucket.DocOps.UPDATE, 0,
batch_size=10,
process_concurrency=8,
replicate_to=self.replicate_to,
persist_to=self.persist_to,
durability=self.durability_level,
compression=self.sdk_compression,
timeout_secs=self.sdk_timeout,
sdk_client_pool=self.sdk_client_pool)
self.task.jython_task_manager.get_task_result(task)
if len(task.fail.keys()) != 1:
self.log_failure("Large docs inserted for keys: %s"
% task.fail.keys())
if len(task.fail.keys()) == 0:
self.log_failure("No failures during large doc insert")
for key, crud_result in task.fail.items():
if SDKException.ValueTooLargeException \
not in str(crud_result["error"]):
self.log_failure("Unexpected error for key %s: %s"
% (key, crud_result["error"]))
for doc_id, doc_result in task.fail.items():
if val_error not in str(doc_result["error"]):
self.log_failure("Invalid exception for key %s: %s"
% (doc_id, doc_result))
self.bucket_util.verify_stats_all_buckets(1)
self.validate_test_failure()
def test_parallel_cruds(self):
data_op_dict = dict()
num_items = self.num_items
half_of_num_items = self.num_items / 2
supported_d_levels = self.bucket_util.get_supported_durability_levels()
exp_values_to_test = [0, 300, 10000, 12999]
# Initial doc_loading
initial_load = doc_generator(self.key, 0, self.num_items,
doc_size=self.doc_size)
task = self.task.async_load_gen_docs(
self.cluster, self.bucket_util.buckets[0], initial_load,
DocLoading.Bucket.DocOps.CREATE, 0,
batch_size=100, process_concurrency=8,
compression=self.sdk_compression,
timeout_secs=self.sdk_timeout,
sdk_client_pool=self.sdk_client_pool)
self.task.jython_task_manager.get_task_result(task)
# Create required doc_gens and doc_op task object
for op_index, doc_op in enumerate(self.doc_ops):
if doc_op == DocLoading.Bucket.DocOps.CREATE:
num_items += half_of_num_items
gen_start = self.num_items
gen_end = self.num_items + half_of_num_items
elif doc_op == DocLoading.Bucket.DocOps.DELETE:
gen_start = 0
gen_end = half_of_num_items
else:
gen_start = half_of_num_items
gen_end = self.num_items
d_level = ""
replicate_to = persist_to = 0
if self.observe_test:
if self.num_replicas > 0:
replicate_to = randint(1, self.num_replicas)
persist_to = randint(0, self.num_replicas + 1)
else:
d_level = choice(supported_d_levels)
doc_ttl = choice(exp_values_to_test)
self.log.info("Doc_op %s, range (%d, %d), ttl=%s, "
"replicate_to=%s, persist_to=%s, d_level=%s"
% (doc_op, gen_start, gen_end, doc_ttl,
replicate_to, persist_to, d_level))
# Required to handle similar doc_ops like create,create case
dict_key = "%s_%s" % (doc_op, op_index)
data_op_dict[dict_key] = dict()
data_op_dict[dict_key]["doc_gen"] = doc_generator(
self.key, gen_start, gen_end,
doc_size=self.doc_size,
mutation_type=doc_op)
data_op_dict[dict_key]["task"] = self.task.async_load_gen_docs(
self.cluster, self.bucket_util.buckets[0],
data_op_dict[dict_key]["doc_gen"], doc_op,
exp=doc_ttl,
compression=self.sdk_compression,
persist_to=persist_to, replicate_to=replicate_to,
durability=d_level, timeout_secs=self.sdk_timeout,
sdk_client_pool=self.sdk_client_pool,
process_concurrency=1, batch_size=1,
print_ops_rate=False, start_task=False,
task_identifier="%s_%d" % (doc_op, op_index))
# Start all tasks
for op_index, doc_op in enumerate(self.doc_ops):
dict_key = "%s_%s" % (doc_op, op_index)
self.task_manager.add_new_task(data_op_dict[dict_key]["task"])
# Wait for doc_ops to complete and validate final doc value result
for op_index, doc_op in enumerate(self.doc_ops):
dict_key = "%s_%s" % (doc_op, op_index)
self.task_manager.get_task_result(data_op_dict[dict_key]["task"])
self.log.info("%s task completed" % doc_op)
if data_op_dict[dict_key]["task"].fail:
self.log_failure("Doc_loading failed for %s: %s"
% (doc_op,
data_op_dict[dict_key]["task"].fail))
elif doc_op in [DocLoading.Bucket.DocOps.CREATE,
DocLoading.Bucket.DocOps.UPDATE,
DocLoading.Bucket.DocOps.REPLACE,
DocLoading.Bucket.DocOps.DELETE]:
# Docs could have expired during CRUD, will get KEY_ENOENT
if data_op_dict[dict_key]["task"].exp == exp_values_to_test[1]:
continue
suppress_err_tbl = False
if doc_op == DocLoading.Bucket.DocOps.DELETE:
suppress_err_tbl = True
self.log.info("Validating %s results" % doc_op)
# Read all the values to validate doc_operation values
task = self.task.async_validate_docs(
self.cluster, self.bucket_util.buckets[0],
data_op_dict[dict_key]["doc_gen"], doc_op, 0,
batch_size=self.batch_size,
process_concurrency=self.process_concurrency,
sdk_client_pool=self.sdk_client_pool,
suppress_error_table=suppress_err_tbl)
self.task.jython_task_manager.get_task_result(task)
self.validate_test_failure()
def test_diag_eval_curl(self):
# Check if diag/eval can be done only by local host
self.disable_diag_eval_on_non_local_host = \
self.input.param("disable_diag_eval_non_local", False)
port = self.cluster.master.port
        # Check that diag/eval works fine from localhost
cmd = []
cmd_base = 'curl http://{0}:{1}@localhost:{2}/diag/eval ' \
.format(self.cluster.master.rest_username,
self.cluster.master.rest_password, port)
command = cmd_base + '-X POST -d \'os:cmd("env")\''
cmd.append(command)
command = cmd_base + '-X POST ' \
'-d \'case file:read_file("/etc/passwd") ' \
'of {ok, B} -> io:format("~p~n", ' \
'[binary_to_term(B)]) end.\''
cmd.append(command)
shell = RemoteMachineShellConnection(self.cluster.master)
for command in cmd:
output, error = shell.execute_command(command)
self.assertNotEquals("API is accessible from localhost only",
output[0])
        # Enable allow_nonlocal_eval unless the test requires it to stay disabled
if not self.disable_diag_eval_on_non_local_host:
command = cmd_base + '-X POST -d \'ns_config:set(' \
'allow_nonlocal_eval, true).\''
_, _ = shell.execute_command(command)
        # Check that diag/eval over the node's IP address does not work
        # when allow_nonlocal_eval is disabled
cmd = []
cmd_base = 'curl http://{0}:{1}@{2}:{3}/diag/eval ' \
.format(self.cluster.master.rest_username,
self.cluster.master.rest_password,
self.cluster.master.ip, port)
command = cmd_base + '-X POST -d \'os:cmd("env")\''
cmd.append(command)
command = cmd_base + '-X POST ' \
'-d \'case file:read_file("/etc/passwd") ' \
'of {ok, B} -> io:format("~p~n", ' \
'[binary_to_term(B)]) end.\''
cmd.append(command)
for command in cmd:
output, error = shell.execute_command(command)
if self.disable_diag_eval_on_non_local_host:
self.assertEquals("API is accessible from localhost only",
output[0])
else:
self.assertNotEquals("API is accessible from localhost only",
output[0])
def test_MB_40967(self):
"""
1. Load initial docs into the bucket
2. Perform continuous reads until get_cmd stats breaks in
'cbstats timings' command
"""
total_gets = 0
max_gets = 2500000000
bucket = self.bucket_util.buckets[0]
doc_gen = doc_generator(self.key, 0, self.num_items,
doc_size=1)
create_task = self.task.async_load_gen_docs(
self.cluster, bucket, doc_gen, DocLoading.Bucket.DocOps.CREATE, 0,
batch_size=100, process_concurrency=self.process_concurrency,
timeout_secs=self.sdk_timeout)
self.task_manager.get_task_result(create_task)
cbstat = dict()
kv_nodes = self.cluster_util.get_kv_nodes()
for node in kv_nodes:
shell = RemoteMachineShellConnection(node)
cbstat[node] = Cbstats(shell)
self.log.info("Start doc_reads until total_gets cross: %s" % max_gets)
read_task = self.task.async_continuous_doc_ops(
self.cluster, bucket, doc_gen,
op_type=DocLoading.Bucket.DocOps.READ, batch_size=self.batch_size,
process_concurrency=self.process_concurrency,
timeout_secs=self.sdk_timeout)
self.sleep(60, "Wait for read task to start")
while total_gets < max_gets:
total_gets = 0
for node in kv_nodes:
output, error = cbstat[node].get_timings(bucket.name)
if error:
self.log_failure("Error during cbstat timings: %s" % error)
break
get_cmd_found = False
for line in output:
if "get_cmd_" in line:
if "get_cmd_mean" in line:
break
get_cmd_found = True
if not get_cmd_found:
self.log.error(output)
self.log_failure("cbstat timings get_cmd stats not found")
break
vb_details = cbstat[node].vbucket_details(bucket.name)
for _, vb_stats in vb_details.items():
total_gets += long(vb_stats["ops_get"])
if self.test_failure:
break
self.sleep(120, "Total_gets: %s, itr: %s" % (total_gets,
read_task.itr_count))
read_task.end_task()
self.task_manager.get_task_result(read_task)
# Close all shell connections
for node in kv_nodes:
cbstat[node].shellConn.disconnect()
self.validate_test_failure()
def test_MB_41510(self):
"""
1. Load initial docs into the bucket
2. Perform continuous reads
3. Perform 'mcstat reset' in parallel to the reads
4. Perform 'cbstats timings' command to read the current values
5. Validate there is no crash when stats are getting reset continuously
"""
def reset_mcstat(bucket_name):
mc_stat = dict()
for t_node in kv_nodes:
shell_conn = RemoteMachineShellConnection(t_node)
mc_stat[t_node] = McStat(shell_conn)
while not stop_thread:
for t_node in mc_stat.keys():
try:
mc_stat[t_node].reset(bucket_name)
except Exception as mcstat_err:
self.log_failure(mcstat_err)
if self.test_failure:
break
for t_node in mc_stat.keys():
mc_stat[t_node].shellConn.disconnect()
def get_timings(bucket_name):
cb_stat = dict()
for t_node in kv_nodes:
shell_conn = RemoteMachineShellConnection(t_node)
cb_stat[t_node] = Cbstats(shell_conn)
while not stop_thread:
for t_node in cb_stat.keys():
try:
cb_stat[t_node].get_timings(bucket_name)
except Exception as cbstat_err:
self.log_failure(cbstat_err)
if self.test_failure:
break
for t_node in cb_stat.keys():
cb_stat[t_node].shellConn.disconnect()
total_gets = 0
max_gets = 50000000
stop_thread = False
bucket = self.bucket_util.buckets[0]
cb_stat_obj = dict()
kv_nodes = self.cluster_util.get_kv_nodes()
for node in self.cluster_util.get_kv_nodes():
shell = RemoteMachineShellConnection(node)
cb_stat_obj[node] = Cbstats(shell)
doc_gen = doc_generator(self.key, 0, self.num_items, doc_size=1)
create_task = self.task.async_load_gen_docs(
self.cluster, bucket, doc_gen, DocLoading.Bucket.DocOps.CREATE, 0,
batch_size=500, process_concurrency=self.process_concurrency,
timeout_secs=self.sdk_timeout)
self.task_manager.get_task_result(create_task)
mc_stat_reset_thread = Thread(target=reset_mcstat, args=[bucket.name])
get_timings_thread = Thread(target=get_timings, args=[bucket.name])
mc_stat_reset_thread.start()
get_timings_thread.start()
read_task = self.task.async_continuous_doc_ops(
self.cluster, bucket, doc_gen,
op_type=DocLoading.Bucket.DocOps.READ,
batch_size=self.batch_size,
process_concurrency=self.process_concurrency,
timeout_secs=self.sdk_timeout)
while total_gets < max_gets:
total_gets = 0
try:
for node in cb_stat_obj.keys():
vb_details = cb_stat_obj[node].vbucket_details(bucket.name)
for _, vb_stats in vb_details.items():
total_gets += long(vb_stats["ops_get"])
except Exception as err:
self.log_failure(err)
self.log.info("Total gets: %s" % total_gets)
result = self.check_coredump_exist(self.servers,
force_collect=True)
if result is True:
self.log_failure("Cb_logs validation failed")
break
elif self.test_failure:
break
self.sleep(60, "Wait before next check")
stop_thread = True
read_task.end_task()
mc_stat_reset_thread.join()
get_timings_thread.join()
# Close all shell connections
for node in cb_stat_obj.keys():
cb_stat_obj[node].shellConn.disconnect()
self.validate_test_failure()
def test_MB_41255(self):
def create_docs_with_xattr():
value = {'val': 'a' * self.doc_size}
xattr_kv = ["field", "value"]
while not stop_loader:
t_key = "%s-%s" % (self.key, self.num_items)
crud_result = client.crud(DocLoading.Bucket.DocOps.CREATE,
t_key, value, timeout=60)
if crud_result["status"] is False:
self.log_failure("Create key %s failed: %s"
% (t_key, crud_result["error"]))
break
self.num_items += 1
client.crud("subdoc_insert", t_key, xattr_kv, xattr=True)
nodes_data = dict()
stop_loader = False
non_resident_keys = list()
non_resident_keys_len = 0
self.num_items = 0
max_keys_to_del = 250
self.active_resident_threshold = \
int(self.input.param("active_resident_threshold", 99))
bucket = self.bucket_util.buckets[0]
for node in self.cluster_util.get_kv_nodes():
nodes_data[node] = dict()
nodes_data[node]["shell"] = RemoteMachineShellConnection(node)
nodes_data[node]["cbstats"] = Cbstats(nodes_data[node]["shell"])
nodes_data[node]["active_vbs"] = nodes_data[node][
"cbstats"].vbucket_list(bucket.name, "active")
nodes_data[node]["replica_vbs"] = nodes_data[node][
"cbstats"].vbucket_list(bucket.name, "replica")
bucket_helper = BucketHelper(self.cluster.master)
client = SDKClient([self.cluster.master], bucket)
self.log.info("Loading documents until %s%% DGM is achieved"
% self.active_resident_threshold)
dgm_thread = Thread(target=create_docs_with_xattr)
dgm_thread.start()
# Run doc_loading until the targeted DGM value is hit
while not stop_loader:
dgm_value = bucket_helper.fetch_bucket_stats(bucket.name)["op"][
"samples"]["vb_active_resident_items_ratio"][-1]
if dgm_value <= self.active_resident_threshold:
self.log.info("DGM value: %s" % dgm_value)
stop_loader = True
dgm_thread.join()
self.log.info("Loaded %s documents" % self.num_items)
# Wait for ep_engine_queue size to become '0'
self.bucket_util._wait_for_stats_all_buckets()
# Fetch evicted keys
self.log.info("Fetching keys evicted from replica vbs")
for doc_index in range(self.num_items):
key = "%s-%s" % (self.key, doc_index)
vb_for_key = self.bucket_util.get_vbucket_num_for_key(key)
for node, n_data in nodes_data.items():
if vb_for_key in n_data["replica_vbs"]:
stat = n_data["cbstats"].vkey_stat(bucket.name, key,
vbucket_num=vb_for_key)
if stat["is_resident"] == "false":
non_resident_keys.append(key)
non_resident_keys_len += 1
break
if non_resident_keys_len >= max_keys_to_del:
break
self.log.info("Non-resident key count: %d" % non_resident_keys_len)
# Start rebalance-out operation
rebalance_out = self.task.async_rebalance(
self.cluster.servers[0:self.nodes_init], [],
[self.cluster.servers[-1]])
self.sleep(10, "Wait for rebalance to start")
# Start deleting the evicted docs in parallel to rebalance task
self.log.info("Deleting evicted keys")
for key in non_resident_keys:
result = client.crud(DocLoading.Bucket.DocOps.DELETE, key)
if result["status"] is False:
self.log_failure("Key %s deletion failed: %s"
% (key, result["error"]))
# Wait for rebalance to complete
self.task_manager.get_task_result(rebalance_out)
# Wait for ep_engine_queue size to become '0'
self.bucket_util._wait_for_stats_all_buckets()
# Trigger compaction
self.bucket_util._run_compaction(number_of_times=1)
# Read all deleted keys (include replica read) to validate
for key in non_resident_keys:
result = client.get_from_all_replicas(key)
if result:
self.log_failure("Key '%s' exists on %d replica(s)"
% (key, len(result)))
# Close SDK and shell connections
client.close()
for node in nodes_data.keys():
nodes_data[node]["shell"].disconnect()
self.assertTrue(rebalance_out.result, "Rebalance_out failed")
self.bucket_util.verify_stats_all_buckets(self.num_items
- non_resident_keys_len)
self.validate_test_failure()
def test_MB_41405(self):
"""
1. Pick random vbucket number
2. Create, Delete doc_keys and validate on_disk_deleted counter moves
3. Fetch bloom_filter_size during first delete op and run_compaction
4. Create-delete 10K more items and run compaction again
5. Make sure current bloom_filter_size is > the value during step#3
"""
def validate_crud_result(op_type, doc_key, crud_result):
if crud_result["status"] is False:
self.log_failure("Key %s %s failed: %s"
% (doc_key, op_type, crud_result["error"]))
on_disk_deletes = 0
bloom_filter_size = None
bucket = self.bucket_util.buckets[0]
target_vb = choice(range(self.cluster_util.vbuckets))
vb_str = str(target_vb)
doc_gen = doc_generator(self.key, 0, self.num_items,
target_vbucket=[target_vb])
target_node = None
nodes_data = dict()
for node in self.cluster_util.get_kv_nodes():
nodes_data[node] = dict()
nodes_data[node]["shell"] = RemoteMachineShellConnection(node)
nodes_data[node]["cbstats"] = Cbstats(nodes_data[node]["shell"])
nodes_data[node]["active_vbs"] = nodes_data[node][
"cbstats"].vbucket_list(bucket.name, "active")
if target_vb in nodes_data[node]["active_vbs"]:
target_node = node
# Open SDK client for doc_ops
client = SDKClient([self.cluster.master], bucket)
self.log.info("Testing using vbucket %s" % target_vb)
while doc_gen.has_next():
key, val = doc_gen.next()
vb_for_key = self.bucket_util.get_vbucket_num_for_key(key)
# Create and delete a key
result = client.crud(DocLoading.Bucket.DocOps.CREATE, key, val)
validate_crud_result(DocLoading.Bucket.DocOps.CREATE, key, result)
result = client.crud(DocLoading.Bucket.DocOps.DELETE, key, val)
validate_crud_result(DocLoading.Bucket.DocOps.DELETE, key, result)
on_disk_deletes += 1
# Wait for ep_queue_size to become zero
self.bucket_util._wait_for_stats_all_buckets()
dcp_vb_takeover_stats = nodes_data[target_node][
"cbstats"].dcp_vbtakeover(bucket.name, vb_for_key, key)
if dcp_vb_takeover_stats["on_disk_deletes"] != on_disk_deletes:
self.log_failure("Stat on_disk_deleted mismatch. "
"Actual :: %s, Expected :: %s"
% (dcp_vb_takeover_stats["on_disk_deletes"],
on_disk_deletes))
# Record bloom filter and perform compaction for the first item
if bloom_filter_size is None:
vb_details_stats = nodes_data[target_node][
"cbstats"].vbucket_details(bucket.name)
bloom_filter_size = \
vb_details_stats[vb_str]["bloom_filter_size"]
self.log.info("Bloom filter size before compaction: %s"
% bloom_filter_size)
self.bucket_util._run_compaction(number_of_times=1)
vb_details_stats = nodes_data[target_node][
"cbstats"].vbucket_details(bucket.name)
bloom_filter_size_after_compaction = \
vb_details_stats[vb_str]["bloom_filter_size"]
self.log.info("Bloom filter size after compaction: %s"
% bloom_filter_size_after_compaction)
# Create and delete 10K more items to validate bloom_filter_size
doc_gen = doc_generator(self.key, self.num_items, self.num_items+10000,
target_vbucket=[target_vb])
self.log.info("Loading 10K items for bloom_filter_size validation")
while doc_gen.has_next():
key, val = doc_gen.next()
# Create and delete a key
client.crud(DocLoading.Bucket.DocOps.CREATE, key, val)
client.crud(DocLoading.Bucket.DocOps.DELETE, key, val)
# self.bucket_util._wait_for_stats_all_buckets()
self.bucket_util._run_compaction(number_of_times=1)
self.sleep(5, "Compaction complete")
vb_details_stats = nodes_data[target_node][
"cbstats"].vbucket_details(bucket.name)
bloom_filter_size_after_compaction = \
vb_details_stats[vb_str]["bloom_filter_size"]
self.log.info("Bloom filter size after compaction: %s"
% bloom_filter_size_after_compaction)
if int(bloom_filter_size_after_compaction) <= int(bloom_filter_size):
self.log_failure("Bloom filter init_size <= curr_size")
# Close SDK and shell connections
client.close()
for node in nodes_data.keys():
nodes_data[node]["shell"].disconnect()
self.validate_test_failure()
def test_MB_43055(self):
"""
1. Load till low_wm
2. Make non_io_threads=0
3. Load few more docs and so that we do exceed the high_wm,
this schedules the item pager
4. Delete few docs to go below low_wm
5. Make non_io_threads=default. Now the item pager tries to run,
but finds mem_used < low_wat so exits without paging anything,
triggering the bug
6. Load docs to cross high_wm
7. Confirm that the item pager never runs successfully,
even though the memory usage is back above the high watermark
"""
def perform_doc_op(op_type):
start = self.num_items
if op_type == DocLoading.Bucket.DocOps.DELETE:
start = self.del_items
doc_gen = doc_generator(self.key, start, start+load_batch,
doc_size=self.doc_size)
doc_op_task = self.task.async_load_gen_docs(
self.cluster, bucket, doc_gen, op_type,
timeout_secs=self.sdk_timeout,
print_ops_rate=False,
skip_read_on_error=True,
suppress_error_table=True,
batch_size=100,
process_concurrency=8,
sdk_client_pool=self.sdk_client_pool)
self.task_manager.get_task_result(doc_op_task)
self.bucket_util._wait_for_stats_all_buckets()
if op_type == DocLoading.Bucket.DocOps.CREATE:
self.num_items += load_batch
elif op_type == DocLoading.Bucket.DocOps.DELETE:
self.del_items += load_batch
def display_bucket_water_mark_values(t_node):
wm_tbl.rows = list()
a_stats = nodes_data[t_node]["cbstat"].all_stats(bucket.name)
wm_tbl.add_row(["High water_mark", a_stats["ep_mem_high_wat"],
a_stats["ep_mem_high_wat_percent"]])
wm_tbl.add_row(["Low water_mark", a_stats["ep_mem_low_wat"],
a_stats["ep_mem_low_wat_percent"]])
wm_tbl.add_row(["Num pager runs", a_stats["ep_num_pager_runs"],
""])
wm_tbl.add_row(["Memory Used", a_stats["mem_used"],
""])
wm_tbl.display("Memory stats")
return a_stats
stats = None
nodes_data = dict()
self.num_items = 0
self.del_items = 0
load_batch = 5000
        # To provide a little 'headroom' while loading/deleting docs in batches
mem_buffer_gap = 10000
low_wm_reached = False
high_wm_reached = False
wm_tbl = TableView(self.log.info)
bucket = self.bucket_util.buckets[0]
wm_tbl.set_headers(["Stat", "Memory Val", "Percent"])
kv_nodes = self.cluster_util.get_kv_nodes()
for node in kv_nodes:
shell = RemoteMachineShellConnection(node)
nodes_data[node] = dict()
nodes_data[node]["shell"] = shell
nodes_data[node]["cbstat"] = Cbstats(shell)
nodes_data[node]["eviction_start"] = False
nodes_data[node]["active_vbs"] = nodes_data[node][
"cbstat"].vbucket_list(bucket.name, "active")
nodes_data[node]["replica_vbs"] = nodes_data[node][
"cbstat"].vbucket_list(bucket.name, "replica")
target_node = choice(kv_nodes)
cbepctl = Cbepctl(nodes_data[target_node]["shell"])
self.log.info("Loading till low_water_mark is reached")
while not low_wm_reached:
perform_doc_op(DocLoading.Bucket.DocOps.CREATE)
stats = nodes_data[target_node]["cbstat"].all_stats(bucket.name)
if int(stats["mem_used"]) > int(stats["ep_mem_low_wat"]):
display_bucket_water_mark_values(target_node)
self.log.info("Low water_mark reached")
low_wm_reached = True
if int(stats["ep_num_pager_runs"]) != 0:
self.log_failure("ItemPager has run while loading")
else:
self.log.info("Setting num_nonio_threads=0")
cbepctl.set(bucket.name,
"flush_param", "num_nonio_threads", 0)
self.log.info("Loading docs till high_water_mark is reached")
while not high_wm_reached:
perform_doc_op(DocLoading.Bucket.DocOps.CREATE)
stats = nodes_data[target_node]["cbstat"].all_stats(bucket.name)
if int(stats["mem_used"]) > int(stats["ep_mem_high_wat"]):
display_bucket_water_mark_values(target_node)
self.log.info("High water_mark reached")
high_wm_reached = True
if not high_wm_reached:
self.log_failure("Failed to reach high_wm with the given load")
if int(stats["ep_num_pager_runs"]) != 0:
self.log_failure("ItemPager has run with non_io_threads=0")
self.log.info("Delete docs until the mem_used goes below low_wm")
low_wm_reached = False
while not low_wm_reached and self.del_items < self.num_items:
perform_doc_op(DocLoading.Bucket.DocOps.DELETE)
stats = nodes_data[target_node]["cbstat"].all_stats(bucket.name)
if int(stats["mem_used"]) < (int(stats["ep_mem_low_wat"])
- mem_buffer_gap):
low_wm_reached = True
display_bucket_water_mark_values(target_node)
self.log.info("Low water_mark reached")
if int(stats["ep_num_pager_runs"]) != 0:
self.log_failure("ItemPager ran after del_op & non_io_threads=0")
self.log.info("Setting num_nonio_threads=8")
cbepctl.set(bucket.name, "flush_param", "num_nonio_threads", 8)
self.sleep(10, "Wait after setting num_nonio_threads=8")
stats = display_bucket_water_mark_values(target_node)
if int(stats["ep_num_pager_runs"]) != 0:
self.log_failure("ItemPager run with lower_wm levels")
self.log.info("Loading docs till high_water_mark is reached")
high_wm_reached = False
while not high_wm_reached:
perform_doc_op(DocLoading.Bucket.DocOps.CREATE)
stats = nodes_data[target_node]["cbstat"].all_stats(bucket.name)
if int(stats["mem_used"]) > (int(stats["ep_mem_high_wat"])
+ mem_buffer_gap):
high_wm_reached = True
self.log.info("High water_mark reached")
retry_count = 0
while retry_count < 5:
retry_count += 1
stats = display_bucket_water_mark_values(target_node)
if int(stats["ep_num_pager_runs"]) > 1:
break
self.sleep(1, "ep_num_pager_runs=%s, expected > 1"
% stats["ep_num_pager_runs"])
else:
self.log_failure("ItemPager not triggered with high_wm")
elif int(stats["ep_num_pager_runs"]) > 5:
high_wm_reached = True
self.log.info("ep_num_pager_runs started running")
# Closing all shell connections
for node in nodes_data.keys():
nodes_data[node]["shell"].disconnect()
self.validate_test_failure()
def test_MB_42918(self):
"""
- Add item for some key
- Stop persistence
- Delete item
- Do durable write with PersistMajority for same key
        - Doc get should return KEY_ENOENT
"""
doc_val = {"field": "val"}
bucket = self.bucket_util.buckets[0]
shell = RemoteMachineShellConnection(self.cluster.master)
cb_err = CouchbaseError(self.log, shell)
client_1 = SDKClient([self.cluster.master], bucket)
client_2 = SDKClient([self.cluster.master], bucket)
# Perform create-delete to populate bloom-filter
client_1.crud(DocLoading.Bucket.DocOps.CREATE, self.key, doc_val)
self.bucket_util._wait_for_stats_all_buckets()
client_1.crud(DocLoading.Bucket.DocOps.DELETE, self.key)
self.bucket_util._wait_for_stats_all_buckets()
self.bucket_util.verify_stats_all_buckets(0)
# Create the document using async-write
client_1.crud(DocLoading.Bucket.DocOps.CREATE, self.key, doc_val)
self.bucket_util._wait_for_stats_all_buckets()
self.bucket_util.verify_stats_all_buckets(1)
        # Stop persistence and delete the document
cb_err.create(CouchbaseError.STOP_PERSISTENCE, bucket.name)
self.sleep(2, "Wait after stop_persistence")
client_1.crud(DocLoading.Bucket.DocOps.DELETE, self.key)
# Get doc to make sure we see not_found exception
result = client_1.crud(DocLoading.Bucket.DocOps.READ, self.key)
if SDKException.DocumentNotFoundException not in str(result["error"]):
self.log.info("Result: %s" % result)
self.log_failure("Invalid exception with deleted_doc: %s"
% result["error"])
# Perform sync-write to create doc prepare in hash-table
create_thread = Thread(
target=client_1.crud,
args=[DocLoading.Bucket.DocOps.CREATE, self.key, doc_val],
kwargs={"durability": Bucket.DurabilityLevel.PERSIST_TO_MAJORITY,
"timeout": 15})
create_thread.start()
self.sleep(5, "Wait to make sure prepare is generated")
# Doc read should return not_found
result = client_2.crud(DocLoading.Bucket.DocOps.READ, self.key)
if SDKException.DocumentNotFoundException not in str(result["error"]):
self.log.info("Result: %s" % result)
self.log_failure("Invalid exception with prepared doc: %s"
% result["error"])
result = client_2.get_from_all_replicas(self.key)
if result:
self.log_failure("Able to read deleted value: %s" % result)
create_thread.join()
cb_err.revert(CouchbaseError.STOP_MEMCACHED, bucket.name)
# Close shell and SDK connections
client_1.close()
client_2.close()
shell.disconnect()
self.validate_test_failure()
def test_MB_41942(self):
"""
1. Load huge dataset into bucket with replica=1
2. Set doc_ttl for few docs on active node with persistence stopped
3. Kill memcached during loading
4. Set expiry pager to run during warmup
5. Kill memcached again such that kill happens before warmup completes
6. Validate high_seqno and uuid
"""
bucket = self.bucket_util.buckets[0]
target_node = choice(self.cluster_util.get_kv_nodes())
self.log.info("Target node %s" % target_node.ip)
shell = RemoteMachineShellConnection(target_node)
cb_stat = Cbstats(shell)
cb_error = CouchbaseError(self.log, shell)
# Load initial data set into bucket
self.log.info("Loading %s docs into bucket" % self.num_items)
doc_gen = doc_generator(self.key, 0, self.num_items,
doc_size=10000)
load_task = self.task.async_load_gen_docs(
self.cluster, bucket, doc_gen,
DocLoading.Bucket.DocOps.CREATE, 0,
batch_size=500, process_concurrency=8,
replicate_to=self.replicate_to, persist_to=self.persist_to,
durability=self.durability_level,
compression=self.sdk_compression,
timeout_secs=self.sdk_timeout,
sdk_client_pool=self.sdk_client_pool,
print_ops_rate=False)
self.task_manager.get_task_result(load_task)
self.bucket_util._wait_for_stats_all_buckets()
self.durability_level = Bucket.DurabilityLevel.MAJORITY
active_vbs = cb_stat.vbucket_list(bucket.name,
vbucket_type="active")
doc_gen = doc_generator(self.key, 0, 10000,
doc_size=1,
target_vbucket=active_vbs)
# Load with doc_ttl set
self.log.info("Setting doc_ttl=1 for %s docs" % 10000)
load_task = self.task.async_load_gen_docs(
self.cluster, bucket, doc_gen,
DocLoading.Bucket.DocOps.UPDATE, exp=1,
batch_size=2000, process_concurrency=5,
durability=self.durability_level,
timeout_secs=30,
sdk_client_pool=self.sdk_client_pool,
skip_read_on_error=True,
print_ops_rate=False)
self.task_manager.get_task_result(load_task)
# Read task to trigger expiry_purger
load_task = self.task.async_load_gen_docs(
self.cluster, bucket, doc_gen,
DocLoading.Bucket.DocOps.READ,
batch_size=500, process_concurrency=8,
timeout_secs=30,
sdk_client_pool=self.sdk_client_pool,
suppress_error_table=True,
start_task=False,
print_ops_rate=False)
retry = 0
before_stats = None
warmup_running = False
# Kill memcached during ttl load
cb_error.create(CouchbaseError.KILL_MEMCACHED)
while not warmup_running and retry < 10:
try:
warmup_stats = cb_stat.warmup_stats(bucket.name)
self.log.info("Current warmup state %s:%s"
% (warmup_stats["ep_warmup_thread"],
warmup_stats["ep_warmup_state"]))
if warmup_stats["ep_warmup_thread"] != "complete":
warmup_running = True
while before_stats is None:
before_stats = cb_stat.vbucket_details(bucket.name)
self.log.info("Starting read task to trigger purger")
self.task_manager.add_new_task(load_task)
warmup_stats = cb_stat.warmup_stats(bucket.name)
cb_error.create(CouchbaseError.KILL_MEMCACHED)
self.log.info("Warmup state during mc_kill %s:%s"
% (warmup_stats["ep_warmup_thread"],
warmup_stats["ep_warmup_state"]))
if warmup_stats["ep_warmup_thread"] == "complete":
self.log_failure("Can't trust the outcome, "
"bucket warmed_up before mc_kill")
self.task_manager.get_task_result(load_task)
except Exception:
pass
finally:
retry += 1
self.sleep(0.3)
while True:
try:
after_stats = cb_stat.vbucket_details(bucket.name)
break
except Exception:
pass
self.log.info("Validating high_seqno/uuid from vbucket-details")
for vb_num, stats in before_stats.items():
t_stat = "high_seqno"
pre_kill_stat = before_stats[vb_num]
post_kill_stat = after_stats[vb_num]
if int(pre_kill_stat[t_stat]) > int(post_kill_stat[t_stat]):
self.log_failure("%s::%s - %s > %s"
% (vb_num, t_stat,
pre_kill_stat[t_stat],
post_kill_stat[t_stat]))
t_stat = "uuid"
if vb_num in active_vbs \
and pre_kill_stat[t_stat] == post_kill_stat[t_stat]:
self.log_failure("%s %s: %s == %s"
% (vb_num, t_stat,
pre_kill_stat[t_stat],
post_kill_stat[t_stat]))
shell.disconnect()
self.validate_test_failure()
def verify_stat(self, items, value="active"):
mc = MemcachedClient(self.cluster.master.ip,
constants.memcached_port)
mc.sasl_auth_plain(self.cluster.master.rest_username,
self.cluster.master.rest_password)
mc.bucket_select('default')
stats = mc.stats()
self.assertEquals(stats['ep_compression_mode'], value)
self.assertEquals(int(stats['ep_item_compressor_num_compressed']),
items)
self.assertNotEquals(int(stats['vb_active_itm_memory']),
int(stats['vb_active_itm_memory_uncompressed']))
def test_compression_active_and_off(self):
"""
test reproducer for MB-29272,
Load some documents with compression mode set to active
get the cbstats
change compression mode to off and wait for minimum 250ms
Load some more documents and check the compression is not done
epengine.basic_ops.basic_ops.test_compression_active_and_off,items=10000,compression_mode=active
:return:
"""
# Load some documents with compression mode as active
gen_create = doc_generator("eviction1_",
start=0,
end=self.num_items,
key_size=self.key_size,
doc_size=self.doc_size,
doc_type=self.doc_type,
vbuckets=self.cluster_util.vbuckets,
randomize_doc_size=self.randomize_doc_size,
randomize_value=self.randomize_value)
gen_create2 = doc_generator("eviction2_",
start=0,
end=self.num_items,
key_size=self.key_size,
doc_size=self.doc_size,
doc_type=self.doc_type,
vbuckets=self.cluster_util.vbuckets,
randomize_doc_size=self.randomize_doc_size,
randomize_value=self.randomize_value)
def_bucket = self.bucket_util.get_all_buckets()[0]
task = self.task.async_load_gen_docs(
self.cluster, def_bucket, gen_create,
DocLoading.Bucket.DocOps.CREATE, 0,
batch_size=10, process_concurrency=8,
replicate_to=self.replicate_to, persist_to=self.persist_to,
durability=self.durability_level,
compression=self.sdk_compression,
timeout_secs=self.sdk_timeout,
sdk_client_pool=self.sdk_client_pool)
self.task.jython_task_manager.get_task_result(task)
self.bucket_util._wait_for_stats_all_buckets()
self.bucket_util.verify_stats_all_buckets(self.num_items)
remote = RemoteMachineShellConnection(self.cluster.master)
for bucket in self.bucket_util.buckets:
# change compression mode to off
output, _ = remote.execute_couchbase_cli(
cli_command='bucket-edit', cluster_host="localhost:8091",
user=self.cluster.master.rest_username,
password=self.cluster.master.rest_password,
options='--bucket=%s --compression-mode off' % bucket.name)
self.assertTrue(' '.join(output).find('SUCCESS') != -1,
'compression mode set to off')
            # Sleep for 10 sec (the minimum required wait is 250ms)
self.sleep(10)
# Load data and check stats to see compression
# is not done for newly added data
task = self.task.async_load_gen_docs(
self.cluster, def_bucket, gen_create2,
DocLoading.Bucket.DocOps.CREATE, 0,
batch_size=10, process_concurrency=8,
replicate_to=self.replicate_to, persist_to=self.persist_to,
durability=self.durability_level,
compression=self.sdk_compression,
timeout_secs=self.sdk_timeout,
sdk_client_pool=self.sdk_client_pool)
self.task.jython_task_manager.get_task_result(task)
self.bucket_util._wait_for_stats_all_buckets()
self.bucket_util.verify_stats_all_buckets(self.num_items*2)
def MB36948(self):
node_to_stop = self.servers[0]
self.log.info("Adding index/query node")
self.task.rebalance([self.cluster.master], [self.servers[2]], [],
services=["n1ql,index"])
self.log.info("Creating SDK client connection")
client = SDKClient([self.cluster.master],
self.bucket_util.buckets[0],
compression_settings=self.sdk_compression)
self.log.info("Stopping memcached on: %s" % node_to_stop)
ssh_conn = RemoteMachineShellConnection(node_to_stop)
err_sim = CouchbaseError(self.log, ssh_conn)
err_sim.create(CouchbaseError.STOP_MEMCACHED)
result = client.crud(DocLoading.Bucket.DocOps.CREATE,
"abort1", "abort1_val")
if not result["status"]:
self.log_failure("Async SET failed")
result = client.crud(DocLoading.Bucket.DocOps.UPDATE,
"abort1", "abort1_val",
durability=self.durability_level,
timeout=3, time_unit="seconds")
if result["status"]:
self.log_failure("Sync write succeeded")
if SDKException.DurabilityAmbiguousException not in result["error"]:
self.log_failure("Invalid exception for sync_write: %s" % result)
self.log.info("Resuming memcached on: %s" % node_to_stop)
err_sim.revert(CouchbaseError.STOP_MEMCACHED)
self.bucket_util._wait_for_stats_all_buckets()
self.bucket_util.verify_stats_all_buckets(1)
self.log.info("Closing ssh & SDK connections")
ssh_conn.disconnect()
client.close()
self.validate_test_failure()
def do_get_random_key(self):
        # MB-31548: get_random_key gets hung sometimes.
mc = MemcachedClient(self.cluster.master.ip,
constants.memcached_port)
mc.sasl_auth_plain(self.cluster.master.rest_username,
self.cluster.master.rest_password)
mc.bucket_select('default')
count = 0
while count < 1000000:
count += 1
try:
mc.get_random_key()
except MemcachedError as error:
self.fail("<MemcachedError #%d ``%s''>"
% (error.status, error.message))
if count % 1000 == 0:
self.log.info('The number of iteration is {}'.format(count))
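# Illustrative only (nothing above uses this): the SDKClient.crud() calls in
# this file consistently return a dict carrying at least a boolean "status"
# and an "error" payload, which is the convention behind checks such as
# `if result["status"] is False: ...`. A minimal sketch of that pattern; the
# helper name is hypothetical, not framework API.
def _crud_error_or_none(result):
    """Return an error string for a failed crud()-style result dict, else None."""
    if result and result.get("status") is False:
        return "crud failed: %s" % (result.get("error"),)
    return None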
# =========================== test_arduino.py ===========================
import json
import socket
import threading
import traceback
from contextlib import ExitStack
from time import sleep
from typing import Dict
import pytest
from afancontrol.arduino import (
ArduinoConnection,
ArduinoName,
ArduinoPin,
SetPWMCommand,
pyserial_available,
)
from afancontrol.pwmfan import (
ArduinoFanPWMRead,
ArduinoFanPWMWrite,
ArduinoFanSpeed,
PWMValue,
)
pytestmark = pytest.mark.skipif(
not pyserial_available, reason="pyserial is not installed"
)
class DummyArduino:
"""Emulate an Arduino board, i.e. the other side of the pyserial connection.
Slightly mimics the Arduino program `micro.ino`.
"""
def __init__(self) -> None:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("127.0.0.1", 0))
s.listen(1)
listening_port = s.getsockname()[1]
self.sock = s
self.pyserial_url = "socket://127.0.0.1:%s" % listening_port
self._lock = threading.Lock()
self._loop_iteration_complete = threading.Event()
self._first_loop_iteration_complete = threading.Event()
self._disconnected = threading.Event()
self._thread_error = threading.Event()
self._is_connected = False
self._inner_state_pwms = {"5": 255, "9": 255, "10": 255, "11": 255}
self._inner_state_speeds = {"0": 0, "1": 0, "2": 0, "3": 0, "7": 0}
def set_inner_state_pwms(self, pwms: Dict[str, int]) -> None:
with self._lock:
self._inner_state_pwms.update(pwms)
if self.is_connected:
self._loop_iteration_complete.clear()
assert self._loop_iteration_complete.wait(5) is True
def set_speeds(self, speeds: Dict[str, int]) -> None:
with self._lock:
self._inner_state_speeds.update(speeds)
if self.is_connected:
self._loop_iteration_complete.clear()
assert self._loop_iteration_complete.wait(5) is True
@property
def inner_state_pwms(self):
with self._lock:
copy = self._inner_state_pwms.copy()
return copy
@property
def is_connected(self):
with self._lock:
if not self._is_connected:
return False
assert self._first_loop_iteration_complete.wait(5) is True
return True
def wait_for_disconnected(self):
assert self._disconnected.wait(5) is True
def accept(self):
client, _ = self.sock.accept()
self.sock.close() # Don't accept any more connections
with self._lock:
self._is_connected = True
threading.Thread(target=self._thread_run, args=(client,), daemon=True).start()
def _thread_run(self, sock):
sock.settimeout(0.001)
command_buffer = bytearray()
try:
while True:
# The code in this loop mimics the `loop` function
# in the `micro.ino` program.
try:
command_buffer.extend(sock.recv(1024))
except socket.timeout:
pass
while len(command_buffer) >= 3:
command_raw = command_buffer[:3]
del command_buffer[:3]
command = SetPWMCommand.parse(command_raw)
with self._lock:
self._inner_state_pwms[str(command.pwm_pin)] = command.pwm
sock.sendall(self._make_status())
self._loop_iteration_complete.set()
self._first_loop_iteration_complete.set()
sleep(0.050)
except (ConnectionResetError, BrokenPipeError):
pass
except Exception:
traceback.print_exc()
self._thread_error.set()
finally:
with self._lock:
self._is_connected = False
sock.close()
self._disconnected.set()
def _make_status(self):
with self._lock:
status = {
"fan_inputs": self._inner_state_speeds,
"fan_pwm": self._inner_state_pwms,
}
return (json.dumps(status) + "\n").encode("ascii")
def ensure_no_errors_in_thread(self):
assert self._thread_error.is_set() is not True
@pytest.fixture
def dummy_arduino():
return DummyArduino()
def test_smoke(dummy_arduino):
conn = ArduinoConnection(ArduinoName("test"), dummy_arduino.pyserial_url)
fan_speed = ArduinoFanSpeed(conn, tacho_pin=ArduinoPin(3))
pwm_read = ArduinoFanPWMRead(conn, pwm_pin=ArduinoPin(9))
pwm_write = ArduinoFanPWMWrite(conn, pwm_pin=ArduinoPin(9))
dummy_arduino.set_inner_state_pwms({"9": 42})
with ExitStack() as stack:
assert not dummy_arduino.is_connected
stack.enter_context(fan_speed)
stack.enter_context(pwm_read)
stack.enter_context(pwm_write)
dummy_arduino.accept()
assert dummy_arduino.is_connected
dummy_arduino.set_speeds({"3": 1200})
conn.wait_for_status() # required only for synchronization in the tests
assert fan_speed.get_speed() == 1200
assert pwm_read.get() == 255
assert dummy_arduino.inner_state_pwms["9"] == 255
pwm_write.set(PWMValue(192))
dummy_arduino.set_speeds({"3": 998})
conn.wait_for_status() # required only for synchronization in the tests
assert fan_speed.get_speed() == 998
assert pwm_read.get() == 192
assert dummy_arduino.inner_state_pwms["9"] == 192
dummy_arduino.wait_for_disconnected()
assert dummy_arduino.inner_state_pwms["9"] == 255
assert not dummy_arduino.is_connected
dummy_arduino.ensure_no_errors_in_thread()
|
c3po.py
|
# Copyright 2015-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Thomas Beermann <thomas.beermann@cern.ch>, 2015-2017
# - Vincent Garonne <vgaronne@gmail.com>, 2017-2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
#
# PY3K COMPATIBLE
'''
Dynamic data placement daemon.
'''
import logging
from datetime import datetime
from hashlib import md5
from json import dumps
try:
from Queue import Queue
except ImportError:
from queue import Queue
from sys import stdout
from time import sleep
from threading import Event, Thread
from uuid import uuid4
from requests import post
from requests.auth import HTTPBasicAuth
from requests.exceptions import RequestException
from rucio.client import Client
from rucio.common.config import config_get, config_get_options
from rucio.common.exception import RucioException
from rucio.daemons.c3po.collectors.free_space import FreeSpaceCollector
from rucio.daemons.c3po.collectors.jedi_did import JediDIDCollector
from rucio.daemons.c3po.collectors.workload import WorkloadCollector
logging.basicConfig(stream=stdout,
level=getattr(logging,
config_get('common', 'loglevel',
raise_exception=False,
default='DEBUG').upper()),
format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
GRACEFUL_STOP = Event()
def read_free_space(once=False, thread=0, waiting_time=1800):
"""
Thread to collect the space usage information for RSEs.
"""
free_space_collector = FreeSpaceCollector()
timer = waiting_time
while not GRACEFUL_STOP.is_set():
if timer < waiting_time:
timer += 10
sleep(10)
continue
logging.info('collecting free space')
free_space_collector.collect_free_space()
timer = 0
def read_workload(once=False, thread=0, waiting_time=1800):
"""
Thread to collect the workload information from PanDA.
"""
workload_collector = WorkloadCollector()
timer = waiting_time
while not GRACEFUL_STOP.is_set():
if timer < waiting_time:
timer += 10
sleep(10)
continue
logging.info('collecting workload')
workload_collector.collect_workload()
timer = 0
def print_workload(once=False, thread=0, waiting_time=600):
"""
Thread to regularly output the workload to logs for debugging.
"""
workload_collector = WorkloadCollector()
timer = waiting_time
while not GRACEFUL_STOP.is_set():
if timer < waiting_time:
timer += 10
sleep(10)
continue
logging.info('Number of sites cached %d' % len(workload_collector.get_sites()))
for site in workload_collector.get_sites():
logging.info('%s: %d / %d / %d' % (site, workload_collector.get_cur_jobs(site), workload_collector.get_avg_jobs(site), workload_collector.get_max_jobs(site)))
timer = 0
def read_dids(once=False, thread=0, did_collector=None, waiting_time=60):
"""
Thread to collect DIDs for the placement algorithm.
"""
timer = waiting_time
while not GRACEFUL_STOP.is_set():
if timer < waiting_time:
timer += 10
sleep(10)
continue
did_collector.get_dids()
timer = 0
def add_rule(client, did, src_rse, dst_rse):
logging.debug('add rule for %s from %s to %s' % (did, src_rse, dst_rse))
r = client.add_replication_rule([did, ], 1, dst_rse, lifetime=604800, account='c3po', source_replica_expression=src_rse, activity='Data Brokering', asynchronous=True)
logging.debug(r)
def place_replica(once=False,
thread=0,
did_queue=None,
waiting_time=100,
dry_run=False,
sampling=False,
algorithms='t2_free_space_only_pop_with_network',
datatypes='NTUP,DAOD',
dest_rse_expr='type=DATADISK',
max_bytes_hour=100000000000000,
max_files_hour=100000,
max_bytes_hour_rse=50000000000000,
max_files_hour_rse=10000,
min_popularity=8,
min_recent_requests=5,
max_replicas=5):
"""
Thread to run the placement algorithm to decide if and where to put new replicas.
"""
try:
c3po_options = config_get_options('c3po')
client = None
if 'algorithms' in c3po_options:
algorithms = config_get('c3po', 'algorithms')
algorithms = algorithms.split(',')
if not dry_run:
if len(algorithms) != 1:
logging.error('Multiple algorithms are only allowed in dry_run mode')
return
client = Client(auth_type='x509_proxy', account='c3po', creds={'client_proxy': '/opt/rucio/etc/ddmadmin.long.proxy'})
instances = {}
for algorithm in algorithms:
module_path = 'rucio.daemons.c3po.algorithms.' + algorithm
module = __import__(module_path, globals(), locals(), ['PlacementAlgorithm'])
instance = module.PlacementAlgorithm(datatypes, dest_rse_expr, max_bytes_hour, max_files_hour, max_bytes_hour_rse, max_files_hour_rse, min_popularity, min_recent_requests, max_replicas)
instances[algorithm] = instance
params = {
'dry_run': dry_run,
'sampling': sampling,
'datatypes': datatypes,
'dest_rse_expr': dest_rse_expr,
'max_bytes_hour': max_bytes_hour,
'max_files_hour': max_files_hour,
'max_bytes_hour_rse': max_bytes_hour_rse,
'max_files_hour_rse': max_files_hour_rse,
'min_recent_requests': min_recent_requests,
'min_popularity': min_popularity
}
instance_id = str(uuid4()).split('-')[0]
elastic_url = config_get('c3po', 'elastic_url')
elastic_index = config_get('c3po', 'elastic_index')
ca_cert = False
if 'ca_cert' in c3po_options:
ca_cert = config_get('c3po', 'ca_cert')
auth = False
if ('elastic_user' in c3po_options) and ('elastic_pass' in c3po_options):
auth = HTTPBasicAuth(config_get('c3po', 'elastic_user'), config_get('c3po', 'elastic_pass'))
w = waiting_time
while not GRACEFUL_STOP.is_set():
if w < waiting_time:
w += 10
sleep(10)
continue
len_dids = did_queue.qsize()
if len_dids > 0:
logging.debug('(%s) %d did(s) in queue' % (instance_id, len_dids))
else:
logging.debug('(%s) no dids in queue' % (instance_id))
for _ in range(0, len_dids):
did = did_queue.get()
for algorithm, instance in instances.items():
logging.info('(%s:%s) Retrieved %s:%s from queue. Run placement algorithm' % (algorithm, instance_id, did[0], did[1]))
decision = instance.place(did)
decision['@timestamp'] = datetime.utcnow().isoformat()
decision['algorithm'] = algorithm
decision['instance_id'] = instance_id
decision['params'] = params
create_rule = True
if sampling and 'error_reason' not in decision:
create_rule = bool(ord(md5(str(decision['did']).encode()).hexdigest()[-1]) & 1)
decision['create_rule'] = create_rule
# write the output to ES for further analysis
index_url = elastic_url + '/' + elastic_index + '-' + datetime.utcnow().strftime('%Y-%m') + '/record/'
try:
if ca_cert:
r = post(index_url, data=dumps(decision), verify=ca_cert, auth=auth)
else:
r = post(index_url, data=dumps(decision))
if r.status_code != 201:
logging.error(r)
logging.error('(%s:%s) could not write to ElasticSearch' % (algorithm, instance_id))
except RequestException as e:
logging.error('(%s:%s) could not write to ElasticSearch' % (algorithm, instance_id))
logging.error(e)
continue
logging.debug(decision)
if 'error_reason' in decision:
logging.error('(%s:%s) The placement algorithm ran into an error: %s' % (algorithm, instance_id, decision['error_reason']))
continue
logging.info('(%s:%s) Decided to place a new replica for %s on %s' % (algorithm, instance_id, decision['did'], decision['destination_rse']))
if (not dry_run) and create_rule:
# DO IT!
try:
add_rule(client, {'scope': did[0], 'name': did[1]}, decision.get('source_rse'), decision.get('destination_rse'))
except RucioException as e:
logging.debug(e)
w = 0
except Exception as e:
logging.critical(e)
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
GRACEFUL_STOP.set()
def run(once=False,
threads=1,
only_workload=False,
dry_run=False,
sampling=False,
algorithms='t2_free_space_only_pop_with_network',
datatypes='NTUP,DAOD',
dest_rse_expr='type=DATADISK',
max_bytes_hour=100000000000000,
max_files_hour=100000,
max_bytes_hour_rse=50000000000000,
max_files_hour_rse=10000,
min_popularity=8,
min_recent_requests=5,
max_replicas=5):
"""
Starts up the main thread
"""
logging.info('activating C-3PO')
thread_list = []
try:
if only_workload:
logging.info('running in workload-collector-only mode')
thread_list.append(Thread(target=read_workload, name='read_workload', kwargs={'thread': 0, 'waiting_time': 1800}))
thread_list.append(Thread(target=print_workload, name='print_workload', kwargs={'thread': 0, 'waiting_time': 600}))
else:
logging.info('running in placement mode')
did_queue = Queue()
dc = JediDIDCollector(did_queue)
thread_list.append(Thread(target=read_free_space, name='read_free_space', kwargs={'thread': 0, 'waiting_time': 1800}))
thread_list.append(Thread(target=read_dids, name='read_dids', kwargs={'thread': 0, 'did_collector': dc}))
thread_list.append(Thread(target=place_replica, name='place_replica', kwargs={'thread': 0,
'did_queue': did_queue,
'waiting_time': 10,
'algorithms': algorithms,
'dry_run': dry_run,
'sampling': sampling,
'datatypes': datatypes,
'dest_rse_expr': dest_rse_expr,
'max_bytes_hour': max_bytes_hour,
'max_files_hour': max_files_hour,
'max_bytes_hour_rse': max_bytes_hour_rse,
'max_files_hour_rse': max_files_hour_rse,
'min_popularity': min_popularity,
'min_recent_requests': min_recent_requests,
'max_replicas': max_replicas}))
for t in thread_list:
t.start()
logging.info('waiting for interrupts')
while len(thread_list) > 0:
[t.join(timeout=3) for t in thread_list if t and t.is_alive()]
except Exception as exception:
logging.critical(exception)
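# Hedged sketch (not part of the upstream module): this daemon is normally launched
# by a separate entry-point script that installs the stop() signal handler and calls
# run(); a direct invocation could look roughly like this.
if __name__ == '__main__':
    import signal
    signal.signal(signal.SIGTERM, stop)
    signal.signal(signal.SIGINT, stop)
    run(once=False, threads=1)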
|
server.py
|
from socket import *
from threading import Thread
debug = False
# Global user list
# Contains instances of the registered users
registeredUsers = []
# USER CLASS
# Defines a user
# ip and username must be unique
class User:
username = ""
ip = ""
state = "busy"
connectionStatus = False
connectionSocket = ""
def __init__(self, username):
self.username = username
def connectSocket(self, connectionSocket):
self.ip = connectionSocket.getpeername()[0]
self.connectionSocket = connectionSocket
self.connectionStatus = True
def setState(self, newState):
self.state = newState
def tag(self):
return self.username + '@' + self.ip
def print(self):
printStr = '----\n' + self.username + '\n' + self.ip + '\n' + self.state + '\n' + \
"Connected: " + str(self.connectionStatus) + '\n' + str(self.connectionSocket) + '\n----'
print(printStr)
@staticmethod
def reconFromAddr(ipAddr):
for user in registeredUsers:
if user.ip == ipAddr:
return user
return -1
@staticmethod
def reconFromUsr(username):
for user in registeredUsers:
if user.username == username:
return user
return -1
@staticmethod
def authenticate(connectionSocket):
msg = "CONNECTION ACK. PLEASE AUTH WITH USERNAME"
connectionSocket.send(msg.encode('utf-8'))
rec = connectionSocket.recv(1024)
rec = rec.decode('utf-8')
user = User.reconFromUsr(rec)
if user == -1:
msg = "AUTH FAILED"
else:
msg = "AUTH AS " + user.username
user.connectSocket(connectionSocket)
connectionSocket.send(msg.encode('utf-8'))
return user
######################
# COMMANDS FUNCTIONS #
######################
# RING
# Sends a ring to destination
def ring(sender, dest):
if dest == -1 or not dest.connectionStatus:
print("Failed to RING: dest unknown or offline")
return -1
ringMessage = "RING " + sender.username
dest.connectionSocket.send(ringMessage.encode('utf-8'))
def getOnline():
onStr = ""
for u in registeredUsers:
if u.connectionStatus == True:
onStr += (u.username + '\n')
return onStr
# CONNECTION HANDLER
# Handle a connection with a client
def handler(connectionSocket):
user = User.authenticate(connectionSocket)
if user == -1:
print(connectionSocket.getpeername(), "failed to authenticate")
connectionSocket.close()
else:
print(connectionSocket.getpeername(), "authenticated as", user.username)
while True:
message = user.connectionSocket.recv(1024)
message = message.decode("utf-8")
print(user.tag(), ': ', message)
handlerExit = commandHandler(message, user)
if debug:
for u in registeredUsers:
u.print()
if handlerExit == "CLOSE":
print("User", user.tag(), "requested connection close")
break
elif handlerExit == "":
print("User", user.tag(), "forced connection close (blank)")
break
user.connectionStatus = False
user.connectionSocket.close()
# COMMAND HANDLER
# Recognizes the command and executes it,
# or launches a handler for the command.
# Returns the name of the decoded command.
def commandHandler(message, user):
cmd = message.split()
if message == "":
return ""
if cmd[0] == "RING":
dest = User.reconFromUsr(cmd[1])
ring(user, dest)
elif cmd[0] == "SETSTATE":
user.setState(message[9:])
elif cmd[0] == "GETSTATE":
u = User.reconFromUsr(cmd[1])
user.connectionSocket.send(u.state.encode('utf-8'))
elif cmd[0] == "GETONLINE":
user.connectionSocket.send(getOnline().encode('utf-8'))
# Return the command name so the connection handler can check for CLOSE
return cmd[0]
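# Hedged illustration (not part of the original server): a minimal client-side
# exchange for the text protocol handled above. The host/port and username are
# placeholders; it assumes the server below is already running and reachable.
def exampleClient(username="leo", server=("localhost", 1200)):
    clientSocket = socket(AF_INET, SOCK_STREAM)
    clientSocket.connect(server)
    print(clientSocket.recv(1024).decode('utf-8'))  # "CONNECTION ACK. PLEASE AUTH WITH USERNAME"
    clientSocket.send(username.encode('utf-8'))
    print(clientSocket.recv(1024).decode('utf-8'))  # "AUTH AS <username>" or "AUTH FAILED"
    clientSocket.send("GETONLINE".encode('utf-8'))
    print(clientSocket.recv(1024).decode('utf-8'))  # newline-separated list of online users
    clientSocket.send("CLOSE".encode('utf-8'))      # handler() breaks and closes the connection
    clientSocket.close()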
# SERVER STARTUP
# Setting up and launching server
serverPort = 1200
serverSocket = socket(AF_INET, SOCK_STREAM)
serverSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
binding = ("", serverPort)
serverSocket.bind(binding)
# Load users
# TODO implement from a file
registeredUsers.append(User("mac"))
registeredUsers.append(User("surface"))
registeredUsers.append(User("leo"))
registeredUsers.append(User("ricky"))
registeredUsers.append(User("localhost"))
# Launch server
serverSocket.listen()
print("Server is listening...")
while True:
connectionSocket, clientAddr = serverSocket.accept()
print("Request from:", clientAddr[0])
# user = User.reconFromAddr(clientAddr[0])
# user.connnectSocket(connectionSocket)
thread = Thread(target=handler, args=(connectionSocket, ))
print("Opened connection with:", connectionSocket.getpeername(), "using Thread", thread)
thread.start()
serverSocket.close()
|
hilo.py
|
import threading
def worker():
print('I am working')
threads = list()
for i in range(3):
t = threading.Thread(target=worker)
threads.append(t)  # add the thread to the list of threads
t.start()
|
foo.py
|
# Python 3.3.3 and 2.7.6
# python foo.py
#
# Edited by Eilif Sommer Øyre
# 17.01.2020
from threading import Thread
# Potentially useful thing:
# In Python you "import" a global variable, instead of "export"ing it when you declare it
# (This is probably an effort to make you feel bad about typing the word "global")
i = 0
def incrementingFunction():
global i
for j in range(int(1e6)):
i += 1
def decrementingFunction():
global i
for j in range(int(1e6)):
i -= 1
def main():
print(i)
incrementing = Thread(target = incrementingFunction, args = (),)
decrementing = Thread(target = decrementingFunction, args = (),)
incrementing.start()
decrementing.start()
incrementing.join()
decrementing.join()
print("The magic number is %d" % (i))
main()
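# Hedged aside (not part of the original exercise): because `i += 1` is not atomic,
# the two threads above can interleave their read-modify-write steps, so the final
# value need not be 0. Guarding the updates with a lock makes the result
# deterministic, at the cost of serializing the loop bodies. The names below are
# illustrative additions, not part of the original file.
from threading import Lock
lock = Lock()
def incrementingFunctionLocked():
    global i
    for j in range(int(1e6)):
        with lock:
            i += 1
def decrementingFunctionLocked():
    global i
    for j in range(int(1e6)):
        with lock:
            i -= 1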
|
main.py
|
import sys
import time
from threading import Thread
import pygame
from pygame.sprite import Sprite
from random import randint, choice
from baby.graphics.Panel import Color, Panel
_HORIZONTAL_OFFSET = 0
_VERTICAL_OFFSET = 0
def ms_to_sec(milliseconds) -> float:
return milliseconds / 1000
def should_draw_shape():
return randint(0, 100) <= _TOLERANCE
def set_settings(sprite_options) -> None:
global PIXEL_DELTA_MAX, framerate, _TOLERANCE, thread, MOVEMENT, color_left, color_right, color_up, color_down, color_full, sprites
MOVEMENT = randint(0, 100)
PIXEL_DELTA_MAX = randint(1, 10)
framerate = randint(1, 8)
_TOLERANCE = randint(1, 30)
sprites = choice(sprite_options)
for sprite in sprites.sprites():
sprite.color = Color(MOVEMENT)
setting_timeout = randint(1, 15)
print(f"""
MOVEMENT {MOVEMENT}
PIXEL_DELTA_MAX {PIXEL_DELTA_MAX}
framerate {framerate}
_TOLERANCE {_TOLERANCE}
setting_timeout {setting_timeout}
sprites {sprites.sprites()}
""")
def wait_then_set():
time.sleep(setting_timeout)
set_settings(sprite_options)
thread = Thread(target=wait_then_set, daemon=True)
thread.start()
def main():
global sprites
MOVEMENT = 5
pygame.init()
infoObject = pygame.display.Info()
SCREEN_WIDTH = infoObject.current_w
SCREEN_HEIGHT = infoObject.current_h
left_x = 0
left_y = 0
right_x = SCREEN_WIDTH / 2
right_y = 0
up_x = 0
up_y = 0
down_y = SCREEN_HEIGHT / 2
down_x = 0
left = Panel(x=left_x, y=left_y, width=SCREEN_WIDTH, height=SCREEN_HEIGHT)
right = Panel(x=right_x, y=right_y, width=SCREEN_WIDTH / 2, height=SCREEN_HEIGHT)
up = Panel(x=up_x, y=up_y, width=SCREEN_WIDTH, height=SCREEN_HEIGHT / 2)
down = Panel(x=down_x, y=down_y, width=SCREEN_WIDTH, height=SCREEN_HEIGHT / 2)
full = Panel(x=right_x, y=right_y, width=SCREEN_WIDTH, height=SCREEN_HEIGHT)
# sprite_list_name = [Shape() for _ in range(0, 1)]
vertical_sprites = pygame.sprite.Group([left, right])
horizontal_sprites = pygame.sprite.Group([up, down])
full_sprites = pygame.sprite.Group([full, ])
set_settings([vertical_sprites, horizontal_sprites, full_sprites])
clock = pygame.time.Clock()
screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
print("framerate:", framerate)
print("tolerance:", _TOLERANCE)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
sprites.draw(screen)
sprites.update()
pygame.display.flip()
clock.tick(framerate)
if __name__ == "__main__":
main()
|
s20.py
|
""" Orbivo S20. """
import binascii
import struct
import logging
import socket
import threading
import time
_LOGGER = logging.getLogger(__name__)
# S20 UDP port
PORT = 10000
# UDP best-effort.
RETRIES = 3
TIMEOUT = 1.0
DISCOVERY_TIMEOUT = 1.0
# Timeout after which to renew device subscriptions
SUBSCRIPTION_TIMEOUT = 60
# Packet constants.
MAGIC = b'\x68\x64'
DISCOVERY = b'\x00\x06\x71\x61'
DISCOVERY_RESP = b'\x00\x2a\x71\x61'
SUBSCRIBE = b'\x00\x1e\x63\x6c'
SUBSCRIBE_RESP = b'\x00\x18\x63\x6c'
CONTROL = b'\x00\x17\x64\x63'
CONTROL_RESP = b'\x00\x17\x73\x66'
PADDING_1 = b'\x20\x20\x20\x20\x20\x20'
PADDING_2 = b'\x00\x00\x00\x00'
ON = b'\x01'
OFF = b'\x00'
# Socket
_SOCKET = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Buffer
_BUFFER = {}
def _listen():
""" Listen on socket. """
while True:
data, addr = _SOCKET.recvfrom(1024)
_BUFFER[addr[0]] = data
def _setup():
""" Set up module.
Open a UDP socket, and listen in a thread.
"""
_SOCKET.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
_SOCKET.bind(('', PORT))
udp = threading.Thread(target=_listen)
udp.daemon = True
udp.start()
def _device_time(tab):
ts = struct.unpack('<L', tab)[0] - 2208988800
return ts
def discover(timeout=DISCOVERY_TIMEOUT):
""" Discover devices on the local network.
:param timeout: Optional timeout in seconds.
:returns: Set of discovered host addresses.
"""
hosts = {}
payload = MAGIC + DISCOVERY
for _ in range(RETRIES):
_SOCKET.sendto(bytearray(payload), ('255.255.255.255', PORT))
start = time.time()
while time.time() < start + timeout:
for host, data in _BUFFER.copy().items():
if not _is_discovery_response(data):
continue
if host not in hosts:
_LOGGER.debug("Discovered device at %s", host)
entry = {}
entry['mac'] = data[7:13]
entry['imac'] = data[19:25]
entry['next'] = 0
entry['st'] = int(binascii.hexlify(data[-1]))
entry['time'] = _device_time(data[37:41])
entry['serverTime'] = int(time.time())
hosts[host] = entry
return hosts
def _is_discovery_response(data):
""" Is this a discovery response?
:param data: Payload.
"""
return data[0:6] == (MAGIC + DISCOVERY_RESP)
def _is_subscribe_response(data):
""" Is this a subscribe response?
:param data: Payload.
"""
return data[0:6] == (MAGIC + SUBSCRIBE_RESP)
def _is_control_response(data):
""" Is this a control response?
:param data: Payload.
"""
return data[0:6] == (MAGIC + CONTROL_RESP)
class S20Exception(Exception):
""" S20 exception. """
pass
class S20(object):
""" Controls an Orbivo S20 WiFi Smart Socket.
http://www.orvibo.com/en_products_view.asp?mid=15&pid=4&id=234
Protocol documentation: http://pastebin.com/LfUhsbcS
"""
def __init__(self, host, mac = None):
""" Initialize S20 object.
:param host: IP or hostname of device.
"""
self.host = host
if not mac:
(self._mac, self._mac_reversed) = self._discover_mac()
else:
if type(mac) is str:
self._mac = binascii.a2b_hex(''.join(mac.split(':')))
else:
self._mac = mac
ba = bytearray(self._mac)
ba.reverse()
self._mac_reversed = ba
self._subscribe()
@property
def on(self):
""" State property.
:returns: State of device (on/off).
"""
return self._subscribe()
@on.setter
def on(self, state):
""" Change device state.
:param state: True (on) or False (off).
"""
if state:
self._turn_on()
else:
self._turn_off()
def _discover_mac(self):
""" Discovers MAC address of device.
Discovery is done by sending a UDP broadcast.
All configured devices reply. The response contains
the MAC address in both needed formats.
Discovery of multiple switches must be done synchronously.
:returns: Tuple of MAC address and reversed MAC address.
"""
mac = None
mac_reversed = None
cmd = MAGIC + DISCOVERY
resp = self._udp_transact(cmd, self._discovery_resp,
broadcast=True,
timeout=DISCOVERY_TIMEOUT)
if resp:
(mac, mac_reversed) = resp
if mac is None:
raise S20Exception("Couldn't discover {}".format(self.host))
return (mac, mac_reversed)
def _subscribe(self):
""" Subscribe to the device.
A subscription serves two purposes:
- Returns state (on/off).
- Enables state changes on the device
for a short period of time.
"""
cmd = MAGIC + SUBSCRIBE + self._mac \
+ PADDING_1 + self._mac_reversed + PADDING_1
status = self._udp_transact(cmd, self._subscribe_resp,
broadcast=False, timeout=TIMEOUT)
if status is not None:
self.last_subscribed = time.time()
return status == ON
else:
raise S20Exception(
"No status could be found for {}".format(self.host))
def _subscription_is_recent(self):
""" Check if subscription occurred recently.
:returns: Yes (True) or no (False)
"""
return self.last_subscribed > time.time() - SUBSCRIPTION_TIMEOUT
def _control(self, state):
""" Control device state.
Possible states are ON or OFF.
:param state: Switch to this state.
"""
# Renew subscription if necessary
if not self._subscription_is_recent():
self._subscribe()
cmd = MAGIC + CONTROL + self._mac + PADDING_1 + PADDING_2 + state
_LOGGER.debug("Sending new state to %s: %s", self.host, ord(state))
ack_state = self._udp_transact(cmd, self._control_resp,
False, TIMEOUT,
state)
if ack_state is None:
raise S20Exception(
"Device didn't acknowledge control request: {}".format(
self.host))
def _discovery_resp(self, data):
""" Handle a discovery response.
:param data: Payload.
:param addr: Address tuple.
:returns: MAC and reversed MAC.
"""
if _is_discovery_response(data):
_LOGGER.debug("Discovered MAC of %s: %s", self.host,
binascii.hexlify(data[7:13]).decode())
return (data[7:13], data[19:25])
def _subscribe_resp(self, data):
""" Handle a subscribe response.
:param data: Payload.
:returns: State (ON/OFF)
"""
if _is_subscribe_response(data):
status = data[23]
_LOGGER.debug("Successfully subscribed to %s, state: %s",
self.host, ord(status))
return status
def _control_resp(self, data, state):
""" Handle a control response.
:param data: Payload.
:param state: Requested state.
:returns: Acknowledged state.
"""
if _is_control_response(data):
ack_state = data[22]
if state == ack_state:
_LOGGER.debug("Received state ack from %s, state: %s",
self.host, ord(ack_state))
return ack_state
def _udp_transact(self, payload, handler,
broadcast, timeout, *args):
""" Complete a UDP transaction.
UDP is stateless and not guaranteed, so we have to
take some mitigation steps:
- Send payload multiple times.
- Wait for a while to receive response.
:param payload: Payload to send.
:param handler: Response handler.
:param args: Arguments to pass to response handler.
:param broadcast: Send a broadcast instead.
:param timeout: Timeout in seconds.
"""
if self.host in _BUFFER:
del _BUFFER[self.host]
host = self.host
if broadcast:
host = '255.255.255.255'
retval = None
for _ in range(RETRIES):
_SOCKET.sendto(bytearray(payload), (host, PORT))
start = time.time()
while time.time() < start + timeout:
data = _BUFFER.get(self.host, None)
if data:
retval = handler(data, *args)
# Return as soon as a response is received
if retval is not None:
return retval
def _turn_on(self):
""" Turn on the device. """
self._control(ON)
def _turn_off(self):
""" Turn off the device. """
self._control(OFF)
_setup()
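# Hedged usage sketch (not part of the original module): discover sockets on the
# local network and toggle the first one found. Assumes at least one S20-compatible
# device is reachable and answers the discovery broadcast.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    for host in discover():
        switch = S20(host)
        print(host, "is on" if switch.on else "is off")
        switch.on = not switch.on  # toggle the relay
        break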
|
zlp_connections.py
|
# Copyright (c) 2020, FADA-CATEC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper module for python thrift interface to ZLP Service.
This module contains utility classes and methods which ease the usage of the thrift interface to
communicate with the ZLP Service."""
import os
import sys
import time
import socket
import copy
import threading
import thriftpy
from thriftpy.protocol import TBinaryProtocolFactory
from thriftpy.server import TThreadedServer, TSimpleServer
from thriftpy.thrift import TProcessor, TClient
from thriftpy.transport import TBufferedTransportFactory, TServerSocket, TSocket
class EventChannelInterfaceHandler(object):
"""This class implement the functions of ClientEventChannel thrift interface.
Attributes:
property_changed_callback (object): callback to handle settings changes on laser projector system
geo_tree_changed_callback (object): callback to handle changes on geotree operator
service_state_changed_callback (object): callback to handle changes on services state
function_module_changed_callback (object): callback to handle changes on function module state
rc_command_received_callback (object): callback to handle remote control commands reception
on_reflection_state_changed_callback (object): callback to handle changes on reflection state
"""
def __init__(self):
"""Initialize the EventChannelInterfaceHandler object."""
self.property_changed_callback = lambda x, y: None
self.geo_tree_changed_callback = lambda x, y: None
self.service_state_changed_callback = lambda x, y: None
self.function_module_changed_callback = lambda x, y, z: None
self.rc_command_received_callback = lambda a, b, c, d, e: None
self.on_reflection_state_changed_callback = lambda a, b: None
def PropertyChanged(self, name, value):
"""Set callback function to handle settings changes on laser projector system.
Args:
name (str): full path of property that was changed
value (int): value of property
"""
self.property_changed_callback(name, value)
def GeoTreeChanged(self, changed_flags, element_names):
"""Set callback function to handle changes on geotree operator.
Args:
changed_flags (int): integer value with flags of type GeoTreeChangedFlags
element_names (enum): identification of changed element (within the GeoTreeElemId enumeration )
"""
self.geo_tree_changed_callback(changed_flags, element_names)
def ServiceStateChanged(self, oldState, newState):
"""Set callback function to handle changes on services state.
Args:
oldState (enum): old state (within the ServiceStates enumeration) before change
newState (enum): new state (within the ServiceStates enumeration) after change
"""
self.service_state_changed_callback(oldState, newState)
def FunctionModuleStateChanged(self, functionModID, oldState, newState):
"""Set callback function to handle changes on function module state.
Args:
functionModID (str): identificator name of function module
oldState (enum): old state (within the FunctionModuleStates enumeration) before change
newState (enum): new state (within the FunctionModuleStates enumeration) after change
"""
self.function_module_changed_callback(functionModID, oldState, newState)
def RemoteControlFrameReceived(self, rc_id, command, toggle, projector, timestamp):
"""Set callback function to handle remote control commands reception.
Args:
rc_id (str): address of RC-device
command (enum): enum with command codes for remotecontrol functions
toggle (bool): toggle function active
projector (str): serial number of the projector
timestamp (int): timestamp
"""
self.rc_command_received_callback(rc_id, command, toggle, projector, timestamp)
def onReflectionStateChanged(self, elementName, state):
"""Set callback function to handle changes on reflection state.
Args:
elementName (str): name of the element that changed state
state (bool): true if a reflection was detected; False otherwise
"""
self.on_reflection_state_changed_callback(elementName, state)
class ThriftClient(TClient):
"""This class implement the functions to carry out the connection with the ZLP Service.
Args:
event_handler (object): object with functions of ClientEventChannel thrift interface
Attributes:
thrift_interface (obj): load the interface description file (interface.thrift) for the communication between
ZLP-Service and a remote client
"""
def __init__(self, event_handler=EventChannelInterfaceHandler()):
"""Initialize the ThriftClient object."""
self._event_channel = None
self._event_channel_handler = event_handler
_interface_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "interface.thrift")
self.thrift_interface = thriftpy.load(_interface_file, module_name="zlaser_thrift")
def init_client(self, ip, port):
"""Establish a connection to thrift server of ZLP Service. Init client opening sockets and init events handler.
Args:
ip (str): IP network address of ZLP-Service
port (str): port number on which ZLP-Service listens for requests
"""
client_socket = TSocket(ip, port, socket_family=socket.AF_INET, socket_timeout=50000)
transport = TBufferedTransportFactory().get_transport(client_socket)
protocol = TBinaryProtocolFactory().get_protocol(transport)
transport.open()
super().__init__(self.thrift_interface.ServiceInterface, protocol)
def init_event_channel(self):
"""Create a thrift server and register it at ZLP Service to receive events."""
if self._event_channel_handler and not self._event_channel:
processor = TProcessor(self.thrift_interface.ClientEventChannel, self._event_channel_handler)
server_socket = TServerSocket(host="0.0.0.0", port=0, socket_family=socket.AF_INET, client_timeout=200000)
server_socket.client_timeout = 1000*60*10
self._event_channel = TSimpleServer(processor, server_socket)
t = threading.Thread(target=self._event_channel.serve, daemon=True)
t.start()
time.sleep(1)
connection = self._event_channel.trans.sock.getsockname()
self.ConnectClientEventChannel(connection[1])
def set_property_changed_callback(self, callback):
"""Set callback function related with laser projector settings changes.
Args:
callback (object): callback function to set
Raises:
ValueError
"""
if self._event_channel_handler:
self._event_channel_handler.property_changed_callback = callback
else:
raise ValueError("Error: Can't install callback, because event_handler = none!")
def set_geotree_changed_callback(self, callback):
"""Set callback function related with geotree operator changes.
Args:
callback (object): callback function to set
Raises:
ValueError
"""
if self._event_channel_handler:
self._event_channel_handler.geo_tree_changed_callback = callback
else:
raise ValueError("Error: Can't install callback, because event_handler = none!")
def set_function_module_state_changed_callback(self, callback):
"""Set callback function related with function module state changes.
Args:
callback (object): callback function to set
Raises:
ValueError
"""
if self._event_channel_handler:
self._event_channel_handler.function_module_changed_callback = callback
else:
raise ValueError("Error: Can't install callback, because event_handler = none!")
def set_rc_command_received_callback(self, callback):
"""Set callback function related with remote control commands reception.
Args:
callback (object): callback function to set
Raises:
ValueError
"""
if self._event_channel_handler:
self._event_channel_handler.rc_command_received_callback = callback
else:
raise ValueError("Error: Can't install callback, because event_handler = none!")
def set_reflection_state_changed_callback(self, callback):
"""Set callback function related with reflection state changes.
Args:
callback (object): callback function to set
Raises:
ValueError
"""
if self._event_channel_handler:
self._event_channel_handler.on_reflection_state_changed_callback = callback
else:
raise ValueError("Error: Can't install callback, because event_handler = none!")
class ProjectorClient(object):
"""This class implements the functions for connecting to the projector and basic projection features.
Attributes:
projector_id (str): serial number of the projector
module_id (str): function module identification name
"""
def __init__(self):
"""Initialize the ProjectorClient object."""
self.projector_id = ""
self.module_id = ""
self.__thrift_client = ThriftClient()
self.cv = threading.Condition()
def get_thrift_client(self):
"""Return the object generated to communicate with the projector.
Returns:
object: thrift client object generated to communicate with the projector
"""
try:
return self.__thrift_client
except Exception as e:
return e
def connect(self,server_IP,connection_port):
"""Create and connect the client to thrift server (located at projector) of ZLP-Service and establish an event channel if
needed.
Args:
server_IP (str): IP network address of ZLP-Service
connection_port (str): port number on which ZLP-Service listens for requests
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple is
an information message string
"""
try:
if not self.__thrift_client._event_channel:
self.__thrift_client.init_client(server_IP, connection_port)
self.__thrift_client.init_event_channel()
success = True
message = "Client connected"
else:
success = False
message = "Projector already connected"
except Exception as e:
success = False
message = e
return success,message
def disconnect(self):
"""Disconnect from ZLP Service thrift server and close own event server.
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple is
an information message string
"""
try:
self.__thrift_client.RemoveGeoTreeElem("")
self.__thrift_client.FunctionModuleRelease(self.module_id)
self.__thrift_client.DisconnectClientEventChannel()
self.__thrift_client.close()
if self.__thrift_client._event_channel:
self.__thrift_client._event_channel.close()
self.__thrift_client._event_channel = None
success = True
message = "Projector disconnected"
except Exception as e:
success = False
message = e
return success,message
def connection_status(self):
"""Get status of projection connection.
Returns:
bool: status of the event channel object. Projector connected if true, disconnected otherwise
"""
return self.__thrift_client._event_channel
def transfer_license(self, lic_path):
"""Transfer data of the local license file to remote file at ZLP-Service.
Args:
lic_path (str): license file path
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple is
an information message string
"""
try:
license_path = os.path.abspath(lic_path)
license_file = os.path.basename(license_path)
with open(license_path, 'r') as license_fd:
content = license_fd.read()
self.__thrift_client.TransferDataToFile(content, license_file, True)
self.__thrift_client.LoadLicense(license_file)
success = True
message = "License transfered."
except self.__thrift_client.thrift_interface.CantWriteFile as e:
success = False
message = e
except FileNotFoundError as e:
success = False
message = e
except Exception as e:
success = False
message = e
return success,message
def check_license(self):
"""Check if license is valid.
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple is
an information message string
"""
try:
success = self.__thrift_client.CheckLicense()
if success:
message = "License is valid"
else:
message = "License is not valid"
except Exception as e:
success = False
message = e
return success,message
def scan_projectors(self, scan_addresses=""):
"""Scan the network for projectors. Get a list of active projectors.
Args:
scan_addresses (str): addresses or address to scan
Returns:
tuple[list, bool, str]: the first value in the returned tuple is a list of serial numbers of the projectors found,
the second a bool success value and the third value in the tuple is an information message string
"""
try:
self.__thrift_client.SetProperty("config.projectorManager.cmdGetProjectors.scan", "1")
self.__thrift_client.SetProperty("config.projectorManager.cmdGetProjectors.scanAddresses", scan_addresses)
self.__thrift_client.SetProperty("config.projectorManager.cmdGetProjectors", "1")
serial_list = self.__thrift_client.GetProperty("config.projectorManager.cmdGetProjectors.result.entries")
self.__thrift_client.SetProperty("config.projectorManager.cmdGetProjectors.scan", "0")
if serial_list:
serial_list = serial_list.split(" ")
success = True
message = ""
else:
serial_list = []
success = False
message = "No projectors found"
except Exception as e:
serial_list = []
success = False
message = e
return serial_list,success,message
def property_changed_callback(self, prop, value):
"""Callback function related with laser projector settings changes.
Args:
prop (str): full path of property that was changed
value (int): value of property
"""
self.cv.acquire()
self.cv.notify()
self.cv.release()
def activate_projector(self,projector_IP):
"""Set properties to activate a projector.
Args:
projector_IP (str): address of the projector to scan
Returns:
tuple[str, bool, str]: the first value in the returned tuple is the serial number string of the activated projector,
the second a bool success value and the third value in the tuple is an information message string
"""
try:
projectors, success, message = self.scan_projectors(projector_IP)
if success:
self.__thrift_client.set_property_changed_callback(self.property_changed_callback)
self.__thrift_client.RegisterForChangedProperty("config.licenseState.IsValid")
self.cv.acquire()
self.projector_id = projectors[0]
self.__thrift_client.SetProperty("config.projectorManager.cmdActivateProjector.serial", self.projector_id)
self.__thrift_client.SetProperty("config.projectorManager.cmdActivateProjector.active", "1")
self.__thrift_client.SetProperty("config.projectorManager.cmdActivateProjector", "1")
self.cv.wait()
self.cv.release()
message = "Projector activated"
except Exception as e:
success = False
message = e
return self.projector_id,success,message
def deactivate_projector(self):
"""Set properties to deactivate a projector.
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple is
an information message string
"""
try:
projector_property_path = "config.projectorManager.projectors." + self.projector_id
self.__thrift_client.SetProperty(projector_property_path + ".cmdShowProjection.show", "0")
self.__thrift_client.SetProperty(projector_property_path + ".cmdShowProjection", "1")
self.__thrift_client.SetProperty("config.projectorManager.cmdActivateProjector.serial", self.projector_id)
self.__thrift_client.SetProperty("config.projectorManager.cmdActivateProjector.active", "0")
self.__thrift_client.SetProperty("config.projectorManager.cmdActivateProjector", "1")
success = True
message = "Projector deactivated:" + self.projector_id
except Exception as e:
success = False
message = e
return success,message
def function_module_create(self):
"""Create function module to operate with GeoTreeElements (coordinate systems and projection elements).
Returns:
tuple[str, bool, str]: the first value in the returned tuple is the function module identification name string,
the second is a bool success value and the third value in the tuple is an information message string
"""
try:
self.module_id = self.__thrift_client.FunctionModuleCreate("zFunctModRegister3d", "3DReg")
success = True
message = "Function module created"
except Exception as e:
success = False
message = e
return self.module_id,success,message
def start_project(self, cs_name):
"""Start projection on the surface of all projection elements that belong to the active coordinate system.
Args:
cs_name (str): name of the active coordinate system
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple is
an information message string
"""
try:
if not cs_name:
success = False
message = "None Coordinate System set"
elif not self.__thrift_client.GetGeoTreeElement(cs_name).activated:
success = False
message = "Coordinate_system is not activated"
elif self.is_empty(cs_name):
success = False
message = "Nothing to project"
else:
self.__thrift_client.TriggerProjection()
success = True
message = "Projecting elements from [" + cs_name + "] coordinate system."
except Exception as e:
success = False
message = e
return success,message
def stop_project(self):
"""Stop projection of all elements.
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple is
an information message string
"""
try:
projector_property_path = "config.projectorManager.projectors." + self.projector_id
self.__thrift_client.SetProperty(projector_property_path + ".cmdShowProjection.show", "0")
self.__thrift_client.SetProperty(projector_property_path + ".cmdShowProjection", "1")
success = True
message = "Projection stopped"
except Exception as e:
success = False
message = e
return success,message
def update_project(self,cs_name):
"""Update changes on figures projected (restart projection).
Args:
cs_name (str): name of the coordinate system to update
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple is
an information message string
"""
try:
self.stop_project()
self.start_project(cs_name)
success = True
message = "Projection updated."
except Exception as e:
success = False
message = e
return success,message
def is_empty(self, cs_name):
"""Check if coordinate system has associated projection elements.
Args:
cs_name (str): name of the coordinate system to check
Returns:
bool: true if there is any projection element defined at the coordinate system, false otherwise
"""
is_empty = False
try:
geo_tree_list = self.__thrift_client.GetGeoTreeIds()
# elemType = 1024, 1025, 1026, 1027 refers to projection element identificator at the projector device
matches = [elem.name for elem in geo_tree_list if elem.elemType in (1024,1025,1026,1027)]
proj_elems = [self.__thrift_client.GetProjectionElement(name) for name in matches]
proj_elems_actives = [proj_elem for proj_elem in proj_elems
if proj_elem.activated and proj_elem.coordinateSystemList[0] == cs_name]
if not proj_elems_actives:
is_empty = True
except Exception:
is_empty = True
return is_empty
def on_reflection_change(self, name, reflection):
"""Default callback for reflection state change, events handler. It is used for running code when
pointer is reflected.
Args:
name (str): name of the pointer that changed state
reflection (bool): true if a reflection was detected; False otherwise
"""
self.stop_project()
def scan_pointer(self, reflection_callback=None):
"""Set callback for reflection state change.
Args:
reflection_callback (object): callback function
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple is
an information message string
"""
try:
if reflection_callback is None:
# use default
reflection_callback = self.on_reflection_change
self.__thrift_client.set_reflection_state_changed_callback(reflection_callback)
success = True
message = "Reflection callback set."
except Exception as e:
success = False
message = e
return success,message
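# Hedged usage sketch (not part of the original module): a typical connect/activate/
# project sequence with ProjectorClient. The IP addresses, port, license path and
# coordinate system name are placeholders, not values taken from this module.
if __name__ == '__main__':
    projector = ProjectorClient()
    ok, msg = projector.connect("192.168.10.10", 9090)
    print(msg)
    if ok:
        print(projector.transfer_license("./my_license.lic")[1])
        print(projector.activate_projector("192.168.10.11")[2])
        print(projector.function_module_create()[2])
        ok, msg = projector.start_project("my_coordinate_system")
        print(msg)
        projector.stop_project()
        projector.disconnect()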
|
amazon.py
|
"""
femagtools.amazon
~~~~~~~~~~~~~~~~~
Running FEMAG on Amazon Cloud EC2
.. note: To use this engine you have to install the boto3 module from amazon
"""
import os
import threading
import time
import logging
import femagtools.job
from .config import Config
logger = logging.getLogger(__name__)
class MissingConfigurationException(Exception):
def __init__(self, message):
Exception.__init__(self, "Missing configuration: {}".format(message))
class Engine(object):
config_class = Config
default_config = {
'ENGINE': 'amazon',
'SERVER_LOCATION': 'eu-central-1',
'INSTANCE_TYPE': 't2.micro',
'ACL': 'authenticated-read',
'IMAGE_ID': 'ami-b0cc23df',
'FINISH_TASK_FILENAME': 'exit_code',
'COMPANY_NAME': 'femag'
}
"""The Amazon Engine
This engine uses the boto3 Python module to interact
with the amazon ec2 and s3 services
Args:
buckets (:obj:`list`): Existing buckets with femag calculation files
configfile (str): Filename of config file
.. :note: If possible you should use the same location for all services
"""
def __init__(self, buckets=None, configfile='config.ini'):
self.buckets = buckets
self.job = None
# Amazon file storage
self.s3_resource = self._create_amazon_resource('s3')
# Amazon Server administration
self.ec2_resource = self._create_amazon_resource('ec2')
# Create instance of config
self.config = Config(self.default_config)
self.config.from_ini_file(configfile)
def _create_amazon_resource(self, resource):
import boto3
return boto3.resource(resource)
def _create_data_buckets(self):
"""Create unique S3 Buckets for calculation
Args:
ACL (str): ACL-Rules for Amazon
"""
# If buckets already exist, map them to their folders
if self.buckets:
for idx, bucket in enumerate(self.buckets):
self.job.tasks[idx].id = bucket['id']
self.job.tasks[idx].directory = bucket['folder']
return
bucketConfiguration = {'LocationConstraint': self.config['SERVER_LOCATION']}
# Create a bucket for every calculation
for t in self.job.tasks:
self.s3_resource.create_bucket(ACL=self.config['ACL'],
Bucket=t.id,
CreateBucketConfiguration=bucketConfiguration)
logger.debug("Created buckets")
def _upload_files_to_s3(self):
"""Upload all files to Amazon S3 for this calculation
"""
if self.buckets:
logger.info("Files are already uploaded")
return
threads = []
for t in self.job.tasks:
thread = threading.Thread(target=self._upload, args=(t, ))
threads.append(thread)
thread.start()
logger.info("Uploading files: ")
self._wait_for_threads_finished(threads, "Upload files")
def _upload(self, task):
"""Upload thread for uploading one directory
:internal:
Args:
task (py:class:`CloudTask`): The task which belongs to the uploading folder
"""
# Upload one single tar_file
task.tar_file.close()
name = os.path.basename(task.file)
Body = open(task.file, 'rb')
self.s3_resource.Object(task.id, name).put(Body=Body)
def _wait_for_threads_finished(self, threads, operation):
"""Wait until all threads are finished
:internal:
Args:
threads (:obj:`list`): List of threads to check if they are finished
operation (str): Name of the operation to write a meaningful log message
"""
# Wait until all threads are not running
while any(t.is_alive() for t in threads):
time.sleep(5)
# timer.cancel
logger.info("{} is finished".format(operation))
def _start_instances(self):
"""Start all instances for the calculation
"""
# Prepare arguments for the instance start
param = {'MinCount': 1, 'MaxCount': 1 }
if self.config.get('IMAGE_ID', None):
param['ImageId'] = self.config['IMAGE_ID']
else:
raise MissingConfigurationException('image_id')
if self.config.get('INSTANCE_TYPE', None):
param['InstanceType'] = self.config['INSTANCE_TYPE']
else:
raise MissingConfigurationException('instance_type')
if self.config.get('IAM_INSTANCE_PROFILE', None):
param['IamInstanceProfile'] = {'Name': self.config['IAM_INSTANCE_PROFILE'] }
if self.config.get('KEY_NAME', None):
param['KeyName'] = self.config['KEY_NAME']
# Set security group id as list
if self.config.get('SECURITY_GROUP_IDS'):
param['SecurityGroupIds'] = []
for security_group in [s for s in self.config['SECURITY_GROUP_IDS'] if s]:
param['SecurityGroupIds'].append(security_group)
param['DryRun'] = self.config.get('DRY_RUN', False)
threads = []
for idx, t in enumerate(self.job.tasks):
thread = threading.Thread(target=self._start_instance, args=(param, t))
threads.append(thread)
thread.start()
self._wait_for_threads_finished(threads, "Start instances")
def _start_instance(self, param, task):
"""Start one instance
:internal:
Args:
task (Task): the task for calculation
"""
user_data = self._read_cloud_init(task.id)
if user_data:
param['UserData'] = user_data
instances = self.ec2_resource.create_instances(**param)
instance = instances[0] # We only started one instance
logger.info("Instance started: {}".format(instance.id))
self._add_tag(task.id, instance.id)
instance.wait_until_running()
instance.load() # Reload the data to get public dns etc.
logger.info("Instance {} is running: Public dns: {}".format(instance.id, instance.public_dns_name))
task.ec2_instance = instance.id
def _add_tag(self, task_id, instance_id):
"""Add a tag to the instance
:internal:
Args:
task_id (int): The task id (Same as the S3 Bucket name)
instance_id (int): The instance_id to set the tag to the right instance
"""
tag = '{}-{}'.format(task_id, self.config.get('COMPANY_NAME', 'femag'))
self.ec2_resource.create_tags(Resources=[instance_id], Tags=[{'Key': 'Name', 'Value': tag}])
def _read_cloud_init(self, bucket_name):
"""Read the cloud init file and if there is a line which starts with {{ENV}}
then put all config options as environment variables.
"""
user_data = ""
# Set all config options as environment variable
cloud_init_path = self.config.get('CLOUD_INIT', None)
if cloud_init_path and os.path.isfile(cloud_init_path):
with open(cloud_init_path, 'rt') as f:
for line in f:
if line.startswith('{{ENV}}'):
# Add config
for key, value in sorted(self.config.items()):
user_data += "export {}={}\n".format(key, value)
# add other important stuff
user_data += "export BUCKET_NAME={}\n".format(bucket_name)
continue
user_data += line
return user_data
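# Illustrative only (hypothetical file, not shipped with this module): a CLOUD_INIT
# template whose '{{ENV}}' line is expanded by _read_cloud_init into one
# 'export KEY=VALUE' line per config option plus BUCKET_NAME, e.g.:
#
#   #!/bin/bash
#   {{ENV}}
#   aws s3 cp s3://$BUCKET_NAME/ . --recursive
#   # ... run the calculation, then upload the results and the exit_code
#   # (FINISH_TASK_FILENAME) back to the bucket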
def _join(self, timeout=20, filename='exit_code'):
"""Wait until all instances are finished with the calulation.
:internal:
Args:
timeout (int): How long we wait between a check
filename (str): What is the filename of the exit_code
"""
import botocore # For exception
finished_tasks = []
client = self.s3_resource.meta.client
while len(finished_tasks) < len(self.job.tasks):
for t in [task for task in self.job.tasks if task not in finished_tasks]:
try:
client.get_object(Bucket=t.id, Key=filename)
except botocore.exceptions.ClientError:
# Instance not ready
time.sleep(2)
continue
finished_tasks.append(t)
logger.info("Calculation is finished for instance {}".format(t.id))
self.ec2_resource.instances.filter(InstanceIds=[t.ec2_instance]).terminate()
time.sleep(timeout)
logger.info("Calculations are finished")
def _get_result_data_from_S3(self):
"""Get all the calculated files to the correct folder
"""
import boto3
client = self.s3_resource.meta.client
transfer = boto3.s3.transfer.S3Transfer(client)
for t in self.job.tasks:
bucket = t.id
folder = t.directory
files = client.list_objects(Bucket=bucket)['Contents']
logger.debug("Starting new folder")
for file in files:
file_name = file['Key']
transfer.download_file(bucket, file_name, os.path.join("{}/{}".format(folder, file_name)))
logger.debug("Downloaded file {}".format(file_name))
def _get_status_code(self, filename='exit_code'):
"""Get the status code from the caluclation
Args:
filename (str): Filename of exit_code
"""
status_code = []
for t in self.job.tasks:
dir = "{}/{}".format(t.directory, filename)
file = open(dir, 'r')
status_code.append(file.read())
return status_code
def _cleanup(self):
threads = []
for t in self.job.tasks:
thread = threading.Thread(target=self._delete_bucket, args=(t.id, ))
threads.append(thread)
thread.start()
logger.info("Deleting buckets: ")
self._wait_for_threads_finished(threads, "Deleting buckets")
# Clean up volumes
client = self.ec2_resource.meta.client
volumes = client.describe_volumes(Filters=[{'Name': 'status', 'Values': ['available']}])['Volumes']
for v in volumes:
client.delete_volume(VolumeId=v['VolumeId'])
def _delete_bucket(self, bucket_name):
bucket = self.s3_resource.Bucket(bucket_name)
for key in bucket.objects.all():
key.delete()
bucket.delete()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# FEMAG STUFF
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def create_job(self, workdir):
"""Create a FEMAG :py:class:`CloudJob`
Args:
workdir (str): The workdir where the calculation files are stored
Return:
Cloud job (:class:`CloudJob`)
"""
self.job = femagtools.job.CloudJob(workdir)
return self.job
def submit(self):
"""Starts the FEMAG calculation(s) on Amazon
Return:
length of started tasks (int)
"""
self._create_data_buckets()
self._upload_files_to_s3()
self._start_instances()
return len(self.job.tasks)
def join( self ):
"""Wait until all calculations are finished
Return:
list of all calculations status (C = Ok, X = error) (:obj:`list`)
"""
status = []
# Wait until all tasks are finished
self._join(timeout=20, filename=self.config['FINISH_TASK_FILENAME'])
# get all files
self._get_result_data_from_S3()
# Remove buckets if cleanup is set
if int(self.config.get('DELETE_BUCKETS', 0)):
self._cleanup()
status = self._get_status_code(filename=self.config['FINISH_TASK_FILENAME'])
for t, r in zip(self.job.tasks, status):
t.status = 'C' if int(r)==0 else 'X'
return status
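# Hedged usage sketch (not part of the original module): the engine is normally
# driven by femagtools, which populates the cloud job with tasks before submission.
# 'workdir' and 'engine.ini' are placeholders.
if __name__ == '__main__':
    engine = Engine(configfile='engine.ini')
    job = engine.create_job('workdir')
    # ... tasks are added to `job` by the femagtools machinery here ...
    engine.submit()
    print(engine.join())  # list of 'C' (ok) / 'X' (error), one entry per task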
|
runrun.py
|
import subprocess, time, os, sys, re, socket
from blessings import Terminal
bless_term = Terminal()
MAXLINES=100
def print_red(s, **kw):
print (bless_term.red(s), **kw)
def run(*args, **kw):
# print ("DBG", args, kw)
if 'showoutput' in kw:
showoutput = kw['showoutput']
# print("showoutput:", showoutput)
del kw['showoutput']
else:
showoutput = False
if 'timeout' in kw:
timeout = float(kw['timeout'])
if showoutput:
print("running", args[0], "with timeout:", timeout, end=' ')
del kw['timeout']
else:
timeout = 0
try:
if not timeout:
timeout = 10**10
# print ("args:", args)
proc = subprocess.Popen(*args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
t0 = time.time()
out = ""
complete = False
while time.time() < t0 + timeout:
line = proc.stdout.readline().decode('utf8')
# print ("DBG", type(out), type(line))
out += line
i = 0
while line != "":
if showoutput:
sys.stdout.write(line)
i += 1
if i >= MAXLINES:
break
line = proc.stdout.readline().decode('utf8')
out += line
if proc.poll() is not None:
complete = True
#get all output
line = proc.stdout.readline().decode('utf8')
out += line
while line != "":
if showoutput:
sys.stdout.write(line)
line = proc.stdout.readline().decode('utf8')
out += line
sys.stdout.flush()
break
## sys.stdout.write(".")
## sys.stdout.flush()
time.sleep(0.2)
if not complete:
proc.kill()
except subprocess.CalledProcessError as e:
out = e.output
return out, complete
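# Illustrative usage sketch (command and timeout are placeholders, not from the original):
#   out, completed = run(["ls", "-la"], timeout=5, showoutput=True)
#   if not completed:
#       print_red("command timed out and was killed")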
#
# run a git command, capture the output
#
def git(cmd, show=False, debug=False):
if debug:
print_red ("git %s" % cmd)
if hasattr(cmd, "lower"):
cmd = cmd.split()
out, good = run(["git"] + cmd, showoutput=show)
if not good:
err = "ERROR -- git command did not complete"
print (err, file=sys.stderr)
out += "\n\n" + err
return out, not good
def get_branch():
out, err = git("rev-parse --abbrev-ref HEAD")
return out.strip()
def get_repo():
out, err = git("remote -v")
return out.split()[1]
def get_author():
return git("log -1 --pretty=format:'%an'")[0].strip().replace("'", "")
def get_username():
return git("config --get user.name")[0].strip()
def git_status(show=False, debug=False):
out, err = git("status --porcelain", show=show, debug=debug)
changes=0
for row in out.split("\n"):
row = row.strip()
if not row:
continue
if row[:2] != "??":
changes += 1
return changes
import subprocess as sp
from threading import Thread
from queue import Queue, Empty
import time
def test_func(s):
if not s:
return ""
# print ("----------PARSE------------")
# print (s)
# print ("~~~~~~~~~~~~~~~~~")
N = 7
for L in s.split():
try:
N = int(L.strip())
except:
pass
# print ("RESULT:", N)
# print ("----------/PARSE------------")
return "BOOM " * N
def stdout_thread(o, q):
def getchar():
return o.read(1)
for c in iter(getchar, b''):
q.put(c)
o.close()
def get_sub_stdout(q):
r = b''
while True:
try:
c = q.get(False)
except Empty:
# print (" EMPTY")
break
else:
# print (" DATA")
r += c
return r
def escape_ansi(line):
ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
# ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]')
return ansi_escape.sub('', line)
SLEEPYTIME = .1
SSH_FORCE_TIMEOUT = 30
class runner:
def __init__(self, cmd):
self.pobj = sp.Popen(cmd.split(), stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.STDOUT)
self.q = Queue()
self.t = Thread(target=stdout_thread, args=(self.pobj.stdout, self.q))
self.t.daemon = True
self.t.start()
self.in_dat = ''
self.t0 = time.time()
# Use advanced machine learning algorithms to ascertain if we have a prompt:
def has_prompt(self, s): # A proud moment in hell
# print ("SSS:", s)
if "\n" in s:
s = s.split("\n")[-1]
s = escape_ansi(s.strip())
i = s.find(self.prompt)
if i<0 or i>12:
# print ("FAIL")
return False
# print("PROMPT FOUND")
return True
#
# call interact with user input, returns next process text+prompt
#
def interact(self, cmd=None, expect=None):
if cmd != None: #typically None for first interaction to get prompt
#if '', still need to write to stdin to keep rolling ball
# print ("===%s==="%cmd)
self.pobj.stdin.write(bytes(cmd, 'utf-8'))
self.pobj.stdin.write(b'\n')
try:
self.pobj.stdin.flush()
except:
return ''
self.in_dat = cmd
if expect==None:
expect=[]
elif hasattr(expect, "lower"):
expect = [expect]
# print ("EXPECT:", expect)
o_new = get_sub_stdout(self.q).decode('utf8')
o_dat = o_new
while not o_new:
br = False
for ex in expect:
# print ("TEST:", ex, o_new, "||", ex in o_new, "|||")
if ex in o_new: #additional triggers to return such as Y/n prompts
br = True
break
if br:
break
o_new = get_sub_stdout(self.q).decode('utf8')
o_dat += o_new
time.sleep(SLEEPYTIME)
# print ("DBG A")
# remove echo:
# if o_dat.find(self.in_dat+"\r\n")==0:
# o_dat=o_dat[len(self.in_dat)+2:]
return o_dat, self.has_prompt(o_dat)
def first(self):
o_dat = ""
t0 = time.time()
while True:
# print (" FIRST:",o_dat)
if time.time()-t0 > SSH_FORCE_TIMEOUT:
return o_dat, True
o_dat += get_sub_stdout(self.q).decode('utf8')
spl = o_dat.rstrip().split("\n")
if len(spl) >= 2 and "last login" in spl[-2].lower():
break
if "timed out" in spl[-1]:
return o_dat, True
time.sleep(SLEEPYTIME)
# print (o_dat)
prompt = escape_ansi(spl[-1])
prompt.replace("\r", ']').strip()
i = prompt.find(':')
if i > 0:
# print ("III:", i)
prompt = prompt[0:i+1]
self.prompt = prompt
print ("PROMPT: >>>%s<<<" % prompt)
sys.stdout.flush()
return o_dat, False
def exit(self):
self.pobj.stdin.write(bytes('exit', 'utf-8'))
self.pobj.stdin.write(b'\n')
time.sleep(2)
o_new = get_sub_stdout(self.q).decode('utf8')
print (o_new)
sys.stdout.flush()
if __name__=="__main__":
cmd = "ssh -tt -4 localhost"
# cmd = "echoz foo"
print (cmd, end="\n\n")
run = runner(cmd)
o = run.first() #get initial startup spam + prompt
print (o)
run.interact("pwd")
run.exit()
print ("DONE")
# if __name__ == "__main__":
# print("test run.py")
# cmd = sys.argv[1:]
# s, err = run(cmd, timeout=10, showoutput=False)
# print("output----------\n", s)
# print("end output------")
# print("completed:", err)
|
microservice.py
|
import socket
import threading
from ..default import ArchNetClientHandler
from functools import update_wrapper
from multiprocessing import Process
def setupmethod(f):
def wrapper_func(self, *args, **kwargs):
return f(self, *args, **kwargs)
return update_wrapper(wrapper_func, f)
class Microservice(object):
"""This class implements a Microservice. To implement the proper microservice, use the following nomenclature:
```python
class MyMicroservice(Microservice):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
```
"""
def __init__(self, port, hostname, microservicename, backlog = 100):
self.microservice_name = microservicename
self.port = port
self.hostname = hostname
self.backlog = backlog
self.tcpserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcpserver.bind((
self.hostname,
self.port
))
def start(self):
self.tcpserver.listen(self.backlog)
while True:
self.client(
self.tcpserver.accept()
)
def client(self, client_tuple):
client, client_addr = client_tuple
Process(target=self.ClientThread, args=(client, client_addr,)).start()
@setupmethod
def client_thread(self, f):
self.ClientThread = f
return f
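# Illustrative usage sketch (handler, host, and port are hypothetical, not from the original):
#   service = Microservice(port=9000, hostname="0.0.0.0", microservicename="echo")
#
#   @service.client_thread
#   def handle(client, client_addr):
#       client.sendall(client.recv(1024))   # echo back whatever was received
#       client.close()
#
#   service.start()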
|
debug_eval_mini_srcgame_add_map_bn.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
USED_DEVICES = "-1"
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = USED_DEVICES
import sys
import threading
import time
import tensorflow as tf
from absl import app
from absl import flags
from pysc2 import maps
from pysc2.lib import stopwatch
import lib.config as C
import param as P
import mini_source_agent_add_map_bn as mini_source_agent
from mini_network_add_map_bn import MiniNetwork
# from pysc2.env import sc2_env
from lib import my_sc2_env as sc2_env
from lib.replay_buffer import Buffer
from strategy.terran_agent import DummyTerran
from strategy_env import SimulatePlatform
import unit.protoss_unit as P
import unit.terran_unit as T
from datetime import datetime
import multiprocessing as mp
import numpy as np
from logging import warning as logging
FLAGS = flags.FLAGS
flags.DEFINE_bool("training", True, "Whether to train agents.")
flags.DEFINE_bool("on_server", True, "Whether is running on server.")
flags.DEFINE_bool("debug_mode", True, "Whether is debuging")
flags.DEFINE_integer("num_for_update", 100, "Number of episodes for each train.")
flags.DEFINE_string("log_path", "./logs/", "Path for log.")
flags.DEFINE_string("device", USED_DEVICES, "Device for training.")
# Simple64
flags.DEFINE_string("map", "Simple64", "Name of a map to use.")
flags.DEFINE_bool("render", False, "Whether to render with pygame.")
flags.DEFINE_integer("screen_resolution", 64, "Resolution for screen feature layers.")
flags.DEFINE_integer("minimap_resolution", 64, "Resolution for minimap feature layers.")
flags.DEFINE_enum("agent_race", "P", sc2_env.races.keys(), "Agent's race.")
flags.DEFINE_enum("bot_race", "T", sc2_env.races.keys(), "Bot's race.")
flags.DEFINE_enum("difficulty", "7", sc2_env.difficulties.keys(), "Bot's strength.")
flags.DEFINE_integer("max_agent_steps", 18000, "Total agent steps.")
flags.DEFINE_integer("step_mul", 8, "Game steps per agent step.")
flags.DEFINE_bool("profile", False, "Whether to turn on code profiling.")
flags.DEFINE_bool("trace", False, "Whether to trace the code execution.")
flags.DEFINE_bool("save_replay", False, "Whether to replays_save a replay at the end.")
flags.DEFINE_string("replay_dir", "multi-agent/", "dir of replay to replays_save.")
# 20200825-101942_mini
# 20200828-160609_source
flags.DEFINE_string("restore_model_path", "./model/20200901-213813_mini/", "path for restore model")
flags.DEFINE_bool("restore_model", True, "Whether to restore old model")
flags.DEFINE_string("restore_from", "mini", "mini (for Thought-Game) or source (for Real game)")
flags.DEFINE_string("restore_to", "source", "mini (for Thought-Game) or source (for Real game)")
flags.DEFINE_bool("load_latest", False, "Load latest or bestest model, default is False")
flags.DEFINE_integer("parallel", 10, "How many processes to run in parallel.")
flags.DEFINE_integer("thread_num", 5, "How many thread to run in the process.")
flags.DEFINE_integer("port_num", 7770, "the start port to create distribute tf")
flags.DEFINE_integer("max_iters", 100, "the rl agent max run iters")
flags.DEFINE_string("game_version", None, "game version of SC2")
flags.DEFINE_bool("freeze_head", False, "Whether freeze_head train agents.")
flags.DEFINE_bool("use_bn", False, "Whether use batch_norm to training.")
flags.DEFINE_bool("use_sep_net", False, "Whether use seperate network for policy and value model.")
flags.DEFINE_integer("ob_space_add", 4, "Add state space from thought game.")
flags.DEFINE_integer("act_space_add", 5, "Add action space from thought game.")
flags.DEFINE_bool("add_image", True, "Whether add image for input.")
flags.DEFINE_bool("image_debug", True, "Whether debug image for input.")
flags.DEFINE_string("weighted_sum_type", "AddWeight", "add weighted sum type, AddWeight, AdaptiveWeight, AttentionWeight")
FLAGS(sys.argv)
# set the play map
play_map = C.get_map_class('lib.config.' + FLAGS.map)
C.my_sub_pos = play_map.my_sub_pos
C.enemy_sub_pos = play_map.enemy_sub_pos
C.enemy_main_pos = play_map.enemy_main_pos
C.base_camera_pos = play_map.base_camera_pos
if not FLAGS.on_server or FLAGS.debug_mode:
PARALLEL = 1
THREAD_NUM = 1
MAX_AGENT_STEPS = 18000
DEVICE = ['/gpu:0']
NUM_FOR_UPDATE = 1
TRAIN_ITERS = 1
PORT_NUM = FLAGS.port_num
else:
PARALLEL = FLAGS.parallel
THREAD_NUM = FLAGS.thread_num
MAX_AGENT_STEPS = FLAGS.max_agent_steps
if USED_DEVICES == '-1':
DEVICE = ['/cpu:0']
else:
DEVICE = ['/gpu:' + str(dev) for dev in range(len(FLAGS.device.split(',')))]
NUM_FOR_UPDATE = FLAGS.num_for_update
TRAIN_ITERS = FLAGS.max_iters
PORT_NUM = FLAGS.port_num
LOG = FLAGS.log_path
if not os.path.exists(LOG):
os.makedirs(LOG)
SERVER_DICT = {"worker": [], "ps": []}
# define some global variable
UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event()
Counter = 0
Waiting_Counter = 0
Update_Counter = 0
Result_List = []
'''
ps -ef |grep liuruoze | grep 'SC2_x64' | awk '{print $2}' | xargs kill -9
kill -9 `ps -ef |grep liuruoze | grep eval_mini_srcgame_add_map_bn | awk '{print $2}' `
'''
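# Architecture overview (added summary of the code below): one parameter-server process
# ("ps") hosts the shared variables and handles win-rate tracking and model saving, while
# PARALLEL worker processes each run THREAD_NUM game threads and push network updates;
# processes synchronize through the multiprocessing Barrier passed in as `Synchronizer`.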
def run_thread(agent, game_num, Synchronizer, difficulty):
global UPDATE_EVENT, ROLLING_EVENT, Counter, Waiting_Counter, Update_Counter, Result_List
num = 0
all_num = 0
proc_name = mp.current_process().name
C._FPS = 22.4 / FLAGS.step_mul # 5.6
step_mul = FLAGS.step_mul # 4
C.difficulty = difficulty
with sc2_env.SC2Env(
map_name=FLAGS.map,
agent_race=FLAGS.agent_race,
bot_race=FLAGS.bot_race,
difficulty=difficulty,
step_mul=step_mul,
score_index=-1,
game_steps_per_episode=MAX_AGENT_STEPS,
screen_size_px=(FLAGS.screen_resolution, FLAGS.screen_resolution),
minimap_size_px=(FLAGS.minimap_resolution, FLAGS.minimap_resolution),
visualize=False,
game_version=FLAGS.game_version) as env:
# env = available_actions_printer.AvailableActionsPrinter(env)
agent.set_env(env)
while all_num != game_num * TRAIN_ITERS:
agent.play_right_add(verbose=FLAGS.debug_mode)
if FLAGS.training:
# check if the num of episodes is enough to update
num += 1
all_num += 1
reward = agent.result['reward']
Counter += 1
Result_List.append(reward)
logging("(diff: %d) %d epoch: %s get %d/%d episodes! return: %d!" %
(int(difficulty), Update_Counter, proc_name, len(Result_List), game_num * THREAD_NUM, reward))
# time for update
if num == game_num:
num = 0
ROLLING_EVENT.clear()
# worker stops rolling, wait for update
if agent.index != 0 and THREAD_NUM > 1:
Waiting_Counter += 1
if Waiting_Counter == THREAD_NUM - 1: # wait for all the workers stop
UPDATE_EVENT.set()
ROLLING_EVENT.wait()
# update!
else:
if THREAD_NUM > 1:
UPDATE_EVENT.wait()
Synchronizer.wait() # wait for other processes to update
agent.update_network(Result_List)
Result_List.clear()
agent.global_buffer.reset()
Synchronizer.wait()
Update_Counter += 1
# finish update
UPDATE_EVENT.clear()
Waiting_Counter = 0
ROLLING_EVENT.set()
if FLAGS.save_replay:
env.save_replay(FLAGS.replay_dir)
agent.reset()
def Worker(index, update_game_num, Synchronizer, cluster, model_path, log_path):
config = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False,
)
config.gpu_options.allow_growth = True
worker = tf.train.Server(cluster, job_name="worker", task_index=index, config=config)
sess = tf.Session(target=worker.target, config=config)
summary_writer = tf.summary.FileWriter(log_path)
Net = MiniNetwork(sess=sess, summary_writer=summary_writer, rl_training=FLAGS.training,
cluster=cluster, index=index, device=DEVICE[index % len(DEVICE)],
ppo_load_path=FLAGS.restore_model_path, ppo_save_path=model_path,
ob_space_add=FLAGS.ob_space_add, act_space_add=FLAGS.act_space_add,
freeze_head=FLAGS.freeze_head, use_bn=FLAGS.use_bn,
use_sep_net=FLAGS.use_sep_net, restore_model=FLAGS.restore_model,
restore_from=FLAGS.restore_from, restore_to=FLAGS.restore_to,
load_latest=FLAGS.load_latest, add_image=FLAGS.add_image)
global_buffer = Buffer()
agents = []
for i in range(THREAD_NUM):
agent = mini_source_agent.MiniSourceAgent(index=i, global_buffer=global_buffer, net=Net,
restore_model=FLAGS.restore_model, rl_training=FLAGS.training,
strategy_agent=None, ob_space_add=FLAGS.ob_space_add, image_debug=FLAGS.image_debug)
agents.append(agent)
print("Worker %d: waiting for cluster connection..." % index)
sess.run(tf.report_uninitialized_variables())
print("Worker %d: cluster ready!" % index)
while len(sess.run(tf.report_uninitialized_variables())):
print("Worker %d: waiting for variable initialization..." % index)
time.sleep(1)
print("Worker %d: variables initialized" % index)
game_num = int(np.ceil(update_game_num / THREAD_NUM))
UPDATE_EVENT.clear()
ROLLING_EVENT.set()
# Run threads
threads = []
for i in range(THREAD_NUM - 1):
t = threading.Thread(target=run_thread, args=(agents[i], game_num, Synchronizer, FLAGS.difficulty))
threads.append(t)
t.daemon = True
t.start()
time.sleep(3)
run_thread(agents[-1], game_num, Synchronizer, FLAGS.difficulty)
for t in threads:
t.join()
def Parameter_Server(Synchronizer, cluster, log_path, model_path, procs):
config = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False,
)
config.gpu_options.allow_growth = True
server = tf.train.Server(cluster, job_name="ps", task_index=0, config=config)
sess = tf.Session(target=server.target, config=config)
summary_writer = tf.summary.FileWriter(log_path)
Net = MiniNetwork(sess=sess, summary_writer=summary_writer, rl_training=FLAGS.training,
cluster=cluster, index=0, device=DEVICE[0 % len(DEVICE)],
ppo_load_path=FLAGS.restore_model_path, ppo_save_path=model_path,
ob_space_add=FLAGS.ob_space_add, act_space_add=FLAGS.act_space_add,
freeze_head=FLAGS.freeze_head, use_bn=FLAGS.use_bn,
use_sep_net=FLAGS.use_sep_net,
restore_model=FLAGS.restore_model,
restore_from=FLAGS.restore_from, restore_to=FLAGS.restore_to,
load_latest=FLAGS.load_latest, add_image=FLAGS.add_image)
agent = mini_source_agent.MiniSourceAgent(index=-1, net=Net, restore_model=FLAGS.restore_model, rl_training=FLAGS.training,
ob_space_add=FLAGS.ob_space_add, image_debug=FLAGS.image_debug)
print("Parameter server: waiting for cluster connection...")
sess.run(tf.report_uninitialized_variables())
print("Parameter server: cluster ready!")
print("Parameter server: initializing variables...")
agent.init_network()
print("Parameter server: variables initialized")
update_counter = 0
max_win_rate = 0.
latest_win_rate = 0.
while update_counter < TRAIN_ITERS:
agent.reset_old_network()
# wait for update
Synchronizer.wait()
logging("Update Network!")
# TODO: measure the update time and compare CPU vs. GPU
time.sleep(1)
# update finish
Synchronizer.wait()
logging("Update Network finished!")
steps, win_rate = agent.update_summary(update_counter)
logging("Steps: %d, win rate: %f" % (steps, win_rate))
update_counter += 1
if win_rate >= max_win_rate:
agent.save_model()
max_win_rate = win_rate
latest_win_rate = win_rate
agent.net.save_latest_policy()
return max_win_rate, latest_win_rate
def _main(unused_argv):
# create distribute tf cluster
start_port = PORT_NUM
SERVER_DICT["ps"].append("localhost:%d" % start_port)
for i in range(PARALLEL):
SERVER_DICT["worker"].append("localhost:%d" % (start_port + 1 + i))
Cluster = tf.train.ClusterSpec(SERVER_DICT)
now = datetime.now()
model_path = "./model/" + now.strftime("%Y%m%d-%H%M%S") + "_source/"
if not os.path.exists(model_path):
os.makedirs(model_path)
log_path = "./logs/" + now.strftime("%Y%m%d-%H%M%S") + "_source/"
UPDATE_GAME_NUM = NUM_FOR_UPDATE
per_update_num = np.ceil(UPDATE_GAME_NUM / PARALLEL)
Synchronizer = mp.Barrier(PARALLEL + 1)
# Run parallel process
procs = []
for index in range(PARALLEL):
p = mp.Process(name="Worker_%d" % index, target=Worker, args=(index, per_update_num, Synchronizer, Cluster, model_path, log_path))
procs.append(p)
p.daemon = True
p.start()
time.sleep(1)
max_win_rate, latest_win_rate = Parameter_Server(Synchronizer, Cluster, log_path, model_path, procs)
print('#######################')
print('Best Win_rate:', max_win_rate)
print('Latest Win_rate:', latest_win_rate)
print('#######################')
for p in procs:
p.join()
'''
if FLAGS.profile:
print(stopwatch.sw)
'''
if __name__ == "__main__":
app.run(_main)
|
acc_threshold_detector.py
|
#***************************************************************************************************
# Imports
#***************************************************************************************************
# Global imports
import os
import threading
import math
import time
import traceback
# Project imports
import mooving_iot.utils.logger as logger
import mooving_iot.project_config as prj_cfg
import mooving_iot.utils.exit as utils_exit
import mooving_iot.drivers.acc.acc as drv_acc
#***************************************************************************************************
# Module logger
#***************************************************************************************************
_log = logger.Logger(os.path.basename(__file__)[0:-3], prj_cfg.LogLevel.INFO)
#***************************************************************************************************
# Public classes
#***************************************************************************************************
class AccAngles:
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __str__(self):
return 'X: {0:.2f}, Y: {1:.2f}, Z: {2:.2f}.'.format(self.x, self.y, self.z)
class AccThresholdDetector:
DEFAULT_ACC_ANGLE_X = -50
DEFAULT_ACC_ANGLE_Y = 0
DEFAULT_ACC_ANGLE_Z = 40
def __init__(self, acc_driver : drv_acc.Acc):
self._acc_driver = acc_driver
self._last_acc_data = drv_acc.AccData(0, 0, 0)
self._acc_angles = AccAngles(0, 0, 0)
self._acc_out_of_thr_peak_count = 0
self._acc_out_of_thr_start_time_ms = 0
self._is_acc_out_of_thr = False
self._angle_out_of_thr_start_time_ms = None
self._angle_out_of_thr_stop_time_ms = None
self._acc_total_duration_ms = None
self._acc_peak_count = None
self._angle_threshold_degree = None
self._angle_total_duration_ms = None
self._data_lock = threading.Lock()
self._update_thread = threading.Thread(target=self._update)
self._update_thread.start()
def set_acc_threshold(self, threshold_mg, peak_duration_ms, total_duration_ms, peak_count):
with self._data_lock:
self._acc_driver.set_acc_threshold(threshold_mg, peak_duration_ms)
self._acc_total_duration_ms = total_duration_ms
self._acc_peak_count = peak_count
def set_angles_threshold(self, threshold_degree, total_duration_ms):
with self._data_lock:
self._angle_threshold_degree = threshold_degree
self._angle_total_duration_ms = total_duration_ms
def get_angles(self) -> AccAngles:
with self._data_lock:
return self._acc_angles
def is_acc_out_of_threshold(self) -> bool:
with self._data_lock:
if self._acc_peak_count is not None:
current_time_ms = int(time.time() * 1000)
end_time_ms = self._acc_out_of_thr_start_time_ms + self._acc_total_duration_ms
if current_time_ms > end_time_ms:
self._is_acc_out_of_thr = self._acc_out_of_thr_peak_count >= self._acc_peak_count
self._acc_out_of_thr_start_time_ms = current_time_ms
self._acc_out_of_thr_peak_count = 0
return self._is_acc_out_of_thr
else:
return False
def is_angles_out_of_threshold(self) -> bool:
with self._data_lock:
if ((self._angle_out_of_thr_start_time_ms is None) or
(self._angle_total_duration_ms is None)):
return False
else:
current_time_ms = int(time.time() * 1000)
return ((self._angle_out_of_thr_start_time_ms + self._angle_total_duration_ms)
<= current_time_ms)
def clear(self):
with self._data_lock:
self._angle_out_of_thr_start_time_ms = None
self._acc_out_of_thr_peak_count = 0
def _update(self):
try:
acc_data_updated = self._acc_driver.get_data_updated_event()
while True:
acc_data_updated.wait()
acc_data_updated.clear()
with self._data_lock:
self._last_acc_data = self._acc_driver.get_last_data()
is_acc_data_threshold = self._acc_driver.is_acc_out_of_threshold()
current_time_ms = int(time.time() * 1000)
_log.debug(
'Acc data updated: x = {x} mg, y = {y} mg, z = {z} mg. Out of threshold: {thr}.'
.format(
x=self._last_acc_data.x_mg,
y=self._last_acc_data.y_mg,
z=self._last_acc_data.z_mg,
thr=is_acc_data_threshold))
self._calculate_angles()
_log.debug(
'Acc angles updated: x = {x}, y = {y}, z = {z}.'
.format(
x=self._acc_angles.x,
y=self._acc_angles.y,
z=self._acc_angles.z))
if self._calc_is_angles_out_of_thr():
self._angle_out_of_thr_stop_time_ms = None
if self._angle_out_of_thr_start_time_ms is None:
self._angle_out_of_thr_start_time_ms = current_time_ms
else:
if self._angle_out_of_thr_stop_time_ms is None:
self._angle_out_of_thr_stop_time_ms = current_time_ms
elif ((self._angle_out_of_thr_stop_time_ms + self._angle_total_duration_ms)
<= current_time_ms):
self._angle_out_of_thr_start_time_ms = None
if (self._acc_peak_count is not None) and is_acc_data_threshold:
self._acc_out_of_thr_peak_count += 1
except:
_log.error(traceback.format_exc())
utils_exit.exit(1)
def _calculate_angles(self):
x_pow2 = self._last_acc_data.x_mg ** 2
y_pow2 = self._last_acc_data.y_mg ** 2
z_pow2 = self._last_acc_data.z_mg ** 2
g_vector_length = math.sqrt(x_pow2 + y_pow2 + z_pow2)
x_angle = math.degrees(math.asin(self._last_acc_data.x_mg / g_vector_length))
y_angle = math.degrees(math.asin(self._last_acc_data.y_mg / g_vector_length))
z_angle = math.degrees(math.asin(self._last_acc_data.z_mg / g_vector_length))
self._acc_angles = AccAngles(x_angle, y_angle, z_angle)
def _calc_is_angles_out_of_thr(self):
if self._angle_threshold_degree is None:
_log.debug('Angle threshold not set; returning False')
return False
if (
((AccThresholdDetector.DEFAULT_ACC_ANGLE_X - self._angle_threshold_degree)
<= self._acc_angles.x <=
(AccThresholdDetector.DEFAULT_ACC_ANGLE_X + self._angle_threshold_degree))
and
((AccThresholdDetector.DEFAULT_ACC_ANGLE_Y - self._angle_threshold_degree)
<= self._acc_angles.y <=
(AccThresholdDetector.DEFAULT_ACC_ANGLE_Y + self._angle_threshold_degree))
and
((AccThresholdDetector.DEFAULT_ACC_ANGLE_Z - self._angle_threshold_degree)
<= self._acc_angles.z <=
(AccThresholdDetector.DEFAULT_ACC_ANGLE_Z + self._angle_threshold_degree))
):
return False
else:
return True
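# Illustrative usage sketch (the threshold values are placeholders, not from the original;
# assumes `acc` is a configured mooving_iot acc driver instance):
#   detector = AccThresholdDetector(acc)
#   detector.set_acc_threshold(threshold_mg=500, peak_duration_ms=50,
#                              total_duration_ms=2000, peak_count=3)
#   detector.set_angles_threshold(threshold_degree=15, total_duration_ms=5000)
#   if detector.is_acc_out_of_threshold() or detector.is_angles_out_of_threshold():
#       pass  # react to a possible movement / tilt event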
|
dashboard.py
|
import collections
import threading
import time
from typing import Any
from typing import DefaultDict
from typing import Dict
from typing import List
from typing import Optional
import numpy as np
from packaging import version
from optuna._imports import try_import
from optuna._study_direction import StudyDirection
import optuna.logging
import optuna.study
import optuna.trial
with try_import() as _imports:
from bokeh import __version__ as bokeh_version
import bokeh.command.bootstrap
import bokeh.document # NOQA
import bokeh.layouts
import bokeh.models
import bokeh.models.widgets
import bokeh.plotting
import bokeh.themes
import tornado.gen
if version.parse(bokeh_version) >= version.parse("2.0.0"):
raise ImportError(
"Your version of bokeh is " + bokeh_version + " . "
"Please install bokeh version earlier than 2.0.0. "
"Bokeh can be installed by executing `$ pip install 'bokeh<2.0.0'`. "
"For further information, please refer to the installation guide of bokeh. ",
name="bokeh",
)
_mode: Optional[str] = None
_study: Optional[optuna.study.Study] = None
_HEADER_FORMAT = """
<style>
body {{
margin: 20px;
}}
h1, p {{
margin: 10px 0px;
}}
</style>
<h1>Optuna Dashboard (Beta)</h1>
<p>
<b>Study name:</b> {study_name}<br>
</p>
"""
_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
if _imports.is_successful():
class _CompleteTrialsWidget(object):
def __init__(
self, trials: List[optuna.trial.FrozenTrial], direction: StudyDirection
) -> None:
complete_trials = [
trial for trial in trials if trial.state == optuna.trial.TrialState.COMPLETE
]
self.trial_ids = set([trial._trial_id for trial in complete_trials])
self.direction = direction
values = [trial.value for trial in complete_trials]
if direction == StudyDirection.MINIMIZE:
best_values = np.minimum.accumulate(values, axis=0)
else:
best_values = np.maximum.accumulate(values, axis=0)
self.cds = bokeh.models.ColumnDataSource(
{
"#": list(range(len(complete_trials))),
"value": values,
"best_value": best_values,
}
)
self.best_value = best_values[-1] if complete_trials else (np.inf if direction == StudyDirection.MINIMIZE else -np.inf)
def create_figure(self) -> bokeh.plotting.Figure:
figure = bokeh.plotting.figure(height=150)
figure.circle(x="#", y="value", source=self.cds, alpha=0.3, color="navy")
figure.line(x="#", y="best_value", source=self.cds, color="firebrick")
figure.xaxis[0].axis_label = "Number of Trials"
figure.yaxis[0].axis_label = "Objective Value"
return figure
def update(self, new_trials: List[optuna.trial.FrozenTrial]) -> None:
stream_dict: DefaultDict[str, List[Any]] = collections.defaultdict(list)
for trial in new_trials:
if trial.state != optuna.trial.TrialState.COMPLETE:
continue
if trial._trial_id in self.trial_ids:
continue
stream_dict["#"].append(len(self.trial_ids))
stream_dict["value"].append(trial.value)
if self.direction == StudyDirection.MINIMIZE:
self.best_value = min(self.best_value, trial.value)
else:
self.best_value = max(self.best_value, trial.value)
stream_dict["best_value"].append(self.best_value)
self.trial_ids.add(trial._trial_id)
if stream_dict:
self.cds.stream(stream_dict)
class _AllTrialsWidget(object):
def __init__(self, trials: List[optuna.trial.FrozenTrial]) -> None:
self.cds = bokeh.models.ColumnDataSource(self.trials_to_dict(trials))
def create_table(self) -> bokeh.models.widgets.DataTable:
return bokeh.models.widgets.DataTable(
source=self.cds,
columns=[
bokeh.models.widgets.TableColumn(field=field, title=field)
for field in [
"number",
"state",
"value",
"params",
"datetime_start",
"datetime_complete",
]
],
)
def update(
self,
old_trials: List[optuna.trial.FrozenTrial],
new_trials: List[optuna.trial.FrozenTrial],
) -> None:
modified_indices = []
modified_trials = []
for i, old_trial in enumerate(old_trials):
new_trial = new_trials[i]
if old_trial != new_trial:
modified_indices.append(i)
modified_trials.append(new_trial)
patch_dict = self.trials_to_dict(modified_trials)
patch_dict = {k: list(zip(modified_indices, v)) for k, v in patch_dict.items()}
self.cds.patch(patch_dict)
self.cds.stream(self.trials_to_dict(new_trials[len(old_trials) :]))
@staticmethod
def trials_to_dict(trials: List[optuna.trial.FrozenTrial]) -> Dict[str, List[Any]]:
return {
"number": [trial.number for trial in trials],
"state": [trial.state.name for trial in trials],
"value": [trial.value for trial in trials],
"params": [str(trial.params) for trial in trials],
"datetime_start": [
trial.datetime_start.strftime(_DATETIME_FORMAT)
if trial.datetime_start is not None
else None
for trial in trials
],
"datetime_complete": [
trial.datetime_complete.strftime(_DATETIME_FORMAT)
if trial.datetime_complete is not None
else None
for trial in trials
],
}
class _DashboardApp(object):
def __init__(self, study: optuna.study.Study, launch_update_thread: bool) -> None:
self.study = study
self.launch_update_thread = launch_update_thread
self.lock = threading.Lock()
def __call__(self, doc: bokeh.document.Document) -> None:
self.doc = doc
self.current_trials: Optional[List[optuna.trial.FrozenTrial]] = self.study.trials
self.new_trials: Optional[List[optuna.trial.FrozenTrial]] = None
self.complete_trials_widget = _CompleteTrialsWidget(
self.current_trials, self.study.direction
)
self.all_trials_widget = _AllTrialsWidget(self.current_trials)
self.doc.title = "Optuna Dashboard (Beta)"
header = _HEADER_FORMAT.format(study_name=self.study.study_name)
self.doc.add_root(
bokeh.layouts.layout(
[
[bokeh.models.widgets.Div(text=header)],
[self.complete_trials_widget.create_figure()],
[self.all_trials_widget.create_table()],
],
sizing_mode="scale_width",
)
)
if self.launch_update_thread:
thread = threading.Thread(target=self.thread_loop)
self.stop_event = threading.Event()
thread.daemon = True
thread.start()
self.doc.on_session_destroyed(lambda _: self.stop_event.set())
def thread_loop(self) -> None:
while not self.stop_event.is_set():
time.sleep(1)
new_trials = self.study.trials
with self.lock:
need_to_add_callback = self.new_trials is None
self.new_trials = new_trials
if need_to_add_callback:
self.doc.add_next_tick_callback(self.update_callback)
@tornado.gen.coroutine
def update_callback(self) -> Any:
with self.lock:
current_trials = self.current_trials
new_trials = self.new_trials
self.current_trials = self.new_trials
self.new_trials = None
assert current_trials is not None
assert new_trials is not None
self.complete_trials_widget.update(new_trials)
self.all_trials_widget.update(current_trials, new_trials)
def _get_this_source_path() -> str:
path = __file__
# Sometimes __file__ points to a *.pyc file, but Bokeh doesn't accept it.
if path.endswith(".pyc"):
path = path[:-1]
return path
def _serve(study: optuna.study.Study, bokeh_allow_websocket_origins: List[str]) -> None:
global _mode, _study
_imports.check()
# We want to pass the mode (launching a server? or, just writing an HTML?) and a target study
# to our Bokeh app. Unfortunately, as we are using `bokeh.command.bootstrap.main` to launch
# our Bokeh app, we cannot directly pass Python objects to it. Therefore, we have no choice but
# to use global variables to pass them.
_mode = "serve"
_study = study
# TODO(akiba): Stop using Bokeh's CLI entry point, and start the HTTP server by ourselves.
# This is not a very clean way to launch Bokeh server.
# Another seemingly better way is to
# instantiate and launch `bokeh.server.server.Server` by ourselves. However, in this way,
# for some reason, we found that the CDS update is not reflected in browsers, at least on Bokeh
# version 0.12.15. In addition, we would need to do a lot of server configuration, which can be
# done automatically with the following one line. So, for now, we decided to use this way.
command = ["bokeh", "serve", "--show", _get_this_source_path()]
for bokeh_allow_websocket_origin in bokeh_allow_websocket_origins:
command.extend(["--allow-websocket-origin", bokeh_allow_websocket_origin])
bokeh.command.bootstrap.main(command)
def _write(study: optuna.study.Study, out_path: str) -> None:
global _mode, _study
_imports.check()
_mode = "html"
_study = study
bokeh.command.bootstrap.main(["bokeh", "html", _get_this_source_path(), "-o", out_path])
def _run() -> None:
# Please note that `_study` and `optuna.dashboard._study` are different here. Here, this module
# is loaded inside Bokeh, and thus it is not `optuna.dashboard`, but `bk_script_????`.
study = optuna.dashboard._study
mode = optuna.dashboard._mode
assert study is not None
app = _DashboardApp(study, launch_update_thread=(mode == "serve"))
doc = bokeh.plotting.curdoc()
app(doc)
if __name__.startswith("bk_script_"):
# Here, this module is loaded inside Bokeh. Therefore, we should launch the Bokeh app.
_run()
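# Illustrative usage sketch (study name, storage URL, and origin are placeholders; in
# practice these helpers are invoked through optuna's dashboard command):
#   study = optuna.load_study(study_name="my-study", storage="sqlite:///example.db")
#   _serve(study, bokeh_allow_websocket_origins=["localhost:5006"])
#   # or write a static HTML report instead:
#   _write(study, "dashboard.html")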
|
test_server.py
|
import asyncio
import json
import os
import time
import urllib.parse
import uuid
from contextlib import ExitStack
from http import HTTPStatus
from multiprocessing import Process, Manager
from multiprocessing.managers import DictProxy
from pathlib import Path
from typing import List, Text, Type, Generator, NoReturn, Dict, Optional
from unittest.mock import Mock, ANY
import pytest
import requests
from _pytest import pathlib
from _pytest.monkeypatch import MonkeyPatch
from aioresponses import aioresponses
from freezegun import freeze_time
from mock import MagicMock
from ruamel.yaml import StringIO
from sanic import Sanic
from sanic.testing import SanicASGITestClient
import rasa
import rasa.constants
import rasa.core.jobs
import rasa.nlu
import rasa.server
import rasa.shared.constants
import rasa.shared.utils.io
import rasa.utils.io
from rasa.core import utils
from rasa.core.agent import Agent, load_agent
from rasa.core.channels import (
channel,
CollectingOutputChannel,
RestInput,
SlackInput,
CallbackInput,
)
from rasa.core.channels.slack import SlackBot
from rasa.core.tracker_store import InMemoryTrackerStore
from rasa.model import unpack_model
from rasa.nlu.test import CVEvaluationResult
from rasa.shared.core import events
from rasa.shared.core.constants import (
ACTION_SESSION_START_NAME,
ACTION_LISTEN_NAME,
REQUESTED_SLOT,
SESSION_START_METADATA_SLOT,
)
from rasa.shared.core.domain import Domain, SessionConfig
from rasa.shared.core.events import (
Event,
UserUttered,
SlotSet,
BotUttered,
ActionExecuted,
SessionStarted,
)
from rasa.shared.core.trackers import DialogueStateTracker
from rasa.shared.nlu.constants import INTENT_NAME_KEY
from rasa.train import TrainingResult
from rasa.utils.endpoints import EndpointConfig
from tests.core.conftest import DEFAULT_STACK_CONFIG
from tests.nlu.utilities import ResponseTest
from tests.utilities import json_of_latest_request, latest_request
# a couple of event instances that we can use for testing
test_events = [
Event.from_parameters(
{
"event": UserUttered.type_name,
"text": "/goodbye",
"parse_data": {
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"entities": [],
},
}
),
BotUttered("Welcome!", {"test": True}),
SlotSet("cuisine", 34),
SlotSet("cuisine", "34"),
SlotSet("location", None),
SlotSet("location", [34, "34", None]),
]
# sequence of events expected at the beginning of trackers
session_start_sequence: List[Event] = [
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
ActionExecuted(ACTION_LISTEN_NAME),
]
@pytest.fixture
def rasa_app_without_api(rasa_server_without_api: Sanic) -> SanicASGITestClient:
return rasa_server_without_api.asgi_client
@pytest.fixture
def rasa_app(rasa_server: Sanic) -> SanicASGITestClient:
return rasa_server.asgi_client
@pytest.fixture
def rasa_app_nlu(rasa_nlu_server: Sanic) -> SanicASGITestClient:
return rasa_nlu_server.asgi_client
@pytest.fixture
def rasa_app_core(rasa_core_server: Sanic) -> SanicASGITestClient:
return rasa_core_server.asgi_client
@pytest.fixture
def rasa_secured_app(rasa_server_secured: Sanic) -> SanicASGITestClient:
return rasa_server_secured.asgi_client
@pytest.fixture()
async def tear_down_scheduler() -> Generator[None, None, None]:
yield None
rasa.core.jobs.__scheduler = None
async def test_root(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get("/")
assert response.status == HTTPStatus.OK
assert response.text.startswith("Hello from Rasa:")
async def test_root_without_enable_api(rasa_app_without_api: SanicASGITestClient):
_, response = await rasa_app_without_api.get("/")
assert response.status == HTTPStatus.OK
assert response.text.startswith("Hello from Rasa:")
async def test_root_secured(rasa_secured_app: SanicASGITestClient):
_, response = await rasa_secured_app.get("/")
assert response.status == HTTPStatus.OK
assert response.text.startswith("Hello from Rasa:")
async def test_version(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get("/version")
content = response.json()
assert response.status == HTTPStatus.OK
assert content.get("version") == rasa.__version__
assert (
content.get("minimum_compatible_version")
== rasa.constants.MINIMUM_COMPATIBLE_VERSION
)
async def test_status(rasa_app: SanicASGITestClient, trained_rasa_model: Text):
_, response = await rasa_app.get("/status")
model_file = response.json()["model_file"]
assert response.status == HTTPStatus.OK
assert "fingerprint" in response.json()
assert os.path.isfile(model_file)
assert model_file == trained_rasa_model
async def test_status_nlu_only(
rasa_app_nlu: SanicASGITestClient, trained_nlu_model: Text
):
_, response = await rasa_app_nlu.get("/status")
model_file = response.json()["model_file"]
assert response.status == HTTPStatus.OK
assert "fingerprint" in response.json()
assert "model_file" in response.json()
assert model_file == trained_nlu_model
async def test_status_secured(rasa_secured_app: SanicASGITestClient):
_, response = await rasa_secured_app.get("/status")
assert response.status == HTTPStatus.UNAUTHORIZED
async def test_status_not_ready_agent(rasa_app: SanicASGITestClient):
rasa_app.app.agent = None
_, response = await rasa_app.get("/status")
assert response.status == HTTPStatus.CONFLICT
@pytest.fixture
def shared_statuses() -> DictProxy:
return Manager().dict()
@pytest.fixture
def background_server(
shared_statuses: DictProxy, tmpdir: pathlib.Path, monkeypatch: MonkeyPatch
) -> Generator[Process, None, None]:
# Create a fake model archive which the mocked train function can return
fake_model = Path(tmpdir) / "fake_model.tar.gz"
fake_model.touch()
fake_model_path = str(fake_model)
# Fake training function which blocks until we tell it to stop blocking
# If we can send a status request while this is blocking, we can be sure that the
# actual training is also not blocking
async def mocked_training_function(*_, **__) -> TrainingResult:
# Tell the others that we are now blocking
shared_statuses["started_training"] = True
# Block until somebody tells us to not block anymore
while shared_statuses.get("stop_training") is not True:
time.sleep(1)
return TrainingResult(model=fake_model_path)
def run_server(monkeypatch: MonkeyPatch) -> NoReturn:
import sys
monkeypatch.setattr(
sys.modules["rasa.train"], "train_async", mocked_training_function,
)
from rasa import __main__
sys.argv = ["rasa", "run", "--enable-api"]
__main__.main()
server = Process(target=run_server, args=(monkeypatch,))
yield server
server.terminate()
@pytest.fixture()
def training_request(
shared_statuses: DictProxy, tmp_path: Path
) -> Generator[Process, None, None]:
def send_request() -> None:
payload = {}
project_path = Path("examples") / "formbot"
for file in [
"domain.yml",
"config.yml",
Path("data") / "rules.yml",
Path("data") / "stories.yml",
Path("data") / "nlu.yml",
]:
full_path = project_path / file
# Read in as dictionaries so that keys which are specified in
# multiple files (such as 'version') do not clash.
content = rasa.shared.utils.io.read_yaml_file(full_path)
payload.update(content)
concatenated_payload_file = tmp_path / "concatenated.yml"
rasa.shared.utils.io.write_yaml(payload, concatenated_payload_file)
payload_as_yaml = concatenated_payload_file.read_text()
response = requests.post(
"http://localhost:5005/model/train",
data=payload_as_yaml,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
params={"force_training": True},
)
shared_statuses["training_result"] = response.status_code
train_request = Process(target=send_request)
yield train_request
train_request.terminate()
# For unknown reasons this test cannot be run in PyCharm; it
# results in segfaults, so we skip it in that case - the test will still get run on CI.
# It also doesn't run on Windows because of Process-related calls and an attempt
# to start/terminate a process. We will investigate this case further later:
# https://github.com/RasaHQ/rasa/issues/6302
@pytest.mark.skipif("PYCHARM_HOSTED" in os.environ, reason="results in segfault")
@pytest.mark.skip_on_windows
def test_train_status_is_not_blocked_by_training(
background_server: Process, shared_statuses: DictProxy, training_request: Process
):
background_server.start()
def is_server_ready() -> bool:
try:
return (
requests.get("http://localhost:5005/status").status_code
== HTTPStatus.OK
)
except Exception:
return False
# wait until server is up before sending train request and status test loop
start = time.time()
while not is_server_ready() and time.time() - start < 60:
time.sleep(1)
assert is_server_ready()
training_request.start()
# Wait until the blocking training function was called
start = time.time()
while (
shared_statuses.get("started_training") is not True and time.time() - start < 60
):
time.sleep(1)
# Check if the number of currently running trainings was incremented
response = requests.get("http://localhost:5005/status")
assert response.status_code == HTTPStatus.OK
assert response.json()["num_active_training_jobs"] == 1
# Tell the blocking training function to stop
shared_statuses["stop_training"] = True
start = time.time()
while shared_statuses.get("training_result") is None and time.time() - start < 60:
time.sleep(1)
assert shared_statuses.get("training_result")
# Check that the training worked correctly
assert shared_statuses["training_result"] == HTTPStatus.OK
# Check if the number of currently running trainings was decremented
response = requests.get("http://localhost:5005/status")
assert response.status_code == HTTPStatus.OK
assert response.json()["num_active_training_jobs"] == 0
@pytest.mark.parametrize(
"response_test",
[
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello ńöñàśçií",
},
payload={"text": "hello ńöñàśçií"},
),
],
)
async def test_parse(rasa_app: SanicASGITestClient, response_test: ResponseTest):
_, response = await rasa_app.post(
response_test.endpoint, json=response_test.payload
)
rjs = response.json()
assert response.status == HTTPStatus.OK
assert all(prop in rjs for prop in ["entities", "intent", "text"])
assert rjs["entities"] == response_test.expected_response["entities"]
assert rjs["text"] == response_test.expected_response["text"]
assert rjs["intent"] == response_test.expected_response["intent"]
@pytest.mark.parametrize(
"response_test",
[
ResponseTest(
"/model/parse?emulation_mode=wit",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse?emulation_mode=dialogflow",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse?emulation_mode=luis",
{
"entities": [],
"intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
"text": "hello ńöñàśçií",
},
payload={"text": "hello ńöñàśçií"},
),
],
)
async def test_parse_with_different_emulation_mode(
rasa_app: SanicASGITestClient, response_test: ResponseTest
):
_, response = await rasa_app.post(
response_test.endpoint, json=response_test.payload
)
assert response.status == HTTPStatus.OK
async def test_parse_without_nlu_model(rasa_app_core: SanicASGITestClient):
_, response = await rasa_app_core.post("/model/parse", json={"text": "hello"})
assert response.status == HTTPStatus.OK
rjs = response.json()
assert all(prop in rjs for prop in ["entities", "intent", "text"])
async def test_parse_on_invalid_emulation_mode(rasa_app_nlu: SanicASGITestClient):
_, response = await rasa_app_nlu.post(
"/model/parse?emulation_mode=ANYTHING", json={"text": "hello"}
)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_train_stack_success_with_md(
rasa_app: SanicASGITestClient,
default_domain_path: Text,
default_stack_config: Text,
default_nlu_data: Text,
tmp_path: Path,
):
payload = dict(
domain=Path(default_domain_path).read_text(),
config=Path(default_stack_config).read_text(),
stories=Path("data/test_stories/stories_defaultdomain.md").read_text(),
nlu=Path(default_nlu_data).read_text(),
)
_, response = await rasa_app.post("/model/train", json=payload)
assert response.status == HTTPStatus.OK
assert response.headers["filename"] is not None
# save model to temporary file
model_path = str(tmp_path / "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response.body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
async def test_train_nlu_success(
rasa_app: SanicASGITestClient,
default_stack_config: Text,
default_nlu_data: Text,
default_domain_path: Text,
tmp_path: Path,
):
domain_data = rasa.shared.utils.io.read_yaml_file(default_domain_path)
config_data = rasa.shared.utils.io.read_yaml_file(default_stack_config)
nlu_data = rasa.shared.utils.io.read_yaml_file(default_nlu_data)
# combine all data into our payload
payload = {
key: val for d in [domain_data, config_data, nlu_data] for key, val in d.items()
}
data = StringIO()
rasa.shared.utils.io.write_yaml(payload, data)
_, response = await rasa_app.post(
"/model/train",
data=data.getvalue(),
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
# save model to temporary file
model_path = str(tmp_path / "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response.body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
async def test_train_core_success_with(
rasa_app: SanicASGITestClient,
default_stack_config: Text,
default_stories_file: Text,
default_domain_path: Text,
tmp_path: Path,
):
payload = f"""
{Path(default_domain_path).read_text()}
{Path(default_stack_config).read_text()}
{Path(default_stories_file).read_text()}
"""
_, response = await rasa_app.post(
"/model/train",
data=payload,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
# save model to temporary file
model_path = str(tmp_path / "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response.body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
async def test_train_with_retrieval_events_success(
rasa_app: SanicASGITestClient, default_stack_config: Text, tmp_path: Path
):
with ExitStack() as stack:
domain_file = stack.enter_context(
open("data/test_domains/default_retrieval_intents.yml")
)
config_file = stack.enter_context(open(default_stack_config))
core_file = stack.enter_context(
open("data/test_stories/stories_retrieval_intents.md")
)
responses_file = stack.enter_context(open("data/test_responses/default.md"))
nlu_file = stack.enter_context(
open("data/test_nlu/default_retrieval_intents.md")
)
payload = dict(
domain=domain_file.read(),
config=config_file.read(),
stories=core_file.read(),
responses=responses_file.read(),
nlu=nlu_file.read(),
)
_, response = await rasa_app.post("/model/train", json=payload, timeout=60 * 5)
assert response.status == HTTPStatus.OK
assert_trained_model(response.body, tmp_path)
def assert_trained_model(response_body: bytes, tmp_path: Path) -> None:
# save model to temporary file
model_path = str(tmp_path / "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response_body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
@pytest.mark.parametrize(
"payload",
[
{"config": None, "stories": None, "nlu": None, "domain": None, "force": True},
{
"config": None,
"stories": None,
"nlu": None,
"domain": None,
"force": False,
"save_to_default_model_directory": True,
},
{
"config": None,
"stories": None,
"nlu": None,
"domain": None,
"save_to_default_model_directory": False,
},
],
)
def test_deprecation_warnings_json_payload(payload: Dict):
with pytest.warns(FutureWarning):
rasa.server._validate_json_training_payload(payload)
async def test_train_with_yaml(rasa_app: SanicASGITestClient, tmp_path: Path):
training_data = """
stories:
- story: My story
steps:
- intent: greet
- action: utter_greet
rules:
- rule: My rule
steps:
- intent: greet
- action: utter_greet
intents:
- greet
nlu:
- intent: greet
examples: |
- hi
- hello
responses:
utter_greet:
- text: Hi
language: en
policies:
- name: RulePolicy
pipeline:
- name: KeywordIntentClassifier
"""
_, response = await rasa_app.post(
"/model/train",
data=training_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
assert_trained_model(response.body, tmp_path)
async def test_train_with_invalid_yaml(rasa_app: SanicASGITestClient):
invalid_yaml = """
rules:
rule my rule
"""
_, response = await rasa_app.post(
"/model/train",
data=invalid_yaml,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.BAD_REQUEST
@pytest.mark.parametrize(
"headers, expected",
[({}, False), ({"force_training": False}, False), ({"force_training": True}, True)],
)
def test_training_payload_from_yaml_force_training(
headers: Dict, expected: bool, tmp_path: Path
):
request = Mock()
request.body = b""
request.args = headers
payload = rasa.server._training_payload_from_yaml(request, tmp_path)
assert payload.get("force_training") == expected
@pytest.mark.parametrize(
"headers, expected",
[
({}, rasa.shared.constants.DEFAULT_MODELS_PATH),
({"save_to_default_model_directory": False}, ANY),
(
{"save_to_default_model_directory": True},
rasa.shared.constants.DEFAULT_MODELS_PATH,
),
],
)
def test_training_payload_from_yaml_save_to_default_model_directory(
headers: Dict, expected: Text, tmp_path: Path
):
request = Mock()
request.body = b""
request.args = headers
payload = rasa.server._training_payload_from_yaml(request, tmp_path)
assert payload.get("output")
assert payload.get("output") == expected
async def test_train_missing_config(rasa_app: SanicASGITestClient):
payload = dict(domain="domain data", config=None)
_, response = await rasa_app.post("/model/train", json=payload)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_train_missing_training_data(rasa_app: SanicASGITestClient):
payload = dict(domain="domain data", config="config data")
_, response = await rasa_app.post("/model/train", json=payload)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_train_internal_error(rasa_app: SanicASGITestClient):
payload = dict(domain="domain data", config="config data", nlu="nlu data")
_, response = await rasa_app.post("/model/train", json=payload)
assert response.status == HTTPStatus.INTERNAL_SERVER_ERROR
async def test_evaluate_stories(
rasa_app: SanicASGITestClient, default_stories_file: Text
):
stories = rasa.shared.utils.io.read_file(default_stories_file)
_, response = await rasa_app.post(
"/model/test/stories",
data=stories,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
js = response.json()
assert set(js.keys()) == {
"report",
"precision",
"f1",
"accuracy",
"actions",
"in_training_data_fraction",
"is_end_to_end_evaluation",
}
assert not js["is_end_to_end_evaluation"]
assert set(js["actions"][0].keys()) == {
"action",
"predicted",
"confidence",
"policy",
}
async def test_evaluate_stories_not_ready_agent(
rasa_app_nlu: SanicASGITestClient, default_stories_file: Text
):
stories = rasa.shared.utils.io.read_file(default_stories_file)
_, response = await rasa_app_nlu.post("/model/test/stories", data=stories)
assert response.status == HTTPStatus.CONFLICT
async def test_evaluate_stories_end_to_end(
rasa_app: SanicASGITestClient, end_to_end_test_story_file: Text
):
stories = rasa.shared.utils.io.read_file(end_to_end_test_story_file)
_, response = await rasa_app.post("/model/test/stories?e2e=true", data=stories)
assert response.status == HTTPStatus.OK
js = response.json()
assert set(js.keys()) == {
"report",
"precision",
"f1",
"accuracy",
"actions",
"in_training_data_fraction",
"is_end_to_end_evaluation",
}
assert js["is_end_to_end_evaluation"]
assert js["actions"] != []
assert set(js["actions"][0].keys()) == {
"action",
"predicted",
"confidence",
"policy",
}
async def test_evaluate_intent(rasa_app: SanicASGITestClient, default_nlu_data: Text):
nlu_data = rasa.shared.utils.io.read_file(default_nlu_data)
_, response = await rasa_app.post(
"/model/test/intents",
data=nlu_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
assert set(response.json().keys()) == {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}
async def test_evaluate_intent_on_just_nlu_model(
rasa_app_nlu: SanicASGITestClient, default_nlu_data: Text
):
nlu_data = rasa.shared.utils.io.read_file(default_nlu_data)
_, response = await rasa_app_nlu.post(
"/model/test/intents",
data=nlu_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
assert set(response.json().keys()) == {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}
async def test_evaluate_intent_with_model_param(
rasa_app: SanicASGITestClient, trained_nlu_model: Text, default_nlu_data: Text
):
_, response = await rasa_app.get("/status")
previous_model_file = response.json()["model_file"]
nlu_data = rasa.shared.utils.io.read_file(default_nlu_data)
_, response = await rasa_app.post(
f"/model/test/intents?model={trained_nlu_model}",
data=nlu_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
assert set(response.json().keys()) == {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}
_, response = await rasa_app.get("/status")
assert previous_model_file == response.json()["model_file"]
async def test_evaluate_intent_with_model_server(
rasa_app: SanicASGITestClient,
trained_rasa_model: Text,
default_nlu_data: Text,
tear_down_scheduler: None,
):
production_model_server_url = (
"https://example.com/webhooks/actions?model=production"
)
test_model_server_url = "https://example.com/webhooks/actions?model=test"
nlu_data = rasa.shared.utils.io.read_file(default_nlu_data)
with aioresponses() as mocked:
# Mock retrieving the production model from the model server
mocked.get(
production_model_server_url,
body=Path(trained_rasa_model).read_bytes(),
headers={"ETag": "production"},
)
# Mock retrieving the test model from the model server
mocked.get(
test_model_server_url,
body=Path(trained_rasa_model).read_bytes(),
headers={"ETag": "test"},
)
agent_with_model_server = await load_agent(
model_server=EndpointConfig(production_model_server_url)
)
rasa_app.app.agent = agent_with_model_server
_, response = await rasa_app.post(
f"/model/test/intents?model={test_model_server_url}",
data=nlu_data,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
assert set(response.json().keys()) == {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}
production_model_server = rasa_app.app.agent.model_server
# Assert that the model server URL for the test didn't override the production
# model server URL
assert production_model_server.url == production_model_server_url
# Assert the tests didn't break pulling the models
assert production_model_server.kwargs.get("wait_time_between_pulls") != 0
async def test_cross_validation(
rasa_app_nlu: SanicASGITestClient, default_nlu_data: Text
):
nlu_data = Path(default_nlu_data).read_text()
config = Path(DEFAULT_STACK_CONFIG).read_text()
payload = f"{nlu_data}\n{config}"
_, response = await rasa_app_nlu.post(
"/model/test/intents",
data=payload,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
params={"cross_validation_folds": 3},
)
assert response.status == HTTPStatus.OK
response_body = response.json()
for required_key in {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}:
assert required_key in response_body
details = response_body[required_key]
assert all(
key in details for key in ["precision", "f1_score", "report", "errors"]
)
async def test_cross_validation_with_md(
rasa_app_nlu: SanicASGITestClient, default_nlu_data: Text
):
payload = """
## intent: greet
- Hi
- Hello
"""
_, response = await rasa_app_nlu.post(
"/model/test/intents", data=payload, params={"cross_validation_folds": 3},
)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_cross_validation_with_callback_success(
rasa_app_nlu: SanicASGITestClient, default_nlu_data: Text, monkeypatch: MonkeyPatch
):
nlu_data = Path(default_nlu_data).read_text()
config = Path(DEFAULT_STACK_CONFIG).read_text()
payload = f"{nlu_data}\n{config}"
callback_url = "https://example.com/webhooks/actions"
with aioresponses() as mocked:
mocked.post(callback_url, payload={})
mocked_cross_validation = Mock(
return_value=(
CVEvaluationResult({}, {}, {}),
CVEvaluationResult({}, {}, {}),
CVEvaluationResult({}, {}, {}),
)
)
monkeypatch.setattr(
rasa.nlu, rasa.nlu.cross_validate.__name__, mocked_cross_validation
)
_, response = await rasa_app_nlu.post(
"/model/test/intents",
data=payload,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
params={"cross_validation_folds": 3, "callback_url": callback_url},
)
assert response.status == HTTPStatus.NO_CONTENT
# Sleep to give event loop time to process things in the background
await asyncio.sleep(1)
mocked_cross_validation.assert_called_once()
last_request = latest_request(mocked, "POST", callback_url)
assert last_request
content = last_request[0].kwargs["data"]
response_body = json.loads(content)
for required_key in {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}:
assert required_key in response_body
details = response_body[required_key]
assert all(
key in details for key in ["precision", "f1_score", "report", "errors"]
)
async def test_cross_validation_with_callback_error(
rasa_app_nlu: SanicASGITestClient, default_nlu_data: Text, monkeypatch: MonkeyPatch
):
nlu_data = Path(default_nlu_data).read_text()
config = Path(DEFAULT_STACK_CONFIG).read_text()
payload = f"{nlu_data}\n{config}"
monkeypatch.setattr(
rasa.nlu, rasa.nlu.cross_validate.__name__, Mock(side_effect=ValueError())
)
callback_url = "https://example.com/webhooks/actions"
with aioresponses() as mocked:
mocked.post(callback_url, payload={})
_, response = await rasa_app_nlu.post(
"/model/test/intents",
data=payload,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
params={"cross_validation_folds": 3, "callback_url": callback_url},
)
assert response.status == HTTPStatus.NO_CONTENT
await asyncio.sleep(1)
last_request = latest_request(mocked, "POST", callback_url)
assert last_request
content = last_request[0].kwargs["json"]
assert content["code"] == HTTPStatus.INTERNAL_SERVER_ERROR
async def test_callback_unexpected_error(
rasa_app_nlu: SanicASGITestClient, default_nlu_data: Text, monkeypatch: MonkeyPatch
):
nlu_data = Path(default_nlu_data).read_text()
config = Path(DEFAULT_STACK_CONFIG).read_text()
payload = f"{nlu_data}\n{config}"
async def raiseUnexpectedError() -> NoReturn:
raise ValueError()
monkeypatch.setattr(
rasa.server,
rasa.server._training_payload_from_yaml.__name__,
Mock(side_effect=ValueError()),
)
callback_url = "https://example.com/webhooks/actions"
with aioresponses() as mocked:
mocked.post(callback_url, payload={})
_, response = await rasa_app_nlu.post(
"/model/test/intents",
data=payload,
headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
params={"cross_validation_folds": 3, "callback_url": callback_url},
)
assert response.status == HTTPStatus.NO_CONTENT
await asyncio.sleep(1)
last_request = latest_request(mocked, "POST", callback_url)
assert last_request
content = last_request[0].kwargs["json"]
assert content["code"] == HTTPStatus.INTERNAL_SERVER_ERROR
async def test_predict(rasa_app: SanicASGITestClient):
data = {
"Events": {
"value": [
{"event": "action", "name": "action_listen"},
{
"event": "user",
"text": "hello",
"parse_data": {
"entities": [],
"intent": {"confidence": 0.57, INTENT_NAME_KEY: "greet"},
"text": "hello",
},
},
]
}
}
_, response = await rasa_app.post(
"/model/predict",
json=data,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
content = response.json()
assert response.status == HTTPStatus.OK
assert "scores" in content
assert "tracker" in content
assert "policy" in content
@freeze_time("2018-01-01")
async def test_requesting_non_existent_tracker(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get("/conversations/madeupid/tracker")
content = response.json()
assert response.status == HTTPStatus.OK
assert content["paused"] is False
assert content["slots"] == {
"name": None,
REQUESTED_SLOT: None,
SESSION_START_METADATA_SLOT: None,
}
assert content["sender_id"] == "madeupid"
assert content["events"] == [
{
"event": "action",
"name": "action_session_start",
"policy": None,
"confidence": 1,
"timestamp": 1514764800,
"action_text": None,
},
{"event": "session_started", "timestamp": 1514764800},
{
"event": "action",
INTENT_NAME_KEY: "action_listen",
"policy": None,
"confidence": None,
"timestamp": 1514764800,
"action_text": None,
},
]
assert content["latest_message"] == {
"text": None,
"intent": {},
"entities": [],
"message_id": None,
"metadata": {},
}
@pytest.mark.parametrize("event", test_events)
async def test_pushing_event(rasa_app: SanicASGITestClient, event: Event):
sender_id = str(uuid.uuid1())
conversation = f"/conversations/{sender_id}"
serialized_event = event.as_dict()
# Remove timestamp so that a new one is assigned on the server
serialized_event.pop("timestamp")
time_before_adding_events = time.time()
_, response = await rasa_app.post(
f"{conversation}/tracker/events",
json=serialized_event,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
assert response.json() is not None
assert response.status == HTTPStatus.OK
_, tracker_response = await rasa_app.get(f"/conversations/{sender_id}/tracker")
tracker = tracker_response.json()
assert tracker is not None
assert len(tracker.get("events")) == 4
deserialized_events = [Event.from_parameters(event) for event in tracker["events"]]
# there is an initial session start sequence at the beginning of the tracker
assert deserialized_events[:3] == session_start_sequence
assert deserialized_events[3] == event
assert deserialized_events[3].timestamp > time_before_adding_events
async def test_push_multiple_events(rasa_app: SanicASGITestClient):
conversation_id = str(uuid.uuid1())
conversation = f"/conversations/{conversation_id}"
events = [e.as_dict() for e in test_events]
_, response = await rasa_app.post(
f"{conversation}/tracker/events",
json=events,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
assert response.json() is not None
assert response.status == HTTPStatus.OK
_, tracker_response = await rasa_app.get(
f"/conversations/{conversation_id}/tracker"
)
tracker = tracker_response.json()
assert tracker is not None
# there is an initial session start sequence at the beginning
assert [
Event.from_parameters(event) for event in tracker.get("events")
] == session_start_sequence + test_events
@pytest.mark.parametrize(
"params", ["?execute_side_effects=true&output_channel=callback", ""]
)
async def test_pushing_event_while_executing_side_effects(
rasa_server: Sanic, params: Text
):
input_channel = CallbackInput(EndpointConfig("https://example.com/callback"))
channel.register([input_channel], rasa_server, "/webhooks/")
rasa_app = rasa_server.asgi_client
sender_id = str(uuid.uuid1())
conversation = f"/conversations/{sender_id}"
serialized_event = test_events[1].as_dict()
with aioresponses() as mocked:
mocked.post(
"https://example.com/callback",
repeat=True,
headers={"Content-Type": "application/json"},
)
await rasa_app.post(
f"{conversation}/tracker/events{params}",
json=serialized_event,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
r = latest_request(mocked, "post", "https://example.com/callback")
if not params:
assert r is None
else:
message_received = json_of_latest_request(r)
assert message_received.get("recipient_id") == sender_id
assert message_received.get("text") == serialized_event.get("text")
async def test_post_conversation_id_with_slash(rasa_app: SanicASGITestClient):
conversation_id = str(uuid.uuid1())
id_len = len(conversation_id) // 2
conversation_id = conversation_id[:id_len] + "/+-_\\=" + conversation_id[id_len:]
conversation = f"/conversations/{conversation_id}"
events = [e.as_dict() for e in test_events]
_, response = await rasa_app.post(
f"{conversation}/tracker/events",
json=events,
headers={"Content-Type": "application/json"},
)
assert response.json() is not None
assert response.status == HTTPStatus.OK
_, tracker_response = await rasa_app.get(
f"/conversations/{conversation_id}/tracker"
)
tracker = tracker_response.json()
assert tracker is not None
# there is a session start sequence at the start
assert [
Event.from_parameters(event) for event in tracker.get("events")
] == session_start_sequence + test_events
async def test_put_tracker(rasa_app: SanicASGITestClient):
data = [event.as_dict() for event in test_events]
_, response = await rasa_app.put(
"/conversations/pushtracker/tracker/events",
json=data,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
content = response.json()
assert response.status == HTTPStatus.OK
assert len(content["events"]) == len(test_events)
assert content["sender_id"] == "pushtracker"
_, tracker_response = await rasa_app.get("/conversations/pushtracker/tracker")
tracker = tracker_response.json()
assert tracker is not None
evts = tracker.get("events")
assert events.deserialise_events(evts) == test_events
async def test_sorted_predict(rasa_app: SanicASGITestClient):
await _create_tracker_for_sender(rasa_app, "sortedpredict")
_, response = await rasa_app.post("/conversations/sortedpredict/predict")
scores = response.json()["scores"]
sorted_scores = sorted(scores, key=lambda k: (-k["score"], k["action"]))
assert scores == sorted_scores
async def _create_tracker_for_sender(app: SanicASGITestClient, sender_id: Text) -> None:
data = [event.as_dict() for event in test_events[:3]]
_, response = await app.put(
f"/conversations/{sender_id}/tracker/events",
json=data,
headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
)
assert response.status == HTTPStatus.OK
async def test_get_tracker_with_jwt(rasa_secured_app: SanicASGITestClient):
# token generated with secret "core" and algorithm HS256
# on https://jwt.io/
# {"user": {"username": "testadmin", "role": "admin"}}
jwt_header = {
"Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
"eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdGFkbWluIiwic"
"m9sZSI6ImFkbWluIn19.NAQr0kbtSrY7d28XTqRzawq2u"
"QRre7IWTuIDrCn5AIw"
}
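# A minimal sketch of how an equivalent admin token could be minted in Python,
# assuming the PyJWT package (not used by this test, shown only for illustration):
#
#     import jwt  # PyJWT
#     admin_token = jwt.encode(
#         {"user": {"username": "testadmin", "role": "admin"}},
#         "core",  # the JWT secret named in the comment above
#         algorithm="HS256",
#     )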
_, response = await rasa_secured_app.get(
"/conversations/testadmin/tracker", headers=jwt_header
)
assert response.status == HTTPStatus.OK
_, response = await rasa_secured_app.get(
"/conversations/testuser/tracker", headers=jwt_header
)
assert response.status == HTTPStatus.OK
# {"user": {"username": "testuser", "role": "user"}}
jwt_header = {
"Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
"eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdHVzZXIiLCJyb"
"2xlIjoidXNlciJ9fQ.JnMTLYd56qut2w9h7hRQlDm1n3l"
"HJHOxxC_w7TtwCrs"
}
_, response = await rasa_secured_app.get(
"/conversations/testadmin/tracker", headers=jwt_header
)
assert response.status == HTTPStatus.FORBIDDEN
_, response = await rasa_secured_app.get(
"/conversations/testuser/tracker", headers=jwt_header
)
assert response.status == HTTPStatus.OK
def test_list_routes(default_agent: Agent):
app = rasa.server.create_app(default_agent, auth_token=None)
routes = utils.list_routes(app)
assert set(routes.keys()) == {
"hello",
"version",
"status",
"retrieve_tracker",
"append_events",
"replace_events",
"retrieve_story",
"execute_action",
"trigger_intent",
"predict",
"add_message",
"train",
"evaluate_stories",
"evaluate_intents",
"tracker_predict",
"parse",
"load_model",
"unload_model",
"get_domain",
}
async def test_unload_model_error(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get("/status")
assert response.status == HTTPStatus.OK
assert "model_file" in response.json() and response.json()["model_file"] is not None
_, response = await rasa_app.delete("/model")
assert response.status == HTTPStatus.NO_CONTENT
async def test_get_domain(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get(
"/domain", headers={"accept": rasa.server.JSON_CONTENT_TYPE}
)
content = response.json()
assert response.status == HTTPStatus.OK
assert "config" in content
assert "intents" in content
assert "entities" in content
assert "slots" in content
assert "responses" in content
assert "actions" in content
async def test_get_domain_invalid_accept_header(rasa_app: SanicASGITestClient):
_, response = await rasa_app.get("/domain")
assert response.status == HTTPStatus.NOT_ACCEPTABLE
async def test_load_model(rasa_app: SanicASGITestClient, trained_core_model: Text):
_, response = await rasa_app.get("/status")
assert response.status == HTTPStatus.OK
assert "fingerprint" in response.json()
old_fingerprint = response.json()["fingerprint"]
data = {"model_file": trained_core_model}
_, response = await rasa_app.put("/model", json=data)
assert response.status == HTTPStatus.NO_CONTENT
_, response = await rasa_app.get("/status")
assert response.status == HTTPStatus.OK
assert "fingerprint" in response.json()
assert old_fingerprint != response.json()["fingerprint"]
async def test_load_model_from_model_server(
rasa_app: SanicASGITestClient, trained_core_model: Text, tear_down_scheduler: None
):
_, response = await rasa_app.get("/status")
assert response.status == HTTPStatus.OK
assert "fingerprint" in response.json()
old_fingerprint = response.json()["fingerprint"]
endpoint = EndpointConfig("https://example.com/model/trained_core_model")
with open(trained_core_model, "rb") as f:
with aioresponses(passthrough=["http://127.0.0.1"]) as mocked:
headers = {}
fs = os.fstat(f.fileno())
headers["Content-Length"] = str(fs[6])
mocked.get(
"https://example.com/model/trained_core_model",
content_type="application/x-tar",
body=f.read(),
)
data = {"model_server": {"url": endpoint.url}}
_, response = await rasa_app.put("/model", json=data)
assert response.status == HTTPStatus.NO_CONTENT
_, response = await rasa_app.get("/status")
assert response.status == HTTPStatus.OK
assert "fingerprint" in response.json()
assert old_fingerprint != response.json()["fingerprint"]
async def test_load_model_invalid_request_body(rasa_app: SanicASGITestClient):
_, response = await rasa_app.put("/model")
assert response.status == HTTPStatus.BAD_REQUEST
async def test_load_model_invalid_configuration(rasa_app: SanicASGITestClient):
data = {"model_file": "some-random-path"}
_, response = await rasa_app.put("/model", json=data)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_execute(rasa_app: SanicASGITestClient):
await _create_tracker_for_sender(rasa_app, "test_execute")
data = {INTENT_NAME_KEY: "utter_greet"}
_, response = await rasa_app.post("/conversations/test_execute/execute", json=data)
assert response.status == HTTPStatus.OK
parsed_content = response.json()
assert parsed_content["tracker"]
assert parsed_content["messages"]
async def test_execute_with_missing_action_name(rasa_app: SanicASGITestClient):
test_sender = "test_execute_with_missing_action_name"
await _create_tracker_for_sender(rasa_app, test_sender)
data = {"wrong-key": "utter_greet"}
_, response = await rasa_app.post(
f"/conversations/{test_sender}/execute", json=data
)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_execute_with_not_existing_action(rasa_app: SanicASGITestClient):
test_sender = "test_execute_with_not_existing_action"
await _create_tracker_for_sender(rasa_app, test_sender)
data = {"name": "ka[pa[opi[opj[oj[oija"}
_, response = await rasa_app.post(
f"/conversations/{test_sender}/execute", json=data
)
assert response.status == HTTPStatus.INTERNAL_SERVER_ERROR
async def test_trigger_intent(rasa_app: SanicASGITestClient):
data = {INTENT_NAME_KEY: "greet"}
_, response = await rasa_app.post(
"/conversations/test_trigger/trigger_intent", json=data
)
assert response.status == HTTPStatus.OK
parsed_content = response.json()
assert parsed_content["tracker"]
assert parsed_content["messages"]
async def test_trigger_intent_with_entity(rasa_app: SanicASGITestClient):
entity_name = "name"
entity_value = "Sara"
data = {INTENT_NAME_KEY: "greet", "entities": {entity_name: entity_value}}
_, response = await rasa_app.post(
"/conversations/test_trigger/trigger_intent", json=data
)
assert response.status == HTTPStatus.OK
parsed_content = response.json()
last_slot_set_event = [
event
for event in parsed_content["tracker"]["events"]
if event["event"] == "slot"
][-1]
assert parsed_content["tracker"]
assert parsed_content["messages"]
assert last_slot_set_event["name"] == entity_name
assert last_slot_set_event["value"] == entity_value
async def test_trigger_intent_with_missing_intent_name(rasa_app: SanicASGITestClient):
test_sender = "test_trigger_intent_with_missing_action_name"
data = {"wrong-key": "greet"}
_, response = await rasa_app.post(
f"/conversations/{test_sender}/trigger_intent", json=data
)
assert response.status == HTTPStatus.BAD_REQUEST
async def test_trigger_intent_with_not_existing_intent(rasa_app: SanicASGITestClient):
test_sender = "test_trigger_intent_with_not_existing_intent"
await _create_tracker_for_sender(rasa_app, test_sender)
data = {INTENT_NAME_KEY: "ka[pa[opi[opj[oj[oija"}
_, response = await rasa_app.post(
f"/conversations/{test_sender}/trigger_intent", json=data
)
assert response.status == HTTPStatus.NOT_FOUND
@pytest.mark.parametrize(
"input_channels, output_channel_to_use, expected_channel",
[
(None, "slack", CollectingOutputChannel),
([], None, CollectingOutputChannel),
([RestInput()], "slack", CollectingOutputChannel),
([RestInput()], "rest", CollectingOutputChannel),
(
[RestInput(), SlackInput("test", slack_signing_secret="foobar")],
"slack",
SlackBot,
),
],
)
def test_get_output_channel(
input_channels: List[Text], output_channel_to_use: Text, expected_channel: Type
):
request = MagicMock()
app = MagicMock()
app.input_channels = input_channels
request.app = app
request.args = {"output_channel": output_channel_to_use}
actual = rasa.server._get_output_channel(request, None)
assert isinstance(actual, expected_channel)
@pytest.mark.parametrize(
"input_channels, expected_channel",
[
([], CollectingOutputChannel),
([RestInput()], CollectingOutputChannel),
([RestInput(), SlackInput("test", slack_signing_secret="foobar")], SlackBot),
],
)
def test_get_latest_output_channel(input_channels: List[Text], expected_channel: Type):
request = MagicMock()
app = MagicMock()
app.input_channels = input_channels
request.app = app
request.args = {"output_channel": "latest"}
tracker = DialogueStateTracker.from_events(
"default", [UserUttered("text", input_channel="slack")]
)
actual = rasa.server._get_output_channel(request, tracker)
assert isinstance(actual, expected_channel)
def test_app_when_app_has_no_input_channels():
request = MagicMock()
class NoInputChannels:
pass
request.app = NoInputChannels()
actual = rasa.server._get_output_channel(
request, DialogueStateTracker.from_events("default", [])
)
assert isinstance(actual, CollectingOutputChannel)
@pytest.mark.parametrize(
"conversation_events,until_time,fetch_all_sessions,expected",
# conversation with one session
[
(
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("hi", {"name": "greet"}),
ActionExecuted("utter_greet"),
],
None,
True,
"""version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: greet
user: |-
hi
- action: utter_greet""",
),
# conversation with multiple sessions
(
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("hi", {"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("bye bye", {"name": "goodbye"}),
ActionExecuted("utter_goodbye"),
],
None,
True,
"""version: "2.0"
stories:
- story: some-conversation-ID, story 1
steps:
- intent: greet
user: |-
hi
- action: utter_greet
- story: some-conversation-ID, story 2
steps:
- intent: goodbye
user: |-
bye bye
- action: utter_goodbye""",
),
# conversation with multiple sessions, but setting `all_sessions=false`
# means only the last one is returned
(
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("hi", {"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("bye bye", {"name": "goodbye"}),
ActionExecuted("utter_goodbye"),
],
None,
False,
"""version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: goodbye
user: |-
bye bye
- action: utter_goodbye""",
),
# the default for `all_sessions` is `false` - this test checks that
# only the latest session is returned in that case
(
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("hi", {"name": "greet"}),
ActionExecuted("utter_greet"),
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("bye bye", {"name": "goodbye"}),
ActionExecuted("utter_goodbye"),
],
None,
None,
"""version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: goodbye
user: |-
bye bye
- action: utter_goodbye""",
),
# `until` parameter means only the first session is returned
(
[
ActionExecuted(ACTION_SESSION_START_NAME, timestamp=1),
SessionStarted(timestamp=2),
UserUttered("hi", {"name": "greet"}, timestamp=3),
ActionExecuted("utter_greet", timestamp=4),
ActionExecuted(ACTION_SESSION_START_NAME, timestamp=5),
SessionStarted(timestamp=6),
UserUttered("bye bye", {"name": "goodbye"}, timestamp=7),
ActionExecuted("utter_goodbye", timestamp=8),
],
4,
True,
"""version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: greet
user: |-
hi
- action: utter_greet""",
),
# empty conversation
([], None, True, 'version: "2.0"'),
# Conversation with slot
(
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
UserUttered("hi", {"name": "greet"}),
ActionExecuted("utter_greet"),
SlotSet(REQUESTED_SLOT, "some value"),
],
None,
True,
"""version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: greet
user: |-
hi
- action: utter_greet
- slot_was_set:
- requested_slot: some value""",
),
],
)
async def test_get_story(
rasa_app: SanicASGITestClient,
monkeypatch: MonkeyPatch,
conversation_events: List[Event],
until_time: Optional[float],
fetch_all_sessions: Optional[bool],
expected: Text,
):
conversation_id = "some-conversation-ID"
tracker_store = InMemoryTrackerStore(Domain.empty())
tracker = DialogueStateTracker.from_events(conversation_id, conversation_events)
tracker_store.save(tracker)
monkeypatch.setattr(rasa_app.app.agent, "tracker_store", tracker_store)
url = f"/conversations/{conversation_id}/story?"
query = {}
if fetch_all_sessions is not None:
query["all_sessions"] = fetch_all_sessions
if until_time is not None:
query["until"] = until_time
_, response = await rasa_app.get(url + urllib.parse.urlencode(query))
assert response.status == HTTPStatus.OK
assert response.content.decode().strip() == expected
async def test_get_story_does_not_update_conversation_session(
rasa_app: SanicASGITestClient, monkeypatch: MonkeyPatch
):
conversation_id = "some-conversation-ID"
# domain with short session expiration time of one second
domain = Domain.empty()
domain.session_config = SessionConfig(
session_expiration_time=1 / 60, carry_over_slots=True
)
monkeypatch.setattr(rasa_app.app.agent, "domain", domain)
# conversation contains one session that has expired
now = time.time()
conversation_events = [
ActionExecuted(ACTION_SESSION_START_NAME, timestamp=now - 10),
SessionStarted(timestamp=now - 9),
UserUttered("hi", {"name": "greet"}, timestamp=now - 8),
ActionExecuted("utter_greet", timestamp=now - 7),
]
tracker = DialogueStateTracker.from_events(conversation_id, conversation_events)
# the conversation session has expired
assert rasa_app.app.agent.create_processor()._has_session_expired(tracker)
tracker_store = InMemoryTrackerStore(domain)
tracker_store.save(tracker)
monkeypatch.setattr(rasa_app.app.agent, "tracker_store", tracker_store)
_, response = await rasa_app.get(f"/conversations/{conversation_id}/story")
assert response.status == HTTPStatus.OK
# expected story is returned
assert (
response.content.decode().strip()
== """version: "2.0"
stories:
- story: some-conversation-ID
steps:
- intent: greet
user: |-
hi
- action: utter_greet"""
)
# the tracker has the same number of events as were initially added
assert len(tracker.events) == len(conversation_events)
# the last event is still the same as before
assert tracker.events[-1].timestamp == conversation_events[-1].timestamp
@pytest.mark.parametrize(
"initial_tracker_events,events_to_append,expected_events",
[
(
# the tracker is initially empty, and no events are appended
# so we'll just expect the session start sequence with an `action_listen`
[],
[],
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
ActionExecuted(ACTION_LISTEN_NAME),
],
),
(
# the tracker is initially empty, and a user utterance is appended
# we expect a tracker with a session start sequence and a user utterance
[],
[UserUttered("/greet", {"name": "greet", "confidence": 1.0})],
[
ActionExecuted(ACTION_SESSION_START_NAME),
SessionStarted(),
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered("/greet", {"name": "greet", "confidence": 1.0}),
],
),
(
# the tracker is initially empty, and a session start sequence is appended
# we'll just expect the session start sequence
[],
[ActionExecuted(ACTION_SESSION_START_NAME), SessionStarted()],
[ActionExecuted(ACTION_SESSION_START_NAME), SessionStarted()],
),
(
# the tracker already contains some events - we can simply append events
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered("/greet", {"name": "greet", "confidence": 1.0}),
],
[ActionExecuted("utter_greet")],
[
ActionExecuted(ACTION_LISTEN_NAME),
UserUttered("/greet", {"name": "greet", "confidence": 1.0}),
ActionExecuted("utter_greet"),
],
),
],
)
async def test_update_conversation_with_events(
rasa_app: SanicASGITestClient,
monkeypatch: MonkeyPatch,
initial_tracker_events: List[Event],
events_to_append: List[Event],
expected_events: List[Event],
):
conversation_id = "some-conversation-ID"
domain = Domain.empty()
tracker_store = InMemoryTrackerStore(domain)
monkeypatch.setattr(rasa_app.app.agent, "tracker_store", tracker_store)
if initial_tracker_events:
tracker = DialogueStateTracker.from_events(
conversation_id, initial_tracker_events
)
tracker_store.save(tracker)
fetched_tracker = await rasa.server.update_conversation_with_events(
conversation_id, rasa_app.app.agent.create_processor(), domain, events_to_append
)
assert list(fetched_tracker.events) == expected_events
|
multistart_solvers.py
|
"""
This module defines solvers that use multiple starting points in order to have a higher chance of finding the global minimum.
"""
from . import utils, logging
from .solvers import Solver, default_solver
import numpy as np
import scipy as sp
import scipy.optimize
from qsrs import native_from_object
import time
from math import pi, gamma, sqrt
import multiprocessing as mp
import sys
from .persistent_aposmm import initialize_APOSMM, decide_where_to_start_localopt, update_history_dist, add_to_local_H
def distance_for_x(x, options, circuit):
"""Calculate the distance between circuit and the target for input x based on the distance metric"""
return options.objective.gen_error_func(circuit, options)(x)
# TODO: the code below is unreachable and kept only for discussion before deleting. With the new code it is the job of the passed Objective to create an equivalent single-valued objective, and by default it generates one in exactly the same way as here.
if options.inner_solver.distance_metric == "Frobenius":
return options.objective.gen_error_func(circuit, options)(x)
elif options.inner_solver.distance_metric == "Residuals":
return np.sum(options.error_residuals(options.target, circuit.matrix(x), np.eye(options.target.shape[0]))**2)
def optimize_worker(circuit, options, q, x0, error_func):
"""Worker function used to run the inner solver in parallel"""
_, xopt = options.inner_solver.solve_for_unitary(circuit, options, x0)
q.put((error_func(xopt), xopt))
class MultiStart_Solver(Solver):
"""A higher accuracy solver based on APOSMM https://www.mcs.anl.gov/~jlarson/APOSMM/
MultiStart_Solver generally gets better results than other optimizers due to the advanced algorithm
to start multiple local optimizers ("inner solvers") and find the global optimum more often.
"""
def __init__(self, num_threads):
"""Create a MultiStart_Solver instance. Pass num_threads to set how many threads to use in parallel optimization runs"""
self.num_threads = num_threads
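# prefer the cheap 'fork' start method where it is available; Windows only
# supports the default ('spawn') context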
self.ctx = mp.get_context('fork') if sys.platform != 'win32' else mp.get_context()
def solve_for_unitary(self, circuit, options, x0=None):
"""Optimize the given circuit based on the provided options with initial point x0 (optional).
Args:
circuit: A qsearch.gates.Gate describing the circuit to optimize
options: This uses the following options:
- inner_solver : which optimizer to use for local optimization runs
- target : the target unitary of synthesis
- logger : A qsearch.logging.Logger that will be used for logging the synthesis process.
- error_func : The function that the Solver will attempt to minimize.
- error_residuals : A function that returns an array of real-valued residuals to be used by a least-squares-based Solver.
x0: the starting point for the optimizer
"""
if 'inner_solver' not in options:
options.inner_solver = default_solver(options)
U = options.target
error_func = options.objective.gen_error_func(circuit, options)
logger = options.logger if "logger" in options else logging.Logger(verbosity=options.verbosity, stdout_enabled=options.stdout_enabled, output_file=options.log_file)
#np.random.seed(4) # usually we do not want fixed seeds, but it can be useful for some debugging
n = circuit.num_inputs # the number of parameters to optimize (the length of the vector passed to error_func)
initial_sample_size = 100 # How many points do you want to sample before deciding where to start runs.
num_localopt_runs = self.num_threads # How many localopt runs to start?
specs = {'lb': np.zeros(n),
'ub': np.ones(n),
'standalone': True,
'initial_sample_size':initial_sample_size}
_, _, rk_const, ld, mu, nu, _, H = initialize_APOSMM([],specs,None)
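# H is APOSMM's history array of sampled/evaluated points; rk_const, ld, mu and nu
# are the parameters decide_where_to_start_localopt uses below to judge which
# sampled points are worth starting a local optimization run from.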
initial_sample = np.random.uniform(0, 1, (initial_sample_size, n))
add_to_local_H(H, initial_sample, specs, on_cube=True)
for i, x in enumerate(initial_sample):
H['f'][i] = error_func(2*np.pi*x)
H[['returned']] = True
update_history_dist(H, n)
starting_inds = decide_where_to_start_localopt(H, n, initial_sample_size, rk_const, ld, mu, nu)
starting_points = H['x'][starting_inds[:num_localopt_runs]]
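# points in H live on the unit cube, so they are rescaled to [0, 2*pi) before
# being handed to the local optimizers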
start = time.time()
q = self.ctx.Queue()
processes = []
rets = []
for x0 in starting_points:
p = self.ctx.Process(target=optimize_worker, args=(circuit, options, q, 2*np.pi*x0, error_func))
processes.append(p)
p.start()
for p in processes:
ret = q.get() # will block
rets.append(ret)
for p in processes:
p.join()
end = time.time()
best_found = np.argmin([r[0] for r in rets])
best_val = rets[best_found][0]
xopt = rets[best_found][1]
return (circuit.matrix(xopt), xopt)
class NaiveMultiStart_Solver(Solver):
"""A naive but effective multi-start solver which tries to cover as much of the optimization space at once"""
def __init__(self, num_threads):
"""Create a NaiveMultiStart_Solver instance. Pass num_threads to set how many threads to use in parallel optimization runs"""
self.threads = num_threads if num_threads else 1
self.ctx = mp.get_context('fork') if sys.platform != 'win32' else mp.get_context()
def solve_for_unitary(self, circuit, options, x0=None):
if 'inner_solver' not in options:
options.inner_solver = default_solver(options)
U = options.target
logger = options.logger if "logger" in options else logging.Logger(verbosity=options.verbosity, stdout_enabled=options.stdout_enabled, output_file=options.log_file)
n = circuit.num_inputs
error_func = options.objective.gen_error_func(circuit, options)
# each start is drawn from a distinct slice [(i-1)/threads, i/threads] of every coordinate
initial_samples = [np.random.uniform((i - 1)/self.threads, i/self.threads, (circuit.num_inputs,)) for i in range(1, self.threads+1)]
q = self.ctx.Queue()
processes = []
rets = []
for x0 in initial_samples:
p = self.ctx.Process(target=optimize_worker, args=(circuit, options, q, x0, error_func))
processes.append(p)
p.start()
for p in processes:
ret = q.get() # will block
rets.append(ret)
for p in processes:
p.join()
best_found = np.argmin([r[0] for r in rets])
best_val = rets[best_found][0]
xopt = rets[best_found][1]
return (circuit.matrix(xopt), xopt)
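# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal sketch of how either solver might be driven, assuming a `circuit`
# exposing num_inputs and matrix(x) and an `options` object that provides
# objective.gen_error_func(circuit, options) as used above; the import path and
# the construction of both objects are project-specific and only assumed here.
#
#     from qsearch.multistart_solvers import MultiStart_Solver
#
#     solver = MultiStart_Solver(num_threads=4)  # one worker process per local run
#     matrix, xopt = solver.solve_for_unitary(circuit, options)
#     final_error = options.objective.gen_error_func(circuit, options)(xopt)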
|
test_sys.py
|
import unittest, test.support
from test.script_helper import assert_python_ok, assert_python_failure
import sys, io, os
import struct
import subprocess
import textwrap
import warnings
import operator
import codecs
import gc
import sysconfig
import platform
# count the number of test runs, used to create unique
# strings to intern in test_intern()
numruns = 0
try:
import threading
except ImportError:
threading = None
class SysModuleTest(unittest.TestCase):
def setUp(self):
self.orig_stdout = sys.stdout
self.orig_stderr = sys.stderr
self.orig_displayhook = sys.displayhook
def tearDown(self):
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
sys.displayhook = self.orig_displayhook
test.support.reap_children()
def test_original_displayhook(self):
import builtins
out = io.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(builtins, "_"):
del builtins._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
def test_lost_displayhook(self):
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
def test_original_excepthook(self):
err = io.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError as exc:
eh(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (ascii(err), ascii(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'fatal error if run with a trace function')
def test_recursionlimit_recovery(self):
# NOTE: this test is slightly fragile in that it depends on the current
# recursion count when executing the test being low enough so as to
# trigger the recursion recovery detection in the _Py_MakeEndRecCheck
# macro (see ceval.h).
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for i in (50, 1000):
# Issue #5392: stack overflow after hitting recursion limit twice
sys.setrecursionlimit(i)
self.assertRaises(RuntimeError, f)
self.assertRaises(RuntimeError, f)
finally:
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_fatalerror(self):
# A fatal error occurs if a second recursion limit is hit when recovering
# from a first one.
code = textwrap.dedent("""
import sys
def f():
try:
f()
except RuntimeError:
f()
sys.setrecursionlimit(%d)
f()""")
with test.support.SuppressCrashReport():
for i in (50, 1000):
sub = subprocess.Popen([sys.executable, '-c', code % i],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertTrue(sub.returncode, sub.returncode)
self.assertIn(
b"Fatal Python error: Cannot recover from stack overflow",
err)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import _thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
@test.support.reap_threads
def current_frames_with_threads(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assertIn(0, d)
self.assertTrue(d[0] is sys._getframe())
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
@unittest.skipUnless(hasattr(sys, 'thread_info'),
'Threading required for this test.')
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global numruns
numruns += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(numruns)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning", "quiet", "hash_randomization", "isolated")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type()
with self.assertRaises(TypeError):
attr_type.__new__(attr_type)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
# Skip if not being run on Windows.
test.support.get_attribute(sys, "getwindowsversion")
self.assert_raise_on_new_sys_type(sys.getwindowsversion())
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(test.support.FS_NONASCII,
'requires OS support of non-ASCII encodings')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % test.support.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(test.support.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a nonexistent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
elif sys.platform == 'win32':
expected = 'mbcs'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = "C"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-c", code]
if isolated:
args.append("-I")
elif encoding:
env['PYTHONIOENCODING'] = encoding
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def test_c_locale_surrogateescape(self):
out = self.c_locale_get_error_handler(isolated=True)
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
# replace the default error handler
out = self.c_locale_get_error_handler(encoding=':strict')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
# force the encoding
out = self.c_locale_get_error_handler(encoding='iso8859-1')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
self.assertIn(b"free PyDictObjects", err)
# The function has no parameter
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
# Some sanity checks
with_pymalloc = sysconfig.get_config_var('WITH_PYMALLOC')
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# While we could imagine a Python session where the number of
# multiple buffer objects would exceed the sharing of references,
# it is unlikely to happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
self.file = open(test.support.TESTFN, 'wb')
def tearDown(self):
self.file.close()
test.support.unlink(test.support.TESTFN)
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('4P')) # XXX check layout
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
check(get_cell().__code__, size('5i9Pi3P'))
check(get_cell.__code__, size('5i9Pi3P'))
def get_cell2(x):
def inner():
return x
return inner
check(get_cell2.__code__, size('5i9Pi3P') + 1)
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# dict
check({}, size('n2P' + '2nPn' + 8*'n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('n2P' + '2nPn') + 16*struct.calcsize('n2P'))
# dictionary-keyiterator
check({}.keys(), size('P'))
# dictionary-valueiterator
check({}.values(), size('P'))
# dictionary-itemiterator
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, vsize('12P3ic' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('12P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('Pb2PPP'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# memoryview
check(memoryview(b''), size('Pnin 2P2n2i5P 3cPn'))
# module
check(unittest, size('PnPPP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('4Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3n2P' + PySet_MINSIZE*'nP' + 'nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*struct.calcsize('nP'))
check(frozenset(sample), s + newsize*struct.calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
s = vsize('P2n15Pl4Pn9Pn11PIP')
check(int, s)
# (PyTypeObject + PyNumberMethods + PyMappingMethods +
# PySequenceMethods + PyBufferProcs + 4P)
s = vsize('P2n17Pl4Pn9Pn11PIP') + struct.calcsize('34P 3P 10P 2P 4P')
# Separate block for PyDictKeysObject with 4 entries
s += struct.calcsize("2nPn") + 4*struct.calcsize("n2P")
# class
class newstyleclass(object): pass
check(newstyleclass, s)
# dict with shared keys
check(newstyleclass().__dict__, size('n2P' + '2nPn'))
# unicode
# each sample is a string; its expected size depends on its widest character
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_main():
test.support.run_unittest(SysModuleTest, SizeofTest)
if __name__ == "__main__":
test_main()
|
bazelci.py
|
#!/usr/bin/env python3
#
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import base64
import codecs
import datetime
import glob
import hashlib
import json
import multiprocessing
import os
import os.path
import random
import re
import requests
from shutil import copyfile
import shutil
import stat
import subprocess
import sys
import tempfile
import threading
import time
import urllib.error
import urllib.request
import uuid
import yaml
from urllib.request import url2pathname
from urllib.parse import urlparse
# Initialize the random number generator.
random.seed()
BUILDKITE_ORG = os.environ["BUILDKITE_ORGANIZATION_SLUG"]
THIS_IS_PRODUCTION = BUILDKITE_ORG == "bazel-untrusted"
THIS_IS_TESTING = BUILDKITE_ORG == "bazel-testing"
THIS_IS_TRUSTED = BUILDKITE_ORG == "bazel-trusted"
THIS_IS_SPARTA = True
CLOUD_PROJECT = "bazel-public" if THIS_IS_TRUSTED else "bazel-untrusted"
GITHUB_BRANCH = {"bazel": "master", "bazel-trusted": "master", "bazel-testing": "testing"}[
BUILDKITE_ORG
]
SCRIPT_URL = "https://raw.githubusercontent.com/bazelbuild/continuous-integration/{}/buildkite/bazelci.py?{}".format(
GITHUB_BRANCH, int(time.time())
)
INCOMPATIBLE_FLAG_VERBOSE_FAILURES_URL = "https://raw.githubusercontent.com/bazelbuild/continuous-integration/{}/buildkite/incompatible_flag_verbose_failures.py?{}".format(
GITHUB_BRANCH, int(time.time())
)
AGGREGATE_INCOMPATIBLE_TEST_RESULT_URL = "https://raw.githubusercontent.com/bazelbuild/continuous-integration/{}/buildkite/aggregate_incompatible_flags_test_result.py?{}".format(
GITHUB_BRANCH, int(time.time())
)
EMERGENCY_FILE_URL = "https://raw.githubusercontent.com/bazelbuild/continuous-integration/{}/buildkite/emergency.yml?{}".format(
GITHUB_BRANCH, int(time.time())
)
FLAKY_TESTS_BUCKET = {
"bazel-testing": "gs://bazel-testing-buildkite-stats/flaky-tests-bep/",
"bazel-trusted": "gs://bazel-buildkite-stats/flaky-tests-bep/",
"bazel": "gs://bazel-buildkite-stats/flaky-tests-bep/",
}[BUILDKITE_ORG]
KZIPS_BUCKET = {
"bazel-testing": "gs://bazel-kzips-testing/",
"bazel-trusted": "gs://bazel-kzips/",
"bazel": "gs://bazel-kzips/",
}[BUILDKITE_ORG]
# Projects can opt out of receiving GitHub issues from --notify by adding `"do_not_notify": True` to their respective downstream entry.
DOWNSTREAM_PROJECTS_PRODUCTION = {
"Android Studio Plugin": {
"git_repository": "https://github.com/bazelbuild/intellij.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/intellij/master/.bazelci/android-studio.yml",
"pipeline_slug": "android-studio-plugin",
},
"Android Testing": {
"git_repository": "https://github.com/googlesamples/android-testing.git",
"http_config": "https://raw.githubusercontent.com/googlesamples/android-testing/master/bazelci/buildkite-pipeline.yml",
"pipeline_slug": "android-testing",
},
"Bazel": {
"git_repository": "https://github.com/bazelbuild/bazel.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel/master/.bazelci/postsubmit.yml",
"pipeline_slug": "bazel-bazel",
},
"Bazel Bench": {
"git_repository": "https://github.com/bazelbuild/bazel-bench.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-bench/master/.bazelci/postsubmit.yml",
"pipeline_slug": "bazel-bench",
},
"Bazel Codelabs": {
"git_repository": "https://github.com/bazelbuild/codelabs.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/codelabs/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-codelabs",
},
"Bazel Examples": {
"git_repository": "https://github.com/bazelbuild/examples.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/examples/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-bazel-examples",
},
"Bazel Federation": {
"git_repository": "https://github.com/bazelbuild/bazel-federation.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-federation/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-federation",
},
"Bazel Remote Cache": {
"git_repository": "https://github.com/buchgr/bazel-remote.git",
"http_config": "https://raw.githubusercontent.com/buchgr/bazel-remote/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-remote-cache",
},
"Bazel integration testing": {
"git_repository": "https://github.com/bazelbuild/bazel-integration-testing.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-integration-testing/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-integration-testing",
},
"Bazel skylib": {
"git_repository": "https://github.com/bazelbuild/bazel-skylib.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-skylib/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-skylib",
"owned_by_bazel": True,
},
"Bazel toolchains": {
"git_repository": "https://github.com/bazelbuild/bazel-toolchains.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-toolchains/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-toolchains",
},
"Bazel watcher": {
"git_repository": "https://github.com/bazelbuild/bazel-watcher.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-watcher/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-watcher",
},
"Bazelisk": {
"git_repository": "https://github.com/bazelbuild/bazelisk.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazelisk/master/.bazelci/config.yml",
"pipeline_slug": "bazelisk",
},
"Buildfarm": {
"git_repository": "https://github.com/bazelbuild/bazel-buildfarm.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-buildfarm/master/.bazelci/presubmit.yml",
"pipeline_slug": "buildfarm-male-farmer",
},
"Buildtools": {
"git_repository": "https://github.com/bazelbuild/buildtools.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/buildtools/master/.bazelci/presubmit.yml",
"pipeline_slug": "buildtools",
},
"CLion Plugin": {
"git_repository": "https://github.com/bazelbuild/intellij.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/intellij/master/.bazelci/clion.yml",
"pipeline_slug": "clion-plugin",
},
"Cartographer": {
"git_repository": "https://github.com/googlecartographer/cartographer.git",
"http_config": "https://raw.githubusercontent.com/googlecartographer/cartographer/master/.bazelci/presubmit.yml",
"pipeline_slug": "cartographer",
},
"Cloud Robotics Core": {
"git_repository": "https://github.com/googlecloudrobotics/core.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/buildkite/pipelines/cloud-robotics-postsubmit.yml",
"pipeline_slug": "cloud-robotics-core",
},
"Envoy": {
"git_repository": "https://github.com/envoyproxy/envoy.git",
"http_config": "https://raw.githubusercontent.com/envoyproxy/envoy/master/.bazelci/presubmit.yml",
"pipeline_slug": "envoy",
},
"FlatBuffers": {
"git_repository": "https://github.com/google/flatbuffers.git",
"http_config": "https://raw.githubusercontent.com/google/flatbuffers/master/.bazelci/presubmit.yml",
"pipeline_slug": "flatbuffers",
},
"Flogger": {
"git_repository": "https://github.com/google/flogger.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/buildkite/pipelines/flogger.yml",
"pipeline_slug": "flogger",
},
"Gerrit": {
"git_repository": "https://gerrit.googlesource.com/gerrit.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/buildkite/pipelines/gerrit-postsubmit.yml",
"pipeline_slug": "gerrit",
},
"Google Logging": {
"git_repository": "https://github.com/google/glog.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/buildkite/pipelines/glog-postsubmit.yml",
"pipeline_slug": "google-logging",
},
"IntelliJ Plugin": {
"git_repository": "https://github.com/bazelbuild/intellij.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/intellij/master/.bazelci/intellij.yml",
"pipeline_slug": "intellij-plugin",
},
"IntelliJ Plugin Aspect": {
"git_repository": "https://github.com/bazelbuild/intellij.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/intellij/master/.bazelci/aspect.yml",
"pipeline_slug": "intellij-plugin-aspect",
},
"Kythe": {
"git_repository": "https://github.com/kythe/kythe.git",
"http_config": "https://raw.githubusercontent.com/kythe/kythe/master/.bazelci/presubmit.yml",
"pipeline_slug": "kythe",
},
"Protobuf": {
"git_repository": "https://github.com/google/protobuf.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/buildkite/pipelines/protobuf-postsubmit.yml",
"pipeline_slug": "protobuf",
"owned_by_bazel": True,
},
"Stardoc": {
"git_repository": "https://github.com/bazelbuild/stardoc.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/stardoc/master/.bazelci/presubmit.yml",
"pipeline_slug": "stardoc",
"owned_by_bazel": True,
},
"Subpar": {
"git_repository": "https://github.com/google/subpar.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/buildkite/pipelines/subpar-postsubmit.yml",
"pipeline_slug": "subpar",
"owned_by_bazel": True,
},
"TensorFlow": {
"git_repository": "https://github.com/tensorflow/tensorflow.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/buildkite/pipelines/tensorflow-postsubmit.yml",
"pipeline_slug": "tensorflow",
},
"Tulsi": {
"git_repository": "https://github.com/bazelbuild/tulsi.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/tulsi/master/.bazelci/presubmit.yml",
"pipeline_slug": "tulsi-bazel-darwin",
},
"re2": {
"git_repository": "https://github.com/google/re2.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/buildkite/pipelines/re2-postsubmit.yml",
"pipeline_slug": "re2",
},
"rules_android": {
"git_repository": "https://github.com/bazelbuild/rules_android.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_android/master/.bazelci/postsubmit.yml",
"pipeline_slug": "rules-android",
"disabled_reason": "https://github.com/bazelbuild/rules_android/issues/15",
},
"rules_appengine": {
"git_repository": "https://github.com/bazelbuild/rules_appengine.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_appengine/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-appengine-appengine",
},
"rules_apple": {
"git_repository": "https://github.com/bazelbuild/rules_apple.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_apple/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-apple-darwin",
},
"rules_cc": {
"git_repository": "https://github.com/bazelbuild/rules_cc.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_cc/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-cc",
"owned_by_bazel": True,
},
"rules_closure": {
"git_repository": "https://github.com/bazelbuild/rules_closure.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_closure/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-closure-closure-compiler",
"owned_by_bazel": True,
},
"rules_d": {
"git_repository": "https://github.com/bazelbuild/rules_d.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_d/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-d",
},
"rules_docker": {
"git_repository": "https://github.com/bazelbuild/rules_docker.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_docker/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-docker-docker",
},
"rules_foreign_cc": {
"git_repository": "https://github.com/bazelbuild/rules_foreign_cc.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_foreign_cc/master/.bazelci/config.yaml",
"pipeline_slug": "rules-foreign-cc",
"owned_by_bazel": True,
},
"rules_go": {
"git_repository": "https://github.com/bazelbuild/rules_go.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_go/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-go-golang",
},
"rules_groovy": {
"git_repository": "https://github.com/bazelbuild/rules_groovy.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_groovy/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-groovy",
},
"rules_gwt": {
"git_repository": "https://github.com/bazelbuild/rules_gwt.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_gwt/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-gwt",
},
"rules_haskell": {
"git_repository": "https://github.com/tweag/rules_haskell.git",
"http_config": "https://raw.githubusercontent.com/tweag/rules_haskell/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-haskell-haskell",
},
"rules_jsonnet": {
"git_repository": "https://github.com/bazelbuild/rules_jsonnet.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_jsonnet/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-jsonnet",
},
"rules_jvm_external": {
"git_repository": "https://github.com/bazelbuild/rules_jvm_external.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_jvm_external/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-jvm-external",
"owned_by_bazel": True,
},
"rules_jvm_external - examples": {
"git_repository": "https://github.com/bazelbuild/rules_jvm_external.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_jvm_external/master/.bazelci/examples.yml",
"pipeline_slug": "rules-jvm-external-examples",
"owned_by_bazel": True,
},
"rules_k8s": {
"git_repository": "https://github.com/bazelbuild/rules_k8s.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_k8s/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-k8s-k8s",
"disabled_reason": "https://github.com/bazelbuild/rules_k8s/pull/580",
},
"rules_kotlin": {
"git_repository": "https://github.com/bazelbuild/rules_kotlin.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_kotlin/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-kotlin-kotlin",
},
"rules_nodejs": {
"git_repository": "https://github.com/bazelbuild/rules_nodejs.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_nodejs/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-nodejs-nodejs",
},
"rules_perl": {
"git_repository": "https://github.com/bazelbuild/rules_perl.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_perl/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-perl",
},
"rules_proto": {
"git_repository": "https://github.com/bazelbuild/rules_proto.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_proto/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-proto",
"owned_by_bazel": True,
},
"rules_python": {
"git_repository": "https://github.com/bazelbuild/rules_python.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_python/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-python-python",
"owned_by_bazel": True,
},
"rules_rust": {
"git_repository": "https://github.com/bazelbuild/rules_rust.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_rust/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-rust-rustlang",
},
"rules_sass": {
"git_repository": "https://github.com/bazelbuild/rules_sass.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_sass/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-sass",
},
"rules_scala": {
"git_repository": "https://github.com/bazelbuild/rules_scala.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_scala/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-scala-scala",
},
"rules_swift": {
"git_repository": "https://github.com/bazelbuild/rules_swift.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_swift/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-swift-swift",
"do_not_notify": "https://github.com/bazelbuild/continuous-integration/issues/915",
},
"rules_webtesting": {
"git_repository": "https://github.com/bazelbuild/rules_webtesting.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_webtesting/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-webtesting-saucelabs",
},
"upb": {
"git_repository": "https://github.com/protocolbuffers/upb.git",
"http_config": "https://raw.githubusercontent.com/protocolbuffers/upb/master/.bazelci/presubmit.yml",
"pipeline_slug": "upb",
},
}
DOWNSTREAM_PROJECTS_TESTING = {
"Bazel": DOWNSTREAM_PROJECTS_PRODUCTION["Bazel"],
"Bazelisk": DOWNSTREAM_PROJECTS_PRODUCTION["Bazelisk"],
"Federation": {
"git_repository": "https://github.com/fweikert/bazel-federation.git",
"http_config": "https://raw.githubusercontent.com/fweikert/bazel-federation/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-federation",
},
"rules_docker": DOWNSTREAM_PROJECTS_PRODUCTION["rules_docker"],
"rules_go": DOWNSTREAM_PROJECTS_PRODUCTION["rules_go"],
"rules_groovy": DOWNSTREAM_PROJECTS_PRODUCTION["rules_groovy"],
"rules_kotlin": DOWNSTREAM_PROJECTS_PRODUCTION["rules_kotlin"],
"rules_nodejs": DOWNSTREAM_PROJECTS_PRODUCTION["rules_nodejs"],
"rules_rust": DOWNSTREAM_PROJECTS_PRODUCTION["rules_rust"],
"rules_scala": DOWNSTREAM_PROJECTS_PRODUCTION["rules_scala"],
}
DOWNSTREAM_PROJECTS = {
"bazel-testing": DOWNSTREAM_PROJECTS_TESTING,
"bazel-trusted": {},
"bazel": DOWNSTREAM_PROJECTS_PRODUCTION,
}[BUILDKITE_ORG]
DOCKER_REGISTRY_PREFIX = {
"bazel-testing": "bazel-public/testing",
"bazel-trusted": "bazel-public",
"bazel": "bazel-public",
}[BUILDKITE_ORG]
# A map containing all supported platform names as keys, with the values being
# the platform name in a human-readable format, and the buildkite-agent's
# working directory.
PLATFORMS = {
"centos7": {
"name": "CentOS 7, Java 8",
"emoji-name": ":centos: 7 (Java 8)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": ["ubuntu1404", "centos7", "linux"],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/centos7-java8",
"python": "python3.6",
},
"debian10": {
"name": "Debian Buster, OpenJDK 11",
"emoji-name": ":debian: Buster (OpenJDK 11)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": [],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/debian10-java11",
"python": "python3.7",
},
"ubuntu1604": {
"name": "Ubuntu 16.04, OpenJDK 8",
"emoji-name": ":ubuntu: 16.04 (OpenJDK 8)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": ["ubuntu1604"],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/ubuntu1604-java8",
"python": "python3.6",
},
"ubuntu1804": {
"name": "Ubuntu 18.04, OpenJDK 11",
"emoji-name": ":ubuntu: 18.04 (OpenJDK 11)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": ["ubuntu1804"],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/ubuntu1804-java11",
"python": "python3.6",
},
"ubuntu1804_nojava": {
"name": "Ubuntu 18.04, no JDK",
"emoji-name": ":ubuntu: 18.04 (no JDK)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": [],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/ubuntu1804-nojava",
"python": "python3.6",
},
"ubuntu2004": {
"name": "Ubuntu 20.04, OpenJDK 11",
"emoji-name": ":ubuntu: 20.04 (OpenJDK 11)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": [],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/ubuntu2004-java11",
"python": "python3.8",
},
"ubuntu2004_nojava": {
"name": "Ubuntu 20.04, no JDK",
"emoji-name": ":ubuntu: 20.04 (no JDK)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": [],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/ubuntu2004-nojava",
"python": "python3.8",
},
"kythe_ubuntu2004": {
"name": "Kythe (Ubuntu 20.04, OpenJDK 11)",
"emoji-name": "Kythe (:ubuntu: 20.04, OpenJDK 11)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": [],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/ubuntu2004-java11-kythe",
"python": "python3.8",
},
"macos": {
"name": "macOS, OpenJDK 8",
"emoji-name": ":darwin: (OpenJDK 8)",
"downstream-root": "/Users/buildkite/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": ["macos"],
"queue": "macos",
"python": "python3.7",
},
"windows": {
"name": "Windows, OpenJDK 8",
"emoji-name": ":windows: (OpenJDK 8)",
"downstream-root": "c:/b/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": ["windows"],
"queue": "windows",
"python": "python.exe",
},
"rbe_ubuntu1604": {
"name": "RBE (Ubuntu 16.04, OpenJDK 8)",
"emoji-name": "RBE (:ubuntu: 16.04, OpenJDK 8)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": [],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/ubuntu1604-java8",
"python": "python3.6",
},
}
BUILDIFIER_DOCKER_IMAGE = "gcr.io/bazel-public/buildifier"
# The platform used for various steps (e.g. stuff that formerly ran on the "pipeline" workers).
DEFAULT_PLATFORM = "ubuntu1804"
# In order to test that "the one Linux binary" that we build for our official releases actually
# works on all Linux distributions that we test on, we use the Linux binary built on our official
# release platform for all Linux downstream tests.
LINUX_BINARY_PLATFORM = "centos7"
DEFAULT_XCODE_VERSION = "11.7"
XCODE_VERSION_REGEX = re.compile(r"^\d+\.\d+(\.\d+)?$")
XCODE_VERSION_OVERRIDES = {
"10.2.1": "10.3",
"11.2": "11.2.1",
"11.3": "11.3.1",
}
ENCRYPTED_SAUCELABS_TOKEN = """
CiQAry63sOlZtTNtuOT5DAOLkum0rGof+DOweppZY1aOWbat8zwSTQAL7Hu+rgHSOr6P4S1cu4YG
/I1BHsWaOANqUgFt6ip9/CUGGJ1qggsPGXPrmhSbSPqNAIAkpxYzabQ3mfSIObxeBmhKg2dlILA/
EDql
""".strip()
BUILD_LABEL_PATTERN = re.compile(r"^Build label: (\S+)$", re.MULTILINE)
BUILDIFIER_VERSION_ENV_VAR = "BUILDIFIER_VERSION"
BUILDIFIER_WARNINGS_ENV_VAR = "BUILDIFIER_WARNINGS"
BUILDIFIER_STEP_NAME = "Buildifier"
SKIP_TASKS_ENV_VAR = "CI_SKIP_TASKS"
CONFIG_FILE_EXTENSIONS = {".yml", ".yaml"}
KYTHE_DIR = "/usr/local/kythe"
INDEX_UPLOAD_POLICY_ALWAYS = "Always"
INDEX_UPLOAD_POLICY_IF_BUILD_SUCCESS = "IfBuildSuccess"
INDEX_UPLOAD_POLICY_NEVER = "Never"
class BuildkiteException(Exception):
"""
Raised whenever something goes wrong and we should exit with an error.
"""
pass
class BinaryUploadRaceException(Exception):
"""
Raised when try_publish_binaries wasn't able to publish a set of binaries,
because the generation of the current file didn't match the expected value.
"""
pass
class BuildkiteClient(object):
_ENCRYPTED_BUILDKITE_API_TOKEN = """
CiQA4DEB9ldzC+E39KomywtqXfaQ86hhulgeDsicds2BuvbCYzsSUAAqwcvXZPh9IMWlwWh94J2F
exosKKaWB0tSRJiPKnv2NPDfEqGul0ZwVjtWeASpugwxxKeLhFhPMcgHMPfndH6j2GEIY6nkKRbP
uwoRMCwe
""".strip()
_ENCRYPTED_BUILDKITE_API_TESTING_TOKEN = """
CiQAMTBkWjL1C+F5oon3+cC1vmum5+c1y5+96WQY44p0Lxd0PeASUQAy7iU0c6E3W5EOSFYfD5fA
MWy/SHaMno1NQSUa4xDOl5yc2kizrtxPPVkX4x9pLNuGUY/xwAn2n1DdiUdWZNWlY1bX2C4ex65e
P9w8kNhEbw==
""".strip()
_BUILD_STATUS_URL_TEMPLATE = (
"https://api.buildkite.com/v2/organizations/{}/pipelines/{}/builds/{}"
)
_NEW_BUILD_URL_TEMPLATE = (
"https://api.buildkite.com/v2/organizations/{}/pipelines/{}/builds"
)
_RETRY_JOB_URL_TEMPLATE = (
"https://api.buildkite.com/v2/organizations/{}/pipelines/{}/builds/{}/jobs/{}/retry"
)
def __init__(self, org, pipeline):
self._org = org
self._pipeline = pipeline
self._token = self._get_buildkite_token()
def _get_buildkite_token(self):
return decrypt_token(
encrypted_token=self._ENCRYPTED_BUILDKITE_API_TESTING_TOKEN
if THIS_IS_TESTING
else self._ENCRYPTED_BUILDKITE_API_TOKEN,
kms_key="buildkite-testing-api-token"
if THIS_IS_TESTING
else "buildkite-untrusted-api-token",
)
def _open_url(self, url, params = []):
try:
params_str = "".join("&{}={}".format(k, v) for k, v in params)
return (
urllib.request.urlopen("{}?access_token={}{}".format(url, self._token, params_str))
.read()
.decode("utf-8", "ignore")
)
except urllib.error.HTTPError as ex:
raise BuildkiteException("Failed to open {}: {} - {}".format(url, ex.code, ex.reason))
def get_build_info(self, build_number):
"""Get build info for a pipeline with a given build number
See https://buildkite.com/docs/apis/rest-api/builds#get-a-build
Parameters
----------
build_number : the build number
Returns
-------
dict
the metadata for the build
"""
url = self._BUILD_STATUS_URL_TEMPLATE.format(self._org, self._pipeline, build_number)
output = self._open_url(url)
return json.loads(output)
def get_build_info_list(self, params):
"""Get a list of build infos for this pipeline
See https://buildkite.com/docs/apis/rest-api/builds#list-builds-for-a-pipeline
Parameters
----------
params : the parameters to filter the result
Returns
-------
list of dict
the metadata for a list of builds
"""
url = self._BUILD_STATUS_URL_TEMPLATE.format(self._org, self._pipeline, "")
output = self._open_url(url, params)
return json.loads(output)
def get_build_log(self, job):
return self._open_url(job["raw_log_url"])
@staticmethod
def _check_response(response, expected_status_code):
if response.status_code != expected_status_code:
eprint("Exit code:", response.status_code)
eprint("Response:\n", response.text)
response.raise_for_status()
def trigger_new_build(self, commit, message = None, env = {}):
"""Trigger a new build at a given commit and return the build metadata.
See https://buildkite.com/docs/apis/rest-api/builds#create-a-build
Parameters
----------
commit : the commit we want to build at
message : (optional) the message to use as the build title
env : (optional) the environment variables to set
Returns
-------
dict
the metadata for the build
"""
url = self._NEW_BUILD_URL_TEMPLATE.format(self._org, self._pipeline)
data = {
"commit": commit,
"branch": "master",
"message": message if message else f"Trigger build at {commit}",
"env": env,
}
response = requests.post(url + "?access_token=" + self._token, json = data)
BuildkiteClient._check_response(response, requests.codes.created)
return json.loads(response.text)
def trigger_job_retry(self, build_number, job_id):
"""Trigger a job retry and return the job metadata.
See https://buildkite.com/docs/apis/rest-api/jobs#retry-a-job
Parameters
----------
build_number : the number of the build we want to retry
job_id : the id of the job we want to retry
Returns
-------
dict
the metadata for the job
"""
url = self._RETRY_JOB_URL_TEMPLATE.format(self._org, self._pipeline, build_number, job_id)
response = requests.put(url + "?access_token=" + self._token)
BuildkiteClient._check_response(response, requests.codes.ok)
return json.loads(response.text)
def wait_job_to_finish(self, build_number, job_id, interval_time=30, logger=None):
"""Wait for a job to finish and return the job metadata
Parameters
----------
build_number : the number of the build that contains the job
job_id : the id of the job we want to wait for
interval_time : (optional) the interval time to check the build status, default to 30s
logger : (optional) a logger to report progress
Returns
-------
dict
the latest metadata for the job
"""
t = 0
build_info = self.get_build_info(build_number)
while True:
for job in build_info["jobs"]:
if job["id"] == job_id:
state = job["state"]
if state not in ("scheduled", "running", "assigned"):
return job
break
else:
raise BuildkiteException(f"job id {job_id} doesn't exist in build " + build_info["web_url"])
url = build_info["web_url"]
if logger:
logger.log(f"Waiting for {url}, waited {t} seconds...")
time.sleep(interval_time)
t += interval_time
build_info = self.get_build_info(build_number)
def wait_build_to_finish(self, build_number, interval_time=30, logger=None):
"""Wait for a build to finish and return the build metadata
Parameters
----------
build_number : the number of the build we want to wait for
interval_time : (optional) the interval time to check the build status, default to 30s
logger : (optional) a logger to report progress
Returns
-------
dict
the latest metadata for the build
"""
t = 0
build_info = self.get_build_info(build_number)
while build_info["state"] in ("scheduled", "running"):
url = build_info["web_url"]
if logger:
logger.log(f"Waiting for {url}, waited {t} seconds...")
time.sleep(interval_time)
t += interval_time
build_info = self.get_build_info(build_number)
return build_info
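# Illustrative sketch (not part of the original script): one way the BuildkiteClient
# defined above could be used to trigger a build at a given commit and block until it
# finishes. The org, pipeline and commit values are hypothetical placeholders.
def _example_trigger_and_wait(org, pipeline, commit):
    client = BuildkiteClient(org=org, pipeline=pipeline)
    # trigger_new_build returns the Buildkite build metadata, including its "number" field.
    build_info = client.trigger_new_build(commit, message="Example build at " + commit)
    # Poll every 60 seconds until the build leaves the scheduled/running states.
    return client.wait_build_to_finish(build_info["number"], interval_time=60)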
def decrypt_token(encrypted_token, kms_key):
return (
subprocess.check_output(
[
gcloud_command(),
"kms",
"decrypt",
"--project",
"bazel-untrusted",
"--location",
"global",
"--keyring",
"buildkite",
"--key",
kms_key,
"--ciphertext-file",
"-",
"--plaintext-file",
"-",
],
input=base64.b64decode(encrypted_token),
env=os.environ,
)
.decode("utf-8")
.strip()
)
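# For reference (not part of the original script): the subprocess call above is the
# programmatic equivalent of piping the base64-decoded ciphertext through the gcloud CLI:
#   echo "$ENCRYPTED_TOKEN" | base64 -d | \
#     gcloud kms decrypt --project bazel-untrusted --location global \
#       --keyring buildkite --key <kms_key> --ciphertext-file - --plaintext-file -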
def eprint(*args, **kwargs):
"""
Print to stderr and flush (just in case).
"""
print(*args, flush=True, file=sys.stderr, **kwargs)
def is_windows():
return os.name == "nt"
def gsutil_command():
return "gsutil.cmd" if is_windows() else "gsutil"
def gcloud_command():
return "gcloud.cmd" if is_windows() else "gcloud"
def downstream_projects_root(platform):
downstream_root = os.path.expandvars(PLATFORMS[platform]["downstream-root"])
if platform == "windows" and os.path.exists("d:/b"):
# If this is a Windows machine with a local SSD, the build directory is
# on drive D.
downstream_root = downstream_root.replace("c:/b/", "d:/b/")
if not os.path.exists(downstream_root):
os.makedirs(downstream_root)
return downstream_root
def fetch_configs(http_url, file_config):
"""
If specified, fetches the build configuration from file_config or http_url; otherwise tries to
read it from .bazelci/presubmit.yml.
Returns the parsed configuration as a Python data structure.
"""
if file_config is not None and http_url is not None:
raise BuildkiteException("file_config and http_url cannot be set at the same time")
return load_config(http_url, file_config)
def load_config(http_url, file_config, allow_imports=True):
if http_url:
config = load_remote_yaml_file(http_url)
else:
file_config = file_config or ".bazelci/presubmit.yml"
with open(file_config, "r") as fd:
config = yaml.safe_load(fd)
# Legacy mode means that there is exactly one task per platform (e.g. ubuntu1604_nojdk),
# which means that we can get away with using the platform name as task ID.
# No other updates are needed since get_platform_for_task() falls back to using the
# task ID as platform if there is no explicit "platforms" field.
if "platforms" in config:
config["tasks"] = config.pop("platforms")
if "tasks" not in config:
config["tasks"] = {}
imports = config.pop("imports", None)
if imports:
if not allow_imports:
raise BuildkiteException("Nested imports are not allowed")
for i in imports:
imported_tasks = load_imported_tasks(i, http_url, file_config)
config["tasks"].update(imported_tasks)
return config
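# Illustrative sketch (not part of the original script): a minimal configuration that
# load_config() above accepts. Task names, platforms and targets are made-up placeholders.
#
#   tasks:
#     ubuntu1804:
#       build_targets: ["//..."]
#       test_targets: ["//..."]
#   imports:
#     - common.yml
#
# Legacy configs may use a top-level "platforms" key instead of "tasks"; load_config()
# renames it to "tasks", and each import is merged in via load_imported_tasks() below.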
def load_remote_yaml_file(http_url):
with urllib.request.urlopen(http_url) as resp:
reader = codecs.getreader("utf-8")
return yaml.safe_load(reader(resp))
def load_imported_tasks(import_name, http_url, file_config):
if "/" in import_name:
raise BuildkiteException("Invalid import '%s'" % import_name)
old_path = http_url or file_config
new_path = "%s%s" % (old_path[: old_path.rfind("/") + 1], import_name)
if http_url:
http_url = new_path
else:
file_config = new_path
imported_config = load_config(http_url=http_url, file_config=file_config, allow_imports=False)
namespace = import_name.partition(".")[0]
tasks = {}
for task_name, task_config in imported_config["tasks"].items():
fix_imported_task_platform(task_name, task_config)
fix_imported_task_name(namespace, task_config)
fix_imported_task_working_directory(namespace, task_config)
tasks["%s_%s" % (namespace, task_name)] = task_config
return tasks
def fix_imported_task_platform(task_name, task_config):
if "platform" not in task_config:
task_config["platform"] = task_name
def fix_imported_task_name(namespace, task_config):
old_name = task_config.get("name")
task_config["name"] = "%s (%s)" % (namespace, old_name) if old_name else namespace
def fix_imported_task_working_directory(namespace, task_config):
old_dir = task_config.get("working_directory")
task_config["working_directory"] = os.path.join(namespace, old_dir) if old_dir else namespace
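# Example (not part of the original script, names are hypothetical): importing
# "rules_foo.yml" that defines a task "ubuntu1804" registers it as "rules_foo_ubuntu1804"
# with platform "ubuntu1804", name "rules_foo" and working_directory "rules_foo",
# courtesy of the three fix_imported_task_* helpers above.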
def print_collapsed_group(name):
eprint("\n\n--- {0}\n\n".format(name))
def print_expanded_group(name):
eprint("\n\n+++ {0}\n\n".format(name))
def use_bazelisk_migrate():
"""
If USE_BAZELISK_MIGRATE is set, we use `bazelisk --migrate` to test incompatible flags.
"""
return bool(os.environ.get("USE_BAZELISK_MIGRATE"))
def bazelisk_flags():
return ["--migrate"] if use_bazelisk_migrate() else []
def calculate_flags(task_config, task_config_key, json_profile_key, tmpdir, test_env_vars):
include_json_profile = task_config.get("include_json_profile", [])
json_profile_flags = []
json_profile_out = None
if json_profile_key in include_json_profile:
json_profile_out = os.path.join(tmpdir, "{}.profile.gz".format(json_profile_key))
json_profile_flags = get_json_profile_flags(json_profile_out)
flags = task_config.get(task_config_key) or []
flags += json_profile_flags
# We have to add --test_env flags to `build`, too, otherwise Bazel
# discards its analysis cache between `build` and `test`.
if test_env_vars:
flags += ["--test_env={}".format(v) for v in test_env_vars]
return flags, json_profile_out
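# Example (hypothetical values, not part of the original script): for a task config of
#   {"build_flags": ["--cxxopt=-std=c++17"], "include_json_profile": ["build"]}
# and test_env_vars == ["HOME"], calling
#   calculate_flags(task_config, "build_flags", "build", tmpdir, test_env_vars)
# yields the configured build flags, then the JSON-profile flags pointing at
# <tmpdir>/build.profile.gz, then a trailing "--test_env=HOME", together with the
# profile output path as the second return value.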
def execute_commands(
task_config,
platform,
git_repository,
git_commit,
git_repo_location,
use_bazel_at_commit,
use_but,
save_but,
needs_clean,
build_only,
test_only,
monitor_flaky_tests,
incompatible_flags,
bazel_version=None,
):
# If we want to test incompatible flags, we ignore bazel_version and always use
# the latest Bazel version through Bazelisk.
if incompatible_flags:
bazel_version = None
if not bazel_version:
# The last good version of Bazel can be specified in an emergency file.
# However, we only use last_good_bazel for pipelines that do not
# explicitly specify a version of Bazel.
try:
emergency_settings = load_remote_yaml_file(EMERGENCY_FILE_URL)
bazel_version = emergency_settings.get("last_good_bazel")
except urllib.error.HTTPError:
# Ignore this error. The Setup step will have already complained about
# it by showing an error message.
pass
if build_only and test_only:
raise BuildkiteException("build_only and test_only cannot be true at the same time")
if use_bazel_at_commit and use_but:
raise BuildkiteException("use_bazel_at_commit cannot be set when use_but is true")
tmpdir = tempfile.mkdtemp()
sc_process = None
try:
if platform == "macos":
activate_xcode(task_config)
# If the CI worker runs Bazelisk, we need to forward all required env variables to the test.
# Otherwise any integration test that invokes Bazel (=Bazelisk in this case) will fail.
test_env_vars = ["LocalAppData"] if platform == "windows" else ["HOME"]
if git_repo_location:
os.chdir(git_repo_location)
elif git_repository:
clone_git_repository(git_repository, platform, git_commit)
# We use one binary for all Linux platforms (because we also just release one binary for all
# Linux versions and we have to ensure that it works on all of them).
binary_platform = platform if platform in ["macos", "windows"] else LINUX_BINARY_PLATFORM
if use_bazel_at_commit:
print_collapsed_group(":gcloud: Downloading Bazel built at " + use_bazel_at_commit)
bazel_binary = download_bazel_binary_at_commit(
tmpdir, binary_platform, use_bazel_at_commit
)
os.environ["USE_BAZEL_VERSION"] = bazel_binary
elif use_but:
print_collapsed_group(":gcloud: Downloading Bazel Under Test")
bazel_binary = download_bazel_binary(tmpdir, binary_platform)
os.environ["USE_BAZEL_VERSION"] = bazel_binary
else:
bazel_binary = "bazel"
if bazel_version:
os.environ["USE_BAZEL_VERSION"] = bazel_version
if "USE_BAZEL_VERSION" in os.environ and not task_config.get(
"skip_use_bazel_version_for_test", False
):
# This will only work if the bazel binary in $PATH is actually a bazelisk binary
# (https://github.com/bazelbuild/bazelisk).
test_env_vars.append("USE_BAZEL_VERSION")
for key, value in task_config.get("environment", {}).items():
# We have to explicitly convert the value to a string, because sometimes YAML tries to
# be smart and converts strings like "true" and "false" to booleans.
os.environ[key] = str(value)
# Set BAZELISK_SHUTDOWN to 1 when we use bazelisk --migrate on Windows.
# This is a workaround for https://github.com/bazelbuild/continuous-integration/issues/1012
if use_bazelisk_migrate() and platform == "windows":
os.environ["BAZELISK_SHUTDOWN"] = "1"
cmd_exec_func = execute_batch_commands if platform == "windows" else execute_shell_commands
cmd_exec_func(task_config.get("setup", None))
# Allow the config to override the current working directory.
required_prefix = os.getcwd()
requested_working_dir = os.path.abspath(task_config.get("working_directory", ""))
if os.path.commonpath([required_prefix, requested_working_dir]) != required_prefix:
raise BuildkiteException("working_directory refers to a path outside the workspace")
os.chdir(requested_working_dir)
if platform == "windows":
execute_batch_commands(task_config.get("batch_commands", None))
else:
execute_shell_commands(task_config.get("shell_commands", None))
bazel_version = print_bazel_version_info(bazel_binary, platform)
print_environment_variables_info()
if incompatible_flags:
print_expanded_group("Build and test with the following incompatible flags:")
for flag in incompatible_flags:
eprint(flag + "\n")
execute_bazel_run(
bazel_binary, platform, task_config.get("run_targets", None), incompatible_flags
)
if task_config.get("sauce"):
sc_process = start_sauce_connect_proxy(platform, tmpdir)
if needs_clean:
execute_bazel_clean(bazel_binary, platform)
build_targets, test_targets, index_targets = calculate_targets(
task_config, platform, bazel_binary, build_only, test_only
)
if build_targets:
build_flags, json_profile_out_build = calculate_flags(task_config, "build_flags", "build", tmpdir, test_env_vars)
try:
execute_bazel_build(
bazel_version,
bazel_binary,
platform,
build_flags,
build_targets,
None,
incompatible_flags,
)
if save_but:
upload_bazel_binary(platform)
finally:
if json_profile_out_build:
upload_json_profile(json_profile_out_build, tmpdir)
if test_targets:
test_flags, json_profile_out_test = calculate_flags(task_config, "test_flags", "test", tmpdir, test_env_vars)
if not is_windows():
# On platforms that support sandboxing (Linux, MacOS) we have
# to allow access to Bazelisk's cache directory.
# However, the flag requires the directory to exist,
# so we create it here in order to not crash when a test
# does not invoke Bazelisk.
bazelisk_cache_dir = get_bazelisk_cache_directory(platform)
os.makedirs(bazelisk_cache_dir, mode=0o755, exist_ok=True)
test_flags.append("--sandbox_writable_path={}".format(bazelisk_cache_dir))
test_bep_file = os.path.join(tmpdir, "test_bep.json")
stop_request = threading.Event()
upload_thread = threading.Thread(
target=upload_test_logs_from_bep, args=(test_bep_file, tmpdir, stop_request)
)
try:
upload_thread.start()
try:
execute_bazel_test(
bazel_version,
bazel_binary,
platform,
test_flags,
test_targets,
test_bep_file,
monitor_flaky_tests,
incompatible_flags,
)
if monitor_flaky_tests:
upload_bep_logs_for_flaky_tests(test_bep_file)
finally:
if json_profile_out_test:
upload_json_profile(json_profile_out_test, tmpdir)
finally:
stop_request.set()
upload_thread.join()
if index_targets:
index_flags, json_profile_out_index = calculate_flags(task_config, "index_flags", "index", tmpdir, test_env_vars)
index_upload_policy = task_config.get("index_upload_policy", "IfBuildSuccess")
index_upload_gcs = task_config.get("index_upload_gcs", False)
try:
should_upload_kzip = index_upload_policy == INDEX_UPLOAD_POLICY_ALWAYS
try:
execute_bazel_build_with_kythe(
bazel_version,
bazel_binary,
platform,
index_flags,
index_targets,
None,
incompatible_flags
)
if index_upload_policy == INDEX_UPLOAD_POLICY_IF_BUILD_SUCCESS:
should_upload_kzip = True
except subprocess.CalledProcessError as e:
# If not running with Always policy, raise the build error.
if index_upload_policy != INDEX_UPLOAD_POLICY_ALWAYS:
handle_bazel_failure(e, "build")
if should_upload_kzip:
try:
merge_and_upload_kythe_kzip(platform, index_upload_gcs)
except subprocess.CalledProcessError:
raise BuildkiteException("Failed to upload kythe kzip")
finally:
if json_profile_out_index:
upload_json_profile(json_profile_out_index, tmpdir)
finally:
terminate_background_process(sc_process)
if tmpdir:
shutil.rmtree(tmpdir)
def activate_xcode(task_config):
# Get the Xcode version from the config.
xcode_version = task_config.get("xcode_version", DEFAULT_XCODE_VERSION)
print_collapsed_group("Activating Xcode {}...".format(xcode_version))
# Ensure it's a valid version number.
if not isinstance(xcode_version, str):
raise BuildkiteException(
"Version number '{}' is not a string. Did you forget to put it in quotes?".format(
xcode_version
)
)
if not XCODE_VERSION_REGEX.match(xcode_version):
raise BuildkiteException(
"Invalid Xcode version format '{}', must match the format X.Y[.Z].".format(
xcode_version
)
)
# This is used to replace e.g. 11.2 with 11.2.1 without having to update all configs.
xcode_version = XCODE_VERSION_OVERRIDES.get(xcode_version, xcode_version)
# Check that the selected Xcode version is actually installed on the host.
xcode_path = "/Applications/Xcode{}.app".format(xcode_version)
if not os.path.exists(xcode_path):
raise BuildkiteException("Xcode not found at '{}'.".format(xcode_path))
# Now activate the specified Xcode version and let it install its required components.
# The CI machines have a sudoers config that allows the 'buildkite' user to run exactly
# these two commands, so don't change them without also modifying the file there.
execute_command(["/usr/bin/sudo", "/usr/bin/xcode-select", "--switch", xcode_path])
execute_command(["/usr/bin/sudo", "/usr/bin/xcodebuild", "-runFirstLaunch"])
def get_bazelisk_cache_directory(platform):
# The path relies on the behavior of Go's os.UserCacheDir()
# and of the Go version of Bazelisk.
cache_dir = "Library/Caches" if platform == "macos" else ".cache"
return os.path.join(os.environ.get("HOME"), cache_dir, "bazelisk")
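# Example (not part of the original script): with HOME=/home/buildkite this resolves to
# /home/buildkite/.cache/bazelisk on Linux, and to $HOME/Library/Caches/bazelisk on
# macOS, matching where the Go version of Bazelisk keeps its cache.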
def tests_with_status(bep_file, status):
return set(label for label, _ in test_logs_for_status(bep_file, status=[status]))
def start_sauce_connect_proxy(platform, tmpdir):
print_collapsed_group(":saucelabs: Starting Sauce Connect Proxy")
os.environ["SAUCE_USERNAME"] = "bazel_rules_webtesting"
os.environ["SAUCE_ACCESS_KEY"] = saucelabs_token()
os.environ["TUNNEL_IDENTIFIER"] = str(uuid.uuid4())
os.environ["BUILD_TAG"] = str(uuid.uuid4())
readyfile = os.path.join(tmpdir, "sc_is_ready")
if platform == "windows":
cmd = ["sauce-connect.exe", "-i", os.environ["TUNNEL_IDENTIFIER"], "-f", readyfile]
else:
cmd = ["sc", "-i", os.environ["TUNNEL_IDENTIFIER"], "-f", readyfile]
sc_process = execute_command_background(cmd)
wait_start = time.time()
while not os.path.exists(readyfile):
if time.time() - wait_start > 60:
raise BuildkiteException(
"Sauce Connect Proxy is still not ready after 60 seconds, aborting!"
)
time.sleep(1)
print("Sauce Connect Proxy is ready, continuing...")
return sc_process
def saucelabs_token():
return decrypt_token(encrypted_token=ENCRYPTED_SAUCELABS_TOKEN, kms_key="saucelabs-access-key")
def is_pull_request():
third_party_repo = os.getenv("BUILDKITE_PULL_REQUEST_REPO", "")
return len(third_party_repo) > 0
def has_flaky_tests(bep_file):
return len(test_logs_for_status(bep_file, status=["FLAKY"])) > 0
def print_bazel_version_info(bazel_binary, platform):
print_collapsed_group(":information_source: Bazel Info")
version_output = execute_command_and_get_output(
[bazel_binary]
+ common_startup_flags(platform)
+ ["--nomaster_bazelrc", "--bazelrc=/dev/null", "version"]
)
execute_command(
[bazel_binary]
+ common_startup_flags(platform)
+ ["--nomaster_bazelrc", "--bazelrc=/dev/null", "info"]
)
match = BUILD_LABEL_PATTERN.search(version_output)
return match.group(1) if match else "unreleased binary"
def print_environment_variables_info():
print_collapsed_group(":information_source: Environment Variables")
for key, value in os.environ.items():
eprint("%s=(%s)" % (key, value))
def upload_bazel_binary(platform):
print_collapsed_group(":gcloud: Uploading Bazel Under Test")
if platform == "windows":
binary_dir = r"bazel-bin\src"
binary_name = r"bazel.exe"
binary_nojdk_name = r"bazel_nojdk.exe"
else:
binary_dir = "bazel-bin/src"
binary_name = "bazel"
binary_nojdk_name = "bazel_nojdk"
execute_command(["buildkite-agent", "artifact", "upload", binary_name], cwd=binary_dir)
execute_command(["buildkite-agent", "artifact", "upload", binary_nojdk_name], cwd=binary_dir)
def merge_and_upload_kythe_kzip(platform, index_upload_gcs):
print_collapsed_group(":gcloud: Uploading kythe kzip")
kzips = glob.glob("bazel-out/*/extra_actions/**/*.kzip", recursive=True)
build_number = os.getenv("BUILDKITE_BUILD_NUMBER")
git_commit = os.getenv("BUILDKITE_COMMIT")
final_kzip_name = "{}-{}-{}.kzip".format(build_number, platform, git_commit)
execute_command([f"{KYTHE_DIR}/tools/kzip", "merge", "--output", final_kzip_name] + kzips)
execute_command(["buildkite-agent", "artifact", "upload", final_kzip_name])
if index_upload_gcs:
pipeline = os.getenv("BUILDKITE_PIPELINE_SLUG")
destination = KZIPS_BUCKET + pipeline + "/" + final_kzip_name
print("Uploading to GCS {}".format(destination))
execute_command(
[
gsutil_command(),
"cp",
final_kzip_name,
destination,
]
)
def download_binary(dest_dir, platform, binary_name):
source_step = create_label(platform, "Bazel", build_only=True)
execute_command(
["buildkite-agent", "artifact", "download", binary_name, dest_dir, "--step", source_step]
)
bazel_binary_path = os.path.join(dest_dir, binary_name)
st = os.stat(bazel_binary_path)
os.chmod(bazel_binary_path, st.st_mode | stat.S_IEXEC)
return bazel_binary_path
def download_bazel_binary(dest_dir, platform):
binary_name = "bazel.exe" if platform == "windows" else "bazel"
return download_binary(dest_dir, platform, binary_name)
def download_bazel_nojdk_binary(dest_dir, platform):
binary_name = "bazel_nojdk.exe" if platform == "windows" else "bazel_nojdk"
return download_binary(dest_dir, platform, binary_name)
def download_binary_at_commit(dest_dir, platform, bazel_git_commit, bazel_binary_url, bazel_binary_path):
try:
execute_command(
[
gsutil_command(),
"cp",
bazel_binary_url,
bazel_binary_path,
]
)
except subprocess.CalledProcessError as e:
raise BuildkiteException(
"Failed to download Bazel binary at %s, error message:\n%s" % (bazel_git_commit, str(e))
)
st = os.stat(bazel_binary_path)
os.chmod(bazel_binary_path, st.st_mode | stat.S_IEXEC)
return bazel_binary_path
def download_bazel_binary_at_commit(dest_dir, platform, bazel_git_commit):
url = bazelci_builds_gs_url(platform, bazel_git_commit)
path = os.path.join(dest_dir, "bazel.exe" if platform == "windows" else "bazel")
return download_binary_at_commit(dest_dir, platform, bazel_git_commit, url, path)
def download_bazel_nojdk_binary_at_commit(dest_dir, platform, bazel_git_commit):
url = bazelci_builds_nojdk_gs_url(platform, bazel_git_commit)
path = os.path.join(dest_dir, "bazel_nojdk.exe" if platform == "windows" else "bazel_nojdk")
return download_binary_at_commit(dest_dir, platform, bazel_git_commit, url, path)
def get_mirror_path(git_repository, platform):
mirror_root = {
"macos": "/usr/local/var/bazelbuild/",
"windows": "c:\\buildkite\\bazelbuild\\",
}.get(platform, "/var/lib/bazelbuild/")
return mirror_root + re.sub(r"[^0-9A-Za-z]", "-", git_repository)
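# Example (not part of the original script): on a Linux worker,
#   get_mirror_path("https://github.com/bazelbuild/bazel.git", "ubuntu1804")
# returns "/var/lib/bazelbuild/https---github-com-bazelbuild-bazel-git", since every
# character in the repository URL that is not a letter or digit is replaced with "-".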
def clone_git_repository(git_repository, platform, git_commit=None):
root = downstream_projects_root(platform)
project_name = re.search(r"/([^/]+)\.git$", git_repository).group(1)
clone_path = os.path.join(root, project_name)
print_collapsed_group(
"Fetching %s sources at %s" % (project_name, git_commit if git_commit else "HEAD")
)
mirror_path = get_mirror_path(git_repository, platform)
if not os.path.exists(clone_path):
if os.path.exists(mirror_path):
execute_command(
["git", "clone", "-v", "--reference", mirror_path, git_repository, clone_path]
)
else:
execute_command(["git", "clone", "-v", git_repository, clone_path])
os.chdir(clone_path)
execute_command(["git", "remote", "set-url", "origin", git_repository])
execute_command(["git", "clean", "-fdqx"])
execute_command(["git", "submodule", "foreach", "--recursive", "git clean -fdqx"])
execute_command(["git", "fetch", "origin"])
if git_commit:
# sync to a specific commit of this repository
execute_command(["git", "reset", git_commit, "--hard"])
else:
# sync to the latest commit of HEAD. Unlike git pull, this also works after a force push.
remote_head = (
subprocess.check_output(["git", "symbolic-ref", "refs/remotes/origin/HEAD"])
.decode("utf-8")
.rstrip()
)
execute_command(["git", "reset", remote_head, "--hard"])
execute_command(["git", "submodule", "sync", "--recursive"])
execute_command(["git", "submodule", "update", "--init", "--recursive", "--force"])
execute_command(["git", "submodule", "foreach", "--recursive", "git reset --hard"])
execute_command(["git", "clean", "-fdqx"])
execute_command(["git", "submodule", "foreach", "--recursive", "git clean -fdqx"])
return clone_path
def execute_batch_commands(commands):
if not commands:
return
print_collapsed_group(":batch: Setup (Batch Commands)")
batch_commands = "&".join(commands)
return subprocess.run(batch_commands, shell=True, check=True, env=os.environ).returncode
def execute_shell_commands(commands):
if not commands:
return
print_collapsed_group(":bash: Setup (Shell Commands)")
shell_command = "\n".join(["set -e"] + commands)
execute_command([shell_command], shell=True)
def handle_bazel_failure(exception, action):
msg = "bazel {0} failed with exit code {1}".format(action, exception.returncode)
if use_bazelisk_migrate():
print_collapsed_group(msg)
else:
raise BuildkiteException(msg)
def execute_bazel_run(bazel_binary, platform, targets, incompatible_flags):
if not targets:
return
print_collapsed_group("Setup (Run Targets)")
# When using bazelisk --migrate to test incompatible flags,
# incompatible flags set by "INCOMPATIBLE_FLAGS" env var will be ignored.
incompatible_flags_to_use = (
[] if (use_bazelisk_migrate() or not incompatible_flags) else incompatible_flags
)
for target in targets:
try:
execute_command(
[bazel_binary]
+ bazelisk_flags()
+ common_startup_flags(platform)
+ ["run"]
+ common_build_flags(None, platform)
+ incompatible_flags_to_use
+ [target]
)
except subprocess.CalledProcessError as e:
handle_bazel_failure(e, "run")
def remote_caching_flags(platform):
# Only enable caching for untrusted and testing builds.
if CLOUD_PROJECT not in ["bazel-untrusted"]:
return []
platform_cache_key = [BUILDKITE_ORG.encode("utf-8")]
# Whenever the remote cache is known to have been poisoned, bump the date in the key below.
platform_cache_key += ["cache-poisoning-20201011".encode("utf-8")]
if platform == "macos":
platform_cache_key += [
# macOS version:
subprocess.check_output(["/usr/bin/sw_vers", "-productVersion"]),
# Path to Xcode:
subprocess.check_output(["/usr/bin/xcode-select", "-p"]),
# Xcode version:
subprocess.check_output(["/usr/bin/xcodebuild", "-version"]),
]
# Use a local cache server for our macOS machines.
flags = ["--remote_cache=http://100.107.73.148"]
else:
platform_cache_key += [
# Platform name:
platform.encode("utf-8")
]
# Use RBE for caching builds running on GCE.
flags = [
"--google_default_credentials",
"--remote_cache=remotebuildexecution.googleapis.com",
"--remote_instance_name=projects/{}/instances/default_instance".format(CLOUD_PROJECT),
]
platform_cache_digest = hashlib.sha256()
for key in platform_cache_key:
eprint("Adding to platform cache key: {}".format(key))
platform_cache_digest.update(key)
platform_cache_digest.update(b":")
flags += [
"--remote_timeout=60",
"--remote_max_connections=200",
'--remote_default_platform_properties=properties:{name:"cache-silo-key" value:"%s"}'
% platform_cache_digest.hexdigest(),
]
return flags
def remote_enabled(flags):
# Detect if the project configuration enabled its own remote caching / execution.
remote_flags = ["--remote_executor", "--remote_cache", "--remote_http_cache"]
for flag in flags:
for remote_flag in remote_flags:
if flag.startswith(remote_flag):
return True
return False
def concurrent_jobs(platform):
return "75" if platform.startswith("rbe_") else str(multiprocessing.cpu_count())
def concurrent_test_jobs(platform):
if platform.startswith("rbe_"):
return "75"
elif platform == "windows":
return "8"
elif platform == "macos":
return "8"
return "12"
def common_startup_flags(platform):
if platform == "windows":
if os.path.exists("D:/b"):
# This machine has a local SSD mounted as drive D.
return ["--output_user_root=D:/b"]
else:
# This machine uses its PD-SSD as the build directory.
return ["--output_user_root=C:/b"]
return []
def common_build_flags(bep_file, platform):
flags = [
"--show_progress_rate_limit=5",
"--curses=yes",
"--color=yes",
"--terminal_columns=143",
"--show_timestamps",
"--verbose_failures",
"--jobs=" + concurrent_jobs(platform),
"--announce_rc",
"--experimental_repository_cache_hardlinks",
# Some projects set --disk_cache in their project-specific bazelrc, which we never want on
# CI, so let's just disable it explicitly.
"--disk_cache=",
]
if platform == "windows":
pass
elif platform == "macos":
flags += [
"--sandbox_writable_path=/var/tmp/_bazel_buildkite/cache/repos/v1",
"--test_env=REPOSITORY_CACHE=/var/tmp/_bazel_buildkite/cache/repos/v1",
]
else:
flags += ["--sandbox_tmpfs_path=/tmp"]
if bep_file:
flags += [
"--experimental_build_event_json_file_path_conversion=false",
"--build_event_json_file=" + bep_file,
]
return flags
def rbe_flags(original_flags, accept_cached):
# Enable remote execution via RBE.
flags = [
"--remote_executor=remotebuildexecution.googleapis.com",
"--remote_instance_name=projects/bazel-untrusted/instances/default_instance",
"--incompatible_strict_action_env",
"--google_default_credentials",
"--toolchain_resolution_debug",
]
# Enable BES / Build Results reporting.
flags += [
"--bes_backend=buildeventservice.googleapis.com",
"--bes_timeout=360s",
"--project_id=bazel-untrusted",
]
if not accept_cached:
flags += ["--noremote_accept_cached"]
# Adapted from https://github.com/bazelbuild/bazel-toolchains/blob/master/bazelrc/.bazelrc
flags += [
# These should no longer need to be modified.
# All that is needed is updating the @bazel_toolchains repo pin
# in projects' WORKSPACE files.
#
# Toolchain related flags to append at the end of your .bazelrc file.
"--host_javabase=@buildkite_config//java:jdk",
"--javabase=@buildkite_config//java:jdk",
"--host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8",
"--java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8",
"--crosstool_top=@buildkite_config//cc:toolchain",
"--action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1",
]
# Platform flags:
# The toolchain container used for execution is defined in the target indicated
# by "extra_execution_platforms", "host_platform" and "platforms".
# If you are using your own toolchain container, you need to create a platform
# target with "constraint_values" that allow for the toolchain specified with
# "extra_toolchains" to be selected (given constraints defined in
# "exec_compatible_with").
# More about platforms: https://docs.bazel.build/versions/master/platforms.html
# Don't add platform flags if they are specified already.
platform_flags = {
"--extra_toolchains": "@buildkite_config//config:cc-toolchain",
"--extra_execution_platforms": "@buildkite_config//config:platform",
"--host_platform": "@buildkite_config//config:platform",
"--platforms": "@buildkite_config//config:platform",
}
for platform_flag, value in list(platform_flags.items()):
found = False
for original_flag in original_flags:
if original_flag.startswith(platform_flag):
found = True
break
if not found:
flags += [platform_flag + "=" + value]
return flags
def compute_flags(
platform, flags, incompatible_flags, bep_file, bazel_binary, enable_remote_cache=False
):
aggregated_flags = common_build_flags(bep_file, platform)
if not remote_enabled(flags):
if platform.startswith("rbe_"):
aggregated_flags += rbe_flags(flags, accept_cached=enable_remote_cache)
elif enable_remote_cache:
aggregated_flags += remote_caching_flags(platform)
aggregated_flags += flags
if incompatible_flags:
aggregated_flags += incompatible_flags
for i, flag in enumerate(aggregated_flags):
if "$HOME" in flag:
if platform == "windows":
if os.path.exists("D:/"):
home = "D:"
else:
home = "C:/b"
elif platform == "macos":
home = "/Users/buildkite"
else:
home = "/var/lib/buildkite-agent"
aggregated_flags[i] = flag.replace("$HOME", home)
if "$OUTPUT_BASE" in flag:
output_base = execute_command_and_get_output(
[bazel_binary] + common_startup_flags(platform) + ["info", "output_base"],
print_output=False,
).strip()
aggregated_flags[i] = flag.replace("$OUTPUT_BASE", output_base)
return aggregated_flags
def execute_bazel_clean(bazel_binary, platform):
print_expanded_group(":bazel: Clean")
try:
execute_command([bazel_binary] + common_startup_flags(platform) + ["clean", "--expunge"])
except subprocess.CalledProcessError as e:
raise BuildkiteException("bazel clean failed with exit code {}".format(e.returncode))
def kythe_startup_flags():
return [f"--bazelrc={KYTHE_DIR}/extractors.bazelrc"]
def kythe_build_flags():
return [f"--override_repository=kythe_release={KYTHE_DIR}"]
def execute_bazel_build(
bazel_version, bazel_binary, platform, flags, targets, bep_file, incompatible_flags
):
print_collapsed_group(":bazel: Computing flags for build step")
aggregated_flags = compute_flags(
platform,
flags,
# When using bazelisk --migrate to test incompatible flags,
# incompatible flags set by "INCOMPATIBLE_FLAGS" env var will be ignored.
[] if (use_bazelisk_migrate() or not incompatible_flags) else incompatible_flags,
bep_file,
bazel_binary,
enable_remote_cache=True,
)
print_expanded_group(":bazel: Build ({})".format(bazel_version))
try:
execute_command(
[bazel_binary]
+ bazelisk_flags()
+ common_startup_flags(platform)
+ ["build"]
+ aggregated_flags
+ ["--"]
+ targets
)
except subprocess.CalledProcessError as e:
handle_bazel_failure(e, "build")
def execute_bazel_build_with_kythe(
bazel_version, bazel_binary, platform, flags, targets, bep_file, incompatible_flags
):
print_collapsed_group(":bazel: Computing flags for build step")
aggregated_flags = compute_flags(
platform,
flags,
# When using bazelisk --migrate to test incompatible flags,
# incompatible flags set by "INCOMPATIBLE_FLAGS" env var will be ignored.
[] if (use_bazelisk_migrate() or not incompatible_flags) else incompatible_flags,
bep_file,
bazel_binary,
enable_remote_cache=False,
)
print_expanded_group(":bazel: Build ({})".format(bazel_version))
execute_command(
[bazel_binary]
+ bazelisk_flags()
+ common_startup_flags(platform)
+ kythe_startup_flags()
+ ["build"]
+ kythe_build_flags()
+ aggregated_flags
+ ["--"]
+ targets
)
def calculate_targets(task_config, platform, bazel_binary, build_only, test_only):
build_targets = [] if test_only else task_config.get("build_targets", [])
test_targets = [] if build_only else task_config.get("test_targets", [])
index_targets = [] if (build_only or test_only) else task_config.get("index_targets", [])
index_targets_query = None if (build_only or test_only) else task_config.get("index_targets_query", None)
if index_targets_query:
output = execute_command_and_get_output(
[bazel_binary]
+ common_startup_flags(platform)
+ [
"--nomaster_bazelrc",
"--bazelrc=/dev/null",
"query",
index_targets_query,
],
print_output=False,
)
index_targets += output.strip().split("\n")
# Remove the "--" argument splitter from the list that some configs explicitly
# include. We'll add it back again later where needed.
build_targets = [x.strip() for x in build_targets if x.strip() != "--"]
test_targets = [x.strip() for x in test_targets if x.strip() != "--"]
index_targets = [x.strip() for x in index_targets if x.strip() != "--"]
shard_id = int(os.getenv("BUILDKITE_PARALLEL_JOB", "-1"))
shard_count = int(os.getenv("BUILDKITE_PARALLEL_JOB_COUNT", "-1"))
if shard_id > -1 and shard_count > -1:
print_collapsed_group(
":female-detective: Calculating targets for shard {}/{}".format(
shard_id + 1, shard_count
)
)
expanded_test_targets = expand_test_target_patterns(bazel_binary, platform, test_targets)
test_targets = get_targets_for_shard(expanded_test_targets, shard_id, shard_count)
return build_targets, test_targets, index_targets
def expand_test_target_patterns(bazel_binary, platform, test_targets):
included_targets, excluded_targets = partition_targets(test_targets)
excluded_string = (
" except tests(set({}))".format(" ".join("'{}'".format(t) for t in excluded_targets))
if excluded_targets
else ""
)
exclude_manual = ' except tests(attr("tags", "manual", set({})))'.format(
" ".join("'{}'".format(t) for t in included_targets)
)
eprint("Resolving test targets via bazel query")
output = execute_command_and_get_output(
[bazel_binary]
+ common_startup_flags(platform)
+ [
"--nomaster_bazelrc",
"--bazelrc=/dev/null",
"query",
"tests(set({})){}{}".format(
" ".join("'{}'".format(t) for t in included_targets),
excluded_string,
exclude_manual,
),
],
print_output=False,
)
return output.strip().split("\n")
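# Illustrative sketch (hypothetical targets, not from a real config): for
# test_targets = ["//foo/...", "//bar:some_test", "-//bar:flaky_test"], the query string
# handed to "bazel query" above would be (shown wrapped here; it is a single line)
#   tests(set('//foo/...' '//bar:some_test'))
#     except tests(set('//bar:flaky_test'))
#     except tests(attr("tags", "manual", set('//foo/...' '//bar:some_test')))
# so excluded targets and targets tagged "manual" are filtered out by Bazel itself.
def _example_partitioned_targets():
    return partition_targets(["//foo/...", "//bar:some_test", "-//bar:flaky_test"])
    # -> (["//foo/...", "//bar:some_test"], ["//bar:flaky_test"])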
def partition_targets(targets):
included_targets, excluded_targets = [], []
for target in targets:
if target.startswith("-"):
excluded_targets.append(target[1:])
else:
included_targets.append(target)
return included_targets, excluded_targets
def get_targets_for_shard(test_targets, shard_id, shard_count):
# TODO(fweikert): implement a more sophisticated algorithm
return sorted(test_targets)[shard_id::shard_count]
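# Illustrative sketch (hypothetical targets): the sorted target list is distributed
# round-robin via Python slicing, so with shard_count = 3 the first shard gets every
# third target starting at index 0.
def _example_shard_split():
    targets = ["//a:t1", "//b:t2", "//c:t3", "//d:t4", "//e:t5"]
    # get_targets_for_shard() sorts first, then takes targets[shard_id::shard_count].
    return [get_targets_for_shard(targets, shard_id, 3) for shard_id in range(3)]
    # -> [["//a:t1", "//d:t4"], ["//b:t2", "//e:t5"], ["//c:t3"]]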
def execute_bazel_test(
bazel_version,
bazel_binary,
platform,
flags,
targets,
bep_file,
monitor_flaky_tests,
incompatible_flags,
):
aggregated_flags = [
"--flaky_test_attempts=3",
"--build_tests_only",
"--local_test_jobs=" + concurrent_test_jobs(platform),
]
# Don't enable remote caching if the user enabled remote execution / caching themselves
# or flaky test monitoring is enabled, as remote caching makes tests look less flaky than
# they are.
print_collapsed_group(":bazel: Computing flags for test step")
aggregated_flags += compute_flags(
platform,
flags,
# When using bazelisk --migrate to test incompatible flags,
# incompatible flags set by "INCOMPATIBLE_FLAGS" env var will be ignored.
[] if (use_bazelisk_migrate() or not incompatible_flags) else incompatible_flags,
bep_file,
bazel_binary,
enable_remote_cache=not monitor_flaky_tests,
)
print_expanded_group(":bazel: Test ({})".format(bazel_version))
try:
execute_command(
[bazel_binary]
+ bazelisk_flags()
+ common_startup_flags(platform)
+ ["test"]
+ aggregated_flags
+ ["--"]
+ targets
)
except subprocess.CalledProcessError as e:
handle_bazel_failure(e, "test")
def get_json_profile_flags(out_file):
return [
"--experimental_generate_json_trace_profile",
"--experimental_profile_cpu_usage",
"--experimental_json_trace_compression",
"--profile={}".format(out_file),
]
def upload_bep_logs_for_flaky_tests(test_bep_file):
if has_flaky_tests(test_bep_file):
build_number = os.getenv("BUILDKITE_BUILD_NUMBER")
pipeline_slug = os.getenv("BUILDKITE_PIPELINE_SLUG")
execute_command(
[
gsutil_command(),
"cp",
test_bep_file,
FLAKY_TESTS_BUCKET + pipeline_slug + "/" + build_number + ".json",
]
)
def upload_test_logs_from_bep(bep_file, tmpdir, stop_request):
uploaded_targets = set()
while True:
done = stop_request.is_set()
if os.path.exists(bep_file):
all_test_logs = test_logs_for_status(bep_file, status=["FAILED", "TIMEOUT", "FLAKY"])
test_logs_to_upload = [
(target, files) for target, files in all_test_logs if target not in uploaded_targets
]
if test_logs_to_upload:
files_to_upload = rename_test_logs_for_upload(test_logs_to_upload, tmpdir)
cwd = os.getcwd()
try:
os.chdir(tmpdir)
test_logs = [os.path.relpath(file, tmpdir) for file in files_to_upload]
test_logs = sorted(test_logs)
execute_command(["buildkite-agent", "artifact", "upload", ";".join(test_logs)])
finally:
uploaded_targets.update([target for target, _ in test_logs_to_upload])
os.chdir(cwd)
if done:
break
time.sleep(5)
def upload_json_profile(json_profile_path, tmpdir):
if not os.path.exists(json_profile_path):
return
print_collapsed_group(":gcloud: Uploading JSON Profile")
execute_command(["buildkite-agent", "artifact", "upload", json_profile_path], cwd=tmpdir)
def rename_test_logs_for_upload(test_logs, tmpdir):
# Rename the test.log files to the target that created them
# so that it's easy to associate test.log and target.
new_paths = []
for label, files in test_logs:
attempt = 0
if len(files) > 1:
attempt = 1
for test_log in files:
try:
new_path = test_label_to_path(tmpdir, label, attempt)
os.makedirs(os.path.dirname(new_path), exist_ok=True)
copyfile(test_log, new_path)
new_paths.append(new_path)
attempt += 1
except IOError as err:
# Log error and ignore.
eprint(err)
return new_paths
def test_label_to_path(tmpdir, label, attempt):
# remove leading //
path = label[2:]
path = path.replace("/", os.sep)
path = path.replace(":", os.sep)
if attempt == 0:
path = os.path.join(path, "test.log")
else:
path = os.path.join(path, "attempt_" + str(attempt) + ".log")
return os.path.join(tmpdir, path)
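# Illustrative sketch (hypothetical label): "//foo/bar:my_test" is mapped to
# "<tmpdir>/foo/bar/my_test/test.log" for a single run, or to
# "<tmpdir>/foo/bar/my_test/attempt_2.log" for the second attempt of a flaky test.
def _example_test_log_path(tmpdir):
    return test_label_to_path(tmpdir, "//foo/bar:my_test", 0)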
def test_logs_for_status(bep_file, status):
targets = []
with open(bep_file, encoding="utf-8") as f:
raw_data = f.read()
decoder = json.JSONDecoder()
pos = 0
while pos < len(raw_data):
try:
bep_obj, size = decoder.raw_decode(raw_data[pos:])
except ValueError as e:
eprint("JSON decoding error: " + str(e))
return targets
if "testSummary" in bep_obj:
test_target = bep_obj["id"]["testSummary"]["label"]
test_status = bep_obj["testSummary"]["overallStatus"]
if test_status in status:
outputs = bep_obj["testSummary"]["failed"]
test_logs = []
for output in outputs:
test_logs.append(url2pathname(urlparse(output["uri"]).path))
targets.append((test_target, test_logs))
pos += size + 1
return targets
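# Illustrative sketch of the raw_decode loop above: a BEP JSON file is a stream of
# concatenated JSON objects (one per line), not a single document, so json.loads() on
# the whole file would fail. raw_decode() returns the parsed object plus the number of
# characters consumed, which lets us walk through the stream object by object.
def _example_parse_json_stream(raw_data):
    decoder = json.JSONDecoder()
    objects, pos = [], 0
    while pos < len(raw_data):
        obj, size = decoder.raw_decode(raw_data[pos:])
        objects.append(obj)
        pos += size + 1  # skip the newline separating two objects
    return objects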
def execute_command_and_get_output(args, shell=False, fail_if_nonzero=True, print_output=True):
eprint(" ".join(args))
process = subprocess.run(
args,
shell=shell,
check=fail_if_nonzero,
env=os.environ,
stdout=subprocess.PIPE,
errors="replace",
universal_newlines=True,
)
if print_output:
eprint(process.stdout)
return process.stdout
def execute_command(args, shell=False, fail_if_nonzero=True, cwd=None, print_output=True):
if print_output:
eprint(" ".join(args))
return subprocess.run(
args, shell=shell, check=fail_if_nonzero, env=os.environ, cwd=cwd
).returncode
def execute_command_background(args):
eprint(" ".join(args))
return subprocess.Popen(args, env=os.environ)
def terminate_background_process(process):
if process:
process.terminate()
try:
process.wait(timeout=10)
except subprocess.TimeoutExpired:
process.kill()
def create_step(label, commands, platform, shards=1):
if "docker-image" in PLATFORMS[platform]:
step = create_docker_step(
label, image=PLATFORMS[platform]["docker-image"], commands=commands
)
else:
step = {
"label": label,
"command": commands,
"agents": {"queue": PLATFORMS[platform]["queue"]},
}
if shards > 1:
step["label"] += " (shard %n)"
step["parallelism"] = shards
# Enforce a global 8 hour job timeout.
step["timeout_in_minutes"] = 8 * 60
# Automatically retry when an agent got lost (usually due to an infra flake).
step["retry"] = {
"automatic": [
{"exit_status": -1, "limit": 3}, # Buildkite internal "agent lost" exit code
{"exit_status": 137, "limit": 3}, # SIGKILL
{"exit_status": 143, "limit": 3}, # SIGTERM
]
}
return step
def create_docker_step(label, image, commands=None, additional_env_vars=None):
env = ["ANDROID_HOME", "ANDROID_NDK_HOME", "BUILDKITE_ARTIFACT_UPLOAD_DESTINATION"]
if additional_env_vars:
env += ["{}={}".format(k, v) for k, v in additional_env_vars.items()]
step = {
"label": label,
"command": commands,
"agents": {"queue": "default"},
"plugins": {
"docker#v3.5.0": {
"always-pull": True,
"environment": env,
"image": image,
"network": "host",
"privileged": True,
"propagate-environment": True,
"propagate-uid-gid": True,
"volumes": [
"/etc/group:/etc/group:ro",
"/etc/passwd:/etc/passwd:ro",
"/opt:/opt:ro",
"/var/lib/buildkite-agent:/var/lib/buildkite-agent",
"/var/lib/gitmirrors:/var/lib/gitmirrors:ro",
"/var/run/docker.sock:/var/run/docker.sock",
],
}
},
}
if not step["command"]:
del step["command"]
return step
def print_project_pipeline(
configs,
project_name,
http_config,
file_config,
git_repository,
monitor_flaky_tests,
use_but,
incompatible_flags,
notify,
):
task_configs = configs.get("tasks", None)
if not task_configs:
raise BuildkiteException("{0} pipeline configuration is empty.".format(project_name))
pipeline_steps = []
# If the repository is hosted on Git-on-Borg, we show a link to the Gerrit review for the current commit.
buildkite_repo = os.getenv("BUILDKITE_REPO")
if is_git_on_borg_repo(buildkite_repo):
show_gerrit_review_link(buildkite_repo, pipeline_steps)
task_configs = filter_tasks_that_should_be_skipped(task_configs, pipeline_steps)
# In Bazel Downstream Project pipelines, git_repository and project_name must be specified.
is_downstream_project = (use_but or incompatible_flags) and git_repository and project_name
buildifier_config = configs.get("buildifier")
# Skip Buildifier when we test downstream projects.
if buildifier_config and not is_downstream_project:
buildifier_env_vars = {}
if isinstance(buildifier_config, str):
# Simple format:
# ---
# buildifier: latest
buildifier_env_vars[BUILDIFIER_VERSION_ENV_VAR] = buildifier_config
else:
# Advanced format:
# ---
# buildifier:
# version: latest
# warnings: all
def set_env_var(config_key, env_var_name):
if config_key in buildifier_config:
buildifier_env_vars[env_var_name] = buildifier_config[config_key]
set_env_var("version", BUILDIFIER_VERSION_ENV_VAR)
set_env_var("warnings", BUILDIFIER_WARNINGS_ENV_VAR)
if not buildifier_env_vars:
raise BuildkiteException(
'Invalid buildifier configuration entry "{}"'.format(buildifier_config)
)
pipeline_steps.append(
create_docker_step(
BUILDIFIER_STEP_NAME,
image=BUILDIFIER_DOCKER_IMAGE,
additional_env_vars=buildifier_env_vars,
)
)
# In Bazel Downstream Project pipelines, we should test the project at the last green commit.
git_commit = None
if is_downstream_project:
last_green_commit_url = bazelci_last_green_commit_url(
git_repository, DOWNSTREAM_PROJECTS[project_name]["pipeline_slug"]
)
git_commit = get_last_green_commit(last_green_commit_url)
config_hashes = set()
for task, task_config in task_configs.items():
# We override the Bazel version in downstream pipelines. This means that two tasks that
# only differ in the value of their explicit "bazel" field will be identical in the
# downstream pipeline, thus leading to duplicate work.
# Consequently, we filter those duplicate tasks here.
if is_downstream_project:
h = hash_task_config(task, task_config)
if h in config_hashes:
continue
config_hashes.add(h)
shards = task_config.get("shards", "1")
try:
shards = int(shards)
except ValueError:
raise BuildkiteException("Task {} has invalid shard value '{}'".format(task, shards))
step = runner_step(
platform=get_platform_for_task(task, task_config),
task=task,
task_name=task_config.get("name"),
project_name=project_name,
http_config=http_config,
file_config=file_config,
git_repository=git_repository,
git_commit=git_commit,
monitor_flaky_tests=monitor_flaky_tests,
use_but=use_but,
incompatible_flags=incompatible_flags,
shards=shards,
)
pipeline_steps.append(step)
pipeline_slug = os.getenv("BUILDKITE_PIPELINE_SLUG")
all_downstream_pipeline_slugs = []
for _, config in DOWNSTREAM_PROJECTS.items():
all_downstream_pipeline_slugs.append(config["pipeline_slug"])
# We don't need to update last green commit in the following cases:
# 1. This job is a GitHub pull request
# 2. This job uses a custom built Bazel binary (in Bazel Downstream Projects pipeline)
# 3. This job doesn't run on master branch (could be a custom build launched manually)
# 4. We don't intend to run the same job downstream with Bazel@HEAD (e.g. google-bazel-presubmit)
# 5. We are testing incompatible flags
# 6. We are running `bazelisk --migrate` in a non-downstream pipeline
if not (
is_pull_request()
or use_but
or os.getenv("BUILDKITE_BRANCH") != "master"
or pipeline_slug not in all_downstream_pipeline_slugs
or incompatible_flags
or use_bazelisk_migrate()
):
# We need to call "Try Update Last Green Commit" even if there are failures,
# since we don't want a failing Buildifier step to block the update of
# the last green commit for this project.
# try_update_last_green_commit() ensures that we don't update the commit
# if any build or test steps fail.
pipeline_steps.append({"wait": None, "continue_on_failure": True})
pipeline_steps.append(
create_step(
label="Try Update Last Green Commit",
commands=[
fetch_bazelcipy_command(),
PLATFORMS[DEFAULT_PLATFORM]["python"]
+ " bazelci.py try_update_last_green_commit",
],
platform=DEFAULT_PLATFORM,
)
)
if "validate_config" in configs:
pipeline_steps += create_config_validation_steps()
if use_bazelisk_migrate() and not is_downstream_project:
# Print results of bazelisk --migrate in project pipelines that explicitly set
# the USE_BAZELISK_MIGRATE env var, but that are not being run as part of a
# downstream pipeline.
number = os.getenv("BUILDKITE_BUILD_NUMBER")
pipeline_steps += get_steps_for_aggregating_migration_results(number, notify)
print_pipeline_steps(pipeline_steps, handle_emergencies=not is_downstream_project)
def show_gerrit_review_link(git_repository, pipeline_steps):
match = re.search(r"https://(.+?)\.googlesource", git_repository)
if not match:
    raise BuildkiteException("Couldn't get host name from %s" % git_repository)
host = match.group(1)
text = "The transformed code used in this pipeline can be found under https://{}-review.googlesource.com/q/{}". \
format(host, os.getenv("BUILDKITE_COMMIT"))
commands = ["buildkite-agent annotate --style=info '{}'".format(text)]
pipeline_steps.append(
create_step(
label=":pipeline: Print information about Gerrit Review Link",
commands=commands,
platform=DEFAULT_PLATFORM,
)
)
def is_git_on_borg_repo(git_repository):
return git_repository and "googlesource.com" in git_repository
def hash_task_config(task_name, task_config):
# Two task configs c1 and c2 have the same hash iff they lead to two functionally identical jobs
# in the downstream pipeline. This function discards the "bazel" field (since it's being
# overridden) and the "name" field (since it has no effect on the actual work).
# Moreover, it adds an explicit "platform" field if that's missing.
cpy = task_config.copy()
cpy.pop("bazel", None)
cpy.pop("name", None)
if "platform" not in cpy:
cpy["platform"] = task_name
m = hashlib.md5()
for key in sorted(cpy):
value = "%s:%s;" % (key, cpy[key])
m.update(value.encode("utf-8"))
return m.digest()
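# Illustrative sketch (hypothetical task configs): these two tasks only differ in their
# "bazel" and "name" fields, so they hash identically and the second one would be
# dropped as a duplicate when generating the downstream pipeline.
def _example_duplicate_task_configs():
    a = {"platform": "ubuntu1804", "bazel": "4.0.0", "name": "A", "test_targets": ["//..."]}
    b = {"platform": "ubuntu1804", "bazel": "last_green", "name": "B", "test_targets": ["//..."]}
    return hash_task_config("task_a", a) == hash_task_config("task_b", b)  # True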
def get_platform_for_task(task, task_config):
# Most pipeline configurations have exactly one task per platform, which makes it
# convenient to use the platform name as task ID. Consequently, we use the
# task ID as platform if there is no explicit "platform" field.
return task_config.get("platform", task)
def create_config_validation_steps():
output = execute_command_and_get_output(
["git", "diff-tree", "--no-commit-id", "--name-only", "-r", os.getenv("BUILDKITE_COMMIT")]
)
config_files = [
l
for l in output.split("\n")
if l.startswith(".bazelci/") and os.path.splitext(l)[1] in CONFIG_FILE_EXTENSIONS
]
return [
create_step(
label=":cop: Validate {}".format(f),
commands=[
fetch_bazelcipy_command(),
"{} bazelci.py project_pipeline --file_config={}".format(
PLATFORMS[DEFAULT_PLATFORM]["python"], f
),
],
platform=DEFAULT_PLATFORM,
)
for f in config_files
]
def print_pipeline_steps(pipeline_steps, handle_emergencies=True):
if handle_emergencies:
emergency_step = create_emergency_announcement_step_if_necessary()
if emergency_step:
pipeline_steps.insert(0, emergency_step)
print(yaml.dump({"steps": pipeline_steps}))
def create_emergency_announcement_step_if_necessary():
style = "error"
message, issue_url, last_good_bazel = None, None, None
try:
emergency_settings = load_remote_yaml_file(EMERGENCY_FILE_URL)
message = emergency_settings.get("message")
issue_url = emergency_settings.get("issue_url")
last_good_bazel = emergency_settings.get("last_good_bazel")
except urllib.error.HTTPError as ex:
message = str(ex)
style = "warning"
if not any([message, issue_url, last_good_bazel]):
return
text = '<span class="h1">:rotating_light: Emergency :rotating_light:</span>\n'
if message:
text += "- {}\n".format(message)
if issue_url:
text += '- Please check this <a href="{}">issue</a> for more details.\n'.format(issue_url)
if last_good_bazel:
text += (
"- Default Bazel version is *{}*, "
"unless the pipeline configuration specifies an explicit version."
).format(last_good_bazel)
return create_step(
label=":rotating_light: Emergency :rotating_light:",
commands=[
'buildkite-agent annotate --append --style={} --context "omg" "{}"'.format(style, text)
],
platform=DEFAULT_PLATFORM,
)
def runner_step(
platform,
task,
task_name=None,
project_name=None,
http_config=None,
file_config=None,
git_repository=None,
git_commit=None,
monitor_flaky_tests=False,
use_but=False,
incompatible_flags=None,
shards=1,
):
command = PLATFORMS[platform]["python"] + " bazelci.py runner --task=" + task
if http_config:
command += " --http_config=" + http_config
if file_config:
command += " --file_config=" + file_config
if git_repository:
command += " --git_repository=" + git_repository
if git_commit:
command += " --git_commit=" + git_commit
if monitor_flaky_tests:
command += " --monitor_flaky_tests"
if use_but:
command += " --use_but"
for flag in incompatible_flags or []:
command += " --incompatible_flag=" + flag
label = create_label(platform, project_name, task_name=task_name)
return create_step(
label=label, commands=[fetch_bazelcipy_command(), command], platform=platform, shards=shards
)
def fetch_bazelcipy_command():
return "curl -sS {0} -o bazelci.py".format(SCRIPT_URL)
def fetch_incompatible_flag_verbose_failures_command():
return "curl -sS {0} -o incompatible_flag_verbose_failures.py".format(
INCOMPATIBLE_FLAG_VERBOSE_FAILURES_URL
)
def fetch_aggregate_incompatible_flags_test_result_command():
return "curl -sS {0} -o aggregate_incompatible_flags_test_result.py".format(
AGGREGATE_INCOMPATIBLE_TEST_RESULT_URL
)
def upload_project_pipeline_step(
project_name, git_repository, http_config, file_config, incompatible_flags
):
pipeline_command = (
'{0} bazelci.py project_pipeline --project_name="{1}" ' + "--git_repository={2}"
).format(PLATFORMS[DEFAULT_PLATFORM]["python"], project_name, git_repository)
if incompatible_flags is None:
pipeline_command += " --use_but"
else:
for flag in incompatible_flags:
pipeline_command += " --incompatible_flag=" + flag
if http_config:
pipeline_command += " --http_config=" + http_config
if file_config:
pipeline_command += " --file_config=" + file_config
pipeline_command += " | buildkite-agent pipeline upload"
return create_step(
label="Setup {0}".format(project_name),
commands=[fetch_bazelcipy_command(), pipeline_command],
platform=DEFAULT_PLATFORM,
)
def create_label(platform, project_name, build_only=False, test_only=False, task_name=None):
if build_only and test_only:
raise BuildkiteException("build_only and test_only cannot be true at the same time")
platform_display_name = PLATFORMS[platform]["emoji-name"]
if build_only:
label = "Build "
elif test_only:
label = "Test "
else:
label = ""
platform_label = (
"{0} on {1}".format(task_name, platform_display_name)
if task_name
else platform_display_name
)
if project_name:
label += "{0} ({1})".format(project_name, platform_label)
else:
label += platform_label
return label
def bazel_build_step(
task,
platform,
project_name,
http_config=None,
file_config=None,
build_only=False,
test_only=False,
):
pipeline_command = PLATFORMS[platform]["python"] + " bazelci.py runner"
if build_only:
pipeline_command += " --build_only --save_but"
if test_only:
pipeline_command += " --test_only"
if http_config:
pipeline_command += " --http_config=" + http_config
if file_config:
pipeline_command += " --file_config=" + file_config
pipeline_command += " --task=" + task
return create_step(
label=create_label(platform, project_name, build_only, test_only),
commands=[fetch_bazelcipy_command(), pipeline_command],
platform=platform,
)
def filter_tasks_that_should_be_skipped(task_configs, pipeline_steps):
skip_tasks = get_skip_tasks()
if not skip_tasks:
return task_configs
actually_skipped = []
skip_tasks = set(skip_tasks)
for task in list(task_configs.keys()):
if task in skip_tasks:
actually_skipped.append(task)
del task_configs[task]
skip_tasks.remove(task)
if not task_configs:
raise BuildkiteException(
"Nothing to do since all tasks in the configuration should be skipped."
)
annotations = []
if actually_skipped:
annotations.append(
("info", "Skipping the following task(s): {}".format(", ".join(actually_skipped)))
)
if skip_tasks:
annotations.append(
(
"warning",
(
"The following tasks should have been skipped, "
"but were not part of the configuration: {}"
).format(", ".join(skip_tasks)),
)
)
if annotations:
print_skip_task_annotations(annotations, pipeline_steps)
return task_configs
def get_skip_tasks():
value = os.getenv(SKIP_TASKS_ENV_VAR, "")
return [v for v in value.split(",") if v]
def print_skip_task_annotations(annotations, pipeline_steps):
commands = [
"buildkite-agent annotate --style={} '{}' --context 'ctx-{}'".format(s, t, hash(t))
for s, t in annotations
]
pipeline_steps.append(
create_step(
label=":pipeline: Print information about skipped tasks",
commands=commands,
platform=DEFAULT_PLATFORM,
)
)
def print_bazel_publish_binaries_pipeline(task_configs, http_config, file_config):
if not task_configs:
raise BuildkiteException("Bazel publish binaries pipeline configuration is empty.")
pipeline_steps = []
task_configs = filter_tasks_that_should_be_skipped(task_configs, pipeline_steps)
platforms = [get_platform_for_task(t, tc) for t, tc in task_configs.items()]
# These are the platforms that the bazel_publish_binaries.yml config is actually building.
configured_platforms = set(filter(should_publish_binaries_for_platform, platforms))
# These are the platforms that we want to build and publish according to this script.
expected_platforms = set(filter(should_publish_binaries_for_platform, PLATFORMS))
if not expected_platforms.issubset(configured_platforms):
raise BuildkiteException(
"Bazel publish binaries pipeline needs to build Bazel for every commit on all publish_binary-enabled platforms."
)
# Build Bazel
for task, task_config in task_configs.items():
pipeline_steps.append(
bazel_build_step(
task,
get_platform_for_task(task, task_config),
"Bazel",
http_config,
file_config,
build_only=True,
)
)
pipeline_steps.append("wait")
# If all builds succeed, publish the Bazel binaries to GCS.
pipeline_steps.append(
create_step(
label="Publish Bazel Binaries",
commands=[
fetch_bazelcipy_command(),
PLATFORMS[DEFAULT_PLATFORM]["python"] + " bazelci.py publish_binaries",
],
platform=DEFAULT_PLATFORM,
)
)
print_pipeline_steps(pipeline_steps)
def should_publish_binaries_for_platform(platform):
if platform not in PLATFORMS:
raise BuildkiteException("Unknown platform '{}'".format(platform))
return PLATFORMS[platform]["publish_binary"]
def print_disabled_projects_info_box_step():
info_text = ["Downstream testing is disabled for the following projects :sadpanda:"]
for project, config in DOWNSTREAM_PROJECTS.items():
disabled_reason = config.get("disabled_reason", None)
if disabled_reason:
info_text.append("* **%s**: %s" % (project, disabled_reason))
if len(info_text) == 1:
return None
return create_step(
label=":sadpanda:",
commands=[
'buildkite-agent annotate --append --style=info "\n' + "\n".join(info_text) + '\n"'
],
platform=DEFAULT_PLATFORM,
)
def print_incompatible_flags_info_box_step(incompatible_flags_map):
info_text = ["Build and test with the following incompatible flags:"]
for flag in incompatible_flags_map:
info_text.append("* **%s**: %s" % (flag, incompatible_flags_map[flag]))
if len(info_text) == 1:
return None
return create_step(
label="Incompatible flags info",
commands=[
'buildkite-agent annotate --append --style=info "\n' + "\n".join(info_text) + '\n"'
],
platform=DEFAULT_PLATFORM,
)
def fetch_incompatible_flags():
"""
Return a list of incompatible flags to be tested in downstream with the current release Bazel
"""
incompatible_flags = {}
# If INCOMPATIBLE_FLAGS environment variable is set, we get incompatible flags from it.
if "INCOMPATIBLE_FLAGS" in os.environ:
for flag in os.environ["INCOMPATIBLE_FLAGS"].split():
# We are not able to get the github link for this flag from INCOMPATIBLE_FLAGS,
# so just assign the url to empty string.
incompatible_flags[flag] = ""
return incompatible_flags
bazel_major_version = get_bazel_major_version()
output = subprocess.check_output(
[
"curl",
"https://api.github.com/search/issues?per_page=100&q=repo:bazelbuild/bazel+label:migration-%s"
% bazel_major_version,
]
).decode("utf-8")
issue_info = json.loads(output)
for issue in issue_info["items"]:
# Every incompatible flag issue title should start with "<incompatible flag name (without --)>:"
name = "--" + issue["title"].split(":")[0]
url = issue["html_url"]
if name.startswith("--incompatible_"):
incompatible_flags[name] = url
else:
eprint(
f"{name} is not recognized as an incompatible flag, please modify the issue title "
f'of {url} to "<incompatible flag name (without --)>:..."'
)
return incompatible_flags
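# Illustrative sketch (hypothetical GitHub issue): an issue titled
# "incompatible_disallow_empty_glob: flip the default value" carrying the matching
# migration label would be recorded above as
#   {"--incompatible_disallow_empty_glob": "<html_url of the issue>"}
# while issues whose titles do not start with an "incompatible_" flag name only produce a warning.
def _example_flag_from_issue_title():
    title = "incompatible_disallow_empty_glob: flip the default value"  # hypothetical title
    return "--" + title.split(":")[0]  # -> "--incompatible_disallow_empty_glob"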
def get_bazel_major_version():
# Get the Bazel major version on CI, e.g. 0.21 from "Build label: 0.21.0\n..."
output = subprocess.check_output(
["bazel", "--nomaster_bazelrc", "--bazelrc=/dev/null", "version"]
).decode("utf-8")
return output.split()[2].rsplit(".", 1)[0]
def print_bazel_downstream_pipeline(
task_configs, http_config, file_config, test_incompatible_flags, test_disabled_projects, notify
):
if not task_configs:
raise BuildkiteException("Bazel downstream pipeline configuration is empty.")
pipeline_steps = []
task_configs = filter_tasks_that_should_be_skipped(task_configs, pipeline_steps)
pipeline_steps = []
info_box_step = print_disabled_projects_info_box_step()
if info_box_step is not None:
pipeline_steps.append(info_box_step)
if not test_incompatible_flags:
for task, task_config in task_configs.items():
pipeline_steps.append(
bazel_build_step(
task,
get_platform_for_task(task, task_config),
"Bazel",
http_config,
file_config,
build_only=True,
)
)
pipeline_steps.append("wait")
incompatible_flags = None
if test_incompatible_flags:
incompatible_flags_map = fetch_incompatible_flags()
if not incompatible_flags_map:
raise BuildkiteException("No incompatible flag issue is found on github for current version of Bazel.")
info_box_step = print_incompatible_flags_info_box_step(incompatible_flags_map)
if info_box_step is not None:
pipeline_steps.append(info_box_step)
incompatible_flags = list(incompatible_flags_map.keys())
for project, config in DOWNSTREAM_PROJECTS.items():
disabled_reason = config.get("disabled_reason", None)
# If test_disabled_projects is true, we add configs for disabled projects.
# If test_disabled_projects is false, we add configs for not disabled projects.
if (test_disabled_projects and disabled_reason) or (
not test_disabled_projects and not disabled_reason
):
pipeline_steps.append(
upload_project_pipeline_step(
project_name=project,
git_repository=config["git_repository"],
http_config=config.get("http_config", None),
file_config=config.get("file_config", None),
incompatible_flags=incompatible_flags,
)
)
if test_incompatible_flags:
current_build_number = os.environ.get("BUILDKITE_BUILD_NUMBER", None)
if not current_build_number:
raise BuildkiteException("Not running inside Buildkite")
if use_bazelisk_migrate():
pipeline_steps += get_steps_for_aggregating_migration_results(
current_build_number, notify
)
else:
pipeline_steps.append({"wait": "~", "continue_on_failure": "true"})
pipeline_steps.append(
create_step(
label="Test failing jobs with incompatible flag separately",
commands=[
fetch_bazelcipy_command(),
fetch_incompatible_flag_verbose_failures_command(),
PLATFORMS[DEFAULT_PLATFORM]["python"]
+ " incompatible_flag_verbose_failures.py --build_number=%s | buildkite-agent pipeline upload"
% current_build_number,
],
platform=DEFAULT_PLATFORM,
)
)
if (
not test_disabled_projects
and not test_incompatible_flags
and os.getenv("BUILDKITE_BRANCH") == "master"
):
# Only update the last green downstream commit in the regular Bazel@HEAD + Downstream pipeline.
pipeline_steps.append("wait")
pipeline_steps.append(
create_step(
label="Try Update Last Green Downstream Commit",
commands=[
fetch_bazelcipy_command(),
PLATFORMS[DEFAULT_PLATFORM]["python"]
+ " bazelci.py try_update_last_green_downstream_commit",
],
platform=DEFAULT_PLATFORM,
)
)
print_pipeline_steps(pipeline_steps)
def get_steps_for_aggregating_migration_results(current_build_number, notify):
parts = [
PLATFORMS[DEFAULT_PLATFORM]["python"],
"aggregate_incompatible_flags_test_result.py",
"--build_number=%s" % current_build_number,
]
if notify:
parts.append("--notify")
return [
{"wait": "~", "continue_on_failure": "true"},
create_step(
label="Aggregate incompatible flags test result",
commands=[
fetch_bazelcipy_command(),
fetch_aggregate_incompatible_flags_test_result_command(),
" ".join(parts),
],
platform=DEFAULT_PLATFORM,
),
]
def bazelci_builds_download_url(platform, git_commit):
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-builds"
return "https://storage.googleapis.com/{}/artifacts/{}/{}/bazel".format(
bucket_name, platform, git_commit
)
def bazelci_builds_nojdk_download_url(platform, git_commit):
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-builds"
return "https://storage.googleapis.com/{}/artifacts/{}/{}/bazel_nojdk".format(
bucket_name, platform, git_commit
)
def bazelci_builds_gs_url(platform, git_commit):
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-builds"
return "gs://{}/artifacts/{}/{}/bazel".format(bucket_name, platform, git_commit)
def bazelci_builds_nojdk_gs_url(platform, git_commit):
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-builds"
return "gs://{}/artifacts/{}/{}/bazel_nojdk".format(bucket_name, platform, git_commit)
def bazelci_latest_build_metadata_url():
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-builds"
return "gs://{}/metadata/latest.json".format(bucket_name)
def bazelci_builds_metadata_url(git_commit):
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-builds"
return "gs://{}/metadata/{}.json".format(bucket_name, git_commit)
def bazelci_last_green_commit_url(git_repository, pipeline_slug):
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-untrusted-builds"
return "gs://{}/last_green_commit/{}/{}".format(
bucket_name, git_repository[len("https://") :], pipeline_slug
)
def bazelci_last_green_downstream_commit_url():
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-untrusted-builds"
return "gs://{}/last_green_commit/downstream_pipeline".format(bucket_name)
def get_last_green_commit(last_green_commit_url):
try:
return (
subprocess.check_output(
[gsutil_command(), "cat", last_green_commit_url], env=os.environ
)
.decode("utf-8")
.strip()
)
except subprocess.CalledProcessError:
return None
def try_update_last_green_commit():
org_slug = os.getenv("BUILDKITE_ORGANIZATION_SLUG")
pipeline_slug = os.getenv("BUILDKITE_PIPELINE_SLUG")
build_number = os.getenv("BUILDKITE_BUILD_NUMBER")
current_job_id = os.getenv("BUILDKITE_JOB_ID")
client = BuildkiteClient(org=org_slug, pipeline=pipeline_slug)
build_info = client.get_build_info(build_number)
# Find any failing steps other than Buildifier and "try update last green".
def has_failed(job):
state = job.get("state")
# Ignore steps that don't have a state (like "wait").
return (
state is not None
and state != "passed"
and job["id"] != current_job_id
and job["name"] != BUILDIFIER_STEP_NAME
)
failing_jobs = [j["name"] for j in build_info["jobs"] if has_failed(j)]
if failing_jobs:
raise BuildkiteException(
"Cannot update last green commit due to {} failing step(s): {}".format(
len(failing_jobs), ", ".join(failing_jobs)
)
)
git_repository = os.getenv("BUILDKITE_REPO")
last_green_commit_url = bazelci_last_green_commit_url(git_repository, pipeline_slug)
update_last_green_commit_if_newer(last_green_commit_url)
def update_last_green_commit_if_newer(last_green_commit_url):
last_green_commit = get_last_green_commit(last_green_commit_url)
current_commit = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode("utf-8").strip()
if last_green_commit:
success = False
try:
execute_command(["git", "fetch", "-v", "origin", last_green_commit])
success = True
except subprocess.CalledProcessError:
# If there was an error fetching the commit, it typically means that the
# commit no longer exists (e.g. due to a force push). To recover from
# that, assume that the current commit is the newest commit.
result = [current_commit]
finally:
if success:
result = (
subprocess.check_output(
["git", "rev-list", "%s..%s" % (last_green_commit, current_commit)]
)
.decode("utf-8")
.strip()
)
else:
result = None
# If current_commit is newer than last_green_commit, `git rev-list A..B` will output a bunch of
# commits, otherwise the output should be empty.
if not last_green_commit or result:
execute_command(
[
"echo %s | %s -h 'Cache-Control: no-store' cp - %s"
% (current_commit, gsutil_command(), last_green_commit_url)
],
shell=True,
)
else:
eprint(
"Updating abandoned: last green commit (%s) is not older than current commit (%s)."
% (last_green_commit, current_commit)
)
def try_update_last_green_downstream_commit():
last_green_commit_url = bazelci_last_green_downstream_commit_url()
update_last_green_commit_if_newer(last_green_commit_url)
def latest_generation_and_build_number():
generation = None
output = None
for attempt in range(5):
output = subprocess.check_output(
[gsutil_command(), "stat", bazelci_latest_build_metadata_url()], env=os.environ
)
match = re.search("Generation:[ ]*([0-9]+)", output.decode("utf-8"))
if not match:
raise BuildkiteException("Couldn't parse generation. gsutil output format changed?")
generation = match.group(1)
match = re.search(r"Hash \(md5\):[ ]*([^\s]+)", output.decode("utf-8"))
if not match:
raise BuildkiteException("Couldn't parse md5 hash. gsutil output format changed?")
expected_md5hash = base64.b64decode(match.group(1))
output = subprocess.check_output(
[gsutil_command(), "cat", bazelci_latest_build_metadata_url()], env=os.environ
)
hasher = hashlib.md5()
hasher.update(output)
actual_md5hash = hasher.digest()
if expected_md5hash == actual_md5hash:
break
info = json.loads(output.decode("utf-8"))
return generation, info["build_number"]
def sha256_hexdigest(filename):
sha256 = hashlib.sha256()
with open(filename, "rb") as f:
for block in iter(lambda: f.read(65536), b""):
sha256.update(block)
return sha256.hexdigest()
def upload_bazel_binaries():
"""
Uploads all Bazel binaries to a deterministic URL based on the current Git commit.
Returns maps of platform names to sha256 hashes of the corresponding bazel and bazel_nojdk binaries.
"""
bazel_hashes = {}
bazel_nojdk_hashes = {}
for platform_name, platform in PLATFORMS.items():
if not should_publish_binaries_for_platform(platform_name):
continue
tmpdir = tempfile.mkdtemp()
try:
bazel_binary_path = download_bazel_binary(tmpdir, platform_name)
# One platform that we build on can generate binaries for multiple platforms, e.g.
# the centos7 platform generates binaries for the "centos7" platform, but also
# for the generic "linux" platform.
for target_platform_name in platform["publish_binary"]:
execute_command(
[
gsutil_command(),
"cp",
bazel_binary_path,
bazelci_builds_gs_url(target_platform_name, os.environ["BUILDKITE_COMMIT"]),
]
)
bazel_hashes[target_platform_name] = sha256_hexdigest(bazel_binary_path)
# Also publish bazel_nojdk binaries.
bazel_nojdk_binary_path = download_bazel_nojdk_binary(tmpdir, platform_name)
for target_platform_name in platform["publish_binary"]:
execute_command(
[
gsutil_command(),
"cp",
bazel_nojdk_binary_path,
bazelci_builds_nojdk_gs_url(target_platform_name, os.environ["BUILDKITE_COMMIT"]),
]
)
bazel_nojdk_hashes[target_platform_name] = sha256_hexdigest(bazel_nojdk_binary_path)
finally:
shutil.rmtree(tmpdir)
return bazel_hashes, bazel_nojdk_hashes
def try_publish_binaries(bazel_hashes, bazel_nojdk_hashes, build_number, expected_generation):
"""
Uploads the info.json file that contains information about the latest Bazel commit that was
successfully built on CI.
"""
now = datetime.datetime.now()
git_commit = os.environ["BUILDKITE_COMMIT"]
info = {
"build_number": build_number,
"build_time": now.strftime("%d-%m-%Y %H:%M"),
"git_commit": git_commit,
"platforms": {},
}
for platform, sha256 in bazel_hashes.items():
info["platforms"][platform] = {
"url": bazelci_builds_download_url(platform, git_commit),
"sha256": sha256,
"nojdk_url": bazelci_builds_nojdk_download_url(platform, git_commit),
"nojdk_sha256": bazel_nojdk_hashes[platform],
}
tmpdir = tempfile.mkdtemp()
try:
info_file = os.path.join(tmpdir, "info.json")
with open(info_file, mode="w", encoding="utf-8") as fp:
json.dump(info, fp, indent=2, sort_keys=True)
try:
execute_command(
[
gsutil_command(),
"-h",
"x-goog-if-generation-match:" + expected_generation,
"-h",
"Content-Type:application/json",
"cp",
info_file,
bazelci_latest_build_metadata_url(),
]
)
except subprocess.CalledProcessError:
raise BinaryUploadRaceException()
execute_command(
[
gsutil_command(),
"cp",
bazelci_latest_build_metadata_url(),
bazelci_builds_metadata_url(git_commit),
]
)
finally:
shutil.rmtree(tmpdir)
def publish_binaries():
"""
Publish Bazel binaries to GCS.
"""
current_build_number = os.environ.get("BUILDKITE_BUILD_NUMBER", None)
if not current_build_number:
raise BuildkiteException("Not running inside Buildkite")
current_build_number = int(current_build_number)
# Upload the Bazel binaries for this commit.
bazel_hashes, bazel_nojdk_hashes = upload_bazel_binaries()
# Try to update the info.json with data about our build. This will fail (expectedly) if we're
# not the latest build.
for _ in range(5):
latest_generation, latest_build_number = latest_generation_and_build_number()
if current_build_number <= latest_build_number:
eprint(
(
"Current build '{0}' is not newer than latest published '{1}'. "
+ "Skipping publishing of binaries."
).format(current_build_number, latest_build_number)
)
break
try:
try_publish_binaries(bazel_hashes, bazel_nojdk_hashes, current_build_number, latest_generation)
except BinaryUploadRaceException:
# Retry.
continue
eprint(
"Successfully updated '{0}' to binaries from build {1}.".format(
bazelci_latest_build_metadata_url(), current_build_number
)
)
break
else:
raise BuildkiteException("Could not publish binaries, ran out of attempts.")
# This is so that multiline python strings are represented as YAML
# block strings.
def str_presenter(dumper, data):
if len(data.splitlines()) > 1: # check for multiline string
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")
return dumper.represent_scalar("tag:yaml.org,2002:str", data)
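# Illustrative sketch: with this representer registered (as done below in main()), a
# multiline command such as "set -e\nbazel build //..." is emitted as a YAML block scalar
#   command: |-
#     set -e
#     bazel build //...
# instead of a quoted single-line string with embedded "\n" escapes.
def _example_yaml_block_string():
    yaml.add_representer(str, str_presenter)
    return yaml.dump({"command": "set -e\nbazel build //..."})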
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
yaml.add_representer(str, str_presenter)
parser = argparse.ArgumentParser(description="Bazel Continuous Integration Script")
parser.add_argument("--script", type=str)
subparsers = parser.add_subparsers(dest="subparsers_name")
bazel_publish_binaries_pipeline = subparsers.add_parser("bazel_publish_binaries_pipeline")
bazel_publish_binaries_pipeline.add_argument("--file_config", type=str)
bazel_publish_binaries_pipeline.add_argument("--http_config", type=str)
bazel_publish_binaries_pipeline.add_argument("--git_repository", type=str)
bazel_downstream_pipeline = subparsers.add_parser("bazel_downstream_pipeline")
bazel_downstream_pipeline.add_argument("--file_config", type=str)
bazel_downstream_pipeline.add_argument("--http_config", type=str)
bazel_downstream_pipeline.add_argument("--git_repository", type=str)
bazel_downstream_pipeline.add_argument(
"--test_incompatible_flags", type=bool, nargs="?", const=True
)
bazel_downstream_pipeline.add_argument(
"--test_disabled_projects", type=bool, nargs="?", const=True
)
bazel_downstream_pipeline.add_argument("--notify", type=bool, nargs="?", const=True)
project_pipeline = subparsers.add_parser("project_pipeline")
project_pipeline.add_argument("--project_name", type=str)
project_pipeline.add_argument("--file_config", type=str)
project_pipeline.add_argument("--http_config", type=str)
project_pipeline.add_argument("--git_repository", type=str)
project_pipeline.add_argument("--monitor_flaky_tests", type=bool, nargs="?", const=True)
project_pipeline.add_argument("--use_but", type=bool, nargs="?", const=True)
project_pipeline.add_argument("--incompatible_flag", type=str, action="append")
project_pipeline.add_argument("--notify", type=bool, nargs="?", const=True)
runner = subparsers.add_parser("runner")
runner.add_argument("--task", action="store", type=str, default="")
runner.add_argument("--file_config", type=str)
runner.add_argument("--http_config", type=str)
runner.add_argument("--git_repository", type=str)
runner.add_argument(
"--git_commit", type=str, help="Reset the git repository to this commit after cloning it"
)
runner.add_argument(
"--git_repo_location",
type=str,
help="Use an existing repository instead of cloning from github",
)
runner.add_argument(
"--use_bazel_at_commit", type=str, help="Use Bazel binary built at a specific commit"
)
runner.add_argument("--use_but", type=bool, nargs="?", const=True)
runner.add_argument("--save_but", type=bool, nargs="?", const=True)
runner.add_argument("--needs_clean", type=bool, nargs="?", const=True)
runner.add_argument("--build_only", type=bool, nargs="?", const=True)
runner.add_argument("--test_only", type=bool, nargs="?", const=True)
runner.add_argument("--monitor_flaky_tests", type=bool, nargs="?", const=True)
runner.add_argument("--incompatible_flag", type=str, action="append")
subparsers.add_parser("publish_binaries")
subparsers.add_parser("try_update_last_green_commit")
subparsers.add_parser("try_update_last_green_downstream_commit")
args = parser.parse_args(argv)
if args.script:
global SCRIPT_URL
SCRIPT_URL = args.script
try:
if args.subparsers_name == "bazel_publish_binaries_pipeline":
configs = fetch_configs(args.http_config, args.file_config)
print_bazel_publish_binaries_pipeline(
task_configs=configs.get("tasks", None),
http_config=args.http_config,
file_config=args.file_config,
)
elif args.subparsers_name == "bazel_downstream_pipeline":
configs = fetch_configs(args.http_config, args.file_config)
print_bazel_downstream_pipeline(
task_configs=configs.get("tasks", None),
http_config=args.http_config,
file_config=args.file_config,
test_incompatible_flags=args.test_incompatible_flags,
test_disabled_projects=args.test_disabled_projects,
notify=args.notify,
)
elif args.subparsers_name == "project_pipeline":
configs = fetch_configs(args.http_config, args.file_config)
print_project_pipeline(
configs=configs,
project_name=args.project_name,
http_config=args.http_config,
file_config=args.file_config,
git_repository=args.git_repository,
monitor_flaky_tests=args.monitor_flaky_tests,
use_but=args.use_but,
incompatible_flags=args.incompatible_flag,
notify=args.notify,
)
elif args.subparsers_name == "runner":
configs = fetch_configs(args.http_config, args.file_config)
tasks = configs.get("tasks", {})
task_config = tasks.get(args.task)
if not task_config:
raise BuildkiteException(
"No such task '{}' in configuration. Available: {}".format(
args.task, ", ".join(tasks)
)
)
platform = get_platform_for_task(args.task, task_config)
execute_commands(
task_config=task_config,
platform=platform,
git_repository=args.git_repository,
git_commit=args.git_commit,
git_repo_location=args.git_repo_location,
use_bazel_at_commit=args.use_bazel_at_commit,
use_but=args.use_but,
save_but=args.save_but,
needs_clean=args.needs_clean,
build_only=args.build_only,
test_only=args.test_only,
monitor_flaky_tests=args.monitor_flaky_tests,
incompatible_flags=args.incompatible_flag,
bazel_version=task_config.get("bazel") or configs.get("bazel"),
)
elif args.subparsers_name == "publish_binaries":
publish_binaries()
elif args.subparsers_name == "try_update_last_green_commit":
# Update the last green commit of a project pipeline
try_update_last_green_commit()
elif args.subparsers_name == "try_update_last_green_downstream_commit":
# Update the last green commit of the downstream pipeline
try_update_last_green_downstream_commit()
else:
parser.print_help()
return 2
except BuildkiteException as e:
eprint(str(e))
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
|
web.py
|
from flask import (
Flask,
g,
redirect,
render_template,
request,
session,
url_for
)
import asyncio
from threading import Thread
import json
import discord
from discord.ext import commands
class User:
def __init__(self, id, username, password):
self.id = id
self.username = username
self.password = password
def __repr__(self):
return f'<User: {self.username}>'
users = []
app = Flask('')
app.secret_key = 'somesecretkeythatonlyishouldknow'
@app.errorhandler(404)
def page_not_found(e):
return render_template('erro404.html'), 404
@app.route('/', methods=['GET', 'POST'])
def login():
return render_template('index.html')
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
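# Usage sketch (hypothetical bot object and token): start the keep-alive web server
# before launching the Discord bot so the hosting platform keeps the process awake.
#   keep_alive()
#   bot = commands.Bot(command_prefix='!')
#   bot.run(TOKEN)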
|
dataloader_webcam.py
|
import os
import torch
from torch.autograd import Variable
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image, ImageDraw
from SPPE.src.utils.img import load_image, cropBox, im_to_torch
from opt import opt
from yolo.preprocess import prep_image, prep_frame, inp_to_image
from pPose_nms import pose_nms, write_json
from SPPE.src.utils.eval import getPrediction
from yolo.util import write_results, dynamic_write_results
from yolo.darknet import Darknet
from tqdm import tqdm
import cv2
import json
import numpy as np
import sys
import time
import torch.multiprocessing as mp
from multiprocessing import Process
from multiprocessing import Queue as pQueue
from threading import Thread
from queue import Queue, LifoQueue
if opt.vis_fast:
from fn import vis_frame_fast as vis_frame
else:
from fn import vis_frame
class WebcamLoader:
def __init__(self, webcam, batchSize=1, queueSize=256):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.stream = cv2.VideoCapture(int(webcam))
assert self.stream.isOpened(), 'Cannot capture source'
self.stopped = False
# initialize the queue used to store frames read from
# the video file
self.batchSize = batchSize
self.Q = LifoQueue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the file video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely
i = 0
while True:
# otherwise, ensure the queue has room in it
if not self.Q.full():
img = []
orig_img = []
im_name = []
im_dim_list = []
for k in range(self.batchSize):
(grabbed, frame) = self.stream.read()
# if the `grabbed` boolean is `False`, then we have
# reached the end of the video file
if not grabbed:
self.stop()
return
inp_dim = int(opt.inp_dim)
img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
img.append(img_k)
orig_img.append(orig_img_k)
im_name.append(str(i) + '.jpg')
im_dim_list.append(im_dim_list_k)
with torch.no_grad():
# Human Detection
img = torch.cat(img)
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
self.Q.put((img, orig_img, im_name, im_dim_list))
i = i + 1
else:
with self.Q.mutex:
self.Q.queue.clear()
def videoinfo(self):
# indicate the video info
fourcc = int(self.stream.get(cv2.CAP_PROP_FOURCC))
fps = self.stream.get(cv2.CAP_PROP_FPS)
frameSize = (int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
return fourcc, fps, frameSize
def getitem(self):
# return next frame in the queue
return self.Q.get()
def len(self):
# return queue size
return self.Q.qsize()
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
class DetectionLoader:
def __init__(self, dataloder, batchSize=1, queueSize=1024):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
self.det_model.load_weights('models/yolo/yolov3-spp.weights')
self.det_model.net_info['height'] = opt.inp_dim
self.det_inp_dim = int(self.det_model.net_info['height'])
assert self.det_inp_dim % 32 == 0
assert self.det_inp_dim > 32
self.det_model.cuda()
self.det_model.eval()
self.stopped = False
self.dataloder = dataloder
self.batchSize = batchSize
# initialize the queue used to store frames read from
# the video file
self.Q = LifoQueue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the file video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping the whole dataset
while True:
img, orig_img, im_name, im_dim_list = self.dataloder.getitem()
with self.dataloder.Q.mutex:
self.dataloder.Q.queue.clear()
with torch.no_grad():
# Human Detection
img = img.cuda()
prediction = self.det_model(img, CUDA=True)
# NMS process
dets = dynamic_write_results(prediction, opt.confidence,
opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
if isinstance(dets, int) or dets.shape[0] == 0:
for k in range(len(orig_img)):
if self.Q.full():
time.sleep(2)
self.Q.put((orig_img[k], im_name[k], None, None, None, None, None))
continue
dets = dets.cpu()
im_dim_list = torch.index_select(im_dim_list, 0, dets[:, 0].long())
scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)
# coordinate transfer
dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2
dets[:, 1:5] /= scaling_factor
for j in range(dets.shape[0]):
dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
boxes = dets[:, 1:5]
scores = dets[:, 5:6]
for k in range(len(orig_img)):
boxes_k = boxes[dets[:, 0] == k]
if isinstance(boxes_k, int) or boxes_k.shape[0] == 0:
if self.Q.full():
time.sleep(2)
self.Q.put((orig_img[k], im_name[k], None, None, None, None, None))
continue
inps = torch.zeros(boxes_k.size(0), 3, opt.inputResH, opt.inputResW)
pt1 = torch.zeros(boxes_k.size(0), 2)
pt2 = torch.zeros(boxes_k.size(0), 2)
if self.Q.full():
time.sleep(2)
self.Q.put((orig_img[k], im_name[k], boxes_k, scores[dets[:, 0] == k], inps, pt1, pt2))
def read(self):
# return next frame in the queue
return self.Q.get()
def len(self):
# return queue len
return self.Q.qsize()
class DetectionProcessor:
def __init__(self, detectionLoader, queueSize=1024):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.detectionLoader = detectionLoader
self.stopped = False
# initialize the queue used to store data
self.Q = LifoQueue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the file video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping the whole dataset
while True:
with torch.no_grad():
(orig_img, im_name, boxes, scores, inps, pt1, pt2) = self.detectionLoader.read()
with self.detectionLoader.Q.mutex:
self.detectionLoader.Q.queue.clear()
if boxes is None or boxes.nelement() == 0:
while self.Q.full():
time.sleep(0.2)
self.Q.put((None, orig_img, im_name, boxes, scores, None, None))
continue
inp = im_to_torch(cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB))
inps, pt1, pt2 = crop_from_dets(inp, boxes, inps, pt1, pt2)
while self.Q.full():
time.sleep(0.2)
self.Q.put((inps, orig_img, im_name, boxes, scores, pt1, pt2))
def read(self):
# return next frame in the queue
return self.Q.get()
def len(self):
# return queue len
return self.Q.qsize()
class WebcamDetectionLoader:
def __init__(self, webcam=0, batchSize=1, queueSize=256):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
self.det_model.load_weights('models/yolo/yolov3-spp.weights')
self.det_model.net_info['height'] = opt.inp_dim
self.det_inp_dim = int(self.det_model.net_info['height'])
assert self.det_inp_dim % 32 == 0
assert self.det_inp_dim > 32
self.det_model.cuda()
self.det_model.eval()
self.stream = cv2.VideoCapture(int(webcam))
assert self.stream.isOpened(), 'Cannot open webcam'
self.stopped = False
self.batchSize = batchSize
# initialize the queue used to store frames read from
# the video file
self.Q = LifoQueue(maxsize=queueSize)
def len(self):
return self.Q.qsize()
def start(self):
# start a thread to read frames from the file video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping
while True:
img = []
inp = []
orig_img = []
im_name = []
im_dim_list = []
for k in range(self.batchSize):
(grabbed, frame) = self.stream.read()
if not grabbed:
continue
# process and add the frame to the queue
inp_dim = int(opt.inp_dim)
img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
inp_k = im_to_torch(orig_img_k)
img.append(img_k)
inp.append(inp_k)
orig_img.append(orig_img_k)
im_dim_list.append(im_dim_list_k)
with torch.no_grad():
ht = inp[0].size(1)
wd = inp[0].size(2)
# Human Detection
img = Variable(torch.cat(img)).cuda()
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
im_dim_list = im_dim_list.cuda()
prediction = self.det_model(img, CUDA=True)
# NMS process
dets = dynamic_write_results(prediction, opt.confidence,
opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
if isinstance(dets, int) or dets.shape[0] == 0:
for k in range(len(inp)):
if self.Q.full():
with self.Q.mutex:
self.Q.queue.clear()
self.Q.put((inp[k], orig_img[k], None, None))
continue
im_dim_list = torch.index_select(im_dim_list, 0, dets[:, 0].long())
scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)
# coordinate transfer
dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2
dets[:, 1:5] /= scaling_factor
for j in range(dets.shape[0]):
dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
boxes = dets[:, 1:5].cpu()
scores = dets[:, 5:6].cpu()
for k in range(len(inp)):
if self.Q.full():
with self.Q.mutex:
self.Q.queue.clear()
self.Q.put((inp[k], orig_img[k], boxes[dets[:, 0] == k], scores[dets[:, 0] == k]))
def videoinfo(self):
# indicate the video info
fourcc = int(self.stream.get(cv2.CAP_PROP_FOURCC))
fps = self.stream.get(cv2.CAP_PROP_FPS)
frameSize = (int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
return fourcc, fps, frameSize
def read(self):
# return next frame in the queue
return self.Q.get()
def more(self):
# return True if there are still frames in the queue
return self.Q.qsize() > 0
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
class DataWriter:
def __init__(self, save_video=False,
savepath='examples/res/1.avi', fourcc=cv2.VideoWriter_fourcc(*'XVID'), fps=25, frameSize=(640, 480),
queueSize=1024):
if save_video:
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.stream = cv2.VideoWriter(savepath, fourcc, fps, frameSize)
assert self.stream.isOpened(), 'Cannot open video for writing'
self.save_video = save_video
self.stopped = False
self.final_result = []
# initialize the queue used to store frames read from
# the video file
self.Q = Queue(maxsize=queueSize)
if opt.save_img:
if not os.path.exists(opt.outputpath + '/vis'):
os.mkdir(opt.outputpath + '/vis')
def start(self):
# start a thread to read frames from the file video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely
while True:
# if the thread indicator variable is set, stop the
# thread
if self.stopped:
if self.save_video:
self.stream.release()
return
# otherwise, ensure the queue is not empty
if not self.Q.empty():
(boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = self.Q.get()
orig_img = np.array(orig_img, dtype=np.uint8)
if boxes is None:
if opt.save_img or opt.save_video or opt.vis:
img = orig_img
if opt.vis:
cv2.imshow("AlphaPose Demo", img)
cv2.waitKey(30)
if opt.save_img:
cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
if opt.save_video:
self.stream.write(img)
else:
# location prediction (n, kp, 2) | score prediction (n, kp, 1)
preds_hm, preds_img, preds_scores = getPrediction(
hm_data, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
result = pose_nms(boxes, scores, preds_img, preds_scores)
result = {
'imgname': im_name,
'result': result
}
self.final_result.append(result)
if opt.save_img or opt.save_video or opt.vis:
img = vis_frame(orig_img, result)
if opt.vis:
cv2.imshow("AlphaPose Demo", img)
cv2.waitKey(30)
if opt.save_img:
cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
if opt.save_video:
self.stream.write(img)
else:
time.sleep(0.1)
def running(self):
# indicate that the thread is still running
time.sleep(0.2)
return not self.Q.empty()
def save(self, boxes, scores, hm_data, pt1, pt2, orig_img, im_name):
# save next frame in the queue
self.Q.put((boxes, scores, hm_data, pt1, pt2, orig_img, im_name))
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
time.sleep(0.2)
def results(self):
# return final result
return self.final_result
def len(self):
# return queue len
return self.Q.qsize()
class Mscoco(data.Dataset):
def __init__(self, train=True, sigma=1,
scale_factor=(0.2, 0.3), rot_factor=40, label_type='Gaussian'):
self.img_folder = '../data/coco/images' # root image folders
self.is_train = train # training set or test set
self.inputResH = opt.inputResH
self.inputResW = opt.inputResW
self.outputResH = opt.outputResH
self.outputResW = opt.outputResW
self.sigma = sigma
self.scale_factor = scale_factor
self.rot_factor = rot_factor
self.label_type = label_type
self.nJoints_coco = 17
self.nJoints_mpii = 16
self.nJoints = 33
self.accIdxs = (1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16, 17)
self.flipRef = ((2, 3), (4, 5), (6, 7),
(8, 9), (10, 11), (12, 13),
(14, 15), (16, 17))
def __getitem__(self, index):
pass
def __len__(self):
pass
def crop_from_dets(img, boxes, inps, pt1, pt2):
'''
    Crop detected humans from the original image according to the detection results
'''
imght = img.size(1)
imgwidth = img.size(2)
tmp_img = img
tmp_img[0].add_(-0.406)
tmp_img[1].add_(-0.457)
tmp_img[2].add_(-0.480)
for i, box in enumerate(boxes):
upLeft = torch.Tensor((float(box[0]), float(box[1])))
bottomRight = torch.Tensor((float(box[2]), float(box[3])))
ht = bottomRight[1] - upLeft[1]
width = bottomRight[0] - upLeft[0]
if width > 100:
scaleRate = 0.2
else:
scaleRate = 0.3
upLeft[0] = max(0, upLeft[0] - width * scaleRate / 2)
upLeft[1] = max(0, upLeft[1] - ht * scaleRate / 2)
bottomRight[0] = max(
min(imgwidth - 1, bottomRight[0] + width * scaleRate / 2), upLeft[0] + 5)
bottomRight[1] = max(
min(imght - 1, bottomRight[1] + ht * scaleRate / 2), upLeft[1] + 5)
inps[i] = cropBox(tmp_img.clone(), upLeft, bottomRight, opt.inputResH, opt.inputResW)
pt1[i] = upLeft
pt2[i] = bottomRight
return inps, pt1, pt2
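# Note on the returned values: `inps` holds the cropped person patches with shape
# (nBoxes, 3, opt.inputResH, opt.inputResW), while `pt1` / `pt2` are (nBoxes, 2)
# tensors with the (slightly enlarged) upper-left and bottom-right crop corners
# in original-image coordinates, as filled in by the loop above.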
|
data_getter.py
|
from collections import Counter
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from os import mkdir, path
from sys import platform as sys_platform
from threading import Thread
from time import sleep
from typing import Dict, List
import jieba
import jieba.posseg as pseg
from httpx import HTTPError
from JianshuResearchTools.article import GetArticleText, GetArticleWordage
from JianshuResearchTools.convert import (ArticleSlugToArticleUrl,
UserUrlToUserSlug)
from JianshuResearchTools.user import (GetUserAllArticlesInfo,
GetUserAllBasicData,
GetUserArticlesCount)
from pandas import DataFrame
from wordcloud import WordCloud
from yaml import dump as yaml_dump
from config_manager import Config
from db_config import User
from concurrent.futures import TimeoutError
from exceptions import (GetUserArticleDataException, GetUserBasicDataException,
GetUserWordCloudException, QueueEmptyException)
from log_manager import AddRunLog
from queue_manager import GetOneToProcess, ProcessFinished, SetUserStatusFailed
jieba.setLogLevel(jieba.logging.ERROR)  # suppress jieba's log output
if not Config()["perf/enable_jieba_parallel"]:
AddRunLog(2, "由于配置文件设置,多进程分词已禁用")
elif sys_platform == "win32":
AddRunLog(2, "由于当前系统不支持,多进程分词已禁用")
else:
AddRunLog(3, "已开启多进程分词")
jieba.enable_parallel(2)
if Config()["word_split/enable_stopwords"]:
with open("wordcloud_assets/stopwords.txt", "r", encoding="utf-8") as f:
        STOPWORDS = [x.replace("\n", "") for x in f.readlines()]  # preload the stopwords list
AddRunLog(4, "加载停用词成功")
else:
AddRunLog(2, "由于配置文件设置,停用词功能已禁用")
if Config()["word_split/enable_hotwords"]:
    jieba.load_userdict("wordcloud_assets/hotwords.txt")  # add trending words to the dictionary
AddRunLog(4, "加载热点词成功")
else:
AddRunLog(2, "由于配置文件设置,热点词功能已禁用")
if not path.exists("user_data"):
mkdir("user_data")
def GetUserArticleData(user_url: str) -> DataFrame:
start_time = datetime(2021, 1, 1, 0, 0, 0)
end_time = datetime(2021, 12, 31, 23, 59, 59)
fail_times = 0
while fail_times < 3:
result = DataFrame()
try:
            for item in GetUserAllArticlesInfo(user_url, count=50):  # fetch 50 per request for better performance
item_release_time = item["release_time"].replace(tzinfo=None)
                if item_release_time > end_time:  # article published after 2021
                    pass  # articles come in reverse chronological order, so nothing to do here
                elif item_release_time < start_time:  # article published before 2021
                    if not item["is_top"]:
                        break  # a non-pinned article older than 2021 means no more 2021 articles follow
                else:  # article published during 2021
try:
item["wordage"] = GetArticleWordage(ArticleSlugToArticleUrl(item["aslug"]), disable_check=True)
                    except IndexError as e:  # rarely, an article in an abnormal state raises here; skip that article
AddRunLog(2, f"获取 {user_url} 的文章:{ArticleSlugToArticleUrl(item['aslug'])} 信息时发生错误:{e},已跳过该文章")
continue
else:
                        result = result.append(item, ignore_index=True, sort=False)  # append the new article to the DataFrame
except HTTPError as e:
fail_times += 1
AddRunLog(2, f"获取 {user_url} 的文章信息时发生错误:{e},这是第 {fail_times} 次出错,10 秒后重试")
sleep(10)
continue
else:
if len(result) == 0:
raise GetUserArticleDataException("用户没有在 2021 年发布文章")
return result
raise GetUserBasicDataException("获取文章信息时连续三次失败")
def GetUserBasicData(user_url: str) -> Dict:
    # work around data errors from JianshuResearchTools (JRT)
fail_times = 0
while fail_times < 3:
result = {}
try:
data = GetUserAllBasicData(user_url)
except HTTPError as e:
fail_times += 1
AddRunLog(2, f"获取 {user_url} 的基础信息时发生错误:{e},这是第 {fail_times} 次出错,10 秒后重试")
sleep(10)
continue
else:
break
    else:  # failed three times
raise GetUserBasicDataException("获取基础信息时连续三次失败")
result["id"] = data["articles_count"]["id"]
result["slug"] = data["articles_count"]["slug"]
result["url"] = data["url"]
result["name"] = data["name"]
result["gender"] = {0: "未知", 1: "男", 2: "女", 3: "未知"}[data["gender"]]
result["avatar_url"] = data["articles_count"]["avatar"]
result["background_image_url"] = data["articles_count"]["background_image"]
result["FP_count"] = round(data["FP_count"], 2)
result["FTN_count"] = round(data["FTN_count"], 2)
result["FP / FTN"] = round(result["FP_count"] / result["FTN_count"], 2)
result["assets_count"] = round(data["assets_count"], 2)
result["followers_count"] = data["followers_count"]
result["fans_count"] = data["fans_count"]
result["likes_count"] = data["likes_count"]
result["wordage"] = data["wordage"]
result["articles_count"] = GetUserArticlesCount(user_url, disable_check=True)
result["introduction_text"] = data["introduction_text"]
result["badges_list"] = data["badges_list"]
result["next_anniversary_day"] = data["next_anniversary_day"]
result["vip_type"] = data["vip_info"]["vip_type"]
result["vip_expire_time"] = data["vip_info"]["expire_date"]
return result
def GetUserWordcloud(articles_list: List[str], user_slug: str) -> WordCloud:
allow_word_types = ("Ag", "a", "ad", "an", "dg", "g",
"i", "j", "l", "Ng", "n", "nr",
"ns", "nt", "nz", "tg", "vg", "v",
"vd", "vn", "un")
words_count: Counter = Counter()
for article_url in articles_list:
fail_times = 0
while fail_times < 3:
try:
cutted_text = pseg.cut(GetArticleText(article_url, disable_check=True))
except HTTPError as e:
fail_times += 1
AddRunLog(2, f"获取 {user_slug} 的文章内容时发生错误:{e},这是第 {fail_times} 次出错,10 秒后重试")
sleep(10)
continue
else:
break
        else:  # failed three times
raise GetUserWordCloudException("获取文章内容时连续三次失败")
        # keep only multi-character words that are not stopwords and whose part of speech is in the allowed set
cutted_text = (x.word for x in cutted_text if len(x.word) > 1
and x.flag in allow_word_types and x.word not in STOPWORDS)
words_count += Counter(cutted_text)
wordcloud = WordCloud(font_path="wordcloud_assets/font.otf", width=1280, height=720,
background_color="white", max_words=100)
    if words_count.most_common(1)[0][1] <= 10:  # the most frequent word is not frequent enough to build a word cloud
raise GetUserWordCloudException("用户文章中的最高频词没有达到可生成词云的数量")
else:
return wordcloud.generate_from_frequencies(
{key: value for key, value in words_count.items() if value > 10}
)
def GetDataJob(user: User):
user_slug = UserUrlToUserSlug(user.user_url)
    if not path.exists(f"user_data/{user_slug}"):  # avoid an already-exists error if the service restarted mid-fetch
mkdir(f"user_data/{user_slug}")
AddRunLog(3, f"开始执行 {user.user_url}({user.user_name})的数据获取任务")
AddRunLog(4, f"开始获取 {user.user_url}({user.user_name})的基础数据")
try:
basic_data = GetUserBasicData(user.user_url)
except GetUserBasicDataException as e:
AddRunLog(1, f"获取 {user.user_url}({user.user_name})的基础数据时发生错误:{e}")
SetUserStatusFailed(user.user_url, str(e))
        return  # abort this job
else:
with open(f"user_data/{user_slug}/basic_data_{user_slug}.yaml", "w", encoding="utf-8") as f:
yaml_dump(basic_data, f, indent=4, allow_unicode=True)
AddRunLog(4, f"获取 {user.user_url}({user.user_name})的基础数据完成")
AddRunLog(4, f"开始获取 {user.user_url}({user.user_name})的文章数据")
try:
article_data = GetUserArticleData(user.user_url)
except GetUserArticleDataException as e:
AddRunLog(1, f"获取 {user.user_url}({user.user_name})的文章数据时发生错误:{e}")
SetUserStatusFailed(user.user_url, str(e))
        return  # abort this job
else:
article_data.to_csv(f"user_data/{user_slug}/article_data_{user_slug}.csv", index=False)
AddRunLog(4, f"获取 {user.user_url}({user.user_name})的文章数据完成,共 {len(article_data)} 条")
AddRunLog(4, f"开始为 {user.user_url}({user.user_name})生成词云图")
try:
wordcloud_img = GetUserWordcloud((ArticleSlugToArticleUrl(x) for x in list(article_data["aslug"])), user_slug)
except GetUserWordCloudException as e:
AddRunLog(1, f"为 {user.user_url}({user.user_name})生成词云图时发生错误:{e}")
SetUserStatusFailed(user.user_url, str(e))
        return  # abort this job
else:
wordcloud_img.to_file(f"user_data/{user_slug}/wordcloud_{user_slug}.png")
AddRunLog(4, f"为 {user.user_url}({user.user_name})生成词云图成功")
    ProcessFinished(user.user_url)  # all data fetched: set the user status to 3 (data fetch complete)
AddRunLog(3, f"{user.user_url}({user.user_name})的数据获取任务执行完毕")
AddRunLog(4, f"{user.user_url}({user.user_name})的数据获取线程结束运行")
def main():
pool = ThreadPoolExecutor(max_workers=Config()["perf/data_getters_max_count"], thread_name_prefix="data_getter-")
futures = []
AddRunLog(4, f"数据获取线程池创建成功,最大线程数:{Config()['perf/data_getters_max_count']}")
while True:
try:
            for user, future in futures[:]:  # iterate over a copy so removals don't break iteration
try:
                    exception_obj = future.exception(timeout=0)  # exception raised inside the data-fetch thread, if any
                except TimeoutError:  # the thread has not finished yet
continue
else:
if exception_obj:
AddRunLog(1, f"{user.user_url}({user.user_name})的数据获取线程中出现未捕获的异常:{exception_obj}")
                        SetUserStatusFailed(user.user_url, "数据获取过程中的未知异常")  # mark the user as failed
                    futures.remove((user, future))  # drop the finished Future from the list
user = GetOneToProcess()
except QueueEmptyException:
            sleep(0.3)  # queue is empty, wait a bit
continue
else:
future = pool.submit(GetDataJob, user)
futures.append((user, future))
AddRunLog(4, f"启动了新的数据获取线程:{user.user_url}({user.user_name})")
def init():
    data_getter_thread = Thread(target=main, daemon=True)  # daemon thread so it cannot block interpreter exit
data_getter_thread.start()
|
lxm32_modbus.py
|
from __future__ import print_function
from threading import Thread
from time import sleep, time
import atexit
import sys
import signal
if sys.version_info.major == 2:
from pymodbus.client.sync import ModbusTcpClient as ModbusClient
from pymodbus.pdu import ExceptionResponse
else:
from pymodbus3.client.sync import ModbusTcpClient as ModbusClient
from pymodbus3.pdu import ExceptionResponse
def get_bit(number, idx):
return (number & (1 << idx)) != 0
int32max = 2 ** 31
int32min = -int32max
uint16max = 2 ** 16 - 1
def sig_handler(signo, frame):
print(f"sig_handler({signo}, {frame})")
sys.exit(0)
def split_int32(i):
if i > int32max or i < int32min:
raise ValueError
if i < 0:
i = abs(i)
i |= 1 << 31
i = i ^ 0xFFFFFFFF
i |= 1 << 31
i += 1
i16_l = i >> 16
i16_r = i & 0xFFFF
return i16_l, i16_r
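# Example of the register encoding above: positive values split directly into
# high/low 16-bit words, negative values are two's-complement encoded first.
#   split_int32(100000) == (0x0001, 0x86A0)
#   split_int32(-1)     == (0xFFFF, 0xFFFF)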
def parse_mf_stat(mf_stat):
mode = mf_stat & 0x1F
de = get_bit(mf_stat, 4)
me = get_bit(mf_stat, 5)
mt = get_bit(mf_stat, 6)
return mode, de, me, mt
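# Example, assuming the bit layout decoded above: parse_mf_stat(0x46) returns
# (6, False, False, True), i.e. mode 6 ("Homing") with only the mt bit set.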
states = {
0: "?",
1: "Start (1)",
2: "Not Ready To Switch On (2)",
3: "Switch On Disabled (3)",
4: "Ready To Switch (4)",
5: "Switched On (5)",
6: "Operation Enabled (6)",
7: "Quick Stop Active (7)",
8: "Fault Reaction Active (8)",
9: "Fault (9)",
}
modes = {
1: "Profile Position",
3: "Profile Velocity",
4: "Profile Torque",
6: "Homing",
0x1F: "Jog",
0x1E: "Electronic Gear",
0x1D: "Motion Sequence",
}
class Motor:
def __init__(
self,
ip_modbus="192.168.28.21",
disable_scan_timeout=False,
disable_limit_switches=False,
):
self._code_state = None
self._is_scanning = False
self.client = ModbusClient(ip_modbus)
if self.client.connect():
print("connection ok")
sys.stdout.flush()
else:
raise ValueError("bad modbus connection.")
if disable_scan_timeout:
ret = self.read_holding_registers(17498, 2)
if ret.registers == [0, 20]:
self.write_registers(17498, [0, 0])
if disable_limit_switches:
self.write_registers(1566, [0] * 4)
else:
self.write_registers(1566, [0, 1, 0, 1])
self.ramp_v = self.read_ramp_v()
self.dm_control = 0
self.ref_a = [0, 0]
self.ref_b = [0, 0]
sleep(0.1)
self.outscan = [0] * 13
self._has_to_scan = True
self._is_pingponging = False
self.ioscanning_thread = Thread(target=self._ioscanning)
self.ioscanning_thread.daemon = True
self.ioscanning_thread.start()
atexit.register(self.close)
signal.signal(signal.SIGTERM, sig_handler)
def close(self):
if self._is_scanning:
print("close motor driver")
self._has_to_scan = False
while self._is_scanning:
sleep(0.05)
self.client.close()
def __del__(self):
self.close()
def disable_limit_switches(self):
"""disable limit switches (power stage must be disabled)"""
self.disable()
self.write_registers(1566, [0] * 4)
def enable_limit_switches(self):
"""disable limit switches (power stage must be disabled)"""
self.disable()
self.write_registers(1566, [0, 1, 0, 1])
def _build_output_scan(self):
outscan = [0] * 4
outscan[0] = 0x2 << 8
old_out = self.outscan[4:]
out = [self.dm_control]
out.extend(self.ref_a)
out.extend(self.ref_b)
out.extend(self.ramp_v)
if out != old_out:
# flip toggle bit
self.dm_control ^= 1 << 7
out[0] = self.dm_control
# print('hex(dm_control):', hex(self.dm_control))
outscan.extend(out)
self.outscan = outscan
assert len(outscan) == 13
return outscan
def read_holding_registers(self, address, count=1, **kwargs):
ret = self.client.read_holding_registers(address, count, **kwargs)
if isinstance(ret, ExceptionResponse):
print(
"ExceptionResponse", ret.exception_code, ret.function_code - 128
)
return ret
def write_registers(self, address, values, **kwargs):
ret = self.client.write_registers(address, values, **kwargs)
if isinstance(ret, ExceptionResponse):
print(
"ExceptionResponse", ret.exception_code, ret.function_code - 128
)
return ret
def compute_dm_control(
self,
mode=None,
enable=None,
quick_stop=None,
fault_reset=None,
halt=None,
clear_halt=None,
resume_after_halt=None,
):
dm_control = self.dm_control
if mode is not None:
if not isinstance(mode, str):
mode = str(mode)
if mode.startswith("pos"):
dm_control = 0x1
elif mode.startswith("homing"):
dm_control = 0x6
else:
dm_control = 0x23
if enable: # enable the power stage
dm_control |= 1 << 9
else: # disable the power stage
dm_control |= 1 << 8
if quick_stop:
dm_control |= 1 << 10
if fault_reset:
dm_control |= 1 << 11
if halt:
dm_control |= 1 << 13
elif clear_halt:
dm_control |= 1 << 14
elif resume_after_halt:
dm_control |= 1 << 15
self.dm_control = dm_control
def set_dm_control(
self,
mode=None,
enable=None,
quick_stop=None,
fault_reset=None,
halt=None,
clear_halt=None,
resume_after_halt=None,
):
self.compute_dm_control(
mode=mode,
enable=enable,
quick_stop=quick_stop,
fault_reset=fault_reset,
halt=halt,
clear_halt=clear_halt,
resume_after_halt=resume_after_halt,
)
self._pingpong()
def _ioscanning(self):
self._is_scanning = True
while self._has_to_scan:
self._pingpong()
sleep(0.5)
self._is_scanning = False
def _pingpong(self):
t0 = time()
while self._is_pingponging:
sleep(0.05)
t = time()
            if t - t0 > 10:
                raise Exception("Very slow Motor._pingpong (more than 10 s)")
            elif t - t0 > 1:
                print("warning: very slow Motor._pingpong (more than 1 s)")
self._is_pingponging = True
self._build_output_scan()
# t1 = time()
ret_write = self.write_registers(0, self.outscan, unit=255)
if isinstance(ret_write, ExceptionResponse):
print(
"ExceptionResponse",
ret_write.exception_code,
ret_write.function_code - 128,
)
ret_read = self.read_holding_registers(0, 13, unit=255)
registers = ret_read.registers
# t = time()
# # print ('write_read_time = {}'.format(t - t1))
# if t - t1 > 0.01:
# print('write and read time > dt')
self.par_ch = registers[:4]
self.drive_stat = drive_stat = registers[4]
self.mf_stat = mf_stat = registers[5]
self.motion_stat = motion_stat = registers[6]
self.drive_input = registers[7]
self._p_act = registers[8:10]
self._v_act = registers[10:12]
self._I_act = registers[12]
# decode drive_stat
new_code_state = drive_stat & 0xF
if self._code_state != new_code_state and self._code_state is not None:
print(
'state changed from "'
+ states[self._code_state]
+ '" to "'
+ states[new_code_state]
+ '"'
)
self._code_state = new_code_state
self.state = states[new_code_state]
self.error = get_bit(drive_stat, 6)
self.warn = get_bit(drive_stat, 7)
self.halt = get_bit(drive_stat, 8)
self.homing = get_bit(drive_stat, 9)
self.quick_stop = get_bit(drive_stat, 10)
self.x_add1 = get_bit(drive_stat, 13)
self.x_end = get_bit(drive_stat, 14)
self.x_err = get_bit(drive_stat, 15)
# mf_stat
self.mode, self.de, self.me, self.mt = parse_mf_stat(mf_stat)
# motion_stat
self.motor_standstill = get_bit(motion_stat, 6)
self.motor_pos = get_bit(motion_stat, 7)
self.motor_neg = get_bit(motion_stat, 8)
self._is_pingponging = False
def get_state(self):
return (
f"error: {self.error}\nquick_stop: {self.quick_stop}"
+ "mode:"
+ hex(self.mode)
+ f"\nde: {self.de}; me: {self.me}; mt: {self.mt}\n"
+ "x_add1: {}; x_end: {}; x_err: {}\n".format(
self.x_add1, self.x_end, self.x_err
)
+ "drive_input: "
+ bin(self.drive_input)
+ "\ndrive_stat: "
+ bin(self.drive_stat)
+ "\nstate: "
+ self.state
+ "\nmotor_neg: {}, motor_pos: {}, motor_standstill: {}".format(
self.motor_neg, self.motor_pos, self.motor_standstill
)
)
def print_state(self):
print(self.get_state())
def read_param(self, address, count=2):
ret = self.read_holding_registers(address, count)
return ret.registers
def read_ramp_v(self):
return self.read_param(1556, 4)
def read_v_target(self):
return self.read_param(6938)
def read_position_target(self):
return self.read_param(6940)
def set_target_rotation_rate(self, i32):
if self.state == "Fault (9)":
print('self.state == "Fault (9)"')
if not isinstance(i32, int):
i32 = int(round(i32))
self.ref_a = list(split_int32(i32))
self._pingpong()
def set_target_position(self, i32):
if self.state == "Fault (9)":
print('self.state == "Fault (9)"')
if not isinstance(i32, int):
i32 = int(round(i32))
self.ref_b = list(split_int32(i32))
self._pingpong()
def stop_rotation(self):
self.set_target_rotation_rate(0)
def disable(self):
self.set_target_rotation_rate(0)
self.set_dm_control()
def enable(self, mode=1):
self.set_dm_control(mode=mode, enable=1)
self.set_target_rotation_rate(0)
def run_quick_stop(self):
self.set_dm_control(quick_stop=True)
def fault_reset(self):
self.set_dm_control(fault_reset=1)
def set_acceleration(self, a):
a = abs(a)
if not isinstance(a, int):
a = int(round(a))
if a > uint16max:
a = uint16max
print("Warning: too large acceleration for the motor.")
self.ramp_v = [0, a] * 2
self._pingpong()
def get_position_actual(self):
self._pingpong()
return (self._p_act[0] << 16) + self._p_act[1]
|
EventLoop.py
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import time
import weakref
import threading
import traceback
import functools
import IECore
import Gaffer
import GafferUI
from Qt import QtCore
from Qt import QtWidgets
## This class provides the event loops used to run GafferUI based applications.
class EventLoop( object ) :
__RunStyle = IECore.Enum.create( "Normal", "PumpThread", "AlreadyRunning", "Houdini" )
## Creates a new EventLoop. Note that if you are creating the primary
# EventLoop for an application then you should use mainEventLoop() instead.
def __init__( self, __qtEventLoop=None ) :
if __qtEventLoop is None :
if self.__mainEventLoop is None or self.__mainEventLoop.__startCount==0 :
raise Exception( "Main event loop is not running - perhaps you should use EventLoop.mainEventLoop()?" )
self.__qtEventLoop = QtCore.QEventLoop()
else :
self.__qtEventLoop = __qtEventLoop
self.__runStyle = self.__RunStyle.Normal
if isinstance( self.__qtEventLoop, QtWidgets.QApplication ) :
try :
import maya.OpenMaya
if maya.OpenMaya.MGlobal.apiVersion() < 201100 :
self.__runStyle = self.__RunStyle.PumpThread
else :
self.__runStyle = self.__RunStyle.AlreadyRunning
except ImportError :
pass
try :
import hou
if hou.applicationVersion()[0] < 14 :
self.__runStyle = self.__RunStyle.Houdini
else :
self.__runStyle = self.__RunStyle.AlreadyRunning
except ImportError :
pass
try :
import nuke
self.__runStyle = self.__RunStyle.AlreadyRunning
except ImportError :
pass
self.__startCount = 0
self.__pumpThread = None
self.__houdiniCallback = None
## Starts the event loop, passing control to the UI code. This function returns
# when the corresponding stop() method is called. See documentation for
# mainEventLoop() for exceptions to this rule.
def start( self ) :
self.__startCount += 1
if self.__runStyle == self.__RunStyle.Normal :
assert( self.__startCount == 1 )
self.__qtEventLoop.exec_()
elif self.__runStyle == self.__RunStyle.PumpThread :
if self.__pumpThread is None :
self.__pumpThread = threading.Thread( target = self.__pumpThreadFn )
self.__pumpThread.start()
elif self.__runStyle == self.__RunStyle.Houdini :
if self.__houdiniCallback is None :
import hou
hou.ui.addEventLoopCallback( functools.partial( self.__pump, 5 ) )
self.__houdiniCallback = hou.ui.eventLoopCallbacks()[-1]
else :
# RunStyle.AlreadyRunning
# host application is using qt natively, no need to do anything.
pass
## Stops the event loop last started using start().
def stop( self ) :
assert( self.__startCount > 0 )
if self.__runStyle == self.__RunStyle.Normal :
assert( self.__startCount == 1 )
self.__qtEventLoop.exit()
elif self.__runStyle == self.__RunStyle.PumpThread :
## \todo Should we try to stop the pump thread
			# when self.__startCount hits 0? Right now we're
# just keeping it running on the assumption we'll
# need it again soon.
pass
elif self.__runStyle == self.__RunStyle.Houdini :
if self.__startCount == 1 and self.__houdiniCallback :
import hou
hou.ui.removeEventLoopCallback( self.__houdiniCallback )
self.__houdiniCallback = None
else :
# RunStyle.AlreadyRunning
pass
self.__startCount -= 1
## Returns true if this event loop is currently running.
def running( self ) :
return self.__startCount > 0
# if we're running embedded in an application which already uses qt (like maya 2011 or later)
# then there'll already be an application, which we'll share. if not we'll make our own.
if QtWidgets.QApplication.instance() :
__qtApplication = QtWidgets.QApplication.instance()
else :
# Set the style explicitly so we don't inherit one from the desktop
# environment, which could mess with our own style (on GNOME for instance,
# our icons can come out the wrong size).
style = QtWidgets.QApplication.setStyle( "Fusion" )
assert( style is not None )
__qtApplication = QtWidgets.QApplication( [ "gaffer" ] )
__mainEventLoop = None
## Returns the main event loop for the application. This should always
# be started before running any other nested event loops. In the standalone
# Gaffer applications, the main event loop acts like any other, but when
# GafferUI is embedded in another application (like Maya) it behaves slightly
# differently. In this case, the start() method returns immediately so that
# the GafferUI event loop may be interleaved with the event loop of the host
# application. Additionally, the start() method may also be called multiple
# times to allow several GafferUI-based applications to run in the same host.
# The main event loop will therefore only cease running when the number of
# calls to stop() matches the number of calls to start().
@classmethod
def mainEventLoop( cls ) :
if cls.__mainEventLoop is None :
cls.__mainEventLoop = cls( cls.__qtApplication )
return cls.__mainEventLoop
__idleCallbacks = []
__idleTimer = None
## Adds a function to be called when the event loop is idle (has no events
# remaining to be processed). If callback returns False then it will be removed
# automatically after running, if it returns True it will be called again until
# it returns False, or until removeIdleCallback() is called.
## \todo This should probably be replaced with an idleSignal() like the one we
# have in GafferUI.Gadget.
@classmethod
def addIdleCallback( cls, callback ) :
assert( callback not in cls.__idleCallbacks )
cls.__idleCallbacks.append( callback )
cls.__ensureIdleTimer()
## Removes an idle callback previously created with addIdleCallback().
@classmethod
def removeIdleCallback( cls, callback ) :
cls.__idleCallbacks.remove( callback )
## Convenience method to introduce a delay on the mainEventLoop().
@classmethod
def waitForIdle( cls, count = 1000 ) :
cls.__idleCount = 0
def f() :
cls.__idleCount += 1
if cls.__idleCount >= count :
EventLoop.mainEventLoop().stop()
return False
return True
EventLoop.addIdleCallback( f )
EventLoop.mainEventLoop().start()
## Widgets may only be manipulated on the thread where mainEventLoop() is running. It
# is common to want to perform some background processing on a secondary thread, and
# to update the UI during processing or upon completion. This function can be used from
# such a secondary thread to queue a callable to be called on the main thread. If called
# from the main thread, the callable is called immediately.
@classmethod
def executeOnUIThread( cls, callable, waitForResult=False ) :
if QtCore.QThread.currentThread() == cls.__qtApplication.thread() :
# Already on the UI thread - just do it.
return callable()
resultCondition = threading.Condition() if waitForResult else None
# we only use a weakref here, because we don't want to be keeping the object
# alive from this thread, and hence deleting it from this thread. instead it
# is deleted in _UIThreadExecutor.event().
uiThreadExecutor = weakref.ref( _UIThreadExecutor( callable, resultCondition ) )
uiThreadExecutor().moveToThread( cls.__qtApplication.thread() )
if resultCondition is not None :
resultCondition.acquire()
cls.__qtApplication.postEvent( uiThreadExecutor(), QtCore.QEvent( QtCore.QEvent.Type( _UIThreadExecutor.executeEventType ) ) )
resultCondition.wait()
resultCondition.release()
return resultCondition.resultValue
else :
cls.__qtApplication.postEvent( uiThreadExecutor(), QtCore.QEvent( QtCore.QEvent.Type( _UIThreadExecutor.executeEventType ) ) )
return None
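	# Example sketch (hypothetical callable): from a secondary thread, queue a
	# call on the UI thread and block until its return value is available.
	#
	#   EventLoop.executeOnUIThread( functools.partial( print, "done" ), waitForResult = True )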
@classmethod
def __ensureIdleTimer( cls ) :
assert( QtCore.QThread.currentThread() == EventLoop.__qtApplication.thread() )
if cls.__idleTimer is None :
cls.__idleTimer = QtCore.QTimer( cls.__qtApplication )
cls.__idleTimer.timeout.connect( cls.__qtIdleCallback )
if not cls.__idleTimer.isActive() :
cls.__idleTimer.start()
# This is a staticmethod rather than a classmethod because PySide 1.0.5
# doesn't support classmethods as slots.
@staticmethod
def __qtIdleCallback() :
assert( QtCore.QThread.currentThread() == EventLoop.__qtApplication.thread() )
GafferUI.Gadget.idleSignal()()
for c in EventLoop.__idleCallbacks[:] : # slice takes copy, so we can remove during iteration
try :
if not c() :
EventLoop.__idleCallbacks.remove( c )
except Exception as e :
# if the callback throws then we remove it anyway, because
# we don't want to keep invoking the same error over and over.
EventLoop.__idleCallbacks.remove( c )
# report the error
IECore.msg( IECore.Msg.Level.Error, "EventLoop.__qtIdleCallback", "".join( traceback.format_exc() ) )
if len( EventLoop.__idleCallbacks )==0 and GafferUI.Gadget.idleSignal().empty() :
EventLoop.__idleTimer.stop()
@classmethod
def _gadgetIdleSignalAccessed( cls ) :
# It would be an error to access the idle signal from anything but the main
# thread, because it would imply multiple threads fighting over the same signal.
assert( QtCore.QThread.currentThread() == EventLoop.__qtApplication.thread() )
cls.__ensureIdleTimer()
def __pumpThreadFn( self ) :
import maya.utils
while 1 :
time.sleep( 0.01 )
maya.utils.executeDeferred( self.__pump )
def __pump( self, thrusts=1 ) :
for thrust in range( 0, thrusts ) :
self.__qtEventLoop.processEvents()
_gadgetIdleSignalAccessedConnection = GafferUI.Gadget._idleSignalAccessedSignal().connect( EventLoop._gadgetIdleSignalAccessed )
class _UIThreadExecutor( QtCore.QObject ) :
executeEventType = QtCore.QEvent.registerEventType()
__instances = set()
def __init__( self, callable, resultCondition = None ) :
QtCore.QObject.__init__( self )
self.__callable = callable
self.__resultCondition = resultCondition
# we store a reference to ourselves in __instances, as otherwise
# we go out of scope and get deleted at the end of executeOnUIThread
# above. that's bad because we never live long enough to get our event,
# and we'll also be being deleted from the calling thread, not the ui
# thread where we live.
self.__instances.add( self )
def event( self, event ) :
if event.type() == self.executeEventType :
result = self.__callable()
if self.__resultCondition is not None :
self.__resultCondition.acquire()
self.__resultCondition.resultValue = result
self.__resultCondition.notify()
self.__resultCondition.release()
self.__instances.remove( self )
return True
return False
# Service the requests made to `ParallelAlgo::callOnUIThread()`.
Gaffer.ParallelAlgo.pushUIThreadCallHandler( EventLoop.executeOnUIThread )
|
conftest.py
|
import os
import sys
import threading
import time
from typing import Iterator
import pytest
import uvicorn
from .utils import Server
os.environ["DEBUG"] = "true"
def serve_in_thread(server: Server) -> Iterator[Server]:
thread = threading.Thread(target=server.run)
thread.start()
try:
while not server.started:
time.sleep(1e-3)
yield server
finally:
server.should_exit = True
thread.join()
@pytest.fixture(scope="session")
def example_server() -> Iterator[Server]:
sys.path.append("example")
from server import app
config = uvicorn.Config(app=app, loop="asyncio")
server = Server(config=config)
yield from serve_in_thread(server)
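# Usage sketch (hypothetical test): the session-scoped fixture yields a uvicorn
# server that is already accepting connections in a background thread.
#
#   def test_server_started(example_server):
#       assert example_server.started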
|
machine_unit.py
|
#!/usr/bin/env python
#
# provide host/guest container manager
#
import os
import abc
import time
import shutil
import pathlib
import difflib
import threading
from dataclasses import dataclass
from arkon_config import host_user
from arkon_config import has_ci_azure
from arkon_config import project_repo
from arkon_config import project_boot
from arkon_config import project_data
from sysroot_media import SimpleSysroot
# well known system path
linux_kernel = f"/boot/vmlinuz-linux"
# well known system path
linux_initrd = f"/boot/initramfs-linux.img"
class HostAny(abc.ABC):
"conatainer manager prototype"
def __init__(self, guest_name:str, sysroot_path:str) -> None:
self.guest_name = guest_name
self.sysroot_path = sysroot_path
@abc.abstractmethod
def command_initiate(self) -> str:
"start guest instance"
@abc.abstractmethod
def command_terminate(self) -> str:
"finish guest instance"
# FIXME barely usable
# https://wiki.archlinux.org/index.php/systemd-nspawn#Run_docker_in_systemd-nspawn
# https://www.freedesktop.org/software/systemd/man/systemd.exec.html#System%20Call%20Filtering
# https://github.com/systemd/systemd/blob/master/units/systemd-nspawn%40.service.in
class HostSYSD(HostAny):
"systemd-nspawn container host"
def command_initiate(self) -> str:
proc_cmdline = (
f"TERM=xterm "
# "systemd.log_level=debug "
# "systemd.log_target=console "
# "systemd.journald.forward_to_console=1 "
)
return (
f"sudo "
#
# elevate privilege
f"SYSTEMD_NSPAWN_LOCK=0 "
f"SYSTEMD_NSPAWN_USE_CGNS=0 "
f"SYSTEMD_NSPAWN_API_VFS_WRITABLE=1 "
#
f"systemd-nspawn "
#
# elevate privilege
f"--capability=CAP_MKNOD "
f"--system-call-filter='@mount @keyring @privileged' "
#
f"--bind=/dev/disk "
f"--bind=/dev/block "
f"--bind=/dev/mapper "
f"--bind=/dev/loop-control "
f"--bind=/dev/loop7 " # sysroot.disk
#
f"--property='DevicePolicy=auto' "
f"--property='DeviceAllow=/dev/loop-control rw' "
f"--property='DeviceAllow=block-loop rw' "
f"--property='DeviceAllow=block-blkext rw' "
f"--property='DeviceAllow=/dev/mapper/control rw' "
f"--property='DeviceAllow=block-device-mapper rw' "
#
f"-E SYSTEMD_COLORS=0 " # suppress colors
f"-E SYSTEMD_IN_INITRD=1 " # imitate initramfs
f"-E SYSTEMD_PROC_CMDLINE='{proc_cmdline}' " # imitate kernel command line
f"-D {project_data} " # image folder
f"-M {self.guest_name} " # container name
f"/init " # /init is /usr/lib/systemd/systemd
)
def command_terminate(self) -> str:
return (
f"sudo SYSTEMD_COLORS=0 "
f"machinectl terminate {self.guest_name} "
)
class HostQEMU(HostAny):
"quemu container host"
command = "qemu-system-x86_64"
kernel = f"{project_repo}{linux_kernel}"
initrd = f"{project_repo}{linux_initrd}"
monitor_addr = "127.0.0.1"
monitor_port = "51234"
def has_manager(self) -> bool:
"detect quemu manager present"
return shutil.which(self.command) is not None
def has_kernel_kvm(self) -> bool:
"detect kernel has kvm support"
return os.path.exists("/dev/kvm")
def command_action(self, action:str) -> str:
return (
f"printf '{action}\n' | telnet {self.qemu_monitor_addr} {self.qemu_monitor_port} "
)
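    # Example (hypothetical monitor action): command_action("system_powerdown")
    # builds a shell line that pipes the QEMU monitor command through telnet.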
def command_initiate(self) -> str:
# note: ensure virtio drivers are present in the guest
qemu_cpu_mode = f"-cpu host -enable-kvm " if self.has_kernel_kvm() else ""
return (
f"sudo "
f"{self.command} "
f"{qemu_cpu_mode} "
f"-name {self.guest_name} "
f"-runas {host_user} "
f"-kernel {self.kernel} "
f"-initrd {self.initrd} "
f"-m 512 -smp 2 "
f"-drive if=virtio,cache=none,format=raw,file={self.sysroot_path} "
f"-append 'edd=off console=ttyS0 TERM=xterm SYSTEMD_COLORS=0' "
f"-nographic -serial mon:stdio "
f"-monitor telnet:{self.monitor_addr}:{self.monitor_port},server,nowait "
)
def command_terminate(self) -> str:
# FIXME use self.guest_name
return f"sudo killall {self.command}"
class Support:
service_path_list_udev = [
"/etc/systemd/system/systemd-udevd.service",
"/etc/systemd/system/systemd-udevd-control.socket",
"/etc/systemd/system/systemd-udevd-kernel.socket",
]
@classmethod
def service_mask(cls, service_path:str) -> None:
print(f"### service: prohibit {service_path}")
os.system(f"sudo ln -s /dev/null {project_data}/{service_path}")
@classmethod
def service_mask_list(cls, service_path_list:list) -> None:
for service_path in service_path_list:
cls.service_mask(service_path)
@dataclass
class MachineUnit:
"container host/guest operator"
machine:str # container name
image_root:str # absolute path to resource folder
def __post_init__(self):
if has_ci_azure():
print(f"### settle machine state")
time.sleep(3)
self.sysroot = SimpleSysroot()
self.host_qemu = HostQEMU(self.booter_machine, self.sysroot.disk_path)
self.host_sysd = HostSYSD(self.booter_machine, self.sysroot.disk_path)
@property
def test_base(self) -> str:
"location of test resources"
return f"{self.image_root}/test_base"
# https://www.freedesktop.org/software/systemd/man/systemd-run.html
def run(self, command:str, machine:str=None) -> None:
"invoke command inside machine"
if machine is None:
machine = self.machine
invoke = f"sudo systemd-run --wait -G -P -M {machine} {command}"
result = os.system(invoke)
assert result == 0, f"result: {result}, command: {command}"
def report_machine(self) -> None:
print(f"### report active machines")
os.system(f"sudo machinectl --all --full")
def install_this_tool(self) -> None:
print(f"### install systemd-tool")
self.run(f"/repo/tool/module/manual-install.sh")
def service_enable(self, service:str) -> None:
print(f"### service enable : {service}")
self.run(f"/usr/bin/systemctl enable {service}")
def service_disable(self, service:str) -> None:
print(f"### service disable: {service}")
self.run(f"/usr/bin/systemctl disable {service}")
def service_mask(self, service:str) -> None:
print(f"### service mask : {service}")
self.run(f"/usr/bin/systemctl mask {service}")
def service_unmask(self, service:str) -> None:
print(f"### service unmask: {service}")
self.run(f"/usr/bin/systemctl unmask {service}")
def service_enable_list(self, service_list:list) -> None:
for service in service_list:
self.service_enable(service)
def service_disable_list(self, service_list:list) -> None:
for service in service_list:
self.service_disable(service)
def share_folder_clear(self):
print(f"### share folder clear")
os.system(f"sudo rm -rf {project_boot}/*")
os.system(f"sudo rm -rf {project_data}/*")
def share_folder_review(self):
print(f"### share folder review")
os.system(f"ls -las {project_boot}")
os.system(f"ls -las {project_data}")
def initrd_image_produce(self):
print(f"### produce machine initrd")
self.run(f"/usr/bin/mkinitcpio -p linux")
def initrd_image_extract(self):
print(f"### extract machine initrd")
self.run(f"/usr/bin/cp -f {linux_kernel} {linux_initrd} /repo/boot/")
self.run(f"/usr/bin/bash -c 'cd /repo/data; lsinitcpio -x {linux_initrd}' ")
os.system(f"sudo chown -R {host_user}:{host_user} {project_boot} ")
os.system(f"sudo chown -R {host_user}:{host_user} {project_data} ")
def perform_make_boot(self) -> None:
"produce boot image extract on the host"
self.report_machine()
self.share_folder_clear()
self.initrd_image_produce()
self.initrd_image_extract()
self.share_folder_review()
def assert_has_link(self, path:str) -> None:
print(f"### assert link present: {path}")
full_path = f"{project_data}/{path}"
assert pathlib.Path(full_path).is_symlink(), f"no link: {full_path}"
def assert_has_path(self, path:str) -> None:
print(f"### assert path present: {path}")
full_path = f"{project_data}/{path}"
assert pathlib.Path(full_path).exists(), f"no path: {full_path}"
def assert_has_text(self, path:str) -> None:
print(f"### assert text matches: {path}")
boot_path = f"{project_data}/{path}"
test_path = f"{self.test_base}/{path}"
boot_list = pathlib.Path(boot_path).read_text().split("\n")
test_list = pathlib.Path(test_path).read_text().split("\n")
diff_list = difflib.unified_diff(test_list, boot_list, lineterm='')
diff_text = "\n".join(diff_list)
assert boot_list == test_list, f"no match:\n{diff_text}\n"
def assert_has_link_list(self, path_list:list) -> None:
for path in path_list:
self.assert_has_link(path)
def assert_has_path_list(self, path_list:list) -> None:
for path in path_list:
self.assert_has_path(path)
def assert_has_text_list(self, path_list:list) -> None:
for path in path_list:
self.assert_has_text(path)
@property
def booter_machine(self) -> str:
"name of boot container instance"
return f"{self.machine}-boot"
def booter_run(self, command:str) -> None:
"invoke command inside booter"
self.run(command, self.booter_machine)
def booter_disable_udev(self) -> None:
print(f"### booter: disable udev")
Support.service_mask_list(Support.service_path_list_udev)
def booter_ensure_loop(self):
print(f"### booter: ensure loop")
os.system("sudo modprobe loop")
os.system("sudo losetup")
def booter_sysd_prepare(self):
print(f"### booter: sysd: prepare")
self.booter_ensure_loop()
self.booter_disable_udev()
def booter_initiate_thread(self, command:str) -> None:
"start process in parallel"
print(command)
def booter_task() -> None:
try:
print(f"### booter start : {self.booter_machine}")
os.system(command)
finally:
print(f"### booter finish : {self.booter_machine}")
booter_thread = threading.Thread(target=booter_task)
        booter_thread.daemon = True
booter_thread.start()
def booter_sysd_initiate(self) -> None:
self.booter_sysd_prepare()
print(f"### initrd image: sysd: activate")
command = self.host_sysd.command_initiate()
self.booter_initiate_thread(command)
    # manual stop: keyboard terminate: CTRL+]]]
def booter_sysd_terminate(self) -> None:
print()
print(f"### initrd image: sysd: deactivate")
command = self.host_sysd.command_terminate()
os.system(command)
# FIXME "Failed to create bus connection: Protocol error"
def booter_report_generator(self) -> None:
print(f"### report generated units")
self.booter_run(f"/usr/lib/systemd/system-generators/systemd-cryptsetup-generator")
self.booter_run(f"/usr/lib/systemd/system-generators/systemd-fstab-generator")
time.sleep(1)
self.booter_run(f"/usr/bin/ls -las /run/systemd/generator")
def booter_qemu_action(self, action:str) -> None:
print(f"### initrd image: action: {action}")
command = self.host_qemu.command_action(action)
print(command)
os.system(command)
def booter_qemu_initiate(self) -> None:
if not self.host_qemu.has_manager():
return
print()
self.sysroot.produce_media()
print(f"### initrd image: qemu: activate")
command = self.host_qemu.command_initiate()
self.booter_initiate_thread(command)
# manual stop: keyboard terminate: CTRL+A then X
def booter_qemu_terminate(self) -> None:
if not self.host_qemu.has_manager():
return
print()
print(f"### initrd image: qemu: deactivate")
command = self.host_qemu.command_terminate()
os.system(command)
|
client_threads_coroutines.py
|
# -*- coding: utf-8 -*-
import sys
import logging
import gevent
import time
import traceback
from multiprocessing import Process
from tutorial import TutorialService
from bfd.harpc import client
from bfd.harpc.common import config
from bfd.harpc.common import monkey
# when calling through coroutines, monkey-patch the package to replace some of the thread-based libraries
monkey.patch_all()
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
filename='./logs/clientdemo.log',
filemode='w')
Coroutines = 5
req_num = 50
workers = 4
data = []
for i in range(0,1024):
data.append(chr(i%64 + 32))
test_msg= ''.join(data)
def test():
conf = config.Config()
conf.set("client", "service", "python_test$EchoService")
conf.set("client", "zk_connect_str", "172.18.1.22:2181")
    # each process creates one client, shared by all of its coroutines
manager = client.Client(TutorialService.Client, conf)
proxy_client = manager.create_proxy()
def test_echo(msg):
for i in xrange(req_num):
try:
proxy_client.echo(msg)
except Exception, e:
print "request error:%s" % e
jobs = []
for i in range(Coroutines):
jobs.append(gevent.spawn(test_echo,test_msg))
gevent.joinall(jobs)
if __name__ == "__main__":
p_list = []
start = time.time()
for i in range(workers):
p = Process(target=test)
p_list.append(p)
p.start()
for p in p_list:
p.join()
end = time.time()
req_time = end-start
total = req_num*workers*Coroutines
print "total : %s" % total
print "total time: %s" % req_time
print "tps : %s" % (total/req_time)
|
printer.py
|
from threading import Thread,Lock
import sys,time
class Printer:
def __init__(self):
self.message = None
self.stopped = False
self.lock = Lock()
    def keep_printing(self):
        while not self.stopped:
            if self.message is not None:
                # hold the lock only while writing, so acquire/release stay balanced
                with self.lock:
                    sys.stdout.write('\r' + self.message)
                    sys.stdout.flush()
                    self.message = None
            time.sleep(0.3)
def start(self):
Thread(target=self.keep_printing,args=()).start()
return self
def stop(self):
self.stopped = True
def change_line(self):
self.message = "\n"
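# Usage sketch: the printer keeps rewriting one status line until stopped.
#   p = Printer().start()
#   p.message = "working..."
#   p.stop()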
|
repeated_timer.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import threading
import time
import weakref
class RepeatedTimer:
class State:
Stopped=0
Paused=1
Running=2
def __init__(self, secs, callback, count=None):
self.secs = secs
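        # Hold the callback through a weak reference (WeakMethod) so the timer does
        # not keep the callback's owner alive; dereferencing it returns None once
        # the owner has been garbage collected.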
self.callback = weakref.WeakMethod(callback) if callback else None
self._thread = None
self._state = RepeatedTimer.State.Stopped
self.pause_wait = threading.Event()
self.pause_wait.set()
self._continue_thread = False
self.count = count
def start(self):
self._continue_thread = True
self.pause_wait.set()
        if self._thread is None or not self._thread.is_alive():
self._thread = threading.Thread(target=self._runner, name='RepeatedTimer', daemon=True)
self._thread.start()
self._state = RepeatedTimer.State.Running
def stop(self, block=False):
self.pause_wait.set()
self._continue_thread = False
        if block and not (self._thread is None or not self._thread.is_alive()):
self._thread.join()
self._state = RepeatedTimer.State.Stopped
def get_state(self):
return self._state
def pause(self):
if self._state == RepeatedTimer.State.Running:
self.pause_wait.clear()
self._state = RepeatedTimer.State.Paused
# else nothing to do
    def unpause(self):
        if self._state == RepeatedTimer.State.Paused:
            self.pause_wait.set()
            self._state = RepeatedTimer.State.Running
        # else nothing to do
def _runner(self):
while (self._continue_thread):
if self.count:
                self.count -= 1
if not self.count:
self._continue_thread = False
if self._continue_thread:
self.pause_wait.wait()
if self.callback and self.callback():
self.callback()()
if self._continue_thread:
time.sleep(self.secs)
self._thread = None
self._state = RepeatedTimer.State.Stopped
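# Minimal usage sketch (illustrative, not part of the original module): prints
# "tick" roughly once per second, five times, then the timer stops itself.
if __name__ == "__main__":
    class Demo:
        def tick(self):
            print("tick")

    demo = Demo()
    timer = RepeatedTimer(1.0, demo.tick, count=5)
    timer.start()
    time.sleep(6)
    timer.stop(block=True)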
|
main.py
|
import os
import curses
from curses import textpad
import time
import random
import pickle
from pathlib import Path
from pytube import YouTube, Playlist
import re
import urllib.request
import threading
import vlc
os.environ['VLC_VERBOSE'] = '-1'
RES_FOLDER = Path(__file__).parent / "res"
QUOTE_FOLDER = Path(__file__).parent
QUOTE_FILE_NAME = "qts.txt"
QUOTE_FILE = QUOTE_FOLDER / QUOTE_FILE_NAME
TIMER_WORK_MINS = (20 , 20 , 40 , 50)
TIMER_BREAK_MINS = (20 , 10 , 20 , 10)
TIMER_WORK = (
TIMER_WORK_MINS[0] * 60,
TIMER_WORK_MINS[1] * 60,
TIMER_WORK_MINS[2] * 60,
TIMER_WORK_MINS[3] * 60)
TIMER_BREAK = (
TIMER_BREAK_MINS[0] * 60,
TIMER_BREAK_MINS[1] * 60,
TIMER_BREAK_MINS[2] * 60,
TIMER_BREAK_MINS[3] * 60)
SOUNDS_MUTED = False # This is for only growth and start_timer, alarm stays
TIMER_START_SOUND = str(RES_FOLDER / "timerstart.wav")
ALARM_SOUND = str(RES_FOLDER / "alarm.wav")
GROWTH_SOUND = str(RES_FOLDER/ "growth.waw")
effect_volume = 100 # How loud sound effects are, not including ambience and music.
__all__ = ["run_app"]
def get_user_config_directory():
"""Returns a platform-specific root directory for user config settings."""
# On Windows, prefer %LOCALAPPDATA%, then %APPDATA%, since we can expect the
# AppData directories to be ACLed to be visible only to the user and admin
# users (https://stackoverflow.com/a/7617601/1179226). If neither is set,
# return None instead of falling back to something that may be world-readable.
if os.name == "nt":
appdata = os.getenv("LOCALAPPDATA")
if appdata:
return appdata
appdata = os.getenv("APPDATA")
if appdata:
return appdata
return None
# On non-windows, use XDG_CONFIG_HOME if set, else default to ~/.config.
xdg_config_home = os.getenv("XDG_CONFIG_HOME")
if xdg_config_home:
return xdg_config_home
return os.path.join(os.path.expanduser("~"), ".config")
def play_sound(sound):
if SOUNDS_MUTED and sound != ALARM_SOUND:
return
media = vlc.MediaPlayer(sound)
media.audio_set_volume(effect_volume)
media.play()
def toggle_sounds():
global SOUNDS_MUTED
SOUNDS_MUTED = not SOUNDS_MUTED
def isinternet():
try:
urllib.request.urlopen("https://youtube.com", timeout = 10) #Python 3.x
return True
except:
return False
def replaceNth(
s, source, target, n
): # code from stack overflow, replaces the nth occurrence of an item.
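    # Example: replaceNth("a b c d", " ", "#", 2) -> "a b#c d"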
inds = [
i for i in range(len(s) - len(source) + 1) if s[i : i + len(source)] == source
]
if len(inds) < n:
return s # or maybe raise an error
s = list(s) # can't assign to string slices. So, let's listify
s[
inds[n - 1] : inds[n - 1] + len(source)
] = target # do n-1 because we start from the first occurrence of the string, not the 0-th
return "".join(s)
def addtext(
x, y, text, anilen, stdscr, color_pair
): # adds and animates text in the center
text = replaceNth(
text[: int(anilen)], " ", "#", 8
    ) # replaces the 8th space with "#" to split the line in two
text = text.split("#") # splits text into 2 list
for i in range(len(text)):
stdscr.addstr(
y + i,
int(x - len(text[i]) / 2),
str(text[i]),
curses.color_pair(color_pair),
) # displays the list in 2 lines
def getrandomline(file): # returns random quote
lines = open(file, encoding="utf8").read().splitlines()
myline = random.choice(lines)
return myline
def getqt(): # returns random quote
return getrandomline(QUOTE_FILE)
def printart(
stdscr, file, x, y, color_pair
): # prints line one by one to display text art, also in the middle
with open(file, "r", encoding="utf8") as f:
lines = f.readlines()
for i in range(len(lines)):
stdscr.addstr(
y + i - len(lines),
x - int(len(max(lines, key=len)) / 2),
lines[i],
curses.color_pair(color_pair),
)
def key_events(stdscr, tree1, maxx):
global effect_volume # Used for setting the sound effect volume with '{' and '}'
key = stdscr.getch()
if key in (curses.KEY_UP, ord("k")):
tree1.showtimer = True
tree1.selectedtimer -= 1
tree1.timerhidetime = int(time.time()) + 5
if key in (curses.KEY_DOWN, ord("j")):
tree1.showtimer = True
tree1.selectedtimer += 1
tree1.timerhidetime = int(time.time()) + 5
if key == curses.KEY_ENTER or key == 10 or key == 13: # this is enter key
if tree1.showtimer:
if tree1.currentmenu == "timer":
tree1.starttimer(tree1.selectedtimer, stdscr, maxx)
else:
tree1.featureselect(tree1.selectedtimer, maxx, stdscr)
play_sound(TIMER_START_SOUND)
tree1.showtimer = False
if tree1.breakover:
tree1.breakover = False
tree1.starttimer(tree1.selectedtimer, stdscr, maxx)
play_sound(TIMER_START_SOUND)
if key == ord("q"):
treedata = open(RES_FOLDER / "treedata", "wb")
pickle.dump(tree1.age, treedata, protocol=None)
treedata.close()
exit()
if key == ord("u"):
toggle_sounds()
if key in (curses.KEY_RIGHT, ord("l")):
if tree1.showtimer:
tree1.selectedtimer = 0
tree1.currentmenu = "feature"
else:
tree1.radiomode = False
tree1.music_list_num += 1
if tree1.music_list_num > len(tree1.music_list) - 1:
tree1.music_list_num = len(tree1.music_list) - 1
tree1.media.stop()
tree1.media = vlc.MediaPlayer(tree1.music_list[tree1.music_list_num])
tree1.media.play()
tree1.show_music = True
tree1.musichidetime = int(time.time()) + 5
if key in (curses.KEY_LEFT, ord("h")):
if tree1.showtimer:
tree1.selectedtimer = 0
tree1.currentmenu = "timer"
else:
tree1.radiomode = False
tree1.music_list_num -= 1
if tree1.music_list_num < 0:
tree1.music_list_num = 0
tree1.media.stop()
tree1.media = vlc.MediaPlayer(tree1.music_list[tree1.music_list_num])
tree1.media.play()
tree1.show_music = True
tree1.musichidetime = int(time.time()) + 5
if key == ord(" "):
if tree1.media.is_playing():
tree1.media.pause()
tree1.pause = True
tree1.pausetime = time.time()
if key == ord("m"):
tree1.media.pause()
if not tree1.isloading and key == ord("n"):
tree1.lofiradio()
if key == ord("]"):
tree1.media.audio_set_volume(min(100, tree1.media.audio_get_volume()+1))
tree1.notifyendtime = int(time.time()) + 2
volume = str(round(tree1.media.audio_get_volume())) + "%"
tree1.notifystring = " "*round(maxx*(tree1.media.audio_get_volume()/100)-len(volume)-2) + volume
tree1.invert = True
tree1.isnotify = True
if key == ord("["):
tree1.media.audio_set_volume(max(0, tree1.media.audio_get_volume()-1))
tree1.notifyendtime = int(time.time()) + 2
volume = str(round(tree1.media.audio_get_volume())) + "%"
tree1.notifystring = " "*round(maxx*(tree1.media.audio_get_volume()/100)-len(volume)-2) + volume
tree1.invert = True
tree1.isnotify = True
if key == ord("}"):
effect_volume = min(100, effect_volume+1)
tree1.notifyendtime = int(time.time()) + 2
volume = str(effect_volume) + "%"
tree1.notifystring = " "*round(maxx*(effect_volume/100)-len(volume)-2) + volume
tree1.invert = True
tree1.isnotify = True
if key == ord("{"):
effect_volume = max(0, effect_volume-1)
tree1.notifyendtime = int(time.time()) + 2
volume = str(effect_volume) + "%"
tree1.notifystring = " "*round(maxx*(effect_volume/100)-len(volume)-2) + volume
tree1.invert = True
tree1.isnotify = True
if key == ord("="):
if tree1.media.get_time()+10000 < tree1.media.get_length():
tree1.media.set_time(i_time=tree1.media.get_time()+10000)
else:
tree1.media.set_time(tree1.media.get_length()-1)
time_sec = tree1.media.get_time()/1000
display_time = str(int(time_sec / 60)).zfill(2) + ":" + str(int(time_sec) % 60).zfill(2)
tree1.notifyendtime = int(time.time()) + 2
try:
tree1.notifystring = " "*(round(maxx*(tree1.media.get_time()/tree1.media.get_length()))-len(display_time)) + display_time
except ZeroDivisionError:
pass
tree1.invert = True
tree1.isnotify = True
if key == ord("-"):
if tree1.media.get_time()-10000 > 0:
tree1.media.set_time(i_time=tree1.media.get_time()-10000)
else:
tree1.media.set_time(0)
time_sec = tree1.media.get_time()/1000
display_time = str(int(time_sec / 60)).zfill(2) + ":" + str(int(time_sec) % 60).zfill(2)
tree1.notifyendtime = int(time.time()) + 2
tree1.notifystring = " "*(round(maxx*(tree1.media.get_time()/tree1.media.get_length()))-len(display_time)) + display_time
tree1.invert = True
tree1.isnotify = True
if key == ord("r"):
if tree1.isloop:
tree1.isloop = False
else:
tree1.isloop = True
tree1.notifyendtime = int(time.time()) + 2
tree1.notifystring = "REPEAT: " + str(tree1.isloop)
tree1.invert = False
tree1.isnotify = True
for i in range(10):
if key == ord(str(i)):
tree1.media.set_time(i_time=int(tree1.media.get_length()*(i/10)))
time_sec = tree1.media.get_time()/1000
display_time = str(int(time_sec / 60)).zfill(2) + ":" + str(int(time_sec) % 60).zfill(2)
tree1.notifyendtime = int(time.time()) + 2
tree1.notifystring = " "*(round(maxx*(tree1.media.get_time()/tree1.media.get_length()))-len(display_time)) + display_time
tree1.invert = True
tree1.isnotify = True
def GetSong(link):
video = YouTube("http://youtube.com/" + link.split("/")[-1] )
try:
video.streams
except:
return "WRONG LINK ERROR"
try:
songfile = str(video.streams.get_by_itag(251).download(timeout=30))
except:
return "DOWNLOAD ERROR"
    return songfile
def GetLinks(search_string):
html = urllib.request.urlopen("https://www.youtube.com/results?search_query=" + search_string.replace(" ", "+"))
video_ids = re.findall(r"watch\?v=(\S{11})", html.read().decode())
return "http://youtube.com/watch?v=" + str(video_ids[0])
#print(GetSong(GetLinks(input())))
#time.sleep(100)
class tree:
def __init__(self, stdscr, age):
self.stdscr = stdscr
self.age = age
self.show_music = False
self.music_list = list(_ for _ in RES_FOLDER.glob("*ogg"))
self.music_list_num = 0
self.media = vlc.MediaPlayer(str(self.music_list[self.music_list_num]))
self.pause = False
self.showtimer = False
self.timerlist = [
" POMODORO {}+{} ".format(TIMER_WORK_MINS[0], TIMER_BREAK_MINS[0]),
" POMODORO {}+{} ".format(TIMER_WORK_MINS[1], TIMER_BREAK_MINS[1]),
" POMODORO {}+{} ".format(TIMER_WORK_MINS[2], TIMER_BREAK_MINS[2]),
" POMODORO {}+{} ".format(TIMER_WORK_MINS[3], TIMER_BREAK_MINS[3]),
" CUSTOM TIMER ",
" END TIMER NOW ",
]
self.featurelist = [
" PLAY MUSIC FROM YOUTUBE ",
" LOFI RADIO 1 ",
" LOFI RADIO 2 ",
" LOFI RADIO 3 ",
" CUSTOM PLAYLIST "
]
self.currentmenu = "timer"
self.selectedtimer = 0
self.istimer = False
self.isbreak = False
self.breakover = False
self.timerhidetime = 0
self.musichidetime = 0
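        # Seed the RNG with the current day number so the chosen weather "season"
        # stays the same for the whole day; random.seed() below restores normal randomness.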
random.seed(int(time.time() / (60 * 60 * 24)))
self.season = random.choice(
["rain", "heavy_rain", "light_rain", "snow", "windy"]
)
random.seed()
self.youtubedisplay = False
self.downloaddisplay = False
self.spinnerstate = 0
self.notifyendtime = 0
self.isnotify = False
self.notifystring = " "
self.playlist = Playlist("https://www.youtube.com/playlist?list=PL6fhs6TSspZvN45CPJApnMYVsWhkt55h7")
self.radiomode = False
self.isloading = False
self.invert = False
self.breakendtext = "BREAK IS OVER, PRESS ENTER TO START NEW TIMER"
self.isloop = False
def display(self, maxx, maxy, seconds):
if self.age >= 1 and self.age < 5:
self.artfile = str(RES_FOLDER/"p1.txt")
if self.age >= 5 and self.age < 10:
self.artfile = str(RES_FOLDER/"p2.txt")
if self.age >= 10 and self.age < 20:
self.artfile = str(RES_FOLDER/"p3.txt")
if self.age >= 20 and self.age < 30:
self.artfile = str(RES_FOLDER/"p4.txt")
if self.age >= 30 and self.age < 40:
self.artfile = str(RES_FOLDER/"p5.txt")
if self.age >= 40 and self.age < 70:
self.artfile = str(RES_FOLDER/"p6.txt")
if self.age >= 70 and self.age < 120:
self.artfile = str(RES_FOLDER/"p7.txt")
if self.age >= 120 and self.age < 200:
self.artfile = str(RES_FOLDER/"p8.txt")
if self.age >= 200:
self.artfile = str(RES_FOLDER/"p9.txt")
printart(self.stdscr, self.artfile, int(maxx / 2), int(maxy * 3 / 4), 1)
addtext(
int(maxx / 2),
int(maxy * 3 / 4),
"age: " + str(int(self.age)) + " ",
-1,
self.stdscr,
3,
)
# RAIN
def rain(self, maxx, maxy, seconds, intensity, speed, char, color_pair):
random.seed(
int(seconds / speed)
) # this keeps the seed same for some time, so rains looks like its going slowly
# printart(self.stdscr, 'res/rain1.txt', int(maxx/2), int(maxy*3/4), 4)
for i in range(intensity):
ry = random.randrange(int(maxy * 1 / 4), int(maxy * 3 / 4))
rx = random.randrange(int(maxx / 3), int(maxx * 2 / 3))
self.stdscr.addstr(ry, rx, char, curses.color_pair(color_pair))
random.seed()
def seasons(self, maxx, maxy, seconds):
if self.season == "rain":
self.rain(maxx, maxy, seconds, 30, 30, "/", 4)
if self.season == "light_rain":
self.rain(maxx, maxy, seconds, 30, 60, "`", 4)
if self.season == "heavy_rain":
self.rain(maxx, maxy, seconds, 40, 20, "/", 4)
if self.season == "snow":
self.rain(maxx, maxy, seconds, 30, 30, ".", 5)
if self.season == "windy":
self.rain(maxx, maxy, seconds, 20, 30, "-", 4)
def notify(self, stdscr, maxy, maxx):
if self.isnotify and time.time() <= self.notifyendtime:
curses.textpad.rectangle(stdscr, 0,0,2, maxx-1)
if self.invert:
stdscr.addstr(1,1, self.notifystring[:maxx-2], curses.A_BOLD | curses.A_REVERSE)
else:
stdscr.addstr(1,1, self.notifystring[:maxx-2], curses.A_BOLD)
self.downloaddisplay = False
#self.invert = False
def menudisplay(self, stdscr, maxy, maxx):
if self.showtimer:
if self.currentmenu == "timer":
if self.selectedtimer > len(self.timerlist) - 1:
self.selectedtimer = len(self.timerlist) - 1
if self.selectedtimer < 0:
self.selectedtimer = 0
if self.currentmenu == "feature":
if self.selectedtimer > len(self.featurelist) - 1:
self.selectedtimer = len(self.featurelist) - 1
if self.selectedtimer < 0:
self.selectedtimer = 0
for i in range(len(self.timerlist)):
if i == self.selectedtimer and self.currentmenu == "timer":
stdscr.addstr(
int((maxy - len(self.timerlist)*2) / 2) + i * 2,
int(maxx / 25 + 4),
self.timerlist[i],
curses.A_REVERSE,
)
else:
stdscr.addstr(
int((maxy - len(self.timerlist)*2) / 2) + i * 2,
int(maxx / 25),
self.timerlist[i],
)
for i in range(len(self.featurelist)):
if i == self.selectedtimer and self.currentmenu == "feature":
stdscr.addstr(
int((maxy - len(self.featurelist)*2) / 2) + i * 2,
int(maxx * 24 / 25 - len(self.featurelist[i])) - 4,
self.featurelist[i],
curses.A_REVERSE,
)
else:
stdscr.addstr(
int((maxy - len(self.featurelist)*2) / 2) + i * 2,
int(maxx * 24 / 25 - len(self.featurelist[i])),
self.featurelist[i],
)
if int(time.time()) >= self.timerhidetime:
self.showtimer = False
if self.istimer:
self.secondsleft = int(self.workendtime) - int(time.time())
timertext = (
"Break in: "
+ str(int(self.secondsleft / 60)).zfill(2)
+ ":"
+ str(self.secondsleft % 60).zfill(2)
)
stdscr.addstr(
int(maxy * 10 / 11), int(maxx / 2 - len(timertext) / 2), timertext
)
if self.breakover:
self.stdscr.addstr(
int(maxy * 10 / 11),
int(
maxx / 2 - len(self.breakendtext) / 2
),
self.breakendtext,
curses.A_BLINK | curses.A_BOLD,
)
def breakstart(self):
if self.istimer:
play_sound(ALARM_SOUND)
if self.media.is_playing():
self.media.pause()
self.breakendtime = int(time.time()) + self.breaktime
self.istimer = False
self.isbreak = True
def breakdisplay(self, maxx, maxy):
self.secondsleft = int(self.breakendtime) - int(time.time())
timertext = (
"Break ends in: "
+ str(int(self.secondsleft / 60)).zfill(2)
+ ":"
+ str(self.secondsleft % 60).zfill(2)
)
self.stdscr.addstr(
int(maxy * 10 / 11), int(maxx / 2 - len(timertext) / 2), timertext
)
if self.secondsleft == 0:
self.media.play()
self.isbreak = False
self.breakover = True
play_sound(ALARM_SOUND)
def timer(self):
if self.istimer and int(time.time()) == int(self.workendtime):
self.breakstart()
def starttimer(self, inputtime, stdscr, maxx):
if inputtime == 5:
self.breakendtext = "TIMER IS OVER, PRESS ENTER"
self.worktime = 0
self.breaktime = 0
            self.istimer = False
elif inputtime == 4:
try:
curses.textpad.rectangle(stdscr, 0,0,2, maxx-1)
stdscr.addstr(1,1, "ENTER WORK LENGTH (min) : ")
stdscr.refresh()
curses.echo()
curses.nocbreak()
stdscr.nodelay(False)
stdscr.keypad(False)
curses.curs_set(1)
self.worktime = int(stdscr.getstr())*60
stdscr.addstr(1,1, " "*(maxx-2))
stdscr.addstr(1,1, "ENTER BREAK LENGTH (min) : ")
stdscr.refresh()
self.breaktime = int(stdscr.getstr())*60
curses.noecho()
curses.cbreak()
stdscr.nodelay(True)
stdscr.keypad(True)
curses.curs_set(0)
self.istimer = True
except ValueError:
curses.noecho()
curses.cbreak()
stdscr.nodelay(True)
stdscr.keypad(True)
curses.curs_set(0)
self.notifystring = "VALUE ERROR, PLEASE ENTER AN INTEGER"
self.notifyendtime = int(time.time())+5
self.isnotify = True
return 0
else:
self.breakendtext = "BREAK IS OVER, PRESS ENTER TO START NEW TIMER"
self.istimer = True
self.worktime = TIMER_WORK[inputtime]
self.breaktime = TIMER_BREAK[inputtime]
self.workendtime = int(time.time()) + self.worktime
def featureselect(self, inputfeature, maxx, stdscr):
self.radiomode = False
if inputfeature == 0:
if hasattr(self, "media"):
self.media.stop()
self.youtubedisplay = True
if inputfeature == 1:
self.playlist = Playlist("https://www.youtube.com/playlist?list=PLOzDu-MXXLliO9fBNZOQTBDddoA3FzZUo")
self.lofiradio()
if inputfeature == 2:
self.playlist = Playlist("https://www.youtube.com/playlist?list=PL0ONFXpPDe_mtm3ciwL-v7EE-7yLHDlP8")
self.lofiradio()
if inputfeature == 3:
self.playlist = Playlist("https://www.youtube.com/playlist?list=PLKYTmz7SemaqVDF6XJ15bv_8-j7ckkNgb")
self.lofiradio()
if inputfeature == 4:
curses.textpad.rectangle(stdscr, 0,0,2, maxx-1)
stdscr.addstr(1,1, "ENTER PLAyLIST LINK : ")
stdscr.refresh()
curses.echo()
curses.nocbreak()
stdscr.nodelay(False)
stdscr.keypad(False)
curses.curs_set(1)
self.playlist = Playlist(stdscr.getstr().decode("utf-8"))
curses.noecho()
curses.cbreak()
stdscr.nodelay(True)
stdscr.keypad(True)
curses.curs_set(0)
self.lofiradio()
def loading(self, stdscr, maxx):
spinner = [
"[ ]",
"[= ]",
"[== ]",
"[=== ]",
"[ ===]",
"[ ==]",
"[ =]",
"[ ]",
"[ =]",
"[ ==]",
"[ ===]",
"[====]",
"[=== ]",
"[== ]",
"[= ]"
]
self.spinnerstate+=0.5
if self.spinnerstate > len(spinner)-1:
self.spinnerstate = 0
curses.textpad.rectangle(stdscr, 0,0,2, maxx-1)
stdscr.addstr(1,1, "GETTING AUDIO " + spinner[int(self.spinnerstate)])
def youtube(self, stdscr, maxx):
if self.youtubedisplay:
curses.textpad.rectangle(stdscr, 0,0,2, maxx-1)
stdscr.addstr(1,1, "ENTER SEARCH QUERY/LINK : ")
stdscr.refresh()
if not "songinput" in locals():
curses.echo()
curses.nocbreak()
stdscr.nodelay(False)
stdscr.keypad(False)
curses.curs_set(1)
songinput = stdscr.getstr().decode("utf-8")
curses.noecho()
curses.cbreak()
stdscr.nodelay(True)
stdscr.keypad(True)
curses.curs_set(0)
stdscr.addstr(1,1, "GETTING AUDIO")
getsongthread = threading.Thread(target=self.playyoutube, args=(songinput,))
getsongthread.daemon = True
getsongthread.start()
self.youtubedisplay = False
self.downloaddisplay = True
del songinput
if self.downloaddisplay:
self.loading(stdscr, maxx)
def playyoutube(self, songinput):
try:
yt = YouTube(GetLinks(songinput))
song = yt.streams.get_by_itag(251).url
self.media = vlc.MediaPlayer(song)
self.media.play()
except:
self.notifyendtime = int(time.time()) + 5
self.notifystring = "ERROR GETTING AUDIO, PLEASE TRY AGAIN"
self.isnotify = True
exit()
self.downloaddisplay = False
self.yt_title = yt.title
self.notifyendtime = int(time.time()) + 10
self.notifystring = "Playing: " + self.yt_title
self.invert = False
self.isnotify = True
def getlofisong(self):
        # some links don't work; on failure, notify the user and stop the radio thread
try:
self.lofilink = random.choice(self.playlist.video_urls)
link = YouTube(self.lofilink).streams.get_by_itag(251).url
return link
except:
self.isloading = False
self.notifyendtime = int(time.time()) + 10
self.notifystring = "UNABLE TO CONNECT, PLEASE CHECK INTERNET CONNECTION"
self.invert = False
self.isnotify = True
self.radiomode = False
exit()
def lofiradio(self):
#lofi playlist from youtube
self.media.stop()
self.isloading = True
self.radiomode = True
radiothread = threading.Thread(target=self.actuallofiradio)
radiothread.daemon = True
radiothread.start()
def actuallofiradio(self):
if not hasattr(self, "lofisong"):
self.lofisong = self.getlofisong()
if self.lofisong == "ERROR":
exit()
self.media = vlc.MediaPlayer(self.lofisong)
self.media.play()
self.notifyendtime = int(time.time()) + 10
self.notifystring = "Playing: " + YouTube(self.lofilink).title
self.invert = False
self.isnotify = True
self.lofisong = self.getlofisong()
self.isloading = False
def main():
run = True
stdscr = curses.initscr()
stdscr.nodelay(True)
stdscr.keypad(True)
curses.curs_set(0)
curses.start_color()
curses.noecho()
curses.cbreak()
curses.use_default_colors()
try:
curses.init_pair(1, 113, -1) # passive selected text inner, outer
curses.init_pair(2, 85, -1) # timer color inner, outer
curses.init_pair(3, 3, -1) # active selected inner, outer
curses.init_pair(4, 51, -1) # border color inner, outer
curses.init_pair(5, 15, -1)
curses.init_pair(6, 1, -1)
curses.init_pair(7, curses.COLOR_YELLOW, -1)
except:
curses.init_pair(1, 1, 0) # passive selected text inner, outer
curses.init_pair(2, 1, 0) # timer color inner, outer
curses.init_pair(3, 1, 0) # active selected inner, outer
curses.init_pair(4, 1, 0) # border color inner, outer
curses.init_pair(5, 1, 0)
curses.init_pair(6, 1, 0)
curses.init_pair(7, 1, 0)
seconds = 5
anilen = 1
anispeed = 1
music_volume = 0
music_volume_max = 1
quote = getqt()
play_sound(GROWTH_SOUND)
tree1 = tree(stdscr, 1)
tree1.media.play()
try:
treedata_in = open(RES_FOLDER/ "treedata", "rb")
tree1.age = pickle.load(treedata_in)
except:
tree1.age = 1
try:
while run:
start = time.time()
try:
stdscr.erase()
maxy, maxx = stdscr.getmaxyx()
addtext(int(maxx / 2), int(maxy * 5 / 6), quote, anilen, stdscr, 2)
anilen += anispeed
if anilen > 150:
anilen = 150
                if (seconds % (100 * 60 * 10) == 0): # show another quote and grow the tree roughly every 10 min (seconds advances by ~100 per real second)
quote = getqt()
tree1.age += 1
anilen = 1
play_sound(GROWTH_SOUND)
if tree1.musichidetime <= int(time.time()):
tree1.show_music = False
if tree1.show_music:
if os.name == "posix":
showtext = (
"Playing: "
+ str(tree1.music_list[tree1.music_list_num]).split("/")[-1]
)
else:
showtext = (
"Playing: "
+ str(tree1.music_list[tree1.music_list_num]).split('\\')[-1]
)
stdscr.addstr(
int(maxy / 10),
int(maxx / 2 - len(showtext) / 2),
showtext,
curses.A_BOLD,
)
tree1.display(maxx, maxy, seconds)
tree1.seasons(maxx, maxy, seconds)
tree1.menudisplay(stdscr, maxy, maxx)
tree1.youtube(stdscr, maxx)
tree1.timer()
if tree1.media.is_playing() and tree1.media.get_length() - tree1.media.get_time() < 1000 :
if tree1.radiomode:
tree1.lofiradio()
if tree1.isloop:
tree1.media.set_position(0)
else:
tree1.media.stop()
tree1.media = vlc.MediaPlayer(tree1.music_list[tree1.music_list_num])
tree1.media.play()
if tree1.isloading:
tree1.loading(stdscr, maxx)
tree1.notify(stdscr, maxy, maxx)
key_events(stdscr, tree1, maxx)
while tree1.pause:
stdscr.erase()
stdscr.addstr(
int(maxy * 3 / 5),
int(maxx / 2 - len("PAUSED") / 2),
"PAUSED",
curses.A_BOLD,
)
key = stdscr.getch()
if key == ord(" "):
tree1.pause = False
tree1.media.play()
stdscr.refresh()
if tree1.istimer:
tree1.workendtime += time.time() - tree1.pausetime
if key == ord("q"):
treedata = open(RES_FOLDER / "treedata", "wb")
pickle.dump(tree1.age, treedata, protocol=None)
treedata.close()
exit()
time.sleep(0.1)
while tree1.isbreak:
stdscr.erase()
stdscr.addstr(
int(maxy * 3 / 5),
int(maxx / 2 - len("PRESS SPACE TO END BREAK") / 2),
"PRESS SPACE TO END BREAK",
curses.A_BOLD,
)
tree1.breakdisplay(maxx, maxy)
stdscr.refresh()
key = stdscr.getch()
if key == ord(" "):
tree1.isbreak = False
tree1.media.play()
stdscr.refresh()
if key == ord("q"):
treedata = open(RES_FOLDER / "treedata", "wb")
pickle.dump(tree1.age, treedata, protocol=None)
treedata.close()
exit()
time.sleep(0.1)
time.sleep(max(0.05 - (time.time() - start), 0))
#time.sleep(0.1)
seconds += 5
except KeyboardInterrupt:
try:
stdscr.erase()
stdscr.addstr(
int(maxy * 3 / 5),
int(maxx / 2 - len("PRESS 'q' TO EXIT") / 2),
"PRESS 'q' TO EXIT",
curses.A_BOLD,
)
stdscr.refresh()
time.sleep(1)
except KeyboardInterrupt:
pass
stdscr.refresh()
finally:
curses.echo()
curses.nocbreak()
curses.curs_set(1)
stdscr.keypad(False)
stdscr.nodelay(False)
curses.endwin()
def run_app():
"""A method to run the app"""
global QUOTE_FILE
config_file = Path(get_user_config_directory()) / "wisdom-tree" / QUOTE_FILE_NAME
if config_file.exists():
QUOTE_FILE = config_file
main()
if __name__ == "__main__":
# avoid running the app if the module is imported
run_app()
|
main.py
|
from spotify import DOWNLOADMP3 as SONGDOWNLOADER
import telepot
import spotify
import requests
import threading
import os
if 'BOT_TOKEN' in os.environ:
token = os.environ.get('BOT_TOKEN')
else:
token = 'token bot'
bot = telepot.Bot(token)
sort = {}
def txtfinder(txt):
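    # Strip any leading text so the message starts at the Spotify link (if present).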
a = txt.find("https://open.spotify.com")
txt = txt[a:]
return txt
def cantfind(chat_id):
bot.sendSticker(chat_id, 'CAACAgQAAxkBAAENH5phv52sd0Wz8El4xl4k917O2-lTFQACiAwAApR9AAFScJSLLR94puIjBA')
bot.sendMessage(chat_id, "can't find it")
def cantfindone(chat_id):
bot.sendSticker(chat_id, 'AAMCBAADGQEAAQ0fmmG_nax3RbPwSXjGXiT3Xs7b6VMVAAKIDAAClH0AAVJwlIstH3im4gEAB20AAyME')
bot.sendMessage(chat_id, "can't download one of them")
def downloader(link,chat_id,type):
PLAYLIST = False
if type=='AL':
ITEMS = spotify.album(link)
elif type == 'AR':
ITEMS = spotify.artist(link)
elif type == 'PL':
ITEMS = spotify.playlist(link)
PLAYLIST = True
else:
ITEMS = []
MESSAGE = ""
COUNT = 0
for song in ITEMS:
if PLAYLIST:
song = song['track']
COUNT+=1
MESSAGE += f"{COUNT}. {song['name']}\n"
bot.sendMessage(chat_id, MESSAGE)
for song in ITEMS:
if PLAYLIST:
song = song['track']
try:
SONGDOWNLOADER(song['href'], chat_id)
except:
cantfindone(chat_id)
def START(msg,chat_id):
print(f"{chat_id}:{msg}")
msglink = txtfinder(msg)
if msglink[:30]==('https://open.spotify.com/album') :
downloader(msg,chat_id,'AL')
elif msglink[:30]== ('https://open.spotify.com/track') :
try:
SONGDOWNLOADER(msg, chat_id)
except:
bot.sendSticker(chat_id,
'CAACAgQAAxkBAAIFSWBF_m3GHUtZJxQzobvD_iWxYVClAAJuAgACh4hSOhXuVi2-7-xQHgQ')
bot.sendMessage(chat_id, "can't download music")
    elif msglink[:33] == 'https://open.spotify.com/playlist':
downloader(msg,chat_id,'PL')
elif msglink[:31] == ('https://open.spotify.com/artist'):
downloader(msg,chat_id,'AR')
elif msg == "/start":
bot.sendMessage(chat_id,
"Hi \nsend me spotify link and I'll give you music\nor use /single or /album or "
"/artist")
elif msg == "/album":
sort[chat_id]='album'
bot.sendMessage(chat_id, 'send name and name of artist like this: \nName album\nor for better search use this:\nName album - Name artist')
elif msg == '/single':
sort[chat_id]='single'
bot.sendMessage(chat_id,'send name and name of artist like this: \nName song\nor for better search use this:\nName song - Name artist')
elif msg == '/artist':
sort[chat_id]='artist'
bot.sendMessage(chat_id,'send name and name of artist like this: \nName artist')
else:
try:
if sort[chat_id]=='artist':
try:
downloader(spotify.searchartist(msg),chat_id,'AR')
del sort[chat_id]
except:
cantfind(chat_id)
elif sort[chat_id]=='album':
try:
downloader(spotify.searchalbum(msg),chat_id,'AL')
del sort[chat_id]
except:
cantfind(chat_id)
elif sort[chat_id]=='single':
try:
SONGDOWNLOADER(spotify.searchsingle(msg), chat_id)
del sort[chat_id]
except:
cantfind(chat_id)
except:
bot.sendSticker(chat_id, 'CAACAgQAAxkBAAIBFGBLNcpfFcTLxnn5lR20ZbE2EJbrAAJRAQACEqdqA2XZDc7OSUrIHgQ')
bot.sendMessage(chat_id,'send me link or use /single or /album or /artist')
print('Listening ...')
tokenurl = f'https://api.telegram.org/bot{token}'
Update = tokenurl+"/getUpdates"
def UPDATE():
MESSAGES = requests.get(Update).json()
return MESSAGES['result']
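# Simple polling loop: fetch pending updates, acknowledge each one by advancing
# the offset, and handle every message in its own thread (at most ~15 handler
# threads run at a time).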
while 1:
    if threading.active_count()-1 < 15:
try:
for message in UPDATE():
offset = message['update_id']+1
offset = Update+f"?offset={offset}"
offset = requests.post(offset)
msg = message['message']['text']
chat_id = message['message']['from']['id']
thread = threading.Thread(target=START,args=(msg,chat_id))
thread.start()
except:
pass
|
python_production_server.py
|
import flask
import sys
import inspect
import uuid
import numpy as np
import collections
import collections.abc
import threading
import importlib
import pkgutil
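# Note: this module appears to emulate a subset of the MATLAB Production Server
# REST API with Flask: registered Python functions are described via /api/discovery
# and invoked (synchronously or asynchronously) with MATLAB-style mwtype/mwsize payloads.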
_archives = {}
_type_map = {
str: 'char',
float: 'double',
int: 'int32',
bool: 'logical',
'bool': 'logical',
'float64': 'double',
'float32': 'single',
'int8': 'int8',
'int16': 'int16',
'int32': 'int32',
'int64': 'int64',
'uint8': 'uint8',
'uint16': 'uint16',
'uint32': 'uint32',
'uint64': 'uint64',
np.int64: 'int64',
np.int32: 'int32',
np.int16: 'int16',
np.int8: 'int8',
np.uint64: 'uint64',
np.uint32: 'uint32',
np.uint16: 'uint16',
np.uint8: 'uint8',
np.bool_: 'logical',
np.float64: 'double',
np.float32: 'single',
}
_reverse_type_map = {
'char': str,
'double': np.double,
'single': np.float32,
'int8': np.int8,
'int16': np.int16,
'int32': np.int32,
'int64': np.int64,
'uint8': np.uint8,
'uint16': np.uint16,
'uint32': np.uint32,
'uint64': np.uint64,
'logical': np.bool_,
}
_app = flask.Flask(__name__)
_server_seq = 0
_async_requests = collections.defaultdict(dict)
def _execute_function(func, params, n_arg_out=-1, output_format=None):
inf_format = 'string'
nan_format = 'string'
output_mode = 'large'
if output_format:
if 'nanInfFormat' in output_format:
nan_format = inf_format = output_format['nanInfFormat']
if 'mode' in output_format:
output_mode = output_format['mode']
for i, par_name in enumerate(list(inspect.signature(func).parameters)):
if type(params[i]) == dict and 'mwtype' in params[i]:
params[i] = _reverse_type_map[params[i]['mwtype']](params[i]['mwdata'])
else:
annotation = func.__annotations__[par_name]
if type(annotation) == np.ndarray:
params[i] = np.array(params[i], dtype=annotation.dtype)
else:
params[i] = func.__annotations__[par_name](params[i])
result = list(_iterify(func(*params)))
if n_arg_out != -1:
result = result[:n_arg_out]
for i in range(len(result)):
if type(result[i]) == np.ndarray:
result[i] = result[i].tolist()
if output_mode == 'large':
annotations = _iterify(func.__annotations__['return'])
for i, out in enumerate(result):
typ, size = _evaluate_type(annotations[i])
if type(out) == np.ndarray:
size = out.shape
else:
# Try to set length based on element length (for strings and lists)
try:
size = (1, len(out))
except TypeError:
# Element has no length - use default (1, 1) size
size = (1, 1)
result[i] = {
'mwtype': typ,
'mwsize': size,
'mwdata': list(_iterify(out))
}
else:
result = list(map(lambda x: list(_iterify(x)), result))
return result
class _AsyncFunctionCall:
def __init__(self, func, rhs, n_arg_out=-1, output_format=None, client_id=None, collection=None):
self.id = uuid.uuid4().hex
self.collection = collection if collection else uuid.uuid4().hex
self.func = func
self.rhs = rhs
self.n_arg_out = n_arg_out
self.output_format = output_format
self.client_id = client_id if client_id else ''
self.state = 'READING'
self.result = []
self.last_modified_seq = _server_seq
def execute(self):
global _server_seq
if self.state == 'CANCELLED':
return
self.state = 'PROCESSING'
_server_seq += 1
self.last_modified_seq = _server_seq
try:
self.result = _execute_function(self.func, self.rhs, self.n_arg_out, self.output_format)
_server_seq += 1
self.last_modified_seq = _server_seq
self.state = 'READY'
except Exception:
_server_seq += 1
self.last_modified_seq = _server_seq
self.state = 'ERROR'
def cancel(self):
global _server_seq
_server_seq += 1
self.last_modified_seq = _server_seq
self.state = 'CANCELLED'
def get_representation(self):
return {
'id': self.id,
'self': '/~' + self.collection + '/requests/' + self.id,
'up': '/~' + self.collection + '/requests',
'lastModifiedSeq': self.last_modified_seq,
'state': self.state,
'client': self.client_id
}
def _iterify(x):
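    # Wrap scalars (including strings) in a 1-tuple so callers can always iterate:
    # _iterify(3) -> (3,), _iterify("ab") -> ("ab",), _iterify([1, 2]) -> [1, 2]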
    if isinstance(x, collections.abc.Sequence) and type(x) != str:
return x
return (x,)
def register_function(archive, func):
global _server_seq
_server_seq += 1
if archive not in _archives:
_archives[archive] = {
'uuid': archive[:12] + '_' + uuid.uuid4().hex,
'functions': {}
}
_archives[archive]['functions'][func.__name__] = func
def register_module(module):
functions = list(map(lambda x: x[1], filter(lambda x: inspect.isroutine(x[1]) and not inspect.isbuiltin(x[1]),
inspect.getmembers(module))))
for func in functions:
m_name = module.__name__.split('.')[-1]
register_function(m_name, func)
def autoload_package(pkg_name):
modules = list(map(lambda m: importlib.import_module(pkg_name + '.' + m.name),
pkgutil.iter_modules([pkg_name])))
for module in modules:
register_module(module)
def _evaluate_type(annotation):
if type(annotation) == np.ndarray:
typ = _type_map[annotation.dtype.__str__()]
size = []
for d in annotation.shape:
size.append(d if d != 0 else 'X')
else:
typ = _type_map[annotation]
size = [1, 1]
if typ == 'char':
size = [1, 'X']
return typ, size
@_app.route('/api/discovery', methods=['GET'])
def _discovery():
response = {
'discoverySchemaVersion': '1.0.0',
'archives': {}
}
vi = sys.version_info
py_version = str(vi[0]) + '.' + str(vi[1]) + '.' + str(vi[2])
for archive_key, archive in _archives.items():
arch_response = {
'archiveSchemaVersion': '1.0.0',
'archiveUuid': archive['uuid'],
'functions': {},
'matlabRuntimeVersion': py_version
}
for func_name, func in archive['functions'].items():
assert len(func.__annotations__), 'All functions must be annotated!'
assert 'return' in func.__annotations__, 'Return type must be annotated!'
arch_response['functions'][func_name] = {
'signatures': [{
'help': func.__doc__,
'inputs': [],
'outputs': []
}]
}
for i, output in enumerate(_iterify(func.__annotations__['return'])):
typ, size = _evaluate_type(output)
arch_response['functions'][func.__name__]['signatures'][0]['outputs'].append({
'mwsize': size,
'mwtype': typ,
'name': 'out' + str(i+1)
})
for par_name in list(inspect.signature(func).parameters):
typ, size = _evaluate_type(func.__annotations__[par_name])
arch_response['functions'][func.__name__]['signatures'][0]['inputs'].append({
'mwsize': size,
'mwtype': typ,
'name': par_name
})
response['archives'][archive_key] = arch_response
return flask.jsonify(response)
def _sync_request(archive_name, function_name, request_body):
global _server_seq
_server_seq += 1
params = request_body['rhs']
n_arg_out = request_body['nargout'] if 'nargout' in request_body else -1
output_format = request_body['outputFormat'] if 'outputFormat' in request_body else None
try:
func = _archives[archive_name]['functions'][function_name]
result = _execute_function(func, params, n_arg_out, output_format)
except KeyError:
return '404 FunctionNotFound', 404
return flask.jsonify({'lhs': result})
def _async_request(archive_name, function_name, request_body, client_id=None):
global _server_seq
_server_seq += 1
try:
func = _archives[archive_name]['functions'][function_name]
except KeyError:
return '404 ResourceNotFound', 404
params = request_body['rhs']
n_arg_out = request_body['nargout'] if 'nargout' in request_body else -1
output_format = request_body['outputFormat'] if 'outputFormat' in request_body else None
async_call = _AsyncFunctionCall(func, params, n_arg_out, output_format, client_id)
_async_requests[async_call.collection][async_call.id] = async_call
response = async_call.get_representation()
thread = threading.Thread(target=async_call.execute)
thread.start()
return flask.jsonify(response), 201
@_app.route('/<archive_name>/<function_name>', methods=['POST'])
def _call_request(archive_name, function_name):
mode = flask.request.args.get('mode', False)
if mode and mode == 'async':
client_id = flask.request.args.get('client', None)
return _async_request(archive_name, function_name, flask.json.loads(flask.request.data), client_id)
else:
return _sync_request(archive_name, function_name, flask.json.loads(flask.request.data))
@_app.route('/<collection_id>/requests', methods=['GET'])
def _get_collection(collection_id):
if collection_id[0] == '~':
collection_id = collection_id[1:]
since = flask.request.args.get('since', None)
if not since:
return '400 MissingParamSince', 400
clients = flask.request.args.get('clients', None)
clients = clients.split(',') if clients else None
ids = flask.request.args.get('ids', None)
ids = ids.split(',') if ids else None
if not clients and not ids:
return '400 MissingQueryParams', 400
try:
response = {
'createdSeq': _server_seq,
'data': []
}
for request in _async_requests[collection_id].values():
if ids and request.id in ids or clients and request.client_id in clients:
response['data'].append(request.get_representation())
return flask.jsonify(response)
except KeyError:
return '', 404
@_app.route('/<collection_id>/requests/<request_id>', methods=['GET'])
def _get_request_representation(collection_id, request_id):
if collection_id[0] == '~':
collection_id = collection_id[1:]
try:
response = _async_requests[collection_id][request_id].get_representation()
return flask.jsonify(response)
except KeyError:
return '404 ResourceNotFound', 404
@_app.route('/<collection_id>/requests/<request_id>/info', methods=['GET'])
def _get_request_status(collection_id, request_id):
if collection_id[0] == '~':
collection_id = collection_id[1:]
try:
request = _async_requests[collection_id][request_id]
response = {
'request': '/~' + request.collection + '/requests/' + request.id,
'lastModifiedSeq': request.last_modified_seq,
'state': request.state
}
return flask.jsonify(response)
except KeyError:
return '404 ResourceNotFound', 404
@_app.route('/<collection_id>/requests/<request_id>/result', methods=['GET'])
def _get_request_result(collection_id, request_id):
if collection_id[0] == '~':
collection_id = collection_id[1:]
try:
request = _async_requests[collection_id][request_id]
if request.state == 'CANCELLED':
return '410 RequestAlreadyCancelled', 410
if request.state == 'ERROR':
return '500 InternalServerError', 500
if request.state == 'READY':
return flask.jsonify({'lhs': request.result})
except KeyError:
return '404 RequestNotFound', 404
return '500 InternalServerError', 500
@_app.route('/<collection_id>/requests/<request_id>/cancel', methods=['POST'])
def _cancel_request(collection_id, request_id):
if collection_id[0] == '~':
collection_id = collection_id[1:]
try:
request = _async_requests[collection_id][request_id]
if request.state == 'CANCELLED':
return '410 RequestAlreadyCancelled', 410
if request.state == 'READY':
return '410 RequestAlreadyCompleted', 410
if request.state == 'ERROR':
return '500 InternalServerError', 500
request.cancel()
return '204 No Content', 204
except KeyError:
return '404 RequestNotFound', 404
return '500 InternalServerError', 500
@_app.route('/<collection_id>/requests/<request_id>', methods=['DELETE'])
def _delete_request(collection_id, request_id):
if collection_id[0] == '~':
collection_id = collection_id[1:]
try:
request = _async_requests[collection_id][request_id]
if request.state not in ['READY', 'ERROR', 'CANCELLED']:
return '409 RequestNotCompleted', 409
del _async_requests[collection_id][request_id]
return '204 No Content', 204
except KeyError:
return '404 RequestNotFound', 404
return '500 InternalServerError', 500
def run(ip='0.0.0.0', port='8080'):
_app.run(ip, port)
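# Minimal usage sketch (illustrative, not part of the original module; the archive
# name 'demo' and the function are made up): register one annotated function and
# start the server, so GET /api/discovery describes it and POST /demo/add runs it.
if __name__ == '__main__':
    def add(a: float, b: float) -> float:
        """Add two numbers."""
        return a + b

    register_function('demo', add)
    run(port=8080)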
|
generate_train_5.py
|
import json
import os
from multiprocessing import Process
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
def cut_videos(cmds, worker_id):
    # each worker process runs its own slice of the video-cutting commands
    for i, cmd in enumerate(cmds):
        print(i, cmd)
        os.system(cmd)
    return
cmds = [c.strip() for c in open("loveu_1s_cmds_5.txt").readlines()]
cpu_core = 25
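# chunk size: splitting the command list into pieces of this size yields roughly
# cpu_core chunks, i.e. about one worker process per assumed CPU core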
mp_num = max(1, int(len(cmds)/cpu_core))
splits = list(chunks(cmds, mp_num))
ps = []
for i in range(len(splits)):
p = Process(target=cut_videos, args=(splits[i], i))
p.start()
ps.append(p)
for p in ps:
p.join()
|
multiprocess1.py
|
'''
Create multiple processes using the Process class.
'''
# The output of this program shows that when the parent process creates a child
# process it copies the process and its data structures: every process has its own
# independent memory space, so data can only be shared between processes via IPC.
from multiprocessing import Process, Queue
from time import sleep
def sub_task(string, q):
number = q.get()
while number:
print('%d: %s' % (number, string))
sleep(0.001)
number = q.get()
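        # Note: q.get() blocks once the queue is drained, so the workers never exit
        # on their own; a real program would typically use a sentinel value or a timeout.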
def main():
q = Queue(10)
for number in range(1, 11):
q.put(number)
Process(target=sub_task, args=('Ping', q)).start()
Process(target=sub_task, args=('Pong', q)).start()
if __name__ == '__main__':
main()
|
train.py
|
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
#from pytorch_pretrained_bert import BertConfig
from transformers import BertConfig
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.model_builder import Summarizer
from models.trainer import build_trainer
from others.logging import logger, init_logger
model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers','encoder','ff_actv', 'use_interval','rnn_size']
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def multi_main(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' %gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
train(args,device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
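    # Flow: a child puts (rank, traceback) on error_queue; error_listener picks it
    # up and raises SIGUSR1 in the parent, whose handler interrupts the remaining
    # children and re-raises the child's traceback in the parent process.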
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def wait_and_validate(args, device_id):
timestep = 0
if (args.test_all):
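        # Validate every saved checkpoint in mtime order, stop early once perplexity
        # has not improved for 10 checkpoints, then run test() on the three best ones.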
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test(args, device_id, cp, step)
else:
while (True):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
validate(args, device_id, cp, step)
test(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
else:
time.sleep(300)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
# config = BertConfig.from_json_file(args.bert_config_path)
config = BertConfig.from_pretrained('bert-base-multilingual-cased')
model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
model.load_cp(checkpoint)
model.eval()
valid_iter =data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=False)
trainer = build_trainer(args, device_id, model, None)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def test(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
# config = BertConfig.from_json_file(args.bert_config_path)
config = BertConfig.from_pretrained('bert-base-multilingual-cased')
model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
model.load_cp(checkpoint)
model.eval()
test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, model, None)
trainer.test(test_iter,step)
def baseline(args, cal_lead=False, cal_oracle=False):
test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, None, None)
#
if (cal_lead):
trainer.test(test_iter, 0, cal_lead=True)
elif (cal_oracle):
trainer.test(test_iter, 0, cal_oracle=True)
def train(args, device_id):
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
def train_iter_fct():
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=False)
model = Summarizer(args, device, load_pretrained_bert=True)
if args.train_from != '':
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
model.load_cp(checkpoint)
optim = model_builder.build_optim(args, model, checkpoint)
else:
optim = model_builder.build_optim(args, model, None)
logger.info(model)
trainer = build_trainer(args, device_id, model, optim)
trainer.train(train_iter_fct, args.train_steps)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-encoder", default='classifier', type=str, choices=['classifier','transformer','rnn','baseline'])
parser.add_argument("-mode", default='train', type=str, choices=['train','validate','test'])
parser.add_argument("-bert_data_path", default='../bert_data/cnndm')
parser.add_argument("-model_path", default='../models/')
parser.add_argument("-result_path", default='../results/cnndm')
parser.add_argument("-temp_dir", default='../temp')
parser.add_argument("-bert_config_path", default='../bert_config_uncased_base.json')
parser.add_argument("-batch_size", default=1000, type=int)
parser.add_argument("-use_interval", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-hidden_size", default=128, type=int)
parser.add_argument("-ff_size", default=512, type=int)
parser.add_argument("-heads", default=4, type=int)
parser.add_argument("-inter_layers", default=2, type=int)
parser.add_argument("-rnn_size", default=512, type=int)
parser.add_argument("-param_init", default=0, type=float)
parser.add_argument("-param_init_glorot", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-dropout", default=0.1, type=float)
parser.add_argument("-optim", default='adam', type=str)
parser.add_argument("-lr", default=1, type=float)
parser.add_argument("-beta1", default= 0.9, type=float)
parser.add_argument("-beta2", default=0.999, type=float)
parser.add_argument("-decay_method", default='', type=str)
parser.add_argument("-warmup_steps", default=8000, type=int)
parser.add_argument("-max_grad_norm", default=0, type=float)
parser.add_argument("-save_checkpoint_steps", default=5, type=int)
parser.add_argument("-accum_count", default=1, type=int)
parser.add_argument("-world_size", default=1, type=int)
parser.add_argument("-report_every", default=1, type=int)
parser.add_argument("-train_steps", default=1000, type=int)
parser.add_argument("-recall_eval", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument('-visible_gpus', default='-1', type=str)
parser.add_argument('-gpu_ranks', default='0', type=str)
parser.add_argument('-log_file', default='../logs/cnndm.log')
parser.add_argument('-dataset', default='')
parser.add_argument('-seed', default=666, type=int)
parser.add_argument("-test_all", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument("-test_from", default='')
parser.add_argument("-train_from", default='')
parser.add_argument("-report_rouge", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-block_trigram", type=str2bool, nargs='?', const=True, default=True)
args = parser.parse_args()
args.gpu_ranks = [int(i) for i in args.gpu_ranks.split(',')]
os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
device_id = 0 if device == "cuda" else -1
if(args.world_size>1):
multi_main(args)
elif (args.mode == 'train'):
train(args, device_id)
elif (args.mode == 'validate'):
wait_and_validate(args, device_id)
elif (args.mode == 'lead'):
baseline(args, cal_lead=True)
elif (args.mode == 'oracle'):
baseline(args, cal_oracle=True)
elif (args.mode == 'test'):
cp = args.test_from
try:
step = int(cp.split('.')[-2].split('_')[-1])
except:
step = 0
test(args, device_id, cp, step)
|
_cancel_many_calls_test.py
|
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test making many calls and immediately cancelling most of them."""
import threading
import unittest
from grpc._cython import cygrpc
from grpc.framework.foundation import logging_pool
from tests.unit.framework.common import test_constants
_INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
_EMPTY_FLAGS = 0
_EMPTY_METADATA = cygrpc.Metadata(())
_SERVER_SHUTDOWN_TAG = 'server_shutdown'
_REQUEST_CALL_TAG = 'request_call'
_RECEIVE_CLOSE_ON_SERVER_TAG = 'receive_close_on_server'
_RECEIVE_MESSAGE_TAG = 'receive_message'
_SERVER_COMPLETE_CALL_TAG = 'server_complete_call'
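# Roughly this fraction of the concurrent RPCs is allowed to complete before the
# client cancels every call that is still outstanding.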
_SUCCESS_CALL_FRACTION = 1.0 / 8.0
class _State(object):
def __init__(self):
self.condition = threading.Condition()
self.handlers_released = False
self.parked_handlers = 0
self.handled_rpcs = 0
def _is_cancellation_event(event):
return (
event.tag is _RECEIVE_CLOSE_ON_SERVER_TAG and
event.batch_operations[0].received_cancelled)
class _Handler(object):
def __init__(self, state, completion_queue, rpc_event):
self._state = state
self._lock = threading.Lock()
self._completion_queue = completion_queue
self._call = rpc_event.operation_call
def __call__(self):
with self._state.condition:
self._state.parked_handlers += 1
if self._state.parked_handlers == test_constants.THREAD_CONCURRENCY:
self._state.condition.notify_all()
while not self._state.handlers_released:
self._state.condition.wait()
with self._lock:
self._call.start_batch(
cygrpc.Operations(
(cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)),
_RECEIVE_CLOSE_ON_SERVER_TAG)
self._call.start_batch(
cygrpc.Operations((cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
_RECEIVE_MESSAGE_TAG)
first_event = self._completion_queue.poll()
if _is_cancellation_event(first_event):
self._completion_queue.poll()
else:
with self._lock:
operations = (
cygrpc.operation_send_initial_metadata(
_EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_send_message(b'\x79\x57', _EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
_EMPTY_METADATA, cygrpc.StatusCode.ok, b'test details!',
_EMPTY_FLAGS),
)
self._call.start_batch(
cygrpc.Operations(operations), _SERVER_COMPLETE_CALL_TAG)
self._completion_queue.poll()
self._completion_queue.poll()
def _serve(state, server, server_completion_queue, thread_pool):
for _ in range(test_constants.RPC_CONCURRENCY):
call_completion_queue = cygrpc.CompletionQueue()
server.request_call(
call_completion_queue, server_completion_queue, _REQUEST_CALL_TAG)
rpc_event = server_completion_queue.poll()
thread_pool.submit(_Handler(state, call_completion_queue, rpc_event))
with state.condition:
state.handled_rpcs += 1
if test_constants.RPC_CONCURRENCY <= state.handled_rpcs:
state.condition.notify_all()
server_completion_queue.poll()
class _QueueDriver(object):
def __init__(self, condition, completion_queue, due):
self._condition = condition
self._completion_queue = completion_queue
self._due = due
self._events = []
self._returned = False
def start(self):
def in_thread():
while True:
event = self._completion_queue.poll()
with self._condition:
self._events.append(event)
self._due.remove(event.tag)
self._condition.notify_all()
if not self._due:
self._returned = True
return
thread = threading.Thread(target=in_thread)
thread.start()
def events(self, at_least):
with self._condition:
while len(self._events) < at_least:
self._condition.wait()
return tuple(self._events)
class CancelManyCallsTest(unittest.TestCase):
def testCancelManyCalls(self):
server_thread_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
server_completion_queue = cygrpc.CompletionQueue()
server = cygrpc.Server()
server.register_completion_queue(server_completion_queue)
port = server.add_http2_port('[::]:0')
server.start()
channel = cygrpc.Channel('localhost:{}'.format(port))
state = _State()
server_thread_args = (
state, server, server_completion_queue, server_thread_pool,)
server_thread = threading.Thread(target=_serve, args=server_thread_args)
server_thread.start()
client_condition = threading.Condition()
client_due = set()
client_completion_queue = cygrpc.CompletionQueue()
client_driver = _QueueDriver(
client_condition, client_completion_queue, client_due)
client_driver.start()
with client_condition:
client_calls = []
for index in range(test_constants.RPC_CONCURRENCY):
client_call = channel.create_call(
None, _EMPTY_FLAGS, client_completion_queue, b'/twinkies', None,
_INFINITE_FUTURE)
operations = (
cygrpc.operation_send_initial_metadata(
_EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_send_message(b'\x45\x56', _EMPTY_FLAGS),
cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
cygrpc.operation_receive_message(_EMPTY_FLAGS),
cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),
)
tag = 'client_complete_call_{0:04d}_tag'.format(index)
client_call.start_batch(cygrpc.Operations(operations), tag)
client_due.add(tag)
client_calls.append(client_call)
with state.condition:
while True:
if state.parked_handlers < test_constants.THREAD_CONCURRENCY:
state.condition.wait()
elif state.handled_rpcs < test_constants.RPC_CONCURRENCY:
state.condition.wait()
else:
state.handlers_released = True
state.condition.notify_all()
break
client_driver.events(
test_constants.RPC_CONCURRENCY * _SUCCESS_CALL_FRACTION)
with client_condition:
for client_call in client_calls:
client_call.cancel()
with state.condition:
server.shutdown(server_completion_queue, _SERVER_SHUTDOWN_TAG)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
app.py
|
import nighttimeParenting as infra
import time
from threading import Thread, Lock, Event
from datetime import datetime
import smtplib, ssl
# TODO:
# have to figure out stuff for analytics
######################## global variables ############################
# taken from parenting.firstcry.com
messages = ["You are doing great; it will be over soon, hang in there!",
"Keep calm, hold your breath, and change this diaper.",
"3 am. Party in my crib, be there. Bring your own diaper.",
"Poops, I did it again, I made you believe that this could be pee.",
"Houston, we have a problem... It is code brown."]
# create empty log
log = []
# display encouraging messages by default
toggleMessage = True
# create objects
m = infra.micCircuit()
sd = infra.StereoDecoder()
oled = infra.OLED()
lBar = infra.ledBar()
hrs = infra.HRSensor()
phyUI = infra.PhysicalUI(sd, oled, lBar)
# creates locks
i2cL = Lock()
spiL = Lock()
displayL = Lock()
# create events
wakeup = Event()
asleep = Event()
stressHigh = Event()
enableBreathing = Event()
enableMusic = Event()
enableMessages = Event()
######################## supporting functions ########################
# sends email to caregiver
def sendEmail(message):
context = ssl.create_default_context()
with smtplib.SMTP_SSL(smtp_server, port) as server:
server.set_debuglevel(1)
#server.ehlo()
#server.starttls(context=context)
server.ehlo()
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email, message)
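# Illustrative sketch only (not used by the threads below): the same smtplib
# pattern as sendEmail(), but with the connection details passed in explicitly
# instead of read from the module-level globals defined under __main__.
# The host and credentials in any call to it are placeholders.
def sendEmailExplicit(message, smtp_host, smtp_port, sender, receiver, pwd):
    context = ssl.create_default_context()
    with smtplib.SMTP_SSL(smtp_host, smtp_port, context=context) as server:
        server.login(sender, pwd)
        server.sendmail(sender, receiver, message)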
#################### tasks that run in background ####################
# monitors audio level in the bedroom
def monitorBaby():
# sampling window in seconds
timeInt = 2
# peak-to-peak trigger threshold
trigVal = 32
# constantly monitor audio levels
while True:
with spiL:
isTriggered = m.trigger(trigVal, timeInt)
# set the wakeup event if the audio threshold was exceeded
if isTriggered:
wakeup.set()
asleep.wait()
time.sleep(2)
# calculate stress level of caregiver
def calculateStressLevel():
while True:
# get stress level
with i2cL:
stressLevel = hrs.getHR_SPO2()
# determine if stress level is high
if (stressLevel[0] is not None and stressLevel[1] is not None):
# unpack the BPM and SpO2 readings
BPM = stressLevel[0]
Spo2 = stressLevel[1]
print("BPM: " + str(BPM) + " Spo2: " + str(Spo2))
if (BPM >= 110 and Spo2 < 95):
stressHigh.set()
enableBreathing.set()
enableMessages.set()
enableMusic.set()
# read bpm and spo2 every minute
time.sleep(60)
############### tasks that run in response to stress level ##############
# sends an email warning that stress levels are high
def notifyStressLevels():
while True:
stressHigh.wait()
message = """Subject: Stress Level Elevated!\n
BPM above 110 and SPO2 below 95%."""
sendEmail(message)
stressHigh.clear()
# displays encouraging messages
def messageDisplay():
while True:
enableMessages.wait()
if toggleMessage:
for mes in messages:
with displayL:
with i2cL:
oled.clearDisplay()
oled.printMessage(mes)
time.sleep(10)
# displays time
def timeDisplay():
while True:
if not toggleMessage:
with displayL:
with i2cL:
oled.clearDisplay()
oled.displayTime()
time.sleep(60) # new time display every minute
# updates breathing
def updateBreathing():
while True:
enableBreathing.wait()
for val in [0b0000000000, 0b0010000000, 0b0011000000, 0b0011100000, 0b0011110000,
0b0011111000, 0b0011111100, 0b0011111110, 0b0011111111, 0b0111111111, 0b1111111111]:
with spiL:
lBar.set_bar_level(val)
time.sleep(0.5)
for val in [0b1111111111, 0b0111111111, 0b0011111111, 0b0011111110, 0b0011111100,
0b0011111000, 0b0011110000, 0b0011100000, 0b0011000000, 0b0010000000, 0b0000000000]:
with spiL:
lBar.set_bar_level(val)
time.sleep(0.5)
# turns on soothing music
def playMusic():
enableMusic.wait()
sd.play()
############### tasks that run in response to baby wakeup ##############
# adds to log and sends email when wakeup event occurs
def wakeupEvent():
global log
wakeup.wait()
wakeupTime = datetime.now().strftime("%B %d, %Y %I:%M:%S %p")
log.append(wakeupTime)
message = """Subject: Your baby is Awake!\n
Your baby woke up at approximately """ + wakeupTime + "."
sendEmail(message)
wakeup.clear()
############### tasks that run in response to the browser UI ##############
# pauses messages
def pauseMessages():
global toggleMessage
toggleMessage = False
# resumes messages
def resumeMessages():
global toggleMessage
toggleMessage = True
# pauses music
def pauseMusic():
sd.pause()
# resumes music
def resumeMusic():
sd.unpause()
# adjusts volume
def adjustVolume(volLevel):
with spiL:
sd.setVol(volLevel)
# adds message
def addMessage(mes):
global messages
messages.append(mes)
# deletes specified message
# or pops last message
def deleteMessage(mes=''):
global messages
if mes != '':
messages.remove(mes)
else:
messages.pop()
############### tasks that run in response to physical UI ##############
# updates brightness
def updateBrightness():
while True:
with spiL:
currBrightness = phyUI.getBrightness()
with i2cL:
phyUI.setBrightness(currBrightness)
time.sleep(3)
# updates volume
def updateVolume():
while True:
with spiL:
phyUI.toggleVolume()
time.sleep(3)
# send SOS message to parent
def sendSOS():
while True:
if phyUI.triggerSOS():
# send email
print("button")
message = """Subject: SOS\n
In dire need of assistance! Please come help!"""
sendEmail(message)
# show confirmation on display
confirmMes = "Email sent successfully!"
with displayL:
with i2cL:
oled.clearDisplay()
oled.printMessage(confirmMes)
time.sleep(3) # let it appear on screen for 3 seconds
if __name__ == "__main__":
# set up server for email
port = 465  # SSL port (use 587 for STARTTLS)
smtp_server = "smtp.mail.yahoo.com"  # or "smtp.gmail.com"
#sender_email = "apm532@nyu.edu"
receiver_email = "tadific487@angeleslid.com"
password = "spqcgqenfthwonyz"
sender_email = "tzali.goldberg@yahoo.com"
# receiver_email = input("Type your email and press enter: ")
# password = input("Type your password and press enter: ")
# thread everything except browser UI
t1 = Thread(target=monitorBaby)
t1.start()
t2 = Thread(target=calculateStressLevel)
t2.start()
t3 = Thread(target=notifyStressLevels)
t3.start()
t4 = Thread(target=messageDisplay)
t4.start()
t5 = Thread(target=timeDisplay)
t5.start()
t6 = Thread(target=updateBreathing)
t6.start()
t7 = Thread(target=playMusic)
t7.start()
t8 = Thread(target=wakeupEvent)
t8.start()
t9 = Thread(target=updateBrightness)
t9.start()
t10 = Thread(target=updateVolume)
t10.start()
t11 = Thread(target=sendSOS)
t11.start()
# join threads
t1.join()
t2.join()
t3.join()
t4.join()
t5.join()
t6.join()
t7.join()
t8.join()
t9.join()
t10.join()
t11.join()
|
test_queue.py
|
# Some simple queue module tests, plus some failure conditions
# to ensure the Queue locks remain stable.
import itertools
import random
import threading
import time
import unittest
import weakref
from test import support
py_queue = support.import_fresh_module('queue', blocked=['_queue'])
c_queue = support.import_fresh_module('queue', fresh=['_queue'])
need_c_queue = unittest.skipUnless(c_queue, "No _queue module found")
QUEUE_SIZE = 5
def qfull(q):
return q.maxsize > 0 and q.qsize() == q.maxsize
# A thread to run a function that unclogs a blocked Queue.
class _TriggerThread(threading.Thread):
def __init__(self, fn, args):
self.fn = fn
self.args = args
self.startedEvent = threading.Event()
threading.Thread.__init__(self)
def run(self):
# The sleep isn't necessary, but is intended to give the blocking
# function in the main thread a chance at actually blocking before
# we unclog it. But if the sleep is longer than the timeout-based
# tests wait in their blocking functions, those tests will fail.
# So we give them much longer timeout values compared to the
# sleep here (I aimed at 10 seconds for blocking functions --
# they should never actually wait that long - they should make
# progress as soon as we call self.fn()).
time.sleep(0.1)
self.startedEvent.set()
self.fn(*self.args)
# Execute a function that blocks, and in a separate thread, a function that
# triggers the release. Returns the result of the blocking function. Caution:
# block_func must guarantee to block until trigger_func is called, and
# trigger_func must guarantee to change queue state so that block_func can make
# enough progress to return. In particular, a block_func that just raises an
# exception regardless of whether trigger_func is called will lead to
# timing-dependent sporadic failures, and one of those went rarely seen but
# undiagnosed for years. Now block_func must be unexceptional. If block_func
# is supposed to raise an exception, call do_exceptional_blocking_test()
# instead.
class BlockingTestMixin:
def do_blocking_test(self, block_func, block_args, trigger_func, trigger_args):
thread = _TriggerThread(trigger_func, trigger_args)
thread.start()
try:
self.result = block_func(*block_args)
# If block_func returned before our thread made the call, we failed!
if not thread.startedEvent.is_set():
self.fail("blocking function %r appeared not to block" %
block_func)
return self.result
finally:
support.join_thread(thread) # make sure the thread terminates
# Call this instead if block_func is supposed to raise an exception.
def do_exceptional_blocking_test(self, block_func, block_args, trigger_func,
trigger_args, expected_exception_class):
thread = _TriggerThread(trigger_func, trigger_args)
thread.start()
try:
try:
block_func(*block_args)
except expected_exception_class:
raise
else:
self.fail("expected exception of kind %r" %
expected_exception_class)
finally:
support.join_thread(thread) # make sure the thread terminates
if not thread.startedEvent.is_set():
self.fail("trigger thread ended but event never set")
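# Illustrative note (not part of the original test suite): do_blocking_test()
# pairs a call that blocks with the call that unblocks it. For a bounded queue
# that is already full, a hypothetical use looks like:
#
#     q = self.type2test(1)
#     q.put("x")
#     # q.put blocks until the trigger thread calls q.get()
#     self.do_blocking_test(q.put, ("y",), q.get, ())
#
# The trigger runs in a separate thread after a short sleep, so block_func must
# genuinely block until the queue state changes (see the comment above).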
class BaseQueueTestMixin(BlockingTestMixin):
def setUp(self):
self.cum = 0
self.cumlock = threading.Lock()
def basic_queue_test(self, q):
if q.qsize():
raise RuntimeError("Call this function with an empty queue")
self.assertTrue(q.empty())
self.assertFalse(q.full())
# I guess we better check things actually queue correctly a little :)
q.put(111)
q.put(333)
q.put(222)
target_order = dict(Queue = [111, 333, 222],
LifoQueue = [222, 333, 111],
PriorityQueue = [111, 222, 333])
actual_order = [q.get(), q.get(), q.get()]
self.assertEqual(actual_order, target_order[q.__class__.__name__],
"Didn't seem to queue the correct data!")
for i in range(QUEUE_SIZE-1):
q.put(i)
self.assertTrue(q.qsize(), "Queue should not be empty")
self.assertTrue(not qfull(q), "Queue should not be full")
last = 2 * QUEUE_SIZE
full = 3 * 2 * QUEUE_SIZE
q.put(last)
self.assertTrue(qfull(q), "Queue should be full")
self.assertFalse(q.empty())
self.assertTrue(q.full())
try:
q.put(full, block=0)
self.fail("Didn't appear to block with a full queue")
except self.queue.Full:
pass
try:
q.put(full, timeout=0.01)
self.fail("Didn't appear to time-out with a full queue")
except self.queue.Full:
pass
# Test a blocking put
self.do_blocking_test(q.put, (full,), q.get, ())
self.do_blocking_test(q.put, (full, True, 10), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
try:
q.get(block=0)
self.fail("Didn't appear to block with an empty queue")
except self.queue.Empty:
pass
try:
q.get(timeout=0.01)
self.fail("Didn't appear to time-out with an empty queue")
except self.queue.Empty:
pass
# Test a blocking get
self.do_blocking_test(q.get, (), q.put, ('empty',))
self.do_blocking_test(q.get, (True, 10), q.put, ('empty',))
def worker(self, q):
while True:
x = q.get()
if x < 0:
q.task_done()
return
with self.cumlock:
self.cum += x
q.task_done()
def queue_join_test(self, q):
self.cum = 0
threads = []
for i in (0,1):
thread = threading.Thread(target=self.worker, args=(q,))
thread.start()
threads.append(thread)
for i in range(100):
q.put(i)
q.join()
self.assertEqual(self.cum, sum(range(100)),
"q.join() did not block until all tasks were done")
for i in (0,1):
q.put(-1) # instruct the threads to close
q.join() # verify that you can join twice
for thread in threads:
thread.join()
def test_queue_task_done(self):
# Test to make sure a queue task completed successfully.
q = self.type2test()
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_queue_join(self):
# Test that a queue join()s successfully, and before anything else
# (done twice for insurance).
q = self.type2test()
self.queue_join_test(q)
self.queue_join_test(q)
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_basic(self):
# Do it a couple of times on the same queue.
# Done twice to make sure works with same instance reused.
q = self.type2test(QUEUE_SIZE)
self.basic_queue_test(q)
self.basic_queue_test(q)
def test_negative_timeout_raises_exception(self):
q = self.type2test(QUEUE_SIZE)
with self.assertRaises(ValueError):
q.put(1, timeout=-1)
with self.assertRaises(ValueError):
q.get(1, timeout=-1)
def test_nowait(self):
q = self.type2test(QUEUE_SIZE)
for i in range(QUEUE_SIZE):
q.put_nowait(1)
with self.assertRaises(self.queue.Full):
q.put_nowait(1)
for i in range(QUEUE_SIZE):
q.get_nowait()
with self.assertRaises(self.queue.Empty):
q.get_nowait()
def test_shrinking_queue(self):
# issue 10110
q = self.type2test(3)
q.put(1)
q.put(2)
q.put(3)
with self.assertRaises(self.queue.Full):
q.put_nowait(4)
self.assertEqual(q.qsize(), 3)
q.maxsize = 2 # shrink the queue
with self.assertRaises(self.queue.Full):
q.put_nowait(4)
class QueueTest(BaseQueueTestMixin):
def setUp(self):
self.type2test = self.queue.Queue
super().setUp()
class PyQueueTest(QueueTest, unittest.TestCase):
queue = py_queue
@need_c_queue
class CQueueTest(QueueTest, unittest.TestCase):
queue = c_queue
class LifoQueueTest(BaseQueueTestMixin):
def setUp(self):
self.type2test = self.queue.LifoQueue
super().setUp()
class PyLifoQueueTest(LifoQueueTest, unittest.TestCase):
queue = py_queue
@need_c_queue
class CLifoQueueTest(LifoQueueTest, unittest.TestCase):
queue = c_queue
class PriorityQueueTest(BaseQueueTestMixin):
def setUp(self):
self.type2test = self.queue.PriorityQueue
super().setUp()
class PyPriorityQueueTest(PriorityQueueTest, unittest.TestCase):
queue = py_queue
@need_c_queue
class CPriorityQueueTest(PriorityQueueTest, unittest.TestCase):
queue = c_queue
# A Queue subclass that can provoke failure at a moment's notice :)
class FailingQueueException(Exception): pass
class FailingQueueTest(BlockingTestMixin):
def setUp(self):
Queue = self.queue.Queue
class FailingQueue(Queue):
def __init__(self, *args):
self.fail_next_put = False
self.fail_next_get = False
Queue.__init__(self, *args)
def _put(self, item):
if self.fail_next_put:
self.fail_next_put = False
raise FailingQueueException("You Lose")
return Queue._put(self, item)
def _get(self):
if self.fail_next_get:
self.fail_next_get = False
raise FailingQueueException("You Lose")
return Queue._get(self)
self.FailingQueue = FailingQueue
super().setUp()
def failing_queue_test(self, q):
if q.qsize():
raise RuntimeError("Call this function with an empty queue")
for i in range(QUEUE_SIZE-1):
q.put(i)
# Test a failing non-blocking put.
q.fail_next_put = True
try:
q.put("oops", block=0)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.fail_next_put = True
try:
q.put("oops", timeout=0.1)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
# Test a failing blocking put
q.fail_next_put = True
try:
self.do_blocking_test(q.put, ("full",), q.get, ())
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put("last")
# Test a failing timeout put
q.fail_next_put = True
try:
self.do_exceptional_blocking_test(q.put, ("full", True, 10), q.get, (),
FailingQueueException)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
q.get()
self.assertTrue(not qfull(q), "Queue should not be full")
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
# Test a blocking put
self.do_blocking_test(q.put, ("full",), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
q.put("first")
q.fail_next_get = True
try:
q.get()
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(q.qsize(), "Queue should not be empty")
q.fail_next_get = True
try:
q.get(timeout=0.1)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(q.qsize(), "Queue should not be empty")
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
q.fail_next_get = True
try:
self.do_exceptional_blocking_test(q.get, (), q.put, ('empty',),
FailingQueueException)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# put succeeded, but get failed.
self.assertTrue(q.qsize(), "Queue should not be empty")
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
def test_failing_queue(self):
# Test to make sure a queue is functioning correctly.
# Done twice to the same instance.
q = self.FailingQueue(QUEUE_SIZE)
self.failing_queue_test(q)
self.failing_queue_test(q)
class PyFailingQueueTest(FailingQueueTest, unittest.TestCase):
queue = py_queue
@need_c_queue
class CFailingQueueTest(FailingQueueTest, unittest.TestCase):
queue = c_queue
class BaseSimpleQueueTest:
def setUp(self):
self.q = self.type2test()
def feed(self, q, seq, rnd):
while True:
try:
val = seq.pop()
except IndexError:
return
q.put(val)
if rnd.random() > 0.5:
time.sleep(rnd.random() * 1e-3)
def consume(self, q, results, sentinel):
while True:
val = q.get()
if val == sentinel:
return
results.append(val)
def consume_nonblock(self, q, results, sentinel):
while True:
while True:
try:
val = q.get(block=False)
except self.queue.Empty:
time.sleep(1e-5)
else:
break
if val == sentinel:
return
results.append(val)
def consume_timeout(self, q, results, sentinel):
while True:
while True:
try:
val = q.get(timeout=1e-5)
except self.queue.Empty:
pass
else:
break
if val == sentinel:
return
results.append(val)
def run_threads(self, n_feeders, n_consumers, q, inputs,
feed_func, consume_func):
results = []
sentinel = None
seq = inputs + [sentinel] * n_consumers
seq.reverse()
rnd = random.Random(42)
exceptions = []
def log_exceptions(f):
def wrapper(*args, **kwargs):
try:
f(*args, **kwargs)
except BaseException as e:
exceptions.append(e)
return wrapper
feeders = [threading.Thread(target=log_exceptions(feed_func),
args=(q, seq, rnd))
for i in range(n_feeders)]
consumers = [threading.Thread(target=log_exceptions(consume_func),
args=(q, results, sentinel))
for i in range(n_consumers)]
with support.start_threads(feeders + consumers):
pass
self.assertFalse(exceptions)
self.assertTrue(q.empty())
self.assertEqual(q.qsize(), 0)
return results
def test_basic(self):
# Basic tests for get(), put() etc.
q = self.q
self.assertTrue(q.empty())
self.assertEqual(q.qsize(), 0)
q.put(1)
self.assertFalse(q.empty())
self.assertEqual(q.qsize(), 1)
q.put(2)
q.put_nowait(3)
q.put(4)
self.assertFalse(q.empty())
self.assertEqual(q.qsize(), 4)
self.assertEqual(q.get(), 1)
self.assertEqual(q.qsize(), 3)
self.assertEqual(q.get_nowait(), 2)
self.assertEqual(q.qsize(), 2)
self.assertEqual(q.get(block=False), 3)
self.assertFalse(q.empty())
self.assertEqual(q.qsize(), 1)
self.assertEqual(q.get(timeout=0.1), 4)
self.assertTrue(q.empty())
self.assertEqual(q.qsize(), 0)
with self.assertRaises(self.queue.Empty):
q.get(block=False)
with self.assertRaises(self.queue.Empty):
q.get(timeout=1e-3)
with self.assertRaises(self.queue.Empty):
q.get_nowait()
self.assertTrue(q.empty())
self.assertEqual(q.qsize(), 0)
def test_negative_timeout_raises_exception(self):
q = self.q
q.put(1)
with self.assertRaises(ValueError):
q.get(timeout=-1)
def test_order(self):
# Test a pair of concurrent put() and get()
q = self.q
inputs = list(range(100))
results = self.run_threads(1, 1, q, inputs, self.feed, self.consume)
# One producer, one consumer => results appended in well-defined order
self.assertEqual(results, inputs)
def test_many_threads(self):
# Test multiple concurrent put() and get()
N = 50
q = self.q
inputs = list(range(10000))
results = self.run_threads(N, N, q, inputs, self.feed, self.consume)
# Multiple consumers without synchronization append the
# results in random order
self.assertEqual(sorted(results), inputs)
def test_many_threads_nonblock(self):
# Test multiple concurrent put() and get(block=False)
N = 50
q = self.q
inputs = list(range(10000))
results = self.run_threads(N, N, q, inputs,
self.feed, self.consume_nonblock)
self.assertEqual(sorted(results), inputs)
def test_many_threads_timeout(self):
# Test multiple concurrent put() and get(timeout=...)
N = 50
q = self.q
inputs = list(range(1000))
results = self.run_threads(N, N, q, inputs,
self.feed, self.consume_timeout)
self.assertEqual(sorted(results), inputs)
def test_references(self):
# The queue should lose references to each item as soon as
# it leaves the queue.
class C:
pass
N = 20
q = self.q
for i in range(N):
q.put(C())
for i in range(N):
wr = weakref.ref(q.get())
self.assertIsNone(wr())
class PySimpleQueueTest(BaseSimpleQueueTest, unittest.TestCase):
queue = py_queue
def setUp(self):
self.type2test = self.queue._PySimpleQueue
super().setUp()
@need_c_queue
class CSimpleQueueTest(BaseSimpleQueueTest, unittest.TestCase):
queue = c_queue
def setUp(self):
self.type2test = self.queue.SimpleQueue
super().setUp()
def test_is_default(self):
self.assertIs(self.type2test, self.queue.SimpleQueue)
def test_reentrancy(self):
# bpo-14976: put() may be called reentrantly in an asynchronous
# callback.
q = self.q
gen = itertools.count()
N = 10000
results = []
# This test exploits the fact that __del__ in a reference cycle
# can be called any time the GC may run.
class Circular(object):
def __init__(self):
self.circular = self
def __del__(self):
q.put(next(gen))
while True:
o = Circular()
q.put(next(gen))
del o
results.append(q.get())
if results[-1] >= N:
break
self.assertEqual(results, list(range(N + 1)))
if __name__ == "__main__":
unittest.main()
|
data_handler.py
|
from influxdb import InfluxDBClient
from source.device_manager.device_layer.sila_device import SilaDevice
from source.device_manager.device_layer.device_interface import DeviceInterface
from typing import List, Dict
import threading, time
import json
from threading import Thread
class DataHandler:
def __init__(self,
host: str,
port: int,
user: str = 'root',
password: str = 'root',
dbname: str = 'example'):
"""initialize the data handler
Args:
host : the host of the InfluxDBClient
port : port of the InfluxDB
user : username of InfluxDB
password : password of InfluxDB
dbname : database name
"""
self.host = host
self.port = port
self.user = user
self.password = password
self.dbname = dbname
self.client = InfluxDBClient(self.host, self.port, self.user, self.password,
self.dbname)
self.client.create_database(self.dbname)
def setup(self, get_devices: List[DeviceInterface]):
# use three separate lists; a chained assignment would alias a single list
interval10, interval50, interval100 = [], [], []
for device in get_devices:
# collect property names (not nested lists) so membership tests in run() work
interval10.extend(k for k, v in device.properties_interval.items() if v == 10)
interval50.extend(k for k, v in device.properties_interval.items() if v == 50)
interval100.extend(k for k, v in device.properties_interval.items() if v == 100)
process = Thread(target=self.run, args=[get_devices, interval10, 10])
process.start()
process2 = Thread(target=self.run, args=[get_devices, interval50, 50])
process2.start()
process3 = Thread(target=self.run, args=[get_devices, interval100, 100])
process3.start()
def run(self, get_devices: List[DeviceInterface], interval_list, interval):
"""Store data from every available device in the time-series database
Args:
get_devices: list of devices
interval_list: properties that should be stored at this interval
interval: time in seconds between successive writes to the database
"""
data = {}
for device in get_devices:
if device.get_status == "running":
# if(device.type=="Sila")
for features in device.get_feature_names():
for property in device.get_properties(features):
if property in interval_list:
data['measurement'] = property
data['tags'] = {'device': device, 'features': features}
data['time'] = time.time()
data['fields'] = {'value': device.call_property(features, property)}
# write_points expects a list of point dicts rather than a JSON string
self.client.write_points([data])
threading.Timer(interval, self.run, [get_devices, interval_list, interval]).start()
@staticmethod
def get_logs(device):
"""Returns the specified device latest log message
Args:
device : The Sila device that we want to get the log for
Returns:
list containing LogLevel, time and the message
"""
sila = SilaDevice(device.ip, device.port, device.uuid, device.name)
return sila.call_property('DeviceController', 'GetLog_Result')
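# Minimal usage sketch (illustrative, not part of the original module). The host,
# port and database name are placeholders for a local InfluxDB instance; setup()
# then starts one polling thread per interval (10, 50 and 100) for the devices
# handed to it, so an empty device list is effectively a no-op.
if __name__ == '__main__':
    handler = DataHandler(host='localhost', port=8086, dbname='example')
    handler.setup([])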
|
multientityInterface_Tests.py
|
# Copyright 2010-2012 Institut Mines-Telecom
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jun 27, 2012
@author: Bilel Msekni
@contact: bilel.msekni@telecom-sudparis.eu
@author: Houssem Medhioub
@contact: houssem.medhioub@it-sudparis.eu
@organization: Institut Mines-Telecom - Telecom SudParis
@license: Apache License, Version 2.0
"""
from multiprocessing import Process
from unittest import TestLoader, TextTestRunner, TestCase
from pyocni.TDD.fake_Data.server_Mock import ocni_server
import pycurl
import time
import StringIO
from pyocni.TDD.fake_Data.initialize_fakeDB import init_fakeDB
import pyocni.TDD.fake_Data.entities as f_entities
import pyocni.pyocni_tools.config as config
def start_server():
ocni_server_instance = ocni_server()
ocni_server_instance.run_server()
class test_post(TestCase):
"""
Tests POST request scenarios
"""
def setUp(self):
"""
Set up the test environment
"""
self.p = Process(target=start_server)
self.p.start()
time.sleep(0.5)
#init_fakeDB()
time.sleep(0.5)
def tearDown(self):
#config.purge_PyOCNI_db()
self.p.terminate()
def test_register_entities(self):
"""
register resources & links
"""
storage = StringIO.StringIO()
c = pycurl.Curl()
c.setopt(c.URL, 'http://127.0.0.1:8090/compute/')
c.setopt(c.HTTPHEADER, ['Accept:text/plain', 'Content-Type: application/occi+json'])
c.setopt(c.VERBOSE, True)
c.setopt(pycurl.POSTFIELDS, f_entities.link)
c.setopt(c.CUSTOMREQUEST, 'POST')
c.setopt(c.WRITEFUNCTION, storage.write)
c.perform()
content = storage.getvalue()
print " ========== Body content ==========\n " + content + " \n ==========\n"
class test_get(TestCase):
"""
Tests GET request scenarios
"""
def setUp(self):
"""
Set up the test environment
"""
self.p = Process(target=start_server)
self.p.start()
time.sleep(0.5)
def tearDown(self):
self.p.terminate()
def test_get_entities(self):
"""
get resources & links
"""
storage = StringIO.StringIO()
c = pycurl.Curl()
c.setopt(c.URL, "http://127.0.0.1:8090/compute/")
c.setopt(c.HTTPHEADER, ['Content-type: application/occi+json', 'Accept:application/occi+json'])
c.setopt(c.VERBOSE, True)
c.setopt(c.CUSTOMREQUEST, 'GET')
c.setopt(c.WRITEFUNCTION, storage.write)
c.setopt(c.POSTFIELDS,f_entities.j_occi_att)
c.perform()
content = storage.getvalue()
print " ===== Body content =====\n " + content + " ==========\n"
class test_put(TestCase):
"""
Tests PUT request scenarios
"""
def setUp(self):
"""
Set up the test environment
"""
self.p = Process(target=start_server)
self.p.start()
time.sleep(0.5)
def tearDown(self):
self.p.terminate()
def test_associate_mixins(self):
"""
"""
storage = StringIO.StringIO()
c = pycurl.Curl()
c.setopt(pycurl.URL, 'http://127.0.0.1:8090/template/resource/medium/')
c.setopt(pycurl.HTTPHEADER, ['Accept: application/occi+json'])
c.setopt(pycurl.HTTPHEADER, ['Content-Type: application/occi+json'])
c.setopt(pycurl.CUSTOMREQUEST, 'PUT')
c.setopt(pycurl.POSTFIELDS, f_entities.put_on_mixin_path)
c.setopt(pycurl.USERPWD, 'user_1:password')
c.setopt(c.WRITEFUNCTION, storage.write)
c.perform()
content = storage.getvalue()
print " ===== Body content =====\n " + content + " ==========\n"
class test_delete(TestCase):
"""
Tests DELETE request scenarios
"""
def setUp(self):
"""
Set up the test environment
"""
self.p = Process(target=start_server)
self.p.start()
time.sleep(0.5)
def tearDown(self):
self.p.terminate()
def test_dissociate_mixins(self):
"""
"""
storage = StringIO.StringIO()
c = pycurl.Curl()
c.setopt(pycurl.URL, 'http://127.0.0.1:8090/template/resource/medium/')
c.setopt(pycurl.HTTPHEADER, ['Accept: application/occi+json'])
c.setopt(pycurl.HTTPHEADER, ['Content-Type: application/occi+json'])
c.setopt(pycurl.CUSTOMREQUEST, 'DELETE')
c.setopt(c.WRITEFUNCTION, storage.write)
c.perform()
content = storage.getvalue()
print " ===== Body content =====\n " + content + " ==========\n"
if __name__ == '__main__':
#Create the testing tools
loader = TestLoader()
runner = TextTestRunner(verbosity=2)
#Create the testing suites
get_suite = loader.loadTestsFromTestCase(test_get)
delete_suite = loader.loadTestsFromTestCase(test_delete)
put_suite = loader.loadTestsFromTestCase(test_put)
post_suite = loader.loadTestsFromTestCase(test_post)
#Run tests
#runner.run(get_suite)
runner.run(post_suite)
|
ci_build.py
|
from optparse import OptionParser
from dependencies import read_json_dependencies_from_filename
import dependencies
import os
import platform
import threading
import sys
import subprocess
import shutil
import getpass
from userlocks import userlock
from default_platform import default_platform as _default_platform
from functools import wraps
import version
import filechecker
DEFAULT_STEPS = "default"
ALL_STEPS = "all"
ILLEGAL_STEP_NAMES = [DEFAULT_STEPS, ALL_STEPS]
def get_vsvars_environment(architecture="x86"):
"""
Returns a dictionary containing the environment variables set up by vcvarsall.bat
architecture - Architecture to pass to vcvarsall.bat. Normally "x86" or "amd64"
win32-specific
"""
comntoolsVarNames = ['VS100COMNTOOLS', 'VS110COMNTOOLS', 'VS120COMNTOOLS']
for varName in comntoolsVarNames:
vscomntools = os.getenv(varName)
if vscomntools is not None:
break
if vscomntools is None:
raise Exception('Couldn\'t find COMNTOOLS environment variable (tried %s)' % ', '.join(comntoolsVarNames))
vsvars32 = os.path.join(vscomntools, '..', '..', 'VC', 'vcvarsall.bat')
python = sys.executable
process = subprocess.Popen('("%s" %s>nul)&&"%s" -c "import os; print repr(os.environ)"' % (vsvars32, architecture, python), stdout=subprocess.PIPE, shell=True)
stdout, _ = process.communicate()
exitcode = process.wait()
if exitcode != 0:
raise Exception("Got error code %s from subprocess!" % exitcode)
return eval(stdout.strip())
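# Illustrative sketch only (never called by this module): the captured vcvarsall
# environment is typically merged over a copy of os.environ and handed to a
# subprocess, e.g. an msbuild invocation (the command line here is a placeholder).
def _example_use_vsvars_environment():
    env = dict(os.environ)
    env.update(get_vsvars_environment("x86"))
    subprocess.check_call(["msbuild", "/help"], env=env)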
def default_platform(fail_on_unknown=True):
p = _default_platform()
if p is None and fail_on_unknown:
fail('No platform specified and unable to guess.')
return p
def delete_directory(path, logfile=None):
if logfile is None:
logfile = open(os.devnull, "w")
path = os.path.abspath(path)
logfile.write('Deleting "'+path+'"... ')
shutil.rmtree(path, ignore_errors=True)
if os.path.isdir(path):
logfile.write('\nFailed.\n')
raise Exception('Failed to delete "%s"' % path)
logfile.write('\nDone.\n')
class BuildStep(object):
def __init__(self, name, action):
if name in ILLEGAL_STEP_NAMES:
fail("'{0}' is not allowed as a build step name.".format(name))
self.name = name
self.condition_sets = []
self.is_optional = False
self.is_enabled_by_default = True
self.action = action
def add_conditions(self, condition_set):
self.condition_sets.append(condition_set)
def set_default(self, enabled_by_default):
self.is_enabled_by_default = enabled_by_default
def set_optional(self, optional):
self.is_optional = optional
def test_conditions(self, env):
if len(self.condition_sets) == 0:
return True
for conditions in self.condition_sets:
if all(key in env and env[key]==value for (key, value) in conditions.items()):
return True
return False
def run(self, context):
return self.action(context)
class BuildContext(object):
pass
def flatten_string_list(arglist):
"""
Assemble a list of string, such as for a subprocess call.
Input should be a string or a list containing only
strings or similar lists.
Output will be a list containing only strings.
"""
if isinstance(arglist, (str, unicode)):
return [arglist]
return sum([flatten_string_list(x) for x in arglist], [])
def flatten_comma_list(arglist):
return sum([s.split(",") for s in arglist], [])
def process_kwargs(func_name, kwarg_dict, defaults_dict):
result = dict(defaults_dict)
for key, value in kwarg_dict.items():
if key in result:
result[key] = value
else:
raise TypeError("{0}() got an unexpected keyword argument '{1}'".format(func_name, key))
return result
NOT_SPECIFIED = object()
class CaseInsensitiveEnvironmentCopy(dict):
def __contains__(self, key):
return dict.__contains__(self, key.upper())
def __getitem__(self, key):
return dict.__getitem__(self, key.upper())
def __setitem__(self, key, value):
return dict.__setitem__(self, key.upper(), value)
def __init__(self, *args):
if len(args)==0:
dict.__init__(self)
elif len(args)==1:
dict.__init__(self, [(k.upper(), v) for (k,v) in args[0].items()])
else:
raise ValueError()
def get(self, key, default=None):
return dict.get(self, key.upper(), default)
def has_key(self, key):
return dict.has_key(self, key.upper())
def pop(self, key, *args):
return dict.pop(self, key.upper(), *args)
def setdefault(self, key, *args):
return dict.setdefault(self, key.upper(), *args)
def update(self, *args, **kwargs):
if len(args)==0:
primary={}
elif len(args)==1:
primary=CaseInsensitiveEnvironmentCopy(args[0])
else:
raise ValueError()
secondary=CaseInsensitiveEnvironmentCopy(kwargs)
return dict.update(self, primary, **secondary)
# This is the same mechanism Python uses to decide if os.environ is case-
# sensitive:
if os.name in ['nt', 'os2']:
EnvironmentCopy = CaseInsensitiveEnvironmentCopy
else:
EnvironmentCopy = dict
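# Illustrative example: with the case-insensitive copy selected on Windows,
# EnvironmentCopy(os.environ).get('path') and .get('PATH') return the same
# value; on other platforms EnvironmentCopy is a plain dict and keys keep
# their exact case.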
def callable_to_function(f):
'''
Take a callable object, such as a function or instance method,
and wrap it in a function. This is necessary if you want to
annotate it with extra attributes and it might be an instance
method.
'''
@wraps(f)
def f_prime(*args, **kwargs):
f(*args, **kwargs)
return f_prime
class Builder(object):
def __init__(self):
self._steps = []
self._optionParser = OptionParser()
self.add_bool_option("-v", "--verbose")
self.add_bool_option("--no-overrides", help="When fetching dependencies, don't read from a local overrides file.")
self.add_bool_option("--incremental-fetch", help="Force incremental fetch, over-riding clean flag.")
self._enabled_options = set()
self._disabled_options = set()
self._disable_all_options = False
self._enable_all_options = False
#self._context = BuildContext()
def has_steps(self):
return len(self._steps) > 0
def create_build_step(self, f, name):
if hasattr(f, "buildstep"):
return f
f = callable_to_function(f)
f.buildstep = BuildStep(name or f.__name__, f)
self._steps.append(f.buildstep)
return f
def build_condition(self, name=None, **conditions):
"""Decorator applied to functions in the build_behaviour file."""
def decorator_func(f):
f = self.create_build_step(f, name=name)
f.buildstep.add_conditions(conditions)
return f
return decorator_func
def build_step(self, name=None, optional=False, default=True):
def decorator_func(f):
f = self.create_build_step(f, name=name)
f.buildstep.set_optional(optional)
f.buildstep.set_default(default)
return f
return decorator_func
def get_optional_steps(self):
return (step.name for step in self._steps if step.is_optional)
def specify_optional_steps(self, *steps):
'''
Specify which optional steps to include in the build.
"default" includes all default steps.
"all" includes all steps.
"foo" or "+foo" includes step foo.
"-foo" excludes step foo, even if "default" or "all" is present.
'''
steps = flatten_string_list(steps)
steps = flatten_comma_list(steps)
self._enable_all_options = ALL_STEPS in steps
#self._enable_default_options = DEFAULT_STEPS in steps
self._disable_all_options = DEFAULT_STEPS not in steps and ALL_STEPS not in steps
self._disabled_options = set(s[1:] for s in steps if s.startswith("-"))
self._enabled_options = set(s[1:] for s in steps if s.startswith("+"))
self._enabled_options = self._enabled_options.union(
s for s in steps if s[0] not in "+-")
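# Illustrative example (hypothetical step names): the selection strings above
# compose, so a call such as
#     builder.specify_optional_steps("default", "-test", "+publish")
# runs the default steps, skips "test" and additionally runs "publish".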
def modify_optional_steps(self, *steps):
'''
Add or remove optional steps in the build.
"+foo" include step foo.
"-foo" exclude step foo.
'''
for name in steps:
if name.startswith("+"):
name = name[1:]
self._disabled_options.discard(name)
self._enabled_options.add(name)
elif name.startswith("-"):
name = name[1:]
self._enabled_options.discard(name)
self._disabled_options.add(name)
else:
raise TypeError("Each step must be a string beginning with '+' or '-'.")
def select_optional_steps(self, *args, **kwargs):
'''
Deprecated. Use specify_optional_steps or modify_optional_steps instead.
'''
kwargs = process_kwargs(
"select_optional_steps",
kwargs,
{"disable_others":False})
if kwargs["disable_others"]:
self._enabled_options.clear()
self._disable_all_options = True
args = flatten_string_list(args)
args = flatten_comma_list(args)
self.modify_optional_steps(*args)
def run(self, argv=None):
self._context = BuildContext()
options, args = self._optionParser.parse_args(argv)
self._context.options = options
self._context.args = args
self._context.env = EnvironmentCopy(os.environ)
for step in self._steps:
if step.test_conditions(self._context.env):
enabled = True
reason = "required"
if step.is_optional:
enabled = step.is_enabled_by_default
reason = "default" if enabled else "not default"
if self._enable_all_options:
enabled = True
reason = "all selected"
if self._disable_all_options:
enabled = False
reason = "not selected"
if step.name in self._enabled_options:
enabled = True
reason = "selected"
if step.name in self._disabled_options:
enabled = False
reason = "deselected"
if enabled:
print "Performing step '{0}' (reason: '{1}')".format(step.name, reason)
step.run(self._context)
else:
print "Skipping step '{0}' (reason: '{1}')".format(step.name, reason)
def add_bool_option(self, *args, **kwargs):
kwargs=dict(kwargs)
kwargs["default"] = False
kwargs["action"] = "store_true"
self.add_option(*args, **kwargs)
def add_option(self, *args, **kwargs):
self._optionParser.add_option(*args, **kwargs)
def _check_call(self, *args, **kwargs):
# force unicode strings in env to str() as unicode env variables break on windows
if 'env' in kwargs:
kwargs['env'] = dict((key,str(value)) for (key, value) in kwargs['env'].items())
argstring = [", ".join([repr(arg) for arg in args])]
kwargstring = [", ".join(["%s=%r" % (k,v) for (k,v) in kwargs.items()])]
invocation = "subprocess.call({0})".format(", ".join(argstring+kwargstring))
if self._context.options.verbose:
print invocation
try:
retval = subprocess.call(*args, **kwargs)
except OSError as e:
fail("{0} -> failed with exception {1}".format(invocation, e))
if retval != 0:
fail("{0} -> returned {1}".format(invocation, retval))
def python(self, *args, **kwargs):
args = flatten_string_list(args)
self._check_call([sys.executable] + args, env=self._context.env, **kwargs)
def shell(self, *args, **kwargs):
args = flatten_string_list(args)
kwargs.setdefault('shell', True)
kwargs.setdefault('env', self._context.env)
if len(args) == 1 and kwargs['shell']:
# The shell hates lists.
args = args[0]
self._check_call(args, **kwargs)
def cli(self, *args, **kwargs):
args = flatten_string_list(args)
if platform.system() != "Windows":
args = ["mono", "--debug", "--runtime=v4.0.30319"] + args
kwargs.setdefault('shell', False)
kwargs.setdefault('env', self._context.env)
self._check_call(args, **kwargs)
def rsync(self, *args, **kwargs):
args = flatten_string_list(args)
self._check_call(["rsync"] + args, **kwargs)
def _dependency_collection(self, env):
return read_json_dependencies_from_filename(
os.path.join('projectdata', 'dependencies.json'),
os.path.join('..', 'dependency_overrides.json'),
env, logfile=sys.stdout)
def _process_dependency_args(self, *selected, **kwargs):
kwargs = process_kwargs(
"fetch_dependencies",
kwargs,
{"env":None},)
selected = flatten_string_list(selected)
env = dict(kwargs['env'] or {})
if "debugmode" not in env:
env['debugmode'] = 'release'
env['titlecase-debugmode'] = env['debugmode'].title()
if "platform" not in env:
env['platform'] = self._context.env["OH_PLATFORM"]
if "linn-git-user" not in env:
env['linn-git-user'] = getpass.getuser()
return selected, env
def fetch_dependencies(self, *selected, **kwargs):
selected, env = self._process_dependency_args(*selected, **kwargs)
use_nuget = os.path.isfile('projectdata/packages.config')
clean = False
if 'default' in self._enabled_options or 'all' in self._enabled_options or 'clean' in self._enabled_options:
if self._context.options.incremental_fetch:
clean = False  # incremental fetch requested: do not clean the dependencies
else:
clean = True   # otherwise clean-for-fetch follows clean-for-build
if 'clean' in self._disabled_options:
clean = False
try:
dependencies.fetch_dependencies(
selected or None, platform=self._context.env["OH_PLATFORM"], env=env,
fetch=True, nuget=use_nuget, clean=clean, source=False, logfile=sys.stdout,
local_overrides=not self._context.options.no_overrides)
except Exception as e:
print e
raise AbortRunException()
def read_dependencies(self, *selected, **kwargs):
selected, env = self._process_dependency_args(*selected, **kwargs)
return self._dependency_collection(env)
def fetch_source(self, *selected, **kwargs):
selected, env = self._process_dependency_args(*selected, **kwargs)
dependency_collection = self._dependency_collection(env)
return dependency_collection.checkout(selected or None)
def get_dependency_args(self, *selected, **kwargs):
selected, env = self._process_dependency_args(*selected, **kwargs)
dependency_collection = self._dependency_collection(env)
return dependency_collection.get_args(selected or None)
class SshConnection(object):
def __init__(self, stdin, stdout, stderr):
def pump_output_thread(source, destination):
for line in source:
destination.write(line)
destination.flush()
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.stdout_thread = threading.Thread(target=pump_output_thread, args=(stdout, sys.stdout))
self.stderr_thread = threading.Thread(target=pump_output_thread, args=(stderr, sys.stderr))
self.stdout_thread.start()
self.stderr_thread.start()
def send(self, data):
self.stdin.write(data)
self.stdin.flush()
def join(self):
self.stdout_thread.join()
self.stderr_thread.join()
return self.stdout.channel.recv_exit_status()
class SshSession(object):
def __init__(self, host, username):
import paramiko
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.connect(host, username=username, look_for_keys=True)
def call(self, *args, **kwargs):
stdin, stdout, stderr = self.ssh.exec_command(*args, **kwargs)
conn = SshConnection(stdin, stdout, stderr)
return conn.join()
def call_async(self, *args, **kwargs):
stdin, stdout, stderr = self.ssh.exec_command(*args, **kwargs)
return SshConnection(stdin, stdout, stderr)
def __call__(self, *args):
return self.call(*args)
def __enter__(self):
return self
def __exit__(self, ex_type, ex_value, ex_traceback):
self.ssh.close()
class AbortRunException(Exception):
def __init__(self, message="Aborted due to error.", exitcode=1):
Exception.__init__(self, message)
self.usermessage = message
self.exitcode = exitcode
def fail(*args, **kwargs):
'''
fail(message, exitcode=1)
Abort the build with an error message.
'''
raise AbortRunException(*args, **kwargs)
def require_version(required_version):
'''Fail if the version of ohDevTools is too old.'''
try:
version.require_version(required_version)
except version.BadVersionException as e:
fail(e.usermessage, 32)
def windows_program_exists(program):
return subprocess.call(["where", "/q", program], shell=False)==0
def other_program_exists(program):
nul = open(os.devnull, "w")
return subprocess.call(["/bin/sh", "-c", "command -v "+program], shell=False, stdout=nul, stderr=nul)==0
program_exists = windows_program_exists if platform.platform().startswith("Windows") else other_program_exists
def scp(*args):
program = None
for p in ["scp", "pscp"]:
if program_exists(p):
program = p
break
if program is None:
raise Exception("Cannot find scp (or pscp) in the path.")
subprocess.check_call([program] + list(args))
def _forward_to_builder(name):
'''
Create a method that just calls a method of the same name
on the builder object with the same arguments.
'''
@wraps(getattr(Builder, name))
def func(self, *args, **kwargs):
return getattr(self._builder, name)(*args, **kwargs)
return func
def _forward_to_function(f):
'''
Create a method that just calls a function with the same
arguments.
'''
@wraps(f)
def func(self, *args, **kwargs):
return f(*args, **kwargs)
return func
def string_is_truish(value):
return value.lower() in ['1', 'yes', 'true', 'on', 'y', 't']
def string_is_falsish(value):
return value.lower() in ['0', 'no', 'false', 'off', 'n', 'f']
class OpenHomeBuilder(object):
# This is a slightly awkward attempt to bridge the way to a more maintainable
# system while making the 'build_behaviour' files easier to work with. To smooth
# the transition, this still uses some tricks that may be confusing for maintainers,
# but the idea is that the interface it presents to the 'build_behaviour' script
# is more natural and can eventually be provided without any esoteric Python.
# This tries to tame and conceal the broad generality of Builder and present it as
# a base-class for projects to sub-class. Instead of allowing completely arbitrary
# steps to be registered, a sensible set of steps is enforced:
# setup, fetch, clean, configure, build, test, publish
# This allows projects to avoid excessively duplicating each other.
enable_configurations = True # Adds --configuration, --debug and --release options.
enable_platforms = True # Adds --platform, --system and --architecture options.
enable_versioning = True # Adds --version option.
enable_vsvars = True # Find and call vsvarsall if cl.exe is not on path.
test_location = 'build/{assembly}/bin/{configuration}/{assembly}.dll'
package_location = 'build/packages/{packagename}'
package_upload = 'releases@www.openhome.org:/home/releases/www/artifacts/{uploadpath}'
automatic_steps = ['fetch','configure','clean','build','test']
mdtool_mac = '/Applications/Xamarin\ Studio.app/Contents/MacOS/mdtool'
msbuild_verbosity = 'minimal'
cover_reports = [ ]
source_check_rules = [ ]
standard_source_check_rules = [
['src/**/*.csproj', 'warnings-as-errors'], # All C# projects should enable warnings-as-errors
['src/**/*.csproj', 'import-shared-settings'], # All C# projects should import the shared settings
['src/**/*.orig', 'disallow'], # Don't commit .orig files from merges!
['src/**/*.cs', 'no-tabs'], # Don't use tabs in C# source
]
platform_slave_overrides = {} # subclasses should override this to map non-standard platform slave labels to standard ones in auto behaviour
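# Illustrative sketch (hypothetical project, not part of this module): a
# build_behaviour script subclasses OpenHomeBuilder and fills in the fixed
# steps listed above, roughly along these lines:
#
#     class ExampleBuilder(OpenHomeBuilder):
#         def setup(self):
#             self.set_nunit_location('dependencies/nunit/nunit-console.exe')
#         def build(self):
#             self.msbuild('src/Example.sln', configuration=self.configuration)
#         def test(self):
#             self.nunit('ExampleTests')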
def __init__(self):
super(OpenHomeBuilder, self).__init__()
def startup(self, builder):
self._builder = builder
self._context = None
if self.enable_platforms:
builder.add_option('--platform', help="Target platform. E.g. Windows-x86, Linux-x64, iOs-armv7.")
builder.add_option('--system', help="Target system. E.g. Windows, Linux, Mac, iOs.")
builder.add_option('--architecture', help="Target architecture. E.g. x86, x64.")
if self.enable_configurations:
builder.add_option('--configuration', help="Target configuration. E.g. Debug, Release.")
builder.add_option("--debug", action="store_const", const="Debug", dest="configuration",
help="Specify Debug configuration. Short for --configuration=Debug")
builder.add_option("--release", action="store_const", const="Release", dest="configuration",
help="Specify Release configuration. Short for --configuration=Release")
if self.enable_versioning:
builder.add_option('--version', help="Specify version number for build.")
if self.enable_vsvars:
builder.add_option('--vsvars', default="auto", help="Find and run vsvarsall.bat: 'yes' - always, 'no' - never, 'auto' - only if cl.exe not on path. Default 'auto'.")
builder.add_option("--steps", default="default",
help="Steps to run, comma separated. Allowed: all default fetch clean configure build test publish")
builder.add_bool_option("--auto", help="Choose behaviour automatically based on environment. (Best for CI servers.)")
# Sorry, this is a bit crazy. This contortion is intended to let us re-use
# Builder.build_step without having to rewrite it. The Builder expects to
# call each step and pass in a context object, but we don't want our sub-classes
# to have to deal with the context, so our wrapper here accepts the context,
# stores it in a field, then forwards to our method.
def invoke(name):
def passthrough(context):
self._context = context
getattr(self, name)()
self._context = None
return passthrough
builder.build_step('process_options', optional=False)(invoke("_process_options"))
builder.build_step('setup', optional=False)(invoke("setup"))
builder.build_step('openhome_setup', optional=False)(invoke("openhome_setup"))
builder.build_step('check_source', optional=True, default=True)(invoke("check_source"))
builder.build_step('fetch', optional=True, default=True)(invoke("fetch"))
builder.build_step('configure', optional=True, default=True)(invoke("configure"))
builder.build_step('clean', optional=True, default=True)(invoke("clean"))
builder.build_step('build', optional=True, default=True)(invoke("build"))
builder.build_step('test', optional=True, default=False)(invoke("test"))
builder.build_step('publish', optional=True, default=False)(invoke("publish"))
def __getattr__(self, name):
return getattr(self._context, name)
def _expand_template(self, template, **kwargs):
kwargs.update(dict(
configuration = self.configuration,
system = self.system,
architecture = self.architecture,
platform = self.platform,
version = self.version))
return template.format(**kwargs)
def _process_platform_options(self):
system = self.options.system
architecture = self.options.architecture
platform = self.options.platform
if platform and (system or architecture):
fail('Specify --platform alone or both --system and --architecture, not a mix.')
if bool(system) != bool(architecture):
fail('Specify --system and --architecture together.')
if platform is None and system is not None:
platform = system + '-' + architecture
if platform is None and self.options.auto:
platform = self.env['slave']
# to map non-standard platform slave labels to standard ones in auto behaviour e.g. Android-mono -> Android-anycpu
if platform in self.platform_slave_overrides:
platform = self.platform_slave_overrides[platform]
if platform is None:
platform = default_platform()
if '-' not in platform:
fail('Platform should be a system and an architecture separated by a hyphen, e.g. Windows-x86.')
system, architecture = platform.split('-', 2)
self.env['OH_PLATFORM'] = platform
self.platform = platform
self.system = system
self.architecture = architecture
def _process_configuration_options(self):
configuration = self.options.configuration
if configuration is None:
configuration = "Release"
self.configuration = configuration
def _process_version_options(self):
self.version = self.options.version
def _process_auto_option(self):
if self.options.auto:
self.steps_to_run = self.automatic_steps
if self.env.get('PUBLISH_RELEASE',"false").lower() == "true":
self.steps_to_run += ["+publish"]
self.version = self.env.get('RELEASE_VERSION', self.version)
else:
self.steps_to_run = self.options.steps
def _process_options(self):
if self.enable_platforms:
self._process_platform_options()
if self.enable_configurations:
self._process_configuration_options()
if self.enable_versioning:
self._process_version_options()
self._process_auto_option()
def setup(self):
'''
Subclasses can override to specify setup behaviour to occur before the
start of any build.
'''
pass
def openhome_setup(self):
if self.enable_vsvars and self.system == 'Windows':
vsvars_string = self.options.vsvars.lower()
if vsvars_string == 'auto':
vsvars = not program_exists('cl')
elif string_is_truish(vsvars_string):
vsvars = True
elif string_is_falsish(vsvars_string):
vsvars = False
else:
fail('Bad value for --vsvars')
if vsvars:
print 'Automatically locating Visual Studio...'
self.env.update(get_vsvars_environment(self.architecture))
self._builder.specify_optional_steps(self.steps_to_run)
def check_source(self):
'''
Check files in the source tree according to source_check_rules.
See filechecker.py for possible rules to apply.
'''
if self.source_check_rules == []:
print 'No rules defined.'
return
if not filechecker.apply_rules(self.source_check_rules):
self.fail('Source tree failed automated checks. Use --steps="default,-check_source" to suppress these checks temporarily.')
def fetch(self):
'''
Fetch dependencies. Subclasses may override.
'''
self.fetch_dependencies(env={'debugmode':self.configuration, 'platform':self.platform})
def configure(self):
'''
Invoke any configure script. Subclasses should override this if the
project requires configuration.
'''
pass
def clean(self):
'''
Clean out build results. Subclasses should override this.
'''
pass
def build(self):
'''
Perform the build. Subclasses should override this.
'''
pass
def test(self):
'''
Run the tests. Subclasses should override this.
'''
pass
def publish(self):
'''
Publish the packages. Subclasses should override this.
'''
pass
def set_nunit_location(self, nunitexe):
'''
Specify where nunit can be found. Subclasses must invoke this in order
to use the nunit() method.
'''
self.nunitexe = nunitexe
def set_cover_location(self, coverexe):
'''
Specify where OpenCover can be found. Subclasses must invoke this in order
to use the cover() method
'''
self.coverexe = coverexe
def set_reportgen_location(self, reportgenexe):
'''
Specify where ReportGenerator can be found. Subclasses must invoke this in order
to use the coverReport() method
'''
self.reportgenexe = reportgenexe
def should_cover(self):
'''
Return whether the tests should be covered or not. By default, this method only
returns true if the platform is 'Windows-x86', but it can be overridden to enable or
disable coverage for other platforms.
'''
return self.platform == 'Windows-x86'
def msbuild(self, project, target='Build', platform=None, configuration=None, args=None, properties=None, verbosity=None):
'''
Invoke msbuild/xbuild to build a project/solution. Specify the path to
the project or solution file.
'''
#msbuild_args = ['msbuild' if self.system == 'Windows' else 'xbuild']
msbuild_args = ['msbuild' if sys.platform.startswith('win') else 'xbuild']
properties = {} if properties is None else dict(properties)
if target is not None:
msbuild_args += ['/target:'+target]
if verbosity is None:
verbosity = self.msbuild_verbosity
msbuild_args += ['/verbosity:'+verbosity]
if platform is not None:
properties['Platform'] = platform
if configuration is not None:
properties['Configuration'] = configuration
msbuild_args += ['/property:{0}={1}'.format(k,v) for (k,v) in properties.items() if v is not None]
msbuild_args += [project]
if args is not None:
msbuild_args += args
self._builder.shell(' '.join(msbuild_args))
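# Usage sketch from a hypothetical subclass (solution path and property are
# illustrative, not taken from a real project):
#   self.msbuild('src/MyProject.sln', target='Rebuild', platform='x64',
#                configuration=self.configuration,
#                properties={'WarningLevel': '4'})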
def mdtool(self, project, target='build', configuration=None, bundle=None):
'''
Invoke mdtool to build a project/solution. Specify the path to
the project or solution file.
'''
mdtool_args = [self.mdtool_mac if sys.platform.startswith('darwin') else 'mdtool']
if target == "build" or target == "Build":
mdtool_args += ['build']
mdtool_args += ['-t:Build']
elif target == "clean" or target == "Clean":
mdtool_args += ['build']
mdtool_args += ['-t:Clean']
elif target is not None:
mdtool_args += [target]
if configuration is not None:
mdtool_args += ['-c:'+configuration]
mdtool_args += [('-p:' if target == 'mac-bundle' else '') + project]
if bundle is not None:
mdtool_args += [bundle]
self._builder.shell(' '.join(mdtool_args))
def nunit(self, test_assembly):
'''
Run NUnit on a test assembly. Specify the name of the assembly (with
no extension). Test assemblies are located using the template string
test_location.
'''
if self.nunitexe is None:
fail("The builder's setup method should call set_nunit_location().")
self._builder.cli([
self.nunitexe,
'-labels',
'-noshadow',
self._expand_template(self.test_location, assembly=test_assembly)])
def cover(self, **args):
'''
Carry out a test, measuring its code coverage. Accepts the following keyword arguments:
- assembly_filter - a filter used by OpenCover to find the assemblies that coverage should be measured for (see OpenCover docs)
- output - path of the output xml report
- one of:
- command - command to execute
- nunit_assembly - name of an assembly to be run using nunit (located using the template string test_location)
'''
if self.coverexe is None:
fail("The builer's setup method should call set_cover_location().")
if self.should_cover():
report_dir = os.path.dirname(args['output'])
if not os.path.exists(report_dir):
os.makedirs(report_dir)
cmd_options = [self.coverexe, '-register:user', '-filter:' + args['assembly_filter'], '-output:' + args['output']]
if 'command' in args:
cmd_options.extend(['-target:' + args['command']])
elif 'nunit_assembly' in args:
if self.nunitexe is not None:
full_name = os.getcwd() + '/' + self._expand_template(self.test_location, assembly=args['nunit_assembly'])
cmd_options.extend(['-coverbytest:*.dll', '-target:' + self.nunitexe, '-targetargs:' + full_name + ' /noshadow /nologo'])
else:
fail("The builder's setup method should call set_nunit_location().")
else:
fail("Invalid arguments: " + args)
self._builder.cli(cmd_options)
self.cover_reports.append(args['output'])
else:
print 'Coverage not enabled for this platform, executing tests normally'
if 'command' in args:
self._builder.cli(args['command'])
elif 'nunit_assembly' in args:
self.nunit(args['nunit_assembly'])
def coverReport(self, output_dir, reports=None):
'''
Generates an HTML report based on the provided list of XML reports. If no reports are provided,
all the reports generated using the cover function will be used. The list can contain filter strings,
e.g. 'reports/*.xml'.
'''
if self.should_cover():
if self.reportgenexe is None:
fail("The builder's setup method should call set_reportgen_location().")
if reports is None:
reports = self.cover_reports
self._builder.cli([
self.reportgenexe,
'-reports:' + ";".join(reports),
'-targetdir:' + output_dir])
else:
print 'Coverage not enabled for this platform, not generating report'
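# Usage sketch (paths are illustrative): self.coverReport('reports/html') merges every
# report collected via cover(); self.coverReport('reports/html', reports=['reports/*.xml'])
# restricts the merge to the given filter strings.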
def publish_package(self, packagename, uploadpath, package_location=None, package_upload=None):
'''
Publish a package via scp to the package repository. Projects can
override the package_location and package_upload template strings to
control where packages are uploaded.
'''
packagename = self._expand_template(packagename)
uploadpath = self._expand_template(uploadpath)
if package_location is None:
package_location = self.package_location
if package_upload is None:
package_upload = self.package_upload
sourcepath = self._expand_template(package_location, packagename=packagename)
destinationpath = self._expand_template(package_upload, uploadpath=uploadpath)
scp(sourcepath, destinationpath)
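# Usage sketch (package name and upload path are illustrative): assuming the default
# package_location/package_upload templates, a call like
#   self.publish_package('MyLib.tar.gz', 'mylib/1.2.3/')
# expands both templates and copies the package to the repository via scp.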
# This just sets up forwarding methods for a bunch of methods on the Builder, to
# allow sub-classes access to them.
fetch_dependencies = _forward_to_builder("fetch_dependencies")
read_dependencies = _forward_to_builder("read_dependencies")
get_dependency_args = _forward_to_builder("get_dependency_args")
add_option = _forward_to_builder("add_option")
add_bool_option = _forward_to_builder("add_bool_option")
python = _forward_to_builder("python")
shell = _forward_to_builder("shell")
cli = _forward_to_builder("cli")
rsync = _forward_to_builder("rsync")
#build_step = _forward_to_builder("build_step")
#build_condition = _forward_to_builder("condition")
modify_optional_steps = _forward_to_builder("modify_optional_steps")
specify_optional_steps = _forward_to_builder("specify_optional_steps")
default_platform = _forward_to_function(default_platform)
# This sets up forwarding methods for a bunch of useful functions, to allow
# sub-classes access to them.
get_vsvars_environment = _forward_to_function(get_vsvars_environment)
SshSession = _forward_to_function(SshSession)
userlock = _forward_to_function(userlock)
fail = _forward_to_function(fail)
scp = _forward_to_function(scp)
require_version = _forward_to_function(require_version)
def run(buildname="build", argv=None):
builder = Builder()
import ci
behaviour_globals = {
'fetch_dependencies':builder.fetch_dependencies,
'read_dependencies':builder.read_dependencies,
'get_dependency_args':builder.get_dependency_args,
'add_option':builder.add_option,
'add_bool_option':builder.add_bool_option,
'python':builder.python,
'shell':builder.shell,
'cli':builder.cli,
'rsync':builder.rsync,
'build_step':builder.build_step,
'build_condition':builder.build_condition,
'default_platform':default_platform,
'get_vsvars_environment':get_vsvars_environment,
'SshSession':SshSession,
'select_optional_steps':builder.select_optional_steps,
'modify_optional_steps':builder.modify_optional_steps,
'specify_optional_steps':builder.specify_optional_steps,
'userlock':userlock,
'fail':fail,
'scp':scp,
'require_version':require_version,
'OpenHomeBuilder':OpenHomeBuilder
}
for name, value in behaviour_globals.items():
setattr(ci, name, value)
try:
global_dict = dict(behaviour_globals)
execfile(os.path.join('projectdata', buildname+'_behaviour.py'), global_dict)
if not builder.has_steps() and 'Builder' in global_dict:
instance = global_dict['Builder']()
instance.startup(builder)
builder.run(argv)
except AbortRunException as e:
print e.usermessage
sys.exit(e.exitcode)
for name in behaviour_globals.keys():
delattr(ci, name)
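# A minimal sketch of a projectdata/<buildname>_behaviour.py consumed by run() above
# (class body, paths and assembly names are illustrative, not from a real project):
#
#   class Builder(OpenHomeBuilder):
#       test_location = 'build/{assembly}/{assembly}.dll'
#       def setup(self):
#           self.set_nunit_location('dependencies/nunit/nunit-console.exe')
#       def build(self):
#           self.msbuild('src/MyProject.sln', configuration=self.configuration)
#       def test(self):
#           self.nunit('MyProject.Tests')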
|
network_execution.py
|
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import socket
import subprocess
import threading
import time
from . import distro
from . import perfdata
from ..local import execution
from ..objects import peer
from ..objects import workpacket
from ..server import compression
from ..server import constants
from ..server import local_handler
from ..server import signatures
def GetPeers():
data = local_handler.LocalQuery([constants.REQUEST_PEERS])
if not data: return []
return [ peer.Peer.Unpack(p) for p in data ]
class NetworkedRunner(execution.Runner):
def __init__(self, suites, progress_indicator, context, peers, workspace):
self.suites = suites
num_tests = 0
datapath = os.path.join("out", "testrunner_data")
self.perf_data_manager = perfdata.PerfDataManager(datapath)
self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
for s in suites:
for t in s.tests:
t.duration = self.perfdata.FetchPerfData(t) or 1.0
num_tests += len(s.tests)
self._CommonInit(num_tests, progress_indicator, context)
self.tests = [] # Only used if we need to fall back to local execution.
self.tests_lock = threading.Lock()
self.peers = peers
self.pubkey_fingerprint = None # Fetched later.
self.base_rev = subprocess.check_output(
"cd %s; git log -1 --format=%%H --grep=git-svn-id" % workspace,
shell=True).strip()
self.base_svn_rev = subprocess.check_output(
"cd %s; git log -1 %s" # Get commit description.
" | grep -e '^\s*git-svn-id:'" # Extract "git-svn-id" line.
" | awk '{print $2}'" # Extract "repository@revision" part.
" | sed -e 's/.*@//'" % # Strip away "repository@".
(workspace, self.base_rev), shell=True).strip()
self.patch = subprocess.check_output(
"cd %s; git diff %s" % (workspace, self.base_rev), shell=True)
self.binaries = {}
self.initialization_lock = threading.Lock()
self.initialization_lock.acquire() # Released when init is done.
self._OpenLocalConnection()
self.local_receiver_thread = threading.Thread(
target=self._ListenLocalConnection)
self.local_receiver_thread.daemon = True
self.local_receiver_thread.start()
self.initialization_lock.acquire()
self.initialization_lock.release()
def _OpenLocalConnection(self):
self.local_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
code = self.local_socket.connect_ex(("localhost", constants.CLIENT_PORT))
if code != 0:
raise RuntimeError("Failed to connect to local server")
compression.Send([constants.REQUEST_PUBKEY_FINGERPRINT], self.local_socket)
def _ListenLocalConnection(self):
release_lock_countdown = 1 # Pubkey.
self.local_receiver = compression.Receiver(self.local_socket)
while not self.local_receiver.IsDone():
data = self.local_receiver.Current()
if data[0] == constants.REQUEST_PUBKEY_FINGERPRINT:
pubkey = data[1]
if not pubkey: raise RuntimeError("Received empty public key")
self.pubkey_fingerprint = pubkey
release_lock_countdown -= 1
if release_lock_countdown == 0:
self.initialization_lock.release()
release_lock_countdown -= 1 # Prevent repeated triggering.
self.local_receiver.Advance()
def Run(self, jobs):
self.indicator.Starting()
need_libv8 = False
for s in self.suites:
shell = s.shell()
if shell not in self.binaries:
path = os.path.join(self.context.shell_dir, shell)
# Check if this is a shared library build.
try:
ldd = subprocess.check_output("ldd %s | grep libv8\\.so" % (path),
shell=True)
ldd = ldd.strip().split(" ")
assert ldd[0] == "libv8.so"
assert ldd[1] == "=>"
need_libv8 = True
binary_needs_libv8 = True
libv8 = signatures.ReadFileAndSignature(ldd[2])
except:
binary_needs_libv8 = False
binary = signatures.ReadFileAndSignature(path)
if binary[0] is None:
print("Error: Failed to create signature.")
assert binary[1] != 0
return binary[1]
binary.append(binary_needs_libv8)
self.binaries[shell] = binary
if need_libv8:
self.binaries["libv8.so"] = libv8
distro.Assign(self.suites, self.peers)
# Spawn one thread for each peer.
threads = []
for p in self.peers:
thread = threading.Thread(target=self._TalkToPeer, args=[p])
threads.append(thread)
thread.start()
try:
for thread in threads:
# Use a timeout so that signals (Ctrl+C) will be processed.
thread.join(timeout=10000000)
self._AnalyzePeerRuntimes()
except KeyboardInterrupt:
self.terminate = True
raise
except Exception, _e:
# If there's an exception we schedule an interruption for any
# remaining threads...
self.terminate = True
# ...and then reraise the exception to bail out.
raise
compression.Send(constants.END_OF_STREAM, self.local_socket)
self.local_socket.close()
if self.tests:
self._RunInternal(jobs)
self.indicator.Done()
return not self.failed
def _TalkToPeer(self, peer):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(self.context.timeout + 10)
code = sock.connect_ex((peer.address, constants.PEER_PORT))
if code == 0:
try:
peer.runtime = None
start_time = time.time()
packet = workpacket.WorkPacket(peer=peer, context=self.context,
base_revision=self.base_svn_rev,
patch=self.patch,
pubkey=self.pubkey_fingerprint)
data, test_map = packet.Pack(self.binaries)
compression.Send(data, sock)
compression.Send(constants.END_OF_STREAM, sock)
rec = compression.Receiver(sock)
while not rec.IsDone() and not self.terminate:
data_list = rec.Current()
for data in data_list:
test_id = data[0]
if test_id < 0:
# The peer is reporting an error.
with self.lock:
print("\nPeer %s reports error: %s" % (peer.address, data[1]))
continue
test = test_map.pop(test_id)
test.MergeResult(data)
try:
self.perfdata.UpdatePerfData(test)
except Exception, e:
print("UpdatePerfData exception: %s" % e)
pass # Just keep working.
with self.lock:
perf_key = self.perfdata.GetKey(test)
compression.Send(
[constants.INFORM_DURATION, perf_key, test.duration,
self.context.arch, self.context.mode],
self.local_socket)
self.indicator.AboutToRun(test)
has_unexpected_output = test.suite.HasUnexpectedOutput(test)
if has_unexpected_output:
self.failed.append(test)
if test.output.HasCrashed():
self.crashed += 1
else:
self.succeeded += 1
self.remaining -= 1
self.indicator.HasRun(test, has_unexpected_output)
rec.Advance()
peer.runtime = time.time() - start_time
except KeyboardInterrupt:
sock.close()
raise
except Exception, e:
print("Got exception: %s" % e)
pass # Fall back to local execution.
else:
compression.Send([constants.UNRESPONSIVE_PEER, peer.address],
self.local_socket)
sock.close()
if len(test_map) > 0:
# Some tests have not received any results. Run them locally.
print("\nNo results for %d tests, running them locally." % len(test_map))
self._EnqueueLocally(test_map)
def _EnqueueLocally(self, test_map):
with self.tests_lock:
for test in test_map:
self.tests.append(test_map[test])
def _AnalyzePeerRuntimes(self):
total_runtime = 0.0
total_work = 0.0
for p in self.peers:
if p.runtime is None:
return
total_runtime += p.runtime
total_work += p.assigned_work
for p in self.peers:
p.assigned_work /= total_work
p.runtime /= total_runtime
perf_correction = p.assigned_work / p.runtime
old_perf = p.relative_performance
p.relative_performance = (old_perf + perf_correction) / 2.0
compression.Send([constants.UPDATE_PERF, p.address,
p.relative_performance],
self.local_socket)
|
multiprocesses.py
|
from multiprocessing import Process
from multiprocessing import Pool
from subprocess import check_output
from itertools import product
from functools import partial
import os
import time
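# Load-test helper: a Pool of 5 workers each receives a (run id, initial sleep,
# max iterations, time budget in seconds) tuple and repeatedly shells out to
# ApacheBench (`ab`) against http://127.0.0.1:80/ until its time budget expires.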
def info(title):
#print title
#print 'module name:', __name__
#if hasattr(os, 'getppid'): # only available on Unix
#print 'parent process:', os.getppid()
print 'process id:', os.getpid()
def f(stre,args):
info('function f')
#test = check_output(["ab","-n",'20',"-c",'5','http://127.0.0.1:80/'])
time.sleep(args[1])
print "sleep for ", args[1]
start = time.time()
for x in range(args[2]):
if time.time() - start < args[3] :
print stre, "-----------running num", args[0]
test = check_output(["ab","-n",'20',"-c",'5','http://127.0.0.1:80/'])
print test
else:
print "*********process terminated", args[0]
break
if __name__ == '__main__':
#pname = 'p'+ str(num)
#info('main line')
p = Pool(5)
list1 = [1,2,3,6,7]
list2 = [3,3,3,3,3]
list3 = [5,5,5,5,5]
list4 = [1,1,1,1,1]
process = "process"
f2 = partial(f,process)
p.map(f2, zip(list1, list2,list3,list4))
#pname = Process(target=f, args=(num,))
#pname.start()
#time.sleep(5)
#pname.join()
|
main.py
|
'''
PyCxClient
Python-based all-in-one Chaoxing learning helper, put to the test
by mos9527 @ mos9527.tooo.top
'''
import json
import logging
import os
import sys
import threading
import time
import coloredlogs
import math
import io
from apis import session,behaviorlogging, captchas, general, mooclearning, registration,activities,notification
from utils.myutils import userio
from utils.showfile import showfile
from utils.atom import streamedatom
# region Init
'''
Initialization calls
Contains argument parsing, logging and other setup.
Logging & Argparser
'''
# region Settings
settings = {
'loginmethod': -1,
'username': '',
'password': '',
'schoolid': ''
}
# Set up these strings to log in semi-automatically (you still need to pass the captcha)
mimic_settings = {
'step':20,
# In percentage * 100,the step of the mimic operation
'block':256
# The block size of a video request
}
# endregion
# region Misc Setup
def splash():
'''ASCII Art Splash,just for fun.'''
userio.get(f'''
________ _________ __________________ _____
___ __ \____ ___ ____/___ ___ ____/__ /__(_)_____________ /_
__ /_/ /_ / / / / __ |/_/ / __ /__ /_ _ \_ __ \ __/
_ ____/_ /_/ // /___ __> < / /___ _ / _ / / __/ / / / /_
/_/ _\__, / \____/ /_/|_| \____/ /_/ /_/ \___//_/ /_/\__/
/____/
Python 实现的超星学习通多合一客户端
使用说明: by greats3an@gmail.com
· 输入 {userio.cancel} 返回上一级
· 按下【回车】键登录
''',end='')
# endregion
# region Logging Setup
# Set root logger & Generate a path where the logging text will be write to
logger,logfile = logging.getLogger('main'),showfile._GenerateRandomFilename(ext=time.strftime('PyCxClient_%H%M',time.localtime()) + '.log')
def init_logging():
global logger
# Setup stdout
def WriteWrapper(write):
def wrapper(text):
write(text)
open(logfile,'a+',encoding='utf-8').write(text)
return wrapper
sys.stdin.reconfigure(encoding='utf-8')
sys.stdout.reconfigure(encoding='utf-8')
# set io encodings to `utf-8`
sys.stdout.write = WriteWrapper(sys.stdout.write)
coloredlogs.install(logging.DEBUG,stream=sys.stdout)
# Install coloredlogs ... for colored logs.
logging.getLogger("urllib3").setLevel(logging.WARNING)
# turn up logs levels for urllib3 which is used by requests
logger.debug('Program started at %s' % time.strftime('%H:%M:%S',time.localtime()))
# endregion
# region Argument Parser
def init_parseargs():
global settings,mimic_settings
showfile.fileprocesser = sys.argv[1] if len(sys.argv) > 1 else ''
if showfile.fileprocesser:logger.debug('Using custom file processer %s' % showfile.fileprocesser)
# set up a custom file processor if one was passed in argv
# TODO:parse arguments for settings
# endregion
# region Login sequence
def 账号密码登录(settings):
'''Perform login by interfacing with user,returns login result'''
print('【卡密登录】')
# User should login now
result = registration.login.NormalLogin(
settings['username'] if settings['username'] else userio.get('输入您的账号'),
settings['password'] if settings['password'] else userio.get('输入您的密码'),
)
if not 'url' in result.keys():
logger.fatal('Failed to login:%s' %
result['mes'] if 'mes' in result.keys() else '原因未知')
userio.get('按任意键', end='退出')
sys.exit(2)
# We have logged in,now,list all the courses the user has
logger.info('User logged in')
return result
def 单位登录(settings):
'''Perform login by interfacing with user,returns login result'''
def GetSchoolID():
'''Searches for the unit's ID, then lets the user choose'''
units = registration.serachunits.SearchUnits(
userio.get('请输入您所在机构的名称进行模糊搜索(如:XX中学)'))
userio.listout(
units['froms'], foreach=lambda x,i: x['name'], title='机构列表')
unit = units['froms'][int(userio.get('输入您所处机构的【序号】'))]
return unit['schoolid']
print('【单位登录】')
# User should login now
result = registration.login.UnitLogin(
settings['schoolid'] if settings['schoolid'] else GetSchoolID(),
settings['username'] if settings['username'] else userio.get(
'输入您于该机构中的【学号 / 工号】'),
settings['password'] if settings['password'] else userio.get('输入您的密码'),
# Prompt the user to input the captcha code
# Renewing captcha,which will also give us a new JSESSIONID
captchas.logincaptcha.RenewCaptcha(True)
)
if not 'url' in result.keys():
logger.fatal('Failed to login:%s' %
result['mes'] if 'mes' in result.keys() else '原因未知')
userio.get('按任意键', end='退出')
sys.exit(2)
# We have logged in,now,list all the courses the user has
logger.info('User logged in')
return result
def init_login():
# Perform login
methods = [账号密码登录, 单位登录]
if not 'loginmethod' in list(settings.keys()) or settings['loginmethod'] == -1:
userio.listout(methods, foreach=lambda x,i: x.__name__, title='选择登录途径')
method = methods[int(userio.get('输入登录方法【序号】',end='>>>'))]
# Then we enter the main loop
else:
method = methods[int(settings['loginmethod'])]
method(settings)
# endregion
# region init()
newest_id,Ts,Tscale = None,[],1
# NewestID,TaskS,Timescale
def init():
'''
Set up logging and argument parsing.
Then prompt the user to log in and start all timed tasks declared with the decorator `@T(interval)`
'''
# Start timed execution thread
def _T():
'''Execute timed sequence'''
global Ts
while True:
for t in Ts:
if int(time.time() - t['lastexec']) >= t['every']:
t['lastexec'] = time.time()
def wrapper():
try:
t['method'](*t['args'],**t['kwargs'])
except Exception as e:
logger.warning('Exception occurred when executing %s: %s' % (t['method'].__name__, e))
threading.Thread(target=wrapper,daemon=True).start()
# Starts a subthread for this operation
# Execute when time is up
time.sleep(Tscale)
# Minimum timescale of x.xx s
# First,initialize logging
init_logging()
# Then,parses arguments
init_parseargs()
# Splash text...why not?
splash()
# Finally,prompt the user to login
init_login()
# And Starts a time sequence executer
threading.Thread(target=_T,daemon=True).start()
# endregion
# endregion
# region Nested Life Cycle
'''
Work during the main loop
CLI interfacing is based on this classic nested
loop-chain construct. It might be a little confusing,
but in development it yields great extensibility and debuggability,
since all call stacks can be easily traced
'''
# region Looper Utilities
def A(actions):
'''`A:Action`, prompts the user to select an action of choice'''
userio.listout(actions, foreach=lambda x,i: x.__name__, title='可用操作')
action = actions[int(userio.get('输入操作【序号】'))]
return action
def L(method):
'''`L:Looper`, wraps a function in a loop and breaks once an exception occurs'''
def wrapper(*args, **kwargs):
while True:
try:
method(*args, **kwargs)
except Exception as e:
logger.error(e)
break
return wrapper
def T(every,*args,**kwargs):
'''`T:Timed`, timed-task decorator. Adds a timed task into the timed sequence'''
def wrapper(method):
global Ts
Ts.append({'method':method,'every':every,'lastexec':time.time(),'args':args,'kwargs':kwargs})
return method # keep the decorated function bound to its name instead of None
return wrapper
# endregion
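# A minimal sketch of how these helpers compose (names below are illustrative only):
#
#   @T(60)                        # registered to run every 60 s by the timed sequence
#   def heartbeat():
#       logger.debug('still alive')
#
#   def some_action():
#       print('hello')
#       raise Exception('Done')   # raising is how an action exits the surrounding L() loop
#
#   L(A([some_action]))()         # prompt for an action, repeat it until it raises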
# region Notification Management
notifylambda = lambda x,i:f"""[{x['completeTime']}] {x['createrName']} {('(我)' if str(x['createrId']) == str(session.cookies.get('_uid')) else '')}
{x['title'].strip()}:
{x['content'].strip() if 'content' in x.keys() else '(请在【通知列表】查看)'}"""
# Lambda for stringify-ing notifications
@T(1) # Pull every 1s
def 拉取通知():
global newest_id
new_notification = notification.pull.PullNotifiactions(lastValue=newest_id,getNew=True)
if 'msg' in new_notification.keys() and newest_id:
# Incoming message: the message exists and the id has been renewed before
userio.listout(new_notification['msg'],notifylambda,title='新信息',showindex=False)
if not newest_id == new_notification['objs']:
newest_id = new_notification['objs']
logger.debug('Updating newest notification ID to %s' % newest_id)
# endregion
# region Nested Functions
def 课堂列表():
def _select():
'''User picks one course from the list; this function returns the selected one'''
courses = mooclearning.studentcourses.LoadCourses()
userio.listout(
courses, foreach=lambda x,i: x["title"] + ' (' + x["description"][0] + ')', title='课堂列表')
course = courses[int(userio.get('输入课堂【序号】'))]
# Now the user should choose one of those courses
return course
course = _select()
def 课程列表(course):
def _select(course):
'''Then,user picks one task from the course,this function returns it'''
logger.debug('Loading course %s' % course['title'])
classes = mooclearning.courseclasses.LoadClasses(course['url'])
# User can now select one of the classes to start 'learning'
userio.listout(classes.keys(), title='课程列表')
class_ = classes[list(classes.keys())[int(userio.get('输入课程【序号】'))]]
# Returns the class selected
return class_
class_ = _select(course)
def 任务列表(class_):
def _select(class_):
'''We can finally do things on this particular task'''
userio.listout(class_, foreach=lambda x,i: x['chapter'] + ' ' + x['title'], title='任务列表')
task = class_[int(userio.get('输入任务【序号】'))]
# Now we load the info of such sub task
logger.debug('Loading task %s' % task['title'])
_task = mooclearning.classtasks.LoadClassInfo(task['knowledge_url'])
# returns the task selected
return {**task,**_task}
task = _select(class_)
def 任务点列表(task):
def _select(task):
'''User will now select a task point of choice'''
userio.listout(task['attachments'],
foreach=lambda x,i: x['property']['name'], title='任务点')
attachment = task['attachments'][int(userio.get('输入任务点【序号】'))]
status = {
**general.objectstatus.GetObjectStatus(attachment['property']['objectid']),
'isPassed': attachment['isPassed'] if 'isPassed' in attachment.keys() else False
}
# returns the selected attachment and its status
return {'attachment': attachment, 'status': status}
taskpoint = _select(task)
print('\n'.join([
f"{userio.header('任务点状态')}",
f" 名称:{taskpoint['attachment']['property']['name']}",
f" 类型:{taskpoint['attachment']['type']}",
f"通过状态:{['未通过 / 未知','已通过'][taskpoint['status']['isPassed']]}"
]))
def _enumerate(task, attachment, status):
'''Gets all supported operations for the certain task'''
def 获取下载链接():
return f"""
直链:{status['download']}
可续传:{status['http']} (需要额外 Header 'referer':'https://mooc1-1.chaoxing.com/ananas/modules/video/index.html?v=2019-1113-1705')
"""
def 获取封面():
return f"""
链接:{status['screenshot']}
"""
def 下载为MP3():
return f"""
链接:{status['mp3']} (需要额外 Header 'referer':'https://mooc1-1.chaoxing.com/ananas/modules/video/index.html?v=2019-1113-1705')
"""
def 设置观看时长(mimic_settings=mimic_settings):
step = mimic_settings['step']
block = mimic_settings['block']
logger.debug('Fetching HTTP video ATOM header')
headers = {'referer': 'https://mooc1-1.chaoxing.com/ananas/modules/video/index.html?v=2019-1113-1705'}
# The header necessary to request the HTTP streamed (206) video source
header = streamedatom.GetHTTPVideoHeader(status['http'],session,headers=headers)
content_length = int(header['http']['Content-Length'])
real_duration = math.floor(header['atom'].ATOM_DURATION_SEC)
# Note that the video's `real_duration` (taken from its ATOM header)
report_duration = status['duration']
# ...is usually the same as `report_duration`, yet the `report_duration` given by the server
# is sometimes lower than `real_duration`. To make `multimedialog` work, the maximum duration
# (in clipTime) must be `report_duration`, otherwise the request results in a 403.
# When mimicking the playback, however, we use `real_duration` so the playback time reaches a full 100%
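# Worked example (hypothetical numbers): with step=20 the loop below runs at
# seek = 0, 20, ..., 100 (6 iterations). For a 600 s video of 40 MB watched to
# 100% (percentage = 1.0), the seek=40 iteration requests `block` bytes starting
# at byte 16,000,000 and logs played_duration = 240 s via postLog().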
print('警告:1.更改操作只能【增加】时长,而不能【消减】时长')
print()
print(' 故该操作不可逆,请慎重使用')
print()
print(f' 2.该操作将视频分为 {int(100 / step)} 份并同时对API和视频源(分块:{block} B)进行')
print()
print(' 请求,且在大多数情况下表现安全,但**不保证**')
print()
print(' 不会导致后台数据的异常,所产生的后果将由阁下')
print()
print(' 自行承担')
print()
print(' 注:需要刷新视频页面查看结果')
print()
print('视频总时长(秒):', real_duration)
set_duration = int(userio.get('欲调节到的观看时长'))
percentage = set_duration / real_duration
print('观看时长、总时长比:%s ' % percentage)
def postLog(played_duration):
'''Posts a MultimediaLog'''
return behaviorlogging.multimedialog.MultimediaLog(
task['defaults']['reportUrl'],
int(played_duration),
report_duration,
status['dtoken'],
task['defaults']['clazzId'],
attachment['property']['objectid'],
attachment['otherInfo'],
attachment['property']['jobid'] if 'jobid' in attachment['property'].keys(
) else attachment['property']['_jobid'],
isdrag=0 if played_duration < int(report_duration) * 0.5 else 4
# Minimum playback 'pass' ratio
)
for seek in range(0,100 + step,step):
# Mimic a normal watch routine
seek_percentage = seek / 100
# Percentage of the loop
seek_head = int(content_length * percentage * seek_percentage)
# Byte start position of the request
played_duration = int(real_duration * percentage * seek_percentage)
# Time start position of the log
logger.debug('Stepping watch routine head: %s / %s (%s / 100)' % (seek_head,content_length,seek))
# Loads the streaming video sources by chunks
r = streamedatom.PartialGet(status['http'],session,seek_head,block,headers=headers)
logger.debug('Server returned code %s' % r.status_code)
# Sends the request
result = postLog(played_duration)
# Does the logger
return f'''
返回值:{result}
结果:{'播放已结束' if 'true' in result else '播放未结束' if 'false' in result else '修改失败'}
'''
def 设置考核点():
print('警告: 该操作不可逆,请慎重使用')
userio.get('按下回车键', end='[确定]')
result = behaviorlogging.documentpoint.SetDocumentPoint(
attachment['property']['jobid'] if 'jobid' in attachment['property'].keys(
) else attachment['property']['_jobid'],
task['defaults']['knowledgeid'],
task['defaults']['courseid'],
task['defaults']['clazzId'],
attachment['jtoken']
)
return f'''
信息:{result["msg"]}
结果:{'设置成功' if result['status'] else '设置失败(该项目可能不属于考核点)' }
'''
operations = {
'*': [获取下载链接],
'video': [获取封面, 下载为MP3, 设置观看时长],
'document': [设置考核点]
}
return operations['*'] + operations[attachment['type']] if attachment['type'] in operations.keys() else []
AS = _enumerate(task, taskpoint['attachment'], taskpoint['status'])
print(A(AS)())
def 设置阅读记录(task):
result = behaviorlogging.studentstudy.SetStudentStudy(task['url'])
print('结果:',result)
raise Exception('Done')
AS = [任务点列表,设置阅读记录]
L(A(AS))(task)
AS = [任务列表]
L(A(AS))(class_)
def 活动列表(course):
def _select(activitylist):
'''User will now choose one of the activities'''
userio.listout(activitylist,foreach=lambda x,i:'[%s] [%s] %s %s' %(['进行中','已结束'][x['activity_ended']],x['activity_type_str'],x['activity_description'],x['activity_alert']),title='活动列表')
return activitylist[int(userio.get('输入序号'))]
activitylist = activities.courseactivites.GetCourseActivites(course['url'])
activity = _select(activitylist)
def 签到():
try:
userio.get(f'输入 {userio.cancel} 取消,否则按回车键继续')
result = activities.signin.NormalSingin(activity['url'])
return '结果:\n ' + result
except Exception:
return ''
def 查看选人情况():
result = activities.pick.PickInfo(activity['url'])
userio.listout(
result,
foreach=lambda x,i:f"{x['name']} {('(我)' if str(x['uid']) == str(session.cookies.get('_uid')) else '')} {x['updatetimeStr']}",
title='被选到的人')
userio.get('按回车键',ignore_cancel=True)
return ''
def 查看评分信息():
result = activities.rating.RateDetail(activity['url'])
print(f" 标题: {result['rate_info']['title']}")
print(f"其他信息: {result['rate_info']['deploy_info']}")
userio.listout(result['rate_survey'],lambda x,i:f"{x['sender']} | {x['score']} | {x['message']}",'调研信息')
userio.listout(result['rate_imgs'],lambda x,i:f'图片 {i}','内容')
try:
url = result['rate_imgs'][int(userio.get('输入预览序号'))]
showfile.ShowPath(url,ext='jpg',lifetime=10)
except Exception:
pass
return ''
def 评分():
print('注意:该功能没有对分数进行上、下限进行限制')
print(' 故阁下需为自己所给出的异常分数')
print(' 自行负责,请在充分考虑后果后继续')
print()
userio.get(f'按回车键继续,输入 {userio.cancel} 取消')
content = userio.get('评分内容')
score = userio.get('评分分数')
result = activities.rating.Rate(activity['url'],content,score)
return f" 结果:{result['msg'] if result['msg'] else '成功'}"
def _enumerate(activity_type):
operations = {
'*':[],
'2':[签到],
'11':[查看选人情况],
'23':[查看评分信息,评分]
}
return operations['*'] + operations[str(activity_type)] if str(activity_type) in operations.keys() else []
print(A(_enumerate(activity['activity_type']))())
AS = [课程列表,活动列表]
L(A(AS))(course)
def 通知列表(pageid=0):
notice = notification.pull.PullNotifiactions(0,pageid)
if not ('notices' in notice.keys()):
print ('!没有更多通知')
raise Exception('No older notifications')
return
userio.listout(
notice['notices']['list'],
notifylambda,
title='通知列表',
reverse=True)
lastpage = notice['notices']['lastGetId']
userio.get(f'按回车查看下一页,否则输入 {userio.cancel} 退出')
return 通知列表(lastpage)
def 输入邀请码():
inviteCode = userio.get('请输入邀请码')
inviteMessage = mooclearning.invitecode.ParseInviteCode(inviteCode)
print(userio.header('课程信息'))
print(f'''
课程名: {inviteMessage['title']}
授课教师:{inviteMessage['teacher']}
附加信息:{inviteMessage['msg']}
''')
if inviteMessage['msg']:raise Exception(inviteMessage['msg'])
userio.get('输入 q 取消添加,否则按回车参加该课程')
result = mooclearning.invitecode.JoinByInviteCode(inviteMessage['courseId'],inviteMessage['classId'])
print('结果:',result['msg'])
raise Exception('Done')
# endregion
# region entryPoint()
def entryPoint():
'''Entry point to the looper'''
AS = [课堂列表,通知列表,输入邀请码]
# AS: ActionS to be enumerated
L(A(AS))()
# endregion
# endregion
# region End
def end():
session.close()
userio.get('按任意键退出,日志文件将会被清除')
os.remove(logfile)
sys.exit(0)
# endregion
# Lifecycle of this program:
if __name__ == "__main__":
# Init: Logging in & `not-a-bot` verification
init()
# Enter the entry point once initialization is finished
L(entryPoint)()
# End: Cleaning up logs & closes connection.Exits with code 0
end()
|
train.py
|
import pdb
import os
import torch
import torch.distributed as dist
from math import ceil
from random import Random
from torch.multiprocessing import Process
from torch.autograd import Variable
from torchvision import datasets, transforms
class Partition(object):
""" Dataset-like object, but only access a subset of it. """
def __init__(self, data, index):
self.data = data
self.index = index
def __len__(self):
return len(self.index)
def __getitem__(self, index):
data_idx = self.index[index]
return self.data[data_idx]
class DataPartitioner(object):
""" Partitions a dataset into different chuncks. """
def __init__(self, data, sizes=[0.7, 0.2, 0.1], seed=1234):
self.data = data
self.partitions = []
rng = Random()
rng.seed(seed)
data_len = len(data)
indexes = [x for x in range(0, data_len)]
rng.shuffle(indexes)
for frac in sizes:
part_len = int(frac * data_len)
self.partitions.append(indexes[0:part_len])
indexes = indexes[part_len:]
def use(self, partition):
return Partition(self.data, self.partitions[partition])
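# Usage sketch (sizes and seed are illustrative): split a dataset 80/20 and take
# the first chunk as the training subset:
#   partitioner = DataPartitioner(dataset, sizes=[0.8, 0.2], seed=1234)
#   train_subset = partitioner.use(0)   # Partition exposing 80% of the indices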
class RingAllReduce(object):
def __init__(self, model, criterion, optimizer, dataset, epoch=100, addr='127.0.0.1', port='29500', backend='gloo'):
self.model = model
self.criterion = criterion
self.optimizer = optimizer
self.dataset = dataset
self.epoch = epoch
self.addr = addr
self.port = port
self.backend = backend
def partition_dataset(self):
size = dist.get_world_size()
bsz = 128 // size
partition_sizes = [1.0 / size for _ in range(size)]
partition = DataPartitioner(self.dataset, partition_sizes)
partition = partition.use(dist.get_rank())
train_set = torch.utils.data.DataLoader(
partition, batch_size=bsz, shuffle=True)
return train_set, bsz
def average_gradients(self):
""" Gradient averaging. """
size = float(dist.get_world_size())
for param in self.model.parameters():
dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM, group=0)
param.grad.data /= size
def run(self, rank, size):
""" Distributed Synchronous SGD Example """
torch.manual_seed(1234)
train_set, bsz = self.partition_dataset()
optimizer = self.optimizer
criterion = self.criterion
num_batches = ceil(len(train_set.dataset) / float(bsz))
for epoch in range(self.epoch):
epoch_loss = 0.0
for data, target in train_set:
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = self.model(data)
loss = criterion(output, target)
epoch_loss += loss.data.item()
loss.backward()
self.average_gradients()
optimizer.step()
print('Rank ',
dist.get_rank(), ', epoch ', epoch, ': ',
epoch_loss / num_batches)
def init_processes(self, rank, size, fn):
""" Initialize the distributed environment. """
os.environ['MASTER_ADDR'] = self.addr
os.environ['MASTER_PORT'] = self.port
dist.init_process_group(self.backend, rank=rank, world_size=size)
fn(rank, size)
def train(self, size=2):
processes = []
for rank in range(size):
p = Process(target=self.init_processes, args=(rank, size, self.run))
p.start()
processes.append(p)
for p in processes:
p.join()
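# Usage sketch (the toy model and MNIST dataset below are illustrative assumptions,
# not part of this file):
#   import torch.nn as nn
#   from torchvision import datasets, transforms
#   model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
#   mnist = datasets.MNIST('./data', train=True, download=True,
#                          transform=transforms.ToTensor())
#   trainer = RingAllReduce(model, nn.CrossEntropyLoss(),
#                           torch.optim.SGD(model.parameters(), lr=0.01),
#                           mnist, epoch=1)
#   trainer.train(size=2)   # spawns 2 processes; each trains on half of the data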
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import threading
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
from binascii import hexlify
from os import urandom
import json
import ssl
import sys
import OpenSSL.crypto
from knack.prompting import prompt_pass, NoTTYException
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.web.models import (Site, SiteConfig, User, AppServicePlan, SiteConfigResource,
SkuDescription, SslState, HostNameBinding, NameValuePair,
BackupRequest, DatabaseBackupSetting, BackupSchedule,
RestoreRequest, FrequencyUnit, Certificate, HostNameSslState,
RampUpRule, UnauthenticatedClientAction, ManagedServiceIdentity,
DeletedAppRestoreRequest, DefaultErrorResponseException)
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console
from azure.cli.core.util import open_page_in_browser
from .vsts_cd_provider import VstsContinuousDeliveryProvider
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES
from ._client_factory import web_client_factory, ex_handler_factory
from ._appservice_utils import _generic_site_operation
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities"
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements
deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None, multicontainer_config_type=None, multicontainer_config_file=None,
tags=None):
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
client = web_client_factory(cmd.cli_ctx)
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
is_linux = plan_info.reserved
node_default_version = '8.11.1'
location = plan_info.location
site_config = SiteConfig(app_settings=[])
webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags)
helper = _StackRuntimeHelper(client, linux=is_linux)
if is_linux:
if not validate_container_app_create_options(runtime, deployment_container_image_name,
multicontainer_config_type, multicontainer_config_file):
raise CLIError("usage error: --runtime | --deployment-container-image-name |"
" --multicontainer-config-type TYPE --multicontainer-config-file FILE")
if startup_file:
site_config.app_command_line = startup_file
if runtime:
site_config.linux_fx_version = runtime
match = helper.resolve(runtime)
if not match:
raise CLIError("Linux Runtime '{}' is not supported."
"Please invoke 'list-runtimes' to cross check".format(runtime))
elif deployment_container_image_name:
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
value="false"))
elif multicontainer_config_type and multicontainer_config_file:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
elif plan_info.is_xenon: # windows container webapp
site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
elif runtime: # windows webapp with runtime specified
if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
"--multicontainer-config-type and --multicontainer-config-file is "
"only appliable on linux webapp")
match = helper.resolve(runtime)
if not match:
raise CLIError("Runtime '{}' is not supported. Please invoke 'list-runtimes' to cross check".format(runtime)) # pylint: disable=line-too-long
match['setter'](match, site_config)
# Be consistent with portal: any windows webapp should have this even if it doesn't have node in the stack
if not match['displayName'].startswith('node'):
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
else: # windows webapp without runtime specified
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
if site_config.app_settings:
for setting in site_config.app_settings:
logger.info('Will set appsetting %s', setting)
poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
webapp = LongRunningOperation(cmd.cli_ctx)(poller)
# Ensure SCC operations follow right after the 'create', with no preceding appsetting update commands
_set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
multicontainer_config_type=None, multicontainer_config_file=None):
if bool(multicontainer_config_type) != bool(multicontainer_config_file):
return False
opts = [runtime, deployment_container_image_name, multicontainer_config_type]
return len([x for x in opts if x]) == 1 # you can only specify one of the combinations
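# For example (illustrative values): runtime='node|10.1' alone -> True; a runtime plus
# --deployment-container-image-name -> False; --multicontainer-config-type without
# --multicontainer-config-file -> False.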
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_application_settings', slot)
for name_value in settings + slot_settings:
# split at the first '=', appsetting should not have '=' in the name
settings_name, value = name_value.split('=', 1)
app_settings.properties[settings_name] = value
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
app_settings_slot_cfg_names = []
if slot_settings:
new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
slot_cfg_names.app_setting_names += new_slot_setting_names
app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
share_name, access_key, mount_path=None, slot=None, slot_setting=False):
from azure.mgmt.web.models import AzureStorageInfoValue
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
if custom_id in azure_storage_accounts.properties:
raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
"Use 'az webapp config storage-account update' to update an existing "
"Azure storage account configuration.".format(custom_id))
azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
share_name=share_name, access_key=access_key,
mount_path=mount_path)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
from azure.mgmt.web.models import AzureStorageInfoValue
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
if not existing_account_config:
raise CLIError("No Azure storage account configuration found with the id '{}'. "
"Use 'az webapp config storage-account add' to add a new "
"Azure storage account configuration.".format(custom_id))
new_account_config = AzureStorageInfoValue(
type=storage_type or existing_account_config.type,
account_name=account_name or existing_account_config.account_name,
share_name=share_name or existing_account_config.share_name,
access_key=access_key or existing_account_config.access_key,
mount_path=mount_path or existing_account_config.mount_path
)
azure_storage_accounts.properties[custom_id] = new_account_config
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
zip_url = scm_url + '/api/zipdeploy?isAsync=true'
deployment_status_url = scm_url + '/api/deployments/latest'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['content-type'] = 'application/octet-stream'
import requests
import os
# Read file content
with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
zip_content = fs.read()
requests.post(zip_url, data=zip_content, headers=headers)
# check the status of async deployment
response = _check_zip_deployment_status(deployment_status_url, authorization, timeout)
return response
def get_sku_name(tier): # pylint: disable=too-many-return-statements
tier = tier.upper()
if tier == 'F1' or tier == "FREE":
return 'FREE'
elif tier == 'D1' or tier == "SHARED":
return 'SHARED'
elif tier in ['B1', 'B2', 'B3', 'BASIC']:
return 'BASIC'
elif tier in ['S1', 'S2', 'S3']:
return 'STANDARD'
elif tier in ['P1', 'P2', 'P3']:
return 'PREMIUM'
elif tier in ['P1V2', 'P2V2', 'P3V2']:
return 'PREMIUMV2'
elif tier in ['PC2', 'PC3', 'PC4']:
return 'PremiumContainer'
else:
raise CLIError("Invalid sku(pricing tier), please refer to command help for valid values")
def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name,
setting_properties, slot=None, client=None):
client = client or web_client_factory(cli_ctx)
operation = getattr(client.web_apps, operation_name if slot is None else operation_name + '_slot')
if slot is None:
return operation(resource_group_name, name, str, setting_properties)
return operation(resource_group_name, name, slot, str, setting_properties)
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
webapp = app_instance
if not app_instance: # when the routine is invoked as a help method, not through commands
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
_rename_server_farm_props(webapp)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
return webapp
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,
skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
updater = client.web_apps.create_or_update_slot if slot else client.web_apps.create_or_update
kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance,
skip_dns_registration=skip_dns_registration,
skip_custom_domain_verification=skip_custom_domain_verification,
force_dns_registration=force_dns_registration,
ttl_in_seconds=ttl_in_seconds)
if slot:
kwargs['slot'] = slot
return updater(**kwargs)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
if 'function' in instance.kind:
raise CLIError("please use 'az functionapp update' to update this function app")
if client_affinity_enabled is not None:
instance.client_affinity_enabled = client_affinity_enabled == 'true'
if https_only is not None:
instance.https_only = https_only == 'true'
return instance
def set_functionapp(cmd, resource_group_name, name, **kwargs):
instance = kwargs['parameters']
if 'function' not in instance.kind:
raise CLIError('Not a function app to update')
client = web_client_factory(cmd.cli_ctx)
return client.web_apps.create_or_update(resource_group_name, name, site_envelope=instance)
def list_webapp(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' not in r.kind]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
result = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
return sorted(result, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restore_from_deleted_app', slot, request)
def list_function_app(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' in r.kind]
def _list_app(cli_ctx, resource_group_name=None):
client = web_client_factory(cli_ctx)
if resource_group_name:
result = list(client.web_apps.list_by_resource_group(resource_group_name))
else:
result = list(client.web_apps.list())
for webapp in result:
_rename_server_farm_props(webapp)
return result
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
client = web_client_factory(cli_ctx)
result = list(client.deleted_web_apps.list())
if resource_group_name:
result = [r for r in result if r.resource_group == resource_group_name]
if name:
result = [r for r in result if r.deleted_site_name.lower() == name.lower()]
if slot:
result = [r for r in result if r.slot.lower() == slot.lower()]
return result
def assign_identity(cmd, resource_group_name, name, role='Contributor', slot=None, scope=None):
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
webapp.identity = ManagedServiceIdentity(type='SystemAssigned')
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot).identity
def remove_identity(cmd, resource_group_name, name, slot=None):
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
webapp.identity = ManagedServiceIdentity(type='None')
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter)
return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
client_id=None, token_store_enabled=None, # pylint: disable=unused-argument
token_refresh_extension_hours=None, # pylint: disable=unused-argument
allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
microsoft_account_client_secret=None, # pylint: disable=unused-argument
microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
if action == 'AllowAnonymous':
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
elif action:
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
auth_settings.default_provider = AUTH_TYPES[action]
import inspect
frame = inspect.currentframe()
bool_flags = ['enabled', 'token_store_enabled']
    # note: getargvalues is also used in azure.cli.core.commands;
    # there is no simple replacement for this deprecated method on Python 3.5
    args, _, _, values = inspect.getargvalues(frame)  # pylint: disable=deprecated-method
    for arg in args[2:]:
if values.get(arg, None):
setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_runtimes(cmd, linux=False):
client = web_client_factory(cmd.cli_ctx)
runtime_helper = _StackRuntimeHelper(client, linux)
return [s['displayName'] for s in runtime_helper.stacks]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
keep_dns_registration=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.delete_slot(resource_group_name, name, slot,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
else:
client.web_apps.delete(resource_group_name, name,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
def stop_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
return _build_app_settings_output(result.properties, slot_app_setting_names)
def get_connection_strings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.connection_string_names or []
result = [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_constr_names} for p in result.properties]
return result
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
slot_azure_storage_config_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.azure_storage_config_names or []
return [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_azure_storage_config_names} for p in result.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
setattr(webapp, 'ftpPublishingUrl', url)
return webapp
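# The "fx version" stored in site config describes the container runtime, e.g. 'DOCKER|<image>' for a
# single container or '<container_config_type>|<base64 config>' for the multi-container config types.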
def _format_fx_version(custom_image_name, container_config_type=None):
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
fx_version = _format_fx_version(custom_image_name)
web_app = get_webapp(cmd, resource_group_name, name, slot)
linux_fx = fx_version if web_app.reserved else None
windows_fx = fx_version if web_app.is_xenon else None
return update_site_configs(cmd, resource_group_name, name,
linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
site_config = get_site_configs(cmd, resource_group_name, name, slot)
return site_config.linux_fx_version or site_config.windows_fx_version or ''
def url_validator(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc, result.path])
except ValueError:
return False
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
from base64 import b64decode
linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
if not any([linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES]):
raise CLIError("Cannot decode config that is not one of the"
" following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
return b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
from base64 import b64encode
config_file_bytes = None
if url_validator(file_name):
response = urlopen(file_name, context=_ssl_context())
config_file_bytes = response.read()
else:
with open(file_name, 'rb') as f:
config_file_bytes = f.read()
# Decode base64 encoded byte array into string
return b64encode(config_file_bytes).decode('utf-8')
# Note: if the non-optional parameters of update_site_configs change, adjust the reflection logic
# inside the method accordingly.
def update_site_configs(cmd, resource_group_name, name, slot=None,
linux_fx_version=None, windows_fx_version=None, php_version=None, python_version=None, # pylint: disable=unused-argument
net_framework_version=None, # pylint: disable=unused-argument
java_version=None, java_container=None, java_container_version=None, # pylint: disable=unused-argument
remote_debugging_enabled=None, web_sockets_enabled=None, # pylint: disable=unused-argument
always_on=None, auto_heal_enabled=None, # pylint: disable=unused-argument
use32_bit_worker_process=None, # pylint: disable=unused-argument
min_tls_version=None, # pylint: disable=unused-argument
http20_enabled=None, # pylint: disable=unused-argument
app_command_line=None, # pylint: disable=unused-argument
ftps_state=None): # pylint: disable=unused-argument
configs = get_site_configs(cmd, resource_group_name, name, slot)
if linux_fx_version:
if linux_fx_version.strip().lower().startswith('docker|'):
update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
else:
delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
import inspect
frame = inspect.currentframe()
bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled']
    # note: getargvalues is also used in azure.cli.core.commands;
    # there is no simple replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[3:]:
if values.get(arg, None):
setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
app_settings.properties.pop(setting_name, None)
if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
azure_storage_accounts.properties.pop(custom_id, None)
if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.remove(custom_id)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
return result.properties
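# Returns an SSL context for urlopen: an explicitly constructed SSLContext on old Python versions or
# in Cloud Shell on Windows, otherwise the platform's default context.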
def _ssl_context():
    if sys.version_info < (3, 4) or (in_cloud_console() and sys.platform == 'win32'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
slot_cfg_names = slot_cfg_names or []
return [{'name': p,
'value': app_settings[p],
'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)]
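# Connection strings are passed as 'NAME=VALUE' pairs; names supplied via slot_settings are also
# recorded as slot-sticky in the slot configuration names resource.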
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
settings=None, slot=None, slot_settings=None):
from azure.mgmt.web.models import ConnStringValueTypePair
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
for name_value in settings + slot_settings:
# split at the first '=', connection string should not have '=' in the name
conn_string_name, value = name_value.split('=', 1)
        if value[0] in ["'", '"']:  # strip away the quotes used as separators
value = value[1:-1]
conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
type=connection_string_type)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
if slot_settings:
new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
slot_cfg_names.connection_string_names += new_slot_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
conn_strings.properties.pop(setting_name, None)
if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
slot_cfg_names.connection_string_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
websites_enable_app_service_storage=None, docker_registry_server_password=None,
multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
settings = []
if docker_registry_server_url is not None:
settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
if (not docker_registry_server_user and not docker_registry_server_password and
docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
parsed = urlparse(docker_registry_server_url)
registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
try:
docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed
if docker_registry_server_user is not None:
settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
if docker_registry_server_password is not None:
settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
if docker_custom_image_name is not None:
_add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
if websites_enable_app_service_storage:
settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
update_app_settings(cmd, resource_group_name, name, settings, slot)
settings = get_app_settings(cmd, resource_group_name, name, slot)
if multicontainer_config_file and multicontainer_config_type:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version)
elif multicontainer_config_file or multicontainer_config_type:
        logger.warning('Both --multicontainer-config-file FILE and --multicontainer-config-type TYPE must be specified')
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings))
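# Resolves admin credentials for an Azure Container Registry by locating the registry resource in the
# subscription and calling list_credentials; this only works when the registry's admin user is enabled.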
def _get_acr_cred(cli_ctx, registry_name):
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.cli.core.commands.parameters import get_resources_in_subscription
client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
result = [item for item in result if item.name.lower() == registry_name]
if not result or len(result) > 1:
        raise CLIError("Found no container registry, or more than one, with name '{}'.".format(registry_name))
resource_group_name = parse_resource_id(result[0].id)['resource_group']
registry = client.get(resource_group_name, registry_name)
if registry.admin_user_enabled: # pylint: disable=no-member
cred = client.list_credentials(resource_group_name, registry_name)
return cred.username, cred.passwords[0].value
raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
"credentials or run 'az acr update -n {} --admin-enabled true' to enable "
"admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
_delete_linux_fx_version(cmd, resource_group_name, name, slot)
delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config, slot))
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config=None, slot=None):
result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES]
fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
if fx_version:
added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME',
'value': fx_version}
result.append(added_image_name)
if show_multicontainer_config:
decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
decoded_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
'value': decoded_value}
result.append(decoded_image_name)
return result
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]:
settings[x] = None
return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
binding = HostNameBinding(location=webapp.location, site_name=webapp.name)
if slot is None:
return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name, hostname, binding)
return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name, hostname, binding,
slot)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
result = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'list_host_name_bindings', slot))
for r in result:
r.name = r.name.split('/')[-1]
return result
def get_external_ip(cmd, resource_group_name, webapp_name):
    # the logic here is ported from the portal
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
if webapp.hosting_environment_profile:
address = client.app_service_environments.list_vips(
resource_group_name, webapp.hosting_environment_profile.name)
if address.internal_ip_address:
ip_address = address.internal_ip_address
else:
vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
ip_address = vip.virtual_ip if vip else address.service_ip_address
else:
ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
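# Creates a deployment slot; when configuration_source is given, the site config, app settings and
# connection strings are cloned from production (or from another slot), excluding slot-sticky settings.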
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, webapp)
if not site:
raise CLIError("'{}' app doesn't exist".format(webapp))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
clone_from_prod = None
slot_def.site_config = SiteConfig()
poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
clone_from_prod = configuration_source.lower() == webapp.lower()
site_config = get_site_configs(cmd, resource_group_name, webapp,
None if clone_from_prod else configuration_source)
_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_configuration', slot, site_config)
# slot create doesn't clone over the app-settings and connection-strings, so we do it here
# also make sure slot settings don't get propagated.
if configuration_source:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings',
src_slot)
for a in slot_cfg_names.app_setting_names or []:
app_settings.properties.pop(a, None)
connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_connection_strings',
src_slot)
for a in slot_cfg_names.connection_string_names or []:
connection_strings.properties.pop(a, None)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_application_settings',
app_settings.properties, slot, client)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_connection_strings',
connection_strings.properties, slot, client)
result.name = result.name.split('/')[-1]
return result
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
manual_integration=None, git_token=None, slot=None, cd_app_type=None,
app_working_dir=None, nodejs_task_runner=None, python_framework=None,
python_version=None, cd_account_create=None, cd_project_url=None, test=None,
slot_swap=None, private_repo_username=None, private_repo_password=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
if cd_project_url:
# Add default values
cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type
python_framework = 'Django' if python_framework is None else python_framework
python_version = 'Python 3.5.3 x86' if python_version is None else python_version
webapp_list = None if test is None else list_webapp(resource_group_name)
vsts_provider = VstsContinuousDeliveryProvider()
cd_app_type_details = {
'cd_app_type': cd_app_type,
'app_working_dir': app_working_dir,
'nodejs_task_runner': nodejs_task_runner,
'python_framework': python_framework,
'python_version': python_version
}
try:
status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url,
branch, git_token, slot_swap, cd_app_type_details,
cd_project_url, cd_account_create, location, test,
private_repo_username, private_repo_password, webapp_list)
except RuntimeError as ex:
raise CLIError(ex)
logger.warning(status.status_message)
return status
else:
non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework,
python_version, cd_account_create, test, slot_swap]
if any(non_vsts_params):
            raise CLIError('The following parameters are of no use when cd_project_url is None: ' +
                           'cd_app_type, app_working_dir, nodejs_task_runner, python_framework, ' +
                           'python_version, cd_account_create, test, slot_swap')
from azure.mgmt.web.models import SiteSourceControl, SourceControl
if git_token:
sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
client.update_source_control('GitHub', sc)
source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
is_manual_integration=manual_integration,
is_mercurial=(repository_type != 'git'))
# SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
for i in range(5):
try:
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'create_or_update_source_control',
slot, source_control)
return LongRunningOperation(cmd.cli_ctx)(poller)
except Exception as ex: # pylint: disable=broad-except
import re
import time
ex = ex_handler_factory(no_throw=True)(ex)
                # for non-server errors (anything but 50x), raise immediately; otherwise retry up to 4 times
if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
raise
logger.warning('retrying %s/4', i + 1)
time.sleep(5) # retry in a moment
def update_git_token(cmd, git_token=None):
'''
Update source control token cached in Azure app service. If no token is provided,
the command will clean up existing token.
'''
client = web_client_factory(cmd.cli_ctx)
from azure.mgmt.web.models import SourceControl
sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
site_config = SiteConfigResource(location=location)
site_config.scm_type = 'LocalGit'
if slot is None:
client.web_apps.create_or_update_configuration(resource_group_name, name, site_config)
else:
client.web_apps.create_or_update_configuration_slot(resource_group_name, name,
site_config, slot)
return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
except CloudError as ex: # Because of bad spec, sdk throws on 200. We capture it here
if ex.status_code not in [200, 204]:
raise ex
def list_app_service_plans(cmd, resource_group_name=None):
client = web_client_factory(cmd.cli_ctx)
if resource_group_name is None:
plans = list(client.app_service_plans.list())
else:
plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
for plan in plans:
# prune a few useless fields
del plan.geo_region
del plan.subscription
return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, sku='B1', number_of_workers=None,
location=None, tags=None):
if is_linux and hyper_v:
raise CLIError('usage error: --is-linux | --hyper-v')
client = web_client_factory(cmd.cli_ctx)
sku = _normalize_sku(sku)
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
# the api is odd on parameter naming, have to live with it for now
sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name)
return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None,
admin_site_name=None):
sku_def = instance.sku
if sku is not None:
sku = _normalize_sku(sku)
sku_def.tier = get_sku_name(sku)
sku_def.name = sku
if number_of_workers is not None:
sku_def.capacity = number_of_workers
    instance.sku = sku_def
if admin_site_name is not None:
instance.admin_site_name = admin_site_name
return instance
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups',
slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
db_name=None, db_type=None,
db_connection_string=None, backup_name=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
db_setting = _create_db_setting(db_name, db_type, db_connection_string)
backup_request = BackupRequest(backup_request_name=backup_name,
storage_account_url=storage_account_url, databases=db_setting)
if slot:
return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
frequency=None, keep_at_least_one_backup=None,
retention_period_in_days=None, db_name=None,
db_connection_string=None, db_type=None, backup_name=None, slot=None):
configuration = None
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
if not backup_name:
from datetime import datetime
backup_name = '{0}_{1}'.format(webapp_name, datetime.utcnow().strftime('%Y%m%d%H%M'))
try:
configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except DefaultErrorResponseException:
# No configuration set yet
if not all([storage_account_url, frequency, retention_period_in_days,
keep_at_least_one_backup]):
raise CLIError('No backup configuration found. A configuration must be created. ' +
'Usage: --container-url URL --frequency TIME --retention DAYS ' +
'--retain-one TRUE/FALSE')
# If arguments were not specified, use the values in the current backup schedule
if storage_account_url is None:
storage_account_url = configuration.storage_account_url
if retention_period_in_days is None:
retention_period_in_days = configuration.backup_schedule.retention_period_in_days
if keep_at_least_one_backup is None:
keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
else:
keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
if frequency:
# Parse schedule frequency
frequency_num, frequency_unit = _parse_frequency(frequency)
else:
frequency_num = configuration.backup_schedule.frequency_interval
frequency_unit = configuration.backup_schedule.frequency_unit
if configuration and configuration.databases:
db = configuration.databases[0]
db_type = db_type or db.database_type
db_name = db_name or db.name
db_connection_string = db_connection_string or db.connection_string
db_setting = _create_db_setting(db_name, db_type, db_connection_string)
backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
keep_at_least_one_backup=keep_at_least_one_backup,
retention_period_in_days=retention_period_in_days)
backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
enabled=True, storage_account_url=storage_account_url,
databases=db_setting)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
db_name=None, db_type=None, db_connection_string=None,
target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
storage_blob_name = backup_name
if not storage_blob_name.lower().endswith('.zip'):
storage_blob_name += '.zip'
db_setting = _create_db_setting(db_name, db_type, db_connection_string)
restore_request = RestoreRequest(storage_account_url=storage_account_url,
blob_name=storage_blob_name, overwrite=overwrite,
site_name=target_name, databases=db_setting,
ignore_conflicting_host_names=ignore_hostname_conflict)
if slot:
return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(db_name, db_type, db_connection_string):
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
elif any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
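# Parses a backup frequency string such as '7d' or '12h' into (interval, FrequencyUnit).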
def _parse_frequency(frequency):
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
if frequency_num < 0:
raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
def _normalize_sku(sku):
sku = sku.upper()
if sku == 'FREE':
return 'F1'
elif sku == 'SHARED':
return 'D1'
return sku
def _get_location_from_resource_group(cli_ctx, resource_group_name):
from azure.mgmt.resource import ResourceManagementClient
client = get_mgmt_service_client(cli_ctx, ResourceManagementClient)
group = client.resource_groups.get(resource_group_name)
return group.location
def _get_location_from_webapp(client, resource_group_name, webapp):
webapp = client.web_apps.get(resource_group_name, webapp)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp))
return webapp.location
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
user = client.get_publishing_user()
result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
parsed = urlparse(result.repo_url)
return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
parsed.netloc, name)
def _get_scm_url(cmd, resource_group_name, name, slot=None):
from azure.mgmt.web.models import HostType
webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
for host in webapp.host_name_ssl_states or []:
if host.host_type == HostType.repository:
return "https://{}".format(host.name)
# this should not happen, but throw anyway
raise ValueError('Failed to retrieve Scm Uri')
def set_deployment_user(cmd, user_name, password=None):
'''
    Update deployment credentials. (Note: all webapps in your subscription will be impacted.)
'''
client = web_client_factory(cmd.cli_ctx)
user = User(publishing_user_name=user_name)
if password is None:
try:
password = prompt_pass(msg='Password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
user.publishing_password = password
return client.update_publishing_user(user)
def list_publish_profiles(cmd, resource_group_name, name, slot=None):
import xmltodict
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_profile_xml_with_secrets', slot)
full_xml = ''
for f in content:
full_xml += f.decode()
profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
converted = []
for profile in profiles:
new = {}
for key in profile:
# strip the leading '@' xmltodict put in for attributes
new[key.lstrip('@')] = profile[key]
converted.append(new)
return converted
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
settings = []
settings.append("DOCKER_ENABLE_CI=" + enable)
update_app_settings(cmd, resource_group_name, name, settings, slot)
return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
docker_enabled = False
for setting in settings:
if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true':
docker_enabled = True
break
cd_settings = {}
cd_settings['DOCKER_ENABLE_CI'] = docker_enabled
if docker_enabled:
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
for profile in profiles:
if profile['publishMethod'] == 'MSDeploy':
scmUrl = profile['publishUrl'].replace(":443", "")
cd_url = 'https://' + profile['userName'] + ':' + profile['userPWD'] + '@' + scmUrl + '/docker/hook'
cd_settings['CI_CD_URL'] = cd_url
break
else:
cd_settings['CI_CD_URL'] = ''
return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
    url = site.enabled_host_names[0]  # picks the custom domain URL in case one is assigned
ssl_host = next((h for h in site.host_name_ssl_states
if h.ssl_state != SslState.disabled), None)
url = ('https' if ssl_host else 'http') + '://' + url
open_page_in_browser(url)
if logs:
get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
application_logging=None, web_server_logging=None,
docker_container_logging=None, detailed_error_messages=None,
failed_request_tracing=None, slot=None):
from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
SiteLogsConfig, HttpLogsConfig, FileSystemHttpLogsConfig,
EnabledConfig)
client = web_client_factory(cmd.cli_ctx)
# TODO: ensure we call get_site only once
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
location = site.location
application_logs = None
if application_logging is not None:
if not application_logging:
level = 'Off'
elif level is None:
level = 'Error'
fs_log = FileSystemApplicationLogsConfig(level=level)
application_logs = ApplicationLogsConfig(file_system=fs_log)
http_logs = None
server_logging_option = web_server_logging or docker_container_logging
if server_logging_option:
        # TODO: az blob storage log config is currently not in use; it will be implemented later.
        # Tracked as Issue #4764 on GitHub
filesystem_log_config = None
turned_on = server_logging_option != 'off'
if server_logging_option in ['filesystem', 'off']:
# 100 mb max log size, retention lasts 3 days. Yes we hard code it, portal does too
filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
enabled=turned_on)
http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
detailed_error_messages_logs = (None if detailed_error_messages is None
else EnabledConfig(enabled=detailed_error_messages))
failed_request_tracing_logs = (None if failed_request_tracing is None
else EnabledConfig(enabled=failed_request_tracing))
site_log_config = SiteLogsConfig(location=location,
application_logs=application_logs,
http_logs=http_logs,
failed_requests_tracing=failed_request_tracing_logs,
detailed_error_messages=detailed_error_messages_logs)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
client = web_client_factory(cmd.cli_ctx)
site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production')
return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(cmd, resource_group_name, webapp):
client = web_client_factory(cmd.cli_ctx)
slots = list(client.web_apps.list_slots(resource_group_name, webapp))
for slot in slots:
slot.name = slot.name.split('/')[-1]
setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
del slot.server_farm_id
return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, action='swap'):
client = web_client_factory(cmd.cli_ctx)
if action == 'swap':
poller = client.web_apps.swap_slot_slot(resource_group_name, webapp,
slot, (target_slot or 'production'), True)
return poller
elif action == 'preview':
if target_slot is None:
result = client.web_apps.apply_slot_config_to_production(resource_group_name,
webapp, slot, True)
else:
result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
slot, target_slot, True)
return result
else: # reset
# we will reset both source slot and target slot
if target_slot is None:
client.web_apps.reset_production_slot_config(resource_group_name, webapp)
else:
client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
return None
def delete_slot(cmd, resource_group_name, webapp, slot):
client = web_client_factory(cmd.cli_ctx)
# TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
client.web_apps.delete_slot(resource_group_name, webapp, slot)
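# Each 'distribution' entry has the form '<slot>=<percentage>' and becomes a ramp-up rule that routes
# that share of traffic to '<slot>' plus the app's default host name suffix.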
def set_traffic_routing(cmd, resource_group_name, name, distribution):
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
configs = get_site_configs(cmd, resource_group_name, name)
host_name_suffix = '.' + site.default_host_name.split('.', 1)[1]
configs.experiments.ramp_up_rules = []
for r in distribution:
slot, percentage = r.split('=')
configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=slot + host_name_suffix,
reroute_percentage=float(percentage),
name=slot))
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
return configs.experiments.ramp_up_rules
def show_traffic_routing(cmd, resource_group_name, name):
configs = get_site_configs(cmd, resource_group_name, name)
return configs.experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
from azure.mgmt.web.models import CorsSettings
configs = get_site_configs(cmd, resource_group_name, name, slot)
if not configs.cors:
configs.cors = CorsSettings()
configs.cors.allowed_origins = (configs.cors.allowed_origins or []) + allowed_origins
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if configs.cors:
if allowed_origins:
configs.cors.allowed_origins = [x for x in (configs.cors.allowed_origins or []) if x not in allowed_origins]
else:
configs.cors.allowed_origins = []
configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
return configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
streaming_url = scm_url + '/logstream'
import time
if provider:
streaming_url += ('/' + provider.lstrip('/'))
user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
t.daemon = True
t.start()
while True:
time.sleep(100) # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
url = scm_url.rstrip('/') + '/dump'
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
_get_log(url, user_name, password, log_file)
logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
creds = _generic_site_operation(cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot)
creds = creds.result()
return (creds.publishing_user_name, creds.publishing_password)
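# Streams log content from the given SCM (Kudu) URL using basic auth with the publishing credentials,
# or downloads it to log_file when one is provided.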
def _get_log(url, user_name, password, log_file=None):
import certifi
import urllib3
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
r = http.request(
'GET',
url,
headers=headers,
preload_content=False
)
if r.status != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
url, r.status, r.reason))
if log_file: # download logs
with open(log_file, 'wb') as f:
while True:
data = r.read(1024)
if not data:
break
f.write(data)
else: # streaming
std_encoding = sys.stdout.encoding
for chunk in r.stream():
if chunk:
                # extra encode() and decode() for a stdout that does not support 'utf-8'
print(chunk.decode(encoding='utf-8', errors='replace')
.encode(std_encoding, errors='replace')
.decode(std_encoding, errors='replace'), end='') # each line of log has CRLF.
r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file):
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get')
cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
hosting_environment_profile_param = (webapp.hosting_environment_profile.name
if webapp.hosting_environment_profile else '')
thumb_print = _get_cert(certificate_password, certificate_file)
cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
webapp.location, cert_resource_group_name)
cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
location=webapp.location, server_farm_id=webapp.server_farm_id)
return client.certificates.create_or_update(cert_resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
''' Decrypts the .pfx file '''
    with open(certificate_file, 'rb') as f:
        p12 = OpenSSL.crypto.load_pkcs12(f.read(), certificate_password)
cert = p12.get_certificate()
digest_algorithm = 'sha1'
thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
return thumbprint
def list_ssl_certs(cmd, resource_group_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.list_by_resource_group(resource_group_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
client = web_client_factory(cmd.cli_ctx)
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
return client.certificates.delete(resource_group_name, webapp_cert.name)
raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
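# Updates the SSL state of a single host name binding by sending a minimal Site object carrying only
# the host_name_ssl_states entry to change.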
def _update_host_name_ssl_state(cli_ctx, resource_group_name, webapp_name, location,
host_name, ssl_state, thumbprint, slot=None):
updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name,
ssl_state=ssl_state,
thumbprint=thumbprint,
to_update=True)],
location=location)
name = '{}({})'.format(webapp_name, slot) if slot else webapp_name
return _generic_site_operation(cli_ctx, resource_group_name, name, 'create_or_update',
slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
if len(webapp_cert.host_names) == 1 and not webapp_cert.host_names[0].startswith('*'):
return _update_host_name_ssl_state(cmd.cli_ctx, resource_group_name, name, webapp.location,
webapp_cert.host_names[0], ssl_type,
certificate_thumbprint, slot)
query_result = list_hostnames(cmd, resource_group_name, name, slot)
hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
to_update = _match_host_names_from_cert(webapp_cert.host_names, hostnames_in_webapp)
for h in to_update:
_update_host_name_ssl_state(cmd.cli_ctx, resource_group_name, name, webapp.location,
h, ssl_type, certificate_thumbprint, slot)
return show_webapp(cmd, resource_group_name, name, slot)
raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
return _update_ssl_binding(cmd, resource_group_name, name,
certificate_thumbprint, SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# helper class that handles runtime stacks in formats like 'node|6.1' or 'php|5.5'
class _StackRuntimeHelper(object):
def __init__(self, client, linux=False):
self._client = client
self._linux = linux
self._stacks = []
def resolve(self, display_name):
self._load_stacks()
return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
None)
@property
def stacks(self):
self._load_stacks()
return self._stacks
@staticmethod
def update_site_config(stack, site_config):
for k, v in stack['configs'].items():
setattr(site_config, k, v)
return site_config
@staticmethod
def update_site_appsettings(stack, site_config):
if site_config.app_settings is None:
site_config.app_settings = []
site_config.app_settings += [NameValuePair(name=k, value=v) for k, v in stack['configs'].items()]
return site_config
def _load_stacks(self):
if self._stacks:
return
os_type = ('Linux' if self._linux else 'Windows')
raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
bytes_value = raw_stacks._get_next().content # pylint: disable=protected-access
json_value = bytes_value.decode('utf8')
json_stacks = json.loads(json_value)
stacks = json_stacks['value']
result = []
if self._linux:
for properties in [(s['properties']) for s in stacks]:
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
})
else: # Windows stacks
config_mappings = {
'node': 'WEBSITE_NODE_DEFAULT_VERSION',
'python': 'python_version',
'php': 'php_version',
'aspnet': 'net_framework_version'
}
            # get all stack versions except 'java'
for stack in stacks:
if stack['name'] not in config_mappings:
continue
name, properties = stack['name'], stack['properties']
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': name + '|' + major['displayVersion'],
'configs': {
config_mappings[name]: (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
}
})
# deal with java, which pairs with java container version
java_stack = next((s for s in stacks if s['name'] == 'java'))
java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
for java_version in java_stack['properties']['majorVersions']:
for fx in java_container_stack['properties']['frameworks']:
for fx_version in fx['majorVersions']:
result.append({
'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
fx['display'],
fx_version['displayVersion']),
'configs': {
'java_version': java_version['runtimeVersion'],
'java_container': fx['name'],
'java_container_version': fx_version['runtimeVersion']
}
})
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
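# Creates a function app either on a consumption plan (per-location, serverless) or on an existing
# App Service plan; Linux consumption-plan apps additionally require an explicit runtime.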
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
os_type=None, runtime=None, consumption_plan_location=None,
deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None, deployment_container_image_name=None, tags=None):
# pylint: disable=too-many-statements
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
if bool(plan) == bool(consumption_plan_location):
raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
site_config = SiteConfig(app_settings=[])
functionapp_def = Site(location=None, site_config=site_config, tags=tags)
client = web_client_factory(cmd.cli_ctx)
if consumption_plan_location:
locations = list_consumption_locations(cmd)
location = next((l for l in locations if l['name'].lower() == consumption_plan_location.lower()), None)
if location is None:
raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
functionapp_def.location = consumption_plan_location
functionapp_def.kind = 'functionapp'
# if os_type is None, the os type is windows
is_linux = os_type and os_type.lower() == 'linux'
        # for a Linux consumption-plan app, os_type must be 'linux' and a runtime must be specified;
        # in other cases the runtime is currently ignored
if is_linux and not runtime:
            raise CLIError("usage error: --runtime RUNTIME is required for Linux function apps on a consumption plan.")
else: # apps with SKU based plan
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
location = plan_info.location
is_linux = plan_info.reserved
functionapp_def.server_farm_id = plan
functionapp_def.location = location
con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
if is_linux:
functionapp_def.kind = 'functionapp,linux'
functionapp_def.reserved = True
if consumption_plan_location:
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_WORKER_RUNTIME', value=runtime))
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
else:
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='beta'))
site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
value=str(hexlify(urandom(32)).decode()).upper()))
if deployment_container_image_name:
functionapp_def.kind = 'functionapp,linux,container'
site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
value=deployment_container_image_name))
site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='false'))
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
else:
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='true'))
site_config.linux_fx_version = _format_fx_version('appsvc/azure-functions-runtime')
else:
functionapp_def.kind = 'functionapp'
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
    # add the app settings that make the site a function app
site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION', value='8.11.1'))
if consumption_plan_location is None:
site_config.always_on = True
else:
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower()))
poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def)
functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
if consumption_plan_location and is_linux:
logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully"
"created but is not active until content is published using"
"Azure Portal or the Functions Core Tools.", name)
else:
_set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
return functionapp
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None):
if deployment_source_url:
logger.warning("Linking to git repository '%s'", deployment_source_url)
try:
config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
deployment_source_branch, manual_integration=True)
except Exception as ex: # pylint: disable=broad-except
ex = ex_handler_factory(no_throw=True)(ex)
logger.warning("Link to git repository failed due to error '%s'", ex)
if deployment_local_git:
local_git_info = enable_local_git(cmd, resource_group_name, name)
logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
sa_resource_group = resource_group_name
if is_valid_resource_id(storage_account):
sa_resource_group = parse_resource_id(storage_account)['resource_group']
storage_account = parse_resource_id(storage_account)['name']
storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
storage_account)
error_message = ''
endpoints = storage_properties.primary_endpoints
sku = storage_properties.sku.name.value
allowed_storage_types = ['Standard_GRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
for e in ['blob', 'queue', 'table']:
if not getattr(endpoints, e, None):
error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e) # pylint: disable=line-too-long
if sku not in allowed_storage_types:
error_message += 'Storage type {} is not allowed'.format(sku)
if error_message:
raise CLIError(error_message)
obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account) # pylint: disable=no-member
try:
keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member
except AttributeError:
# Older API versions have a slightly different structure
keys = [obj.key1, obj.key2] # pylint: disable=no-member
endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
"https",
endpoint_suffix,
storage_account,
keys[0]) # pylint: disable=no-member
return connection_string
def list_consumption_locations(cmd):
client = web_client_factory(cmd.cli_ctx)
regions = client.list_geo_regions(sku='Dynamic')
return [{'name': x.name.lower().replace(' ', '')} for x in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
client = web_client_factory(cmd.cli_ctx)
full_sku = get_sku_name(sku)
return client.list_geo_regions(full_sku, linux_workers_enabled)
def _check_zip_deployment_status(deployment_status_url, authorization, timeout=None):
import requests
import time
total_trials = (int(timeout) // 30) if timeout else 10
for _num_trials in range(total_trials):
time.sleep(30)
response = requests.get(deployment_status_url, headers=authorization)
res_dict = response.json()
if res_dict.get('status', 0) == 5:
logger.warning("Zip deployment failed status %s", res_dict['status_text'])
break
elif res_dict.get('status', 0) == 4:
break
if 'progress' in res_dict:
logger.info(res_dict['progress']) # show only in debug mode, customers seem to find this confusing
# if the deployment is taking longer than expected
if res_dict.get('status', 0) != 4:
logger.warning("""Deployment is taking longer than expected. Please verify status at '%s'
beforing launching the app""", deployment_status_url)
return res_dict
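# Illustrative sketch only (not part of the CLI command surface): how the poller above
# could be invoked directly. The URL and credentials below are hypothetical placeholders;
# real values come from the site's SCM (Kudu) endpoint and publishing credentials.
def _example_poll_zip_deployment_status():
    deployment_status_url = 'https://<app-name>.scm.azurewebsites.net/api/deployments/latest'
    headers = {'Authorization': 'Basic <base64-encoded-publishing-credentials>',
               'Content-Type': 'application/json'}
    # polls every 30 seconds, up to timeout // 30 attempts
    return _check_zip_deployment_status(deployment_status_url, headers, timeout=300)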
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
|
load_html.py
|
import webview
import threading
"""
This example demonstrates how to load HTML in a web view window
"""
def load_html():
webview.load_html("<h1>This is dynamically loaded HTML</h1>")
if __name__ == '__main__':
t = threading.Thread(target=load_html)
t.start()
    # Create a resizable webview window with 800x600 dimensions
webview.create_window("Simple browser", width=800, height=600, resizable=True)
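# Note: the module-level webview.load_html()/create_window() calls above follow the
# pywebview 2.x API. A rough equivalent under pywebview 3.x (an assumption about the
# installed version) passes the window object to a callback started by webview.start():
#
#   import webview
#
#   def load_html(window):
#       window.load_html("<h1>This is dynamically loaded HTML</h1>")
#
#   if __name__ == '__main__':
#       window = webview.create_window("Simple browser", width=800, height=600, resizable=True)
#       webview.start(load_html, window)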
|
chunk_chek.py
|
"""
Maintainer: Giovanni Lopez
Mail: giovannt_92@hotmail.com / gioipez92@gmail.com
Builds a class to fetch the manifest, sub-manifests, and
video and audio chunks
"""
import re
import os
import platform
import threading
from random import randint
from basic import (
# head_request,
get_request,
post_request,
load_streamnames_from_file
)
# Either check a random profile, or disable randomness and define
# the specific profile to check
# OPTIONS = (V_PROFILE, NUM_CHUNKS_TOB_CHECK, RANDOM_PROFILES, ASSET_LIST)
if platform.system() != 'Linux':
OPTIONS = (5, 10, False, "asset_list.yaml")
# ABR Server IP
ABR_MANIFEST_SERVER_IP = "localhost"
ABR_MANIFEST_SERVER_PORT = 8000
else:
V_PROFILE = int(os.getenv("V_PROFILE"))
NUM_CHUNKS_TOB_CHECK = int(os.getenv("NUM_CHUNKS_TOB_CHECK"))
    RANDOM_PROFILES = os.getenv("RANDOM_PROFILES", "").lower() in ("1", "true", "yes")  # bool() would treat "False" as True
ASSET_LIST = os.getenv("ASSET_LIST")
OPTIONS = (V_PROFILE, NUM_CHUNKS_TOB_CHECK, RANDOM_PROFILES, ASSET_LIST)
ABR_MANIFEST_SERVER_IP = os.getenv("ABR_MANIFEST_SERVER_IP")
ABR_MANIFEST_SERVER_PORT = os.getenv("ABR_MANIFEST_SERVER_PORT")
# Load asset URLs
URLS = load_streamnames_from_file(OPTIONS[3])
ABR_HLS_PARSER = f"http://{ABR_MANIFEST_SERVER_IP}:{ABR_MANIFEST_SERVER_PORT}/hlsmanifest/"
class Chunk:
"""Chunk to be check
This object has a basic HTTP caracteristic
of an HTTP Chunk, could be video, audio or
subtitle
"""
def __init__(self, chunk_url):
self.chunk_url = chunk_url
def get_http_chunk_info(self):
"""To avoid traffic overload on the
network, a simple head validation will
be execute
"""
return get_request(self.chunk_url)
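# Minimal usage sketch of the Chunk class (illustrative only; the URL below is a
# hypothetical placeholder). Defined as a helper and never called automatically.
def _chunk_usage_example():
    chunk = Chunk("http://example.com/video/segment_001.ts")
    return chunk.get_http_chunk_info()  # returns the result of basic.get_request()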
def get_parsed_manifest(manifest_url):
"""
Get data from hlsparser, developed by Gio
"""
manifest_data = {"manifest_url": manifest_url}
return post_request(ABR_HLS_PARSER, body=manifest_data)
def channel_check(manifest_url):
"""
Manifest parser request
"""
parsed_manifest = get_parsed_manifest(manifest_url)
if parsed_manifest:
json_manifest = parsed_manifest.json()
sub_manifests = json_manifest["sub_manifest"]
vid_prof_num = len(sub_manifests["video"])
# au_prof_num = len(sub_manifests["audio"])
# sub_prof_num = len(sub_manifests["subtitles"])
if OPTIONS[0] >= vid_prof_num:
v_prof = vid_prof_num - 1
else:
v_prof = OPTIONS[0]
if OPTIONS[2]:
            v_prof = randint(0, vid_prof_num - 1)  # randint is inclusive on both ends
if f"sub_manifest_{v_prof}" in sub_manifests["video"].keys():
selected_profile = sub_manifests["video"][f"sub_manifest_{v_prof}"]
chunk_base_url = list(
filter(None, re.split(r"/\w*.m3u8(.*?)", selected_profile)))
chunks = json_manifest["asset_chunks"]
if len(chunks["video"]) > OPTIONS[1]:
for item in range(
len(chunks["video"]) - 2,
(len(chunks["video"]) - 2 - int(OPTIONS[1])), - 1):
chunk = Chunk(f'{chunk_base_url[0]}/{chunks["video"][str(item)]}')
chunk.get_http_chunk_info()
# if chunk_headers:
# print(chunk_headers.headers)
def recursion_channel_check(manifest_urls):
"""
    Given a list of URLs, recursively split the list and start one checking thread per URL
"""
    if not manifest_urls:  # guard against an empty list, which would otherwise recurse forever
        return
    if len(manifest_urls) == 1:
my_thread = threading.Thread(target=channel_check, args=(manifest_urls[0],))
my_thread.start()
else:
mid1 = manifest_urls[:len(manifest_urls)//2]
mid2 = manifest_urls[len(manifest_urls)//2:]
recursion_channel_check(mid1)
recursion_channel_check(mid2)
def main():
"""
Main executing function
"""
recursion_channel_check(URLS)
if __name__ == '__main__':
main()
|
HiwinRA605_socket_ros_test_20190625191532.py
|
#!/usr/bin/env python3
# license removed for brevity
# Receive commands from the strategy side and forward them to the control-side PC over a socket
import socket
## multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
data = '0'  # initial value of the data to transmit
Arm_feedback = 1  # assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0  # initial value of the response counter
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # PEP 479: raising StopIteration inside a generator is an error in Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
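# Minimal usage sketch of the switch/case helper above (illustrative only; never
# called anywhere). case() with no arguments acts as the default branch.
def _switch_usage_example(value):
    for case in switch(value):
        if case(1):
            result = "one"
            break
        if case(2, 3):
            result = "two or three"
            break
        if case():  # default
            result = "other"
            break
    return result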
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server side-------
def point_data(req):  ## receive pose data sent from the strategy side
global client_response
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
client_response = client_response + 1
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req):  ## receive arm-mode data sent from the strategy side
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
return(1)
##-------Arm Speed Mode------------###
def Speed_Mode(req):  ## receive speed-mode data sent from the strategy side
socket_cmd.Speedmode = int('%s'%req.Speedmode)
return(1)
# def Grip_Mode(req):  ## receive gripper action data sent from the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server():  ## create the server node
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
    rospy.spin()  ## block until the node is shut down
##------------server side end-------
##----------socket packet transmission--------------##
##-----------socket client--------
def socket_client():
global Arm_feedback,data
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(s.recv(1024))
    #start_input = int(input('Press 1 to start transmission, 3 to quit: '))  # read the start command
start_input = 1
if start_input==1:
while 1:
            ##---------------send arm commands over the socket-----------------
if Arm_feedback == 0:
                #-------select mode--------
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
                    #-------set arm speed--------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
                    #-------set arm delay time--------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
                    #-------set arm fast & safe speed mode--------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
                socket_cmd.action = 5  ## reset to the initial mode state
                s.send(data.encode('utf-8'))  # send the command string over the socket as UTF-8
feedback_str = s.recv(1024)
                # the arm side reports its state
                if str(feedback_str[2]) == '70':  # 'F': the arm is ready to accept the next motion command
Arm_feedback = 0
socket_client_arm_state(Arm_feedback)
#print("isbusy false")
                if str(feedback_str[2]) == '84':  # 'T': the arm is busy and cannot execute the next motion command
Arm_feedback = 1
socket_client_arm_state(Arm_feedback)
#print("isbusy true")
                if str(feedback_str[2]) == '54':  # '6': the strategy has finished
Arm_feedback = 6
socket_client_arm_state(Arm_feedback)
print("shutdown")
                ##---------------send arm commands over the socket end-----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
if start_input == 3:
pass
s.close()
##-----------socket client end--------
##-------------socket packet transmission end--------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5  ## initial mode state
    t = threading.Thread(target=thread_test)
    t.start()  # start the worker thread
socket_server()
t.join()
# Ctrl+K Ctrl+C   Add line comment
# Ctrl+K Ctrl+U   Remove line comment
# Ctrl+] / Ctrl+[   Indent / outdent line
|