repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string)
|---|---|---|---|---|---|---|
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_exp_eval.py | """Test functions in imgx.exp.eval."""
import chex
import jax
import numpy as np
from chex._src import fake
from imgx.exp.eval import (
get_jit_segmentation_metrics,
get_non_jit_segmentation_metrics,
)
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None: # pylint: disable=invalid-name
"""Fake multi-devices."""
fake.set_n_cpu_devices(2)
class TestGetSegmentationMetrics(chex.TestCase):
"""Test get_segmentation_metrics."""
batch = 2
num_classes = 3
spatial_shape = (4, 5, 6)
spacing = np.array((0.2, 0.5, 1.0))
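# Masks are channel-last: (batch, *spatial_shape, num_classes).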
mask_shape = (batch, *spatial_shape, num_classes)
@chex.all_variants
def test_jit_shapes(self) -> None:
"""Test shapes."""
key = jax.random.PRNGKey(0)
key_pred, key_true = jax.random.split(key)
mask_pred = jax.random.uniform(key_pred, shape=self.mask_shape)
mask_true = jax.random.uniform(key_true, shape=self.mask_shape)
got = self.variant(get_jit_segmentation_metrics)(
mask_pred, mask_true, self.spacing
)
for v in got.values():
chex.assert_shape(v, (self.batch,))
@chex.variants(without_jit=True, with_device=True, without_device=True)
def test_nonjit_shapes(self) -> None:
"""Test shapes."""
key = jax.random.PRNGKey(0)
key_pred, key_true = jax.random.split(key)
mask_pred = jax.random.uniform(key_pred, shape=self.mask_shape)
mask_true = jax.random.uniform(key_true, shape=self.mask_shape)
got = self.variant(get_non_jit_segmentation_metrics)(
mask_pred, mask_true, self.spacing
)
for v in got.values():
chex.assert_shape(v, (self.batch,))
| 1,741 | 29.561404 | 75 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/unit/test_model_unet_3d.py | """Test Unet-related classes and functions."""
from typing import Tuple, Type
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from absl.testing import parameterized
from chex._src import fake
from imgx.model import Unet3d, Unet3dSlice
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None: # pylint: disable=invalid-name
"""Fake multi-devices."""
fake.set_n_cpu_devices(2)
class TestUnet3d(chex.TestCase):
"""Test the class Unet3d and Unet3dSlice."""
batch_size = 2
in_channels = 1
out_channels = 2
@parameterized.product(
(
{
"in_shape": (15, 16, 17),
"kernel_size": 3,
"scale_factor": 2,
},
{
"in_shape": (13, 14, 15),
"kernel_size": 5,
"scale_factor": 1,
},
{
"in_shape": (29, 30, 31),
"kernel_size": 5,
"scale_factor": 2,
},
{
"in_shape": (53, 54, 55),
"kernel_size": 5,
"scale_factor": 3,
},
),
model_cls=[Unet3d, Unet3dSlice],
)
def test_output_shape(
self,
in_shape: Tuple[int, int, int],
kernel_size: int,
scale_factor: int,
model_cls: Type[hk.Module],
) -> None:
"""Test output shape.
Args:
in_shape: input shape.
kernel_size: convolution kernel size, the value(s) should be odd.
scale_factor: convolution stride for down-sampling/up-sampling.
model_cls: model to be tested.
"""
channels = (2, 4, 2)
@hk.testing.transform_and_run()
def forward(
x: jnp.ndarray,
) -> jnp.ndarray:
"""Forward function of Unet.
Args:
x: input.
Returns:
Network prediction.
"""
net = model_cls(
in_shape=in_shape,
in_channels=self.in_channels,
out_channels=self.out_channels,
num_channels=channels,
kernel_size=kernel_size,
scale_factor=scale_factor,
)
return net(x)
rng = jax.random.PRNGKey(0)
dummy_image = jax.random.uniform(
rng, shape=(self.batch_size, *in_shape, self.in_channels)
)
out = forward(dummy_image)
chex.assert_shape(out, (self.batch_size, *in_shape, self.out_channels))
@chex.all_variants
@parameterized.named_parameters(
("Unet3d", Unet3d),
(
"Unet3dSlice",
Unet3dSlice,
),
)
def test_output_shape_variants(
self,
model_cls: Type[hk.Module],
) -> None:
"""Test UNet3D output shape under different device variants.
Args:
model_cls: model to be tested.
"""
kernel_size = 3
scale_factor = 2
in_shape = (14, 15, 16)
channels = (2, 4)
@hk.testing.transform_and_run(jax_transform=self.variant)
def forward(
x: jnp.ndarray,
) -> jnp.ndarray:
"""Forward function of Unet.
Args:
x: input.
Returns:
Network prediction.
"""
net = model_cls(
in_shape=in_shape,
in_channels=self.in_channels,
out_channels=self.out_channels,
num_channels=channels,
kernel_size=kernel_size,
scale_factor=scale_factor,
)
return net(x)
rng = jax.random.PRNGKey(0)
dummy_image = jax.random.uniform(
rng, shape=(self.batch_size, *in_shape, self.in_channels)
)
out = forward(dummy_image)
chex.assert_shape(out, (self.batch_size, *in_shape, self.out_channels))
@parameterized.named_parameters(
("Unet3d", Unet3d),
(
"Unet3dSlice",
Unet3dSlice,
),
)
def test_output_real_shape(
self,
model_cls: Type[hk.Module],
) -> None:
"""Test UNet3D output shape with real setting.
Args:
model_cls: model to be tested.
"""
kernel_size = 3
scale_factor = 2
in_shape = (256, 256, 48)
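# "Real setting": a clinically sized 256x256x48 volume; tiny channel counts bound the memory use.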
channels = (2, 2, 2, 2)
@hk.testing.transform_and_run()
def forward(
x: jnp.ndarray,
) -> jnp.ndarray:
"""Forward function of Unet.
Args:
x: input.
Returns:
Network prediction.
"""
net = model_cls(
in_shape=in_shape,
in_channels=self.in_channels,
out_channels=self.out_channels,
num_channels=channels,
kernel_size=kernel_size,
scale_factor=scale_factor,
)
return net(x)
rng = jax.random.PRNGKey(0)
dummy_image = jax.random.uniform(
rng, shape=(self.batch_size, *in_shape, self.in_channels)
)
out = forward(dummy_image)
chex.assert_shape(out, (self.batch_size, *in_shape, self.out_channels))
| 5,355 | 25.646766 | 79 | py |
ImgX-DiffSeg | ImgX-DiffSeg-main/tests/data/test_dataset_iterator.py | """Test image data iterators."""
from typing import Tuple
import chex
import haiku as hk
import jax
import numpy as np
import SimpleITK as sitk # noqa: N813
from absl.testing import parameterized
from chex._src import fake
from omegaconf import DictConfig
from imgx import IMAGE, LABEL, UID
from imgx.datasets import (
AMOS_CT,
DIR_TFDS_PROCESSED_MAP,
IMAGE_SHAPE_MAP,
MALE_PELVIC_MR,
NUM_CLASSES_MAP,
Dataset,
)
from imgx.datasets.iterator import get_image_tfds_dataset
# Set `FLAGS.chex_n_cpu_devices` CPU devices for all tests.
def setUpModule() -> None: # pylint: disable=invalid-name
"""Fake multi-devices."""
fake.set_n_cpu_devices(2)
class TestImageIterator(chex.TestCase):
"""Test image iterators."""
@chex.variants(without_jit=True, with_device=True, without_device=True)
@parameterized.named_parameters(
("AMOS CT", AMOS_CT),
("Male Pelvic MR", MALE_PELVIC_MR),
)
def test_output_shape_variants(
self,
dataset_name: str,
) -> None:
"""Test iterator output shape under different device variants.
Dataset num_valid_steps is tested too.
Args:
dataset_name: dataset name.
"""
num_devices_per_replica = jax.local_device_count()
batch_size = 2
batch_size_per_replica = 1
max_num_samples = 3
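# With max_num_samples=3 and batch_size=2, num_valid_steps must be ceil(3 / 2) = 2.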
image_shape = IMAGE_SHAPE_MAP[dataset_name]
config = DictConfig(
{
"seed": 0,
"training": {
"num_devices_per_replica": 1,
"batch_size": batch_size,
"batch_size_per_replica": batch_size_per_replica,
"mixed_precision": {
"use": False,
},
},
"data": {
"max_num_samples": max_num_samples,
dataset_name: {
"data_augmentation": {
"max_rotation": [0.088, 0.088, 0.088],
"max_translation": [20, 20, 4],
"max_scaling": [0.15, 0.15, 0.15],
},
},
},
}
)
@hk.testing.transform_and_run(jax_transform=self.variant)
def get_batch() -> (
Tuple[Dataset, chex.ArrayTree, chex.ArrayTree, chex.ArrayTree]
):
"""Get one batch for iterator.
Returns:
Training batch
Validation batch
Test batch.
"""
ds = get_image_tfds_dataset(
dataset_name=dataset_name,
config=config,
)
return (
ds,
next(ds.train_iter),
next(ds.valid_iter),
next(ds.test_iter),
)
dataset, train_batch, valid_batch, test_batch = get_batch()
assert dataset.num_valid_steps == int(
np.ceil(max_num_samples / batch_size)
)
for i, batch in enumerate([train_batch, valid_batch, test_batch]):
chex.assert_shape(
batch[IMAGE],
(
num_devices_per_replica,
batch_size // num_devices_per_replica,
*image_shape,
),
)
chex.assert_shape(
batch[LABEL],
(
num_devices_per_replica,
batch_size // num_devices_per_replica,
*image_shape,
),
)
if i == 0:
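# The training batch (i == 0) carries no UID field; validation/test batches keep it.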
assert UID not in batch
else:
chex.assert_shape(
batch[UID],
(
num_devices_per_replica,
batch_size // num_devices_per_replica,
),
)
class TestImageShape(chex.TestCase):
"""Test the data loader shapes."""
@parameterized.named_parameters(
("AMOS CT", AMOS_CT),
("Male Pelvic MR", MALE_PELVIC_MR),
)
def test_shape(
self,
dataset_name: str,
) -> None:
"""Test the data loader shapes.
Args:
dataset_name: dataset name.
"""
image_shape = IMAGE_SHAPE_MAP[dataset_name]
num_devices_per_replica = jax.local_device_count()
batch_size = 2
batch_size_per_replica = 1
assert batch_size % num_devices_per_replica == 0
config = DictConfig(
{
"seed": 0,
"training": {
"num_devices_per_replica": 1,
"batch_size": batch_size,
"batch_size_per_replica": batch_size_per_replica,
"mixed_precision": {
"use": False,
},
},
"data": {
"max_num_samples": 4,
"dataset_name": {
"data_augmentation": {
"max_rotation": [0.088, 0.088, 0.088],
"max_translation": [20, 20, 4],
"max_scaling": [0.15, 0.15, 0.15],
},
},
},
}
)
dataset = get_image_tfds_dataset(
dataset_name,
config,
)
batch_size_per_replica = batch_size // num_devices_per_replica
for it in [dataset.train_iter, dataset.valid_iter, dataset.test_iter]:
batch = next(it)
chex.assert_shape(
batch[IMAGE],
(
num_devices_per_replica,
batch_size_per_replica,
*image_shape,
),
)
chex.assert_shape(
batch[LABEL],
(
num_devices_per_replica,
batch_size_per_replica,
*image_shape,
),
)
# In AMOS, not all images have all labels, even without resampling.
@parameterized.named_parameters(
("Male Pelvic MR", MALE_PELVIC_MR),
)
def test_labels(
self,
dataset_name: str,
) -> None:
"""Test all mask labels have all classes.
Args:
dataset_name: dataset name.
"""
mask_paths = list(
DIR_TFDS_PROCESSED_MAP[dataset_name].glob(
"*_mask_preprocessed.nii.gz"
)
)
err_paths = []
for path in mask_paths:
volume = sitk.ReadImage(path)
arr = sitk.GetArrayFromImage(volume)
if np.unique(arr).size != NUM_CLASSES_MAP[dataset_name]:
err_paths.append(path.name)
if len(err_paths) > 0:
raise ValueError(
f"{err_paths} have less than {NUM_CLASSES_MAP[dataset_name]} "
f"classes including background."
)
| 7,123 | 29.444444 | 78 | py |
PIRA | PIRA-master/pira.py | """
File: pira.py
License: Part of the PIRA project. Licensed under BSD 3 clause license.
See LICENSE.txt file at https://github.com/tudasc/pira
Description: This is PIRA.
"""
__version__ = '0.5.0'
import argparse
import lib.Logging as log
import lib.Pira as pira
import lib.Utility as U
"""
Pira Main
This file contains the main entry point for the Pira framework.
Options are defined here and then passed to the Pira class.
"""
parser = argparse.ArgumentParser(prog='PIRA')
# --- Required arguments section
parser.add_argument('config', help='The configuration json file.')
# -- Pira folder option
pira_dir = U.get_default_pira_dir()
parser.add_argument('--pira-dir',
help='The directory which stores PIRA runtime files',
type=str,
default=pira_dir)
# --- Pira "mode" options
parser.add_argument('--config-version',
help='Which config file version to use',
choices=[1, 2],
default=2,
type=int)
parser.add_argument('--runtime-filter',
help='Use run-time filtering',
default=False,
action='store_true')
parser.add_argument('--iterations', help='Number of Pira iterations', default=3, type=int)
parser.add_argument('--repetitions', help='Number of measurement repetitions', default=3, type=int)
parser.add_argument(
'--analysis-parameters',
help='Path to json file containing analysis parameters (required for Extra-P and LIDe modes).',
default='')
# --- Pira debug options
parser.add_argument('--tape', help='Path to tape file to dump.')
# --- Pira modeling options
group = parser.add_argument_group('Extra-P Options')
group.add_argument('--extrap-dir',
help='The base directory where extra-p folder structure is placed',
type=str,
default='')
group.add_argument('--extrap-prefix', help='The prefix in extra-p naming scheme', type=str)
# CSV Export options
csv_group = parser.add_argument_group('CSV Export Options')
csv_group.add_argument('--csv-dir',
help='Export runtime measurements as CSV files to the specified directory',
type=str,
default='')
csv_group.add_argument(
'--csv-dialect',
help=
'The dialect the CSV file is written in. Possible values: excel, excel_tab, unix; defaults to unix',
type=str,
default='unix')
# Experimental options - even for research software they are experimental
experimental_group = parser.add_argument_group(
'Experimental Options - experimental even for research software')
experimental_group.add_argument(
'--call-site-instrumentation',
help='Enable call-site instrumentation (may not work with current Score-P versions).',
default=False,
action='store_true')
experimental_group.add_argument('--hybrid-filter-iters',
help='Do compiletime-filtering after x iterations',
default=0,
type=int)
experimental_group.add_argument('--export',
help='Export performance models to IPCG file.',
default=False,
action='store_true')
experimental_group.add_argument('--export-runtime-only',
help='Export only runtime data used for extra-p modeling',
default=False,
action='store_true')
experimental_group.add_argument('--lide',
help='Enable load imbalance detection',
action='store_true')
# --- Pira slurm option
experimental_group.add_argument('--slurm-config',
help='Path to the slurm configuration file',
type=str,
default=None)
# -- General Info
parser.add_argument('--version',
help='Shows the version of this PIRA installation',
action='version',
version='%(prog)s ' + __version__)
# ====== Start of Pira program ====== #
args = parser.parse_args()
try:
log.get_logger().log('Starting', level='debug')
pira.main(args)
finally:
if args.tape is not None:
log.get_logger().dump_tape(args.tape)
else:
log.get_logger().dump_tape('tape.tp')
log.get_logger().log('End of process')
log.get_logger().show_perf()
| 4,543 | 35.645161 | 104 | py |
PIRA | PIRA-master/test/unit/ConfigLoaderNewTest.py | """
File: ConfigLoaderNewTest.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Tests for the ConfigurationLoader module.
"""
import lib.Logging as L
from lib.BatchSystemBackends import BatchSystemBackendType, SlurmInterfaces, BatchSystemTimingType, SlurmBackend
from lib.ConfigurationLoader import ConfigurationLoader, SimplifiedConfigurationLoader, BatchSystemConfigurationLoader
from lib.Configuration import PiraConfigAdapter, PiraConfigII, InvocationConfig, BatchSystemHardwareConfig, SlurmConfig
import os
import unittest
logger = L.get_logger()
logger.set_state('debug')
logger.set_state('info')
logger.set_state('warn')
# These assertable values are "old" and refer to the config version 1.0
dep_aw_items = ['item01', 'item02']
dep_aw_builds = ['/home/something/top_dir']
dep_aw_flavors = {'item01': ['local-flav1', 'vanilla'], 'item02': ['local-flav1']}
dep_aw_ins_anal = {
'item01': ['/ins_anal/directory/for/functors', '/where/to/put/cube/files', '/path/to/analysis/tool'],
'item02': ['/item02/path/to/functors', '/shared/work/cubes', '/directory/where_to/find/analyzer']
}
dep_aw_builders = {'item01': '/builder/item01/directory', 'item02': '/another/builder/directory/for/item02'}
dep_aw_run = {
'item01': {
'args': [],
'runner': '/path/to/runner_functors/item01',
'submitter': '/another/path/to/item01/submitter',
'batch_script': '/some/madeup/script.sh'
},
'item02': {
'args': ['-i 200', '-g 100'],
'runner': '/item02/runner/functors.dir',
'submitter': '',
'batch_script': ''
}
}
class TestConfigLoader(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.loader = ConfigurationLoader()
InvocationConfig.create_from_kwargs({'config' : './input/unit_input_001.json'})
def test_load_conf_not_none(self):
cfg = self.loader.load_conf()
self.assertIsNotNone(cfg)
def test_conf_builds(self):
cfg = self.loader.load_conf()
bs = cfg.get_builds()
self.assertIsNotNone(bs)
for b in bs:
self.assertIn(b, dep_aw_builds)
def test_conf_items(self):
cfg = self.loader.load_conf()
for b in cfg.get_builds():
itms = cfg.get_items(b)
self.assertIsNotNone(itms)
for i in itms:
self.assertIn(i, dep_aw_items)
def test_config_item01(self):
cfg = self.loader.load_conf()
# As the dicts are unordered, we set the keys manually!
b = '/home/something/top_dir'
i_01 = 'item01'
builder = cfg.get_flavor_func(b, i_01)
self.assertIsNotNone(builder)
self.assertIn(builder, dep_aw_builders[i_01])
expected_item = 'item01'
anal_func_dir = cfg.get_analyze_func(b, i_01)
self.assertEqual(anal_func_dir, dep_aw_ins_anal[expected_item][0])
cube_path = cfg.get_analyzer_exp_dir(b, i_01)
self.assertEqual(cube_path, dep_aw_ins_anal[expected_item][1])
tool_path = cfg.get_analyzer_dir(b, i_01)
self.assertEqual(tool_path, dep_aw_ins_anal[expected_item][2])
runner = cfg.get_runner_func(b, i_01)
self.assertEqual(runner, dep_aw_run[expected_item]['runner'])
submitter = cfg.get_submitter_func(b, i_01)
self.assertEqual(submitter, dep_aw_run[expected_item]['submitter'])
bs = cfg.get_batch_script_func(b, i_01)
self.assertEqual(bs, dep_aw_run[expected_item]['batch_script'])
flvs = cfg.get_flavors(b, i_01)
self.assertEqual(flvs, dep_aw_flavors[expected_item])
args = cfg.get_args(b, i_01)
self.assertListEqual(args[0], dep_aw_run[expected_item]['args'])
def test_config_item02(self):
cfg = self.loader.load_conf()
# As the dicts are unordered, we set the keys manually!
b = '/home/something/top_dir'
i_02 = 'item02'
builder = cfg.get_flavor_func(b, i_02)
self.assertIsNotNone(builder)
self.assertIn(builder, dep_aw_builders[i_02])
expected_item = 'item02'
anal_func_dir = cfg.get_analyze_func(b, i_02)
self.assertEqual(anal_func_dir, dep_aw_ins_anal[expected_item][0])
cube_path = cfg.get_analyzer_exp_dir(b, i_02)
self.assertEqual(cube_path, dep_aw_ins_anal[expected_item][1])
tool_path = cfg.get_analyzer_dir(b, i_02)
self.assertEqual(tool_path, dep_aw_ins_anal[expected_item][2])
runner = cfg.get_runner_func(b, i_02)
self.assertEqual(runner, dep_aw_run[expected_item]['runner'])
submitter = cfg.get_submitter_func(b, i_02)
self.assertEqual(submitter, dep_aw_run[expected_item]['submitter'])
bs = cfg.get_batch_script_func(b, i_02)
self.assertEqual(bs, dep_aw_run[expected_item]['batch_script'])
flvs = cfg.get_flavors(b, i_02)
self.assertEqual(flvs, dep_aw_flavors[expected_item])
args = cfg.get_args(b, i_02)
self.assertListEqual(args[0], dep_aw_run[expected_item]['args'])
self.assertFalse(cfg.is_submitter(b, i_02))
@unittest.skip('Global flavors are currently not implemented')
def test_global_flavors(self):
# TODO implement tests for retrieving global flavors
self.assertFalse('Need to check global flavors')
@unittest.skip('Global flavors are currently not implemented')
def test_generated_items(self):
# TODO build_directory, cube_directory, analyzer_dir, ..?
self.assertFalse('Need to check global flavors')
n_functor_path = {'item01': '/directory/for/functors/item01', 'item02': '/directory/for/functors/item02'}
n_analysis_path = '/path/to/analysis/tool'
n_cube_path = {'item01': '/where/to/put/cube/files', 'item02': '/where/to/put/cube/files/item02'}
n_flavors = {'item01': ['local-flav1', 'vanilla'], 'item02': ['test-flav']}
class TestSimplifiedConfigLoader(unittest.TestCase):
# setUp (not setUpClass): each test needs a freshly created loader and invocation config.
def setUp(self):
self.loader = SimplifiedConfigurationLoader()
InvocationConfig.create_from_kwargs({'config' : './input/unit_input_002.json'})
def test_load_conf_not_none(self):
cfg = self.loader.load_conf()
self.assertIsNotNone(cfg)
def test_conf_builds(self):
cfg = self.loader.load_conf()
bs = cfg.get_builds()
self.assertIsNotNone(bs)
for b in bs:
self.assertIn(b, ['/this/is/my/home'])
def test_conf_items(self):
cfg = self.loader.load_conf()
for b in cfg.get_builds():
itms = cfg.get_items(b)
self.assertIsNotNone(itms)
for i in itms:
self.assertIn(i, dep_aw_items)
def test_config_item01(self):
cfg = self.loader.load_conf()
# As the dicts are unordered, we set the keys manually!
b = '/this/is/my/home'
i_01 = 'item01'
expected_item = 'item01'
anal_func_dir = cfg.get_analyzer_path(b, i_01)
self.assertEqual(anal_func_dir, n_functor_path[expected_item])
cube_path = cfg.get_analyzer_exp_dir(b, i_01)
self.assertEqual(cube_path, n_cube_path[expected_item])
tool_path = cfg.get_analyzer_dir(b, i_01)
self.assertEqual(tool_path, n_analysis_path)
runner = cfg.get_runner_func(b, i_01)
self.assertEqual(runner, n_functor_path[expected_item])
flvs = cfg.get_flavors(b, i_01)
self.assertEqual(flvs, n_flavors[expected_item])
args = cfg.get_args(b, i_01)
# FIXME correct asserted args
self.assertListEqual(args, [('param1', 'val1', 'param2', 'val3'), ('param1', 'val1', 'param2', 'val4'),
('param1', 'val2', 'param2', 'val3'), ('param1', 'val2', 'param2', 'val4'),
('param2', 'val3', 'param1', 'val1'), ('param2', 'val3', 'param1', 'val2'),
('param2', 'val4', 'param1', 'val1'), ('param2', 'val4', 'param1', 'val2')])
def test_config_item02(self):
cfg = self.loader.load_conf()
# As the dicts are unordered, we set the keys manually!
b = '/this/is/my/home'
i_02 = 'item02'
builder = cfg.get_builder_path(b, i_02)
self.assertIsNotNone(builder)
self.assertIn(builder, n_functor_path[i_02])
expected_item = 'item02'
anal_func_dir = cfg.get_analyzer_path(b, i_02)
self.assertEqual(anal_func_dir, n_functor_path[expected_item])
cube_path = cfg.get_analyzer_exp_dir(b, i_02)
self.assertEqual(cube_path, n_cube_path[expected_item])
tool_path = cfg.get_analyzer_dir(b, i_02)
self.assertEqual(tool_path, n_analysis_path)
runner = cfg.get_runner_func(b, i_02)
self.assertEqual(runner, n_functor_path[expected_item])
flvs = cfg.get_flavors(b, i_02)
self.assertEqual(flvs, n_flavors[expected_item])
args = cfg.get_args(b, i_02)
# FIXME correct asserted args
self.assertListEqual([tuple(x) for x in args], [('param1', 'val1'), ('param1', 'val2'), ('param1', 'val3')])
def test_config_linear_mapper(self):
InvocationConfig.create_from_kwargs({'config' : './input/unit_input_003.json'})
cfg = self.loader.load_conf()
# As the dicts are unordered, we set the keys manually!
b = '/this/is/my/home'
i_01 = 'item01'
self.assertIsNotNone(cfg)
builder = cfg.get_builder_path(b, i_01)
self.assertIsNotNone(builder)
self.assertIn(builder, n_functor_path[i_01])
expected_item = 'item01'
anal_func_dir = cfg.get_analyzer_path(b, i_01)
self.assertEqual(anal_func_dir, n_functor_path[expected_item])
cube_path = cfg.get_analyzer_exp_dir(b, i_01)
self.assertEqual(cube_path, '/where/to/put/cube/files/item01')
tool_path = cfg.get_analyzer_dir(b, i_01)
self.assertEqual(tool_path, n_analysis_path)
runner = cfg.get_runner_func(b, i_01)
self.assertEqual(runner, n_functor_path[expected_item])
flvs = cfg.get_flavors(b, i_01)
self.assertEqual(flvs, ['test'])
args = cfg.get_args(b, i_01)
# FIXME correct asserted args
expected = [ ('param1', 'val1', [], 'param2', 'yval1', []),
('param1', 'val2', [], 'param2', 'yval2', []),
('param1', 'val3', [], 'param2', 'yval3', []) ]
#self.assertListEqual(args, [('param1', 'val1', 'param2', 'yval1'), ('param1', 'val2', 'param2', 'yval2'),
# ('param1', 'val3', 'param2', 'yval3')])
for (exp, arg) in zip(expected, args):
self.assertEqual(exp, tuple(arg))
def test_basic_config_001(self):
InvocationConfig.create_from_kwargs({'config' : '../inputs/configs/basic_config_001.json'})
cfg = self.loader.load_conf()
self.assertIsNotNone(cfg)
self.assertFalse(cfg.is_empty())
def test_relative_paths(self):
InvocationConfig.create_from_kwargs({'config' : '../inputs/configs/basic_config_005.json'})
cfg = self.loader.load_conf()
self.assertFalse(cfg.is_empty())
self.assertTrue(isinstance(cfg, PiraConfigAdapter))
self.assertTrue(isinstance(cfg.get_adapted(), PiraConfigII))
def test_env_var_expansion(self):
expected_base = '/base'
expected_analyzer = '/analyzer_dir'
expected_cubes = '/cubes_dir'
expected_functors = '/functors_dir'
os.environ['PIRA_TEST_ENV_VAR_BASE'] = expected_base
os.environ['PIRA_TEST_ENV_VAR_ANALYZER'] = expected_analyzer
os.environ['PIRA_TEST_ENV_VAR_CUBES'] = expected_cubes
os.environ['PIRA_TEST_ENV_VAR_FUNCTORS'] = expected_functors
b = expected_base
i = 'test_item'
InvocationConfig.create_from_kwargs({'config' : '../inputs/configs/basic_config_006.json'})
cfg = self.loader.load_conf()
self.assertIsNotNone(cfg)
self.assertFalse(cfg.is_empty())
base_dir = cfg.get_place(b)
self.assertEqual(base_dir, expected_base)
analyzer_dir = cfg.get_analyzer_dir(b, i)
self.assertEqual(analyzer_dir, expected_analyzer)
cubes_dir = cfg.get_analyzer_exp_dir(b, i)
self.assertEqual(cubes_dir, expected_cubes)
functor_dir = cfg.get_cleaner_path(b, i)
self.assertEqual(functor_dir, expected_functors)
class BatchSystemConfigLoaderTest(unittest.TestCase):
"""
Test for the BatchSystemConfigurationLoader.
"""
def test_get_config1(self):
"""
Test the get_config method
"""
InvocationConfig.create_from_kwargs(
{"slurm-config": "../inputs/configs/batchsystem_config_001.json"})
invoc_conf = InvocationConfig.get_instance()
cl = BatchSystemConfigurationLoader(invoc_conf)
cfg: SlurmConfig = cl.get_config()
# compare everything to the file content
self.assertListEqual(cfg.modules, [
{
"name": "gcc",
"version": "8.5"
},
{
"name": "cmake",
"version": "8.5"
},
{
"name": "llvm",
"version": "10.0.0",
"depends-on": [
{
"name": "gcc",
"version": "8.5"
}
]
},
{
"name": "openmpi",
"version": "4.0.5",
"depends-on": [
{
"name": "gcc",
"version": "8.5"
}
]
},
{
"name": "python",
"version": "3.9.5",
"depends-on": [
{
"name": "openmpi",
"version": "4.0.5"
}
]
},
{
"name": "qt",
"version": "5.13.2",
"depends-on": [
{
"name": "python",
"version": "3.9.5"
}
]
}
])
self.assertEqual(cfg.time_str, "00:10:00")
self.assertEqual(cfg.mem_per_cpu, 3800)
self.assertEqual(cfg.number_of_tasks, 1)
self.assertEqual(cfg.partition, None)
self.assertEqual(cfg.reservation, None)
self.assertEqual(cfg.account, "project01823")
self.assertEqual(cfg.number_of_cores_per_task, 96)
def test_get_batch_interface1(self):
"""
Test for get_batch_interface
"""
InvocationConfig.create_from_kwargs(
{"slurm-config": "../inputs/configs/batchsystem_config_001.json"})
invoc_conf = InvocationConfig.get_instance()
cl = BatchSystemConfigurationLoader(invoc_conf)
cl.get_config()
bi = cl.get_batch_interface()
self.assertEqual(BatchSystemBackendType.SLURM, bi.backend)
self.assertEqual(SlurmInterfaces.PYSLURM, bi.interface)
self.assertEqual(BatchSystemTimingType.SUBPROCESS, bi.timing_type)
self.assertTrue(isinstance(bi, SlurmBackend))
def test_load_config2(self):
"""
Test with config 002. This is a minimal config.
"""
InvocationConfig.create_from_kwargs(
{"slurm-config": "../inputs/configs/batchsystem_config_002.json"})
invoc_conf = InvocationConfig.get_instance()
cl = BatchSystemConfigurationLoader(invoc_conf)
cfg: SlurmConfig = cl.get_config()
self.assertEqual(cfg.time_str, "00:10:00")
self.assertEqual(cfg.mem_per_cpu, 3800)
self.assertEqual(cfg.number_of_tasks, 1)
self.assertEqual(cfg.force_sequential, True)
def test_get_batch_interface2(self):
"""
Test with config 002. This tests if the defaults are correctly in place:
Even though it is not given in the config file, it should be returned here.
"""
InvocationConfig.create_from_kwargs(
{"slurm-config": "../inputs/configs/batchsystem_config_002.json"})
invoc_conf = InvocationConfig.get_instance()
cl = BatchSystemConfigurationLoader(invoc_conf)
cl.get_config()
bi = cl.get_batch_interface()
self.assertEqual(bi.backend, BatchSystemBackendType.SLURM)
self.assertEqual(bi.interface, SlurmInterfaces.PYSLURM)
self.assertEqual(bi.timing_type, BatchSystemTimingType.SUBPROCESS)
def test_get_config3(self):
"""
Uses the config 003, to test if missing specifications lead to errors.
"""
InvocationConfig.create_from_kwargs(
{"slurm-config": "../inputs/configs/batchsystem_config_003.json"})
invoc_conf = InvocationConfig.get_instance()
cl = BatchSystemConfigurationLoader(invoc_conf)
# 'batch-settings/number_of_tasks' option not found but mandatory.
self.assertRaises(RuntimeError, cl.get_config)
def test_get_config4(self):
"""
Uses the config 004, to test if missing specifications lead to errors.
"""
InvocationConfig.create_from_kwargs(
{"slurm-config": "../inputs/configs/batchsystem_config_004.json"})
invoc_conf = InvocationConfig.get_instance()
cl = BatchSystemConfigurationLoader(invoc_conf)
# 'batch-settings/mem_per_cpu' option not found but mandatory.
self.assertRaises(RuntimeError, cl.get_config)
def test_get_config5(self):
"""
Uses the config 005, to test if missing specifications lead to errors.
"""
InvocationConfig.create_from_kwargs(
{"slurm-config": "../inputs/configs/batchsystem_config_005.json"})
invoc_conf = InvocationConfig.get_instance()
cl = BatchSystemConfigurationLoader(invoc_conf)
# 'batch-settings/time_str' option not found but mandatory.
self.assertRaises(RuntimeError, cl.get_config)
def test_get_config6(self):
"""
Uses the config 006, to test if missing specifications lead to errors.
"""
InvocationConfig.create_from_kwargs(
{"slurm-config": "../inputs/configs/batchsystem_config_006.json"})
invoc_conf = InvocationConfig.get_instance()
cl = BatchSystemConfigurationLoader(invoc_conf)
# 'module-loads': every module has to have a name.
self.assertRaises(RuntimeError, cl.get_config)
def test_get_config7(self):
"""
Uses the config 007, to test if missing specifications lead to errors.
"""
InvocationConfig.create_from_kwargs(
{"slurm-config": "../inputs/configs/batchsystem_config_007.json"})
invoc_conf = InvocationConfig.get_instance()
cl = BatchSystemConfigurationLoader(invoc_conf)
# 'module-loads': every module dependency has to have a name (here: in module llvm).
self.assertRaises(RuntimeError, cl.get_config)
if __name__ == '__main__':
unittest.main()
| 17,598 | 34.553535 | 126 | py |
PIRA | PIRA-master/test/unit/MeasurementTest.py | """
File: MeasurementTest.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Tests for the Measurement module (RunResult and ScorepSystemHelper).
"""
import shutil
import os
import unittest
import lib.Measurement as M
import lib.ConfigurationLoader as C
import lib.DefaultFlags as D
from lib.Configuration import PiraConfig, TargetConfig, InstrumentConfig, InvocationConfig
import lib.Utility as U
class TestRunResult(unittest.TestCase):
"""
Tests the RunResult class for its math
"""
def test_empty_init(self):
rr = M.RunResult()
self.assertFalse(rr.is_multi_value())
self.assertRaises(RuntimeError, rr.get_average)
self.assertRaises(RuntimeError, rr.compute_overhead, M.RunResult())
def test_single_value(self):
rr = M.RunResult(4.0, 1)
rr2 = M.RunResult(2.0, 1)
self.assertFalse(rr.is_multi_value())
self.assertEqual(rr.get_average(), 4.0)
self.assertEqual(rr.compute_overhead(rr2), 2.0)
def test_multi_values(self):
rr = M.RunResult()
rr2 = M.RunResult()
for v in range(1,4):
rr.add_values(float(v), 1)
rr2.add_values(float(2*v), 1)
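# rr holds runtimes (1, 2, 3) and rr2 holds (2, 4, 6), so each per-index overhead is 0.5.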
self.assertTrue(rr.is_multi_value())
# Simple averages
self.assertEqual(rr.get_average(0), 1)
self.assertEqual(rr.get_average(1), 2)
self.assertEqual(rr.get_average(2), 3)
# Overheads
self.assertEqual(rr.compute_overhead(rr2, 0), 0.5)
self.assertEqual(rr.compute_overhead(rr2, 1), 0.5)
self.assertEqual(rr.compute_overhead(rr2, 2), 0.5)
class TestScorepHelper(unittest.TestCase):
"""
Tests the ScorepSystemHelper class and, currently, also the DefaultFlags.
TODO Separate the building portion of Score-P and the measurement system part.
"""
def setUp(self):
# get runtime folder
pira_dir = U.get_default_pira_dir()
self.cubes_dir = os.path.join(pira_dir, 'test_cubes')
# insert user runtime folder into test config
data = None
with open('input/unit_input_004.json', 'r') as file:
data = file.read()
data = data.replace('/tmp/where/cube/files/are', self.cubes_dir)
with open('input/unit_input_004.json', 'w') as file:
file.write(data)
InvocationConfig.create_from_kwargs({'config' : 'input/unit_input_004.json'})
self.cfg_loader = C.ConfigurationLoader()
self.cfg = self.cfg_loader.load_conf()
self.target_cfg = TargetConfig('/this/is/top_dir', '/this/is/top_dir', 'item01', 'item01-flavor01', '')
self.instr_cfg = InstrumentConfig(True, 0)
def tearDown(self):
# reset test config
data = None
with open('input/unit_input_004.json', 'r') as file:
data = file.read()
data = data.replace(self.cubes_dir, '/tmp/where/cube/files/are')
with open('input/unit_input_004.json', 'w') as file:
file.write(data)
shutil.rmtree(self.cubes_dir, ignore_errors=True)
def test_scorep_mh_init(self):
s_mh = M.ScorepSystemHelper(PiraConfig())
self.assertIn('.cubex', s_mh.known_files)
self.assertDictEqual(s_mh.data, {})
self.assertEqual('False', s_mh.cur_overwrite_exp_dir)
self.assertEqual('', s_mh.cur_mem_size)
self.assertEqual('', s_mh.cur_base_name)
self.assertEqual('', s_mh.cur_filter_file)
self.assertEqual('', s_mh.cur_exp_directory)
def test_scorep_mh_set_up_instr(self):
s_mh = M.ScorepSystemHelper(self.cfg)
s_mh.set_up(self.target_cfg, self.instr_cfg)
self.assertIn('cube_dir', s_mh.data)
self.assertEqual('500M', s_mh.cur_mem_size)
self.assertEqual('True', s_mh.cur_overwrite_exp_dir)
self.assertEqual('item01-flavor01-item01', s_mh.cur_base_name)
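# Experiment directories follow the <cubes_dir>/<item>-<flavor>-<iteration> scheme.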
self.assertEqual(self.cubes_dir + '/item01-item01-flavor01-0', s_mh.cur_exp_directory)
def test_scorep_mh_set_up_no_instr(self):
s_mh = M.ScorepSystemHelper(self.cfg)
self.instr_cfg._is_instrumentation_run = False
s_mh.set_up(self.target_cfg, self.instr_cfg)
self.assertDictEqual({}, s_mh.data)
self.assertEqual('', s_mh.cur_mem_size)
self.assertEqual('False', s_mh.cur_overwrite_exp_dir)
self.assertEqual('', s_mh.cur_base_name)
self.assertEqual('', s_mh.cur_exp_directory)
def test_scorep_mh_dir_invalid(self):
s_mh = M.ScorepSystemHelper(self.cfg)
s_mh.set_up(self.target_cfg, self.instr_cfg)
self.assertEqual(self.cubes_dir + '/item01-item01-flavor01-0', s_mh.cur_exp_directory)
self.assertRaises(M.MeasurementSystemException, s_mh.set_exp_dir, '+/invalid/path/haha', 'item01-flavor01', 0)
self.assertRaises(M.MeasurementSystemException, s_mh.set_exp_dir, '/inv?alid/path/haha', 'item01-flavor01', 0)
def test_get_instr_file_flags(self):
s_mh = M.ScorepSystemHelper(self.cfg)
s_mh.set_up(self.target_cfg, self.instr_cfg)
instr_file = 'myFile.filt'
ct_filter = True
cc = M.ScorepSystemHelper.get_scorep_compliant_CC_command(instr_file)
self.assertEqual('\"clang -finstrument-functions -finstrument-functions-whitelist-inputfile='+instr_file+'\"', cc)
cpp = M.ScorepSystemHelper.get_scorep_compliant_CXX_command(instr_file)
self.assertEqual('\"clang++ -finstrument-functions -finstrument-functions-whitelist-inputfile='+instr_file+'\"', cpp)
def test_get_no_instr_file_flags(self):
s_mh = M.ScorepSystemHelper(self.cfg)
s_mh.set_up(self.target_cfg, self.instr_cfg)
instr_file = 'myFile.filt'
ct_filter = False
kw_dict = D.BackendDefaults().get_default_kwargs()
cc = kw_dict['CC']
self.assertEqual('\"clang\"', cc)
cpp = kw_dict['CXX']
self.assertEqual('\"clang++\"', cpp)
if __name__ == '__main__':
unittest.main()
| 5,591 | 35.311688 | 126 | py |
PIRA | PIRA-master/test/unit/ProfileSinkTest.py | """
File: ProfileSinkTest.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Tests for the argument mapping
"""
import lib.ProfileSink as P
import lib.Configuration as C
from lib.DefaultFlags import BackendDefaults
import unittest
class TestProfileSink(unittest.TestCase):
def setUp(self):
self._target = 'asd'
self._flavor = 'fl'
self._dbi = 'a'
self._nreps = 1
self._ic_true = C.InstrumentConfig(True, self._nreps)
self._ic_false = C.InstrumentConfig()
self._params = ['par1']
self._prefix = 'pre'
self._postfix = 'post'
self._filename = 'profile.cubex'
C.InvocationConfig.create_from_kwargs({'config' : '../inputs/configs/basic_config_005.json'})
def test_base_raises(self):
sb = P.ProfileSinkBase()
backend_provider = BackendDefaults()
self._dir = backend_provider.instance.get_pira_dir()
self._tc = C.TargetConfig(self._dir, self._dir, self._target, self._flavor, self._dbi)
self.assertRaises(RuntimeError, sb.process, self._dir, self._tc, self._ic_true)
def test_extrap_create(self):
backend_provider = BackendDefaults()
self._dir = backend_provider.instance.get_pira_dir()
self._tc = C.TargetConfig(self._dir, self._dir, self._target, self._flavor, self._dbi)
es = P.ExtrapProfileSink(self._dir, self._params, self._prefix, self._postfix, self._filename)
self.assertEqual(es.get_target(), '')
if __name__ == '__main__':
unittest.main() | 1,535 | 33.133333 | 126 | py |
PIRA | PIRA-master/test/unit/BatchSystemBackendsTest.py | """
File: BatchSystemBackendsTest.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Tests for the BatchSystemBackends modules.
"""
import unittest
from lib.BatchSystemBackends import *
class BatchSystemInterfaceTests(unittest.TestCase):
"""
Tests for the BatchSystemInterface class.
"""
def setUp(self) -> None:
"""
Setup
"""
self.bi = BatchSystemInterface(
backend_type=BatchSystemBackendType.SLURM,
interface_type=SlurmInterfaces.PYSLURM,
timings_type=BatchSystemTimingType.SUBPROCESS
)
self.key = U.generate_random_string()
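# Commands and results registered below are keyed by this random identifier.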
def test_get_interfaces(self):
"""
Tests for get_interfaces.
"""
self.assertEqual(self.bi.get_interfaces(), None)
def test_set_interface(self):
"""
Tests for set_interface.
"""
self.assertEqual(self.bi.interface, SlurmInterfaces.PYSLURM)
self.bi.set_interface(SlurmInterfaces.SBATCH_WAIT)
self.assertEqual(self.bi.interface, SlurmInterfaces.SBATCH_WAIT)
def test_configure(self):
"""
Tests for configure().
"""
batch_hardware_conf = BatchSystemHardwareConfig(3800, 1, 96)
batch_gen = BatchSystemGenerator(batch_hardware_conf)
self.bi.configure(batch_hardware_conf, batch_gen)
self.assertEqual(self.bi.config, batch_hardware_conf)
self.assertEqual(self.bi.generator, batch_gen)
def test_add_preparation_command(self):
"""
Tests for add_preparation_command
"""
cmd = "test.exe"
self.bi.add_preparation_command(self.key, cmd)
self.assertTrue(self.key in self.bi.preparation_commands)
self.assertEqual(self.bi.preparation_commands[self.key], cmd)
def test_add_teardown_command(self):
"""
Tests for add_teardown_command
"""
cmd = "test2.exe"
self.bi.add_teardown_command(self.key, cmd)
self.assertTrue(self.key in self.bi.teardown_commands)
self.assertEqual(self.bi.teardown_commands[self.key], cmd)
def test_add_timed_command(self):
"""
Test for add_timed_command
"""
cmd = "test3.exe"
self.bi.add_timed_command(self.key, cmd)
self.assertTrue(self.key in self.bi.timed_commands)
self.assertTrue((self.key, None) in self.bi.results)
self.assertEqual(self.bi.timed_commands[self.key], cmd)
self.assertEqual(self.bi.results[(self.key, None)], None)
def test_get_results(self):
"""
Test for get_results
"""
# setup
cmd = "test4.exe"
self.bi.add_timed_command(self.key, cmd)
# results asserts
self.assertEqual(self.bi.results[(self.key, None)], None)
self.bi.results[(self.key, None)] = 42
self.assertEqual(self.bi.get_results(self.key, None), 42)
def test_cleanup(self):
"""
Test for cleanup()
"""
cmd = "test5.exe"
self.bi.add_preparation_command(self.key, cmd)
self.bi.add_teardown_command(self.key, cmd)
self.bi.add_timed_command(self.key, cmd)
U.write_file(f"{U.get_default_pira_dir()}/pira-slurm-UNITTESTFILE.txt",
"Test - You can remove this without worrying.")
self.bi.cleanup()
self.assertEqual(self.bi.results, {})
self.assertEqual(self.bi.preparation_commands, {})
self.assertEqual(self.bi.teardown_commands, {})
self.assertEqual(self.bi.timed_commands, {})
self.assertFalse(U.is_file(f"{U.get_default_pira_dir()}/pira-slurm-UNITTESTFILE.txt"))
class SlurmBackendTest(unittest.TestCase):
"""
Tests for the lib.SlurmBackend class
"""
def setUp(self) -> None:
"""
Setup
"""
self.si = SlurmBackend(
backend_type=BatchSystemBackendType.SLURM,
interface_type=SlurmInterfaces.PYSLURM,
timing_type=BatchSystemTimingType.SUBPROCESS
)
slurm_conf = SlurmConfig(job_array_start=1, job_array_end=3) # use the defaults mostly
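# A job array spanning ids 1..3 means each timed command gets three result slots.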
slurm_gen = SlurmGenerator(slurm_conf)
self.si.configure(slurm_conf, slurm_gen)
self.key = U.generate_random_string()
def test_get_interfaces(self):
"""
Tests for get_interfaces.
"""
self.assertEqual(self.si.get_interfaces(), SlurmInterfaces)
def test_set_interface(self):
"""
Tests for set_interface.
"""
self.assertEqual(self.si.interface, SlurmInterfaces.PYSLURM)
self.si.set_interface(SlurmInterfaces.SBATCH_WAIT)
self.assertEqual(self.si.interface, SlurmInterfaces.SBATCH_WAIT)
def test_configure(self):
"""
Tests for configure().
"""
slurm_conf = SlurmConfig(job_array_start=1, job_array_end=3) # use the defaults mostly
slurm_gen = SlurmGenerator(slurm_conf)
self.si.configure(slurm_conf, slurm_gen)
self.assertEqual(self.si.config, slurm_conf)
self.assertEqual(self.si.generator, slurm_gen)
def test_add_preparation_command(self):
"""
Tests for add_preparation_command
"""
cmd = "test.exe"
self.si.add_preparation_command(self.key, cmd)
self.assertTrue(self.key in self.si.preparation_commands)
self.assertEqual(self.si.preparation_commands[self.key], cmd)
def test_add_teardown_command(self):
"""
Tests for add_teardown_command
"""
cmd = "test2.exe"
self.si.add_teardown_command(self.key, cmd)
self.assertTrue(self.key in self.si.teardown_commands)
self.assertEqual(self.si.teardown_commands[self.key], cmd)
def test_add_timed_command(self):
"""
Test for add_timed_command
"""
cmd = "test3.exe"
self.si.add_timed_command(self.key, cmd)
self.assertTrue(self.key in self.si.timed_commands)
self.assertTrue((self.key, 1) in self.si.results)
self.assertTrue((self.key, 2) in self.si.results)
self.assertTrue((self.key, 3) in self.si.results)
self.assertEqual(self.si.timed_commands[self.key], cmd)
self.assertEqual(self.si.results[(self.key, 1)], None)
self.assertEqual(self.si.results[(self.key, 2)], None)
self.assertEqual(self.si.results[(self.key, 3)], None)
def test_get_results(self):
"""
Test for get_results
"""
# setup
cmd = "test4.exe"
self.si.add_timed_command(self.key, cmd)
# results asserts
self.assertEqual(self.si.results[(self.key, 1)], None)
self.assertEqual(self.si.results[(self.key, 2)], None)
self.assertEqual(self.si.results[(self.key, 3)], None)
self.si.results[(self.key, 1)] = 42
self.si.results[(self.key, 2)] = 43
self.si.results[(self.key, 3)] = 44
self.assertEqual(self.si.get_results(self.key, 1), 42)
self.assertEqual(self.si.get_results(self.key, 2), 43)
self.assertEqual(self.si.get_results(self.key, 3), 44)
def test_cleanup(self):
"""
Test for cleanup()
"""
cmd = "test5.exe"
self.si.add_preparation_command(self.key, cmd)
self.si.add_teardown_command(self.key, cmd)
self.si.add_timed_command(self.key, cmd)
self.si.cleanup()
self.assertEqual(self.si.results, {})
self.assertEqual(self.si.preparation_commands, {})
self.assertEqual(self.si.teardown_commands, {})
self.assertEqual(self.si.timed_commands, {})
def test_populate_result_dict_subprocess(self):
"""
Tests for populate_result_dict with timing_type Subprocess.
"""
self.si.timing_type = BatchSystemTimingType.SUBPROCESS
# generate some test input
self.si.add_timed_command(self.key, "test6.txt")
jobid = str(int(time.time()))
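# The current timestamp stands in as a fake Slurm job id for the fixture file names.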
self.si.job_id_map[self.key] = jobid
for i in range(1, 3+1, 1):
with open(f"{U.get_default_pira_dir()}/pira-slurm-{jobid}-{self.key}-{i}.json", "w") as f:
json.dump({
'cutime': 1234,
'cstime': 4321,
'elapsed': i,
'output': 'thisissomeoutput'
}, f, indent=4)
# at begin the results should be not set
self.assertEqual(self.si.results[(self.key, 1)], None)
self.assertEqual(self.si.results[(self.key, 2)], None)
self.assertEqual(self.si.results[(self.key, 3)], None)
# files should be read into the result variable
# each result is tuple of (elapsed time, output)
self.si.populate_result_dict()
self.assertEqual(self.si.results[(self.key, 1)], (float(1), "thisissomeoutput"))
self.assertEqual(self.si.results[(self.key, 2)], (float(2), "thisissomeoutput"))
self.assertEqual(self.si.results[(self.key, 3)], (float(3), "thisissomeoutput"))
# and files should be deleted
for i in range(1, 3+1, 1):
U.remove_file(f"{U.get_default_pira_dir()}/pira-slurm-{jobid}-{self.key}-{i}.json")
def test_populate_result_dict_os_time(self):
"""
Test for populate_result_dict with timing_type os_time
"""
self.si.timing_type = BatchSystemTimingType.OS_TIME
# generate some test input
self.si.add_timed_command(self.key, "test7.txt")
jobid = str(int(time.time()))
self.si.job_id_map[self.key] = jobid
for i in range(1, 3+1, 1):
with open(f"{U.get_default_pira_dir()}/pira-slurm-err.{jobid}_{i}", "w") as f:
f.write("thisissomeoutput\n")
# write the actual timing like from /usr/bin/time --format=%e
f.write(f"{i}.0\n")
# at begin the results should be not set
self.assertEqual(self.si.results[(self.key, 1)], None)
self.assertEqual(self.si.results[(self.key, 2)], None)
self.assertEqual(self.si.results[(self.key, 3)], None)
# it should grep the last line from the file as timing
self.si.populate_result_dict()
self.assertEqual(self.si.results[(self.key, 1)], (float(1), "thisissomeoutput\n"))
self.assertEqual(self.si.results[(self.key, 2)], (float(2), "thisissomeoutput\n"))
self.assertEqual(self.si.results[(self.key, 3)], (float(3), "thisissomeoutput\n"))
for i in range(1, 3+1, 1):
U.remove_file(f"{U.get_default_pira_dir()}//pira-slurm-err.{jobid}_{i}")
| 9,766 | 34.007168 | 126 | py |
PIRA | PIRA-master/test/unit/BatchSystemTimerTest.py | """
File: BatchSystemTimerTest.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Tests for the batch system timer module.
"""
import json
import os
import subprocess
import unittest
import lib.Utility as U
class BatchSystemTimerTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.key = "ABCDEFG"
self.job_id = "123456"
self.job_array_id = "0"
self.export_path = "."
self.command = "sleep 2"
# comparison options
self.elapsed = 2
self.output = ""
def setUp(self) -> None:
self.cmd = ["python3", f"{U.get_pira_code_dir()}/lib/BatchSystemTimer.py", self.key, self.job_id,
self.job_array_id, self.export_path, self.command]
def tearDown(self) -> None:
os.remove(f"{self.export_path}/pira-slurm-{self.job_id}-{self.key}-{self.job_array_id}.json")
def test_result_file(self):
subprocess.run(self.cmd)
try:
with open(f"{self.export_path}/pira-slurm-{self.job_id}-{self.key}-{self.job_array_id}.json", "r") as f:
content = json.load(f)
print(content["output"])
self.assertTrue(self.elapsed - 1 < content["elapsed"] < self.elapsed + 1)
newline = "\n"
self.assertEqual(content['output'], f"{self.output}{newline if self.output != '' else ''}")
except FileNotFoundError:
self.fail(f"result file pira-slurm-{self.job_id}-{self.key}-{self.job_array_id}.json was not created")
class BatchSystemTimerTest2(BatchSystemTimerTest):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.command = "sleep 10"
# comparison options
self.elapsed = 10
self.output = ""
class BatchSystemTimerTest3(BatchSystemTimerTest):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.command = "sleep 2; echo 'Hello World!'"
# comparison options
self.elapsed = 2
self.output = "Hello World!"
| 1,941 | 29.825397 | 126 | py |
PIRA | PIRA-master/test/unit/RunnerFactoryTest.py | """
File: RunnerFactoryTest.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Tests for the RunnerFactory module.
"""
from lib.RunnerFactory import PiraRunnerFactory
from lib.Configuration import PiraConfig, ExtrapConfig, InvocationConfig, PiraConfigII, \
PiraConfigAdapter, PiraItem, PiraConfigErrorException
from lib.ConfigurationLoader import BatchSystemConfigurationLoader
from lib.Runner import LocalRunner, LocalScalingRunner, SlurmRunner, SlurmScalingRunner
from lib.ArgumentMapping import CmdlineLinearArgumentMapper
import lib.Utility as U
import unittest
import os
class TestRunnerFactory(unittest.TestCase):
def setUp(self):
self._pira_dir = U.get_default_pira_dir()
self._path_to_config = os.path.join(self._pira_dir, 'config')
self._compile_t_filter = True
self._pira_iters = 3
self._num_reps = 4
self._pira_one_cfg = PiraConfig()
self._hybrid_filter_iters = 0
self._pira_two_cfg = PiraConfigII()
self._item_name = 'test_item'
self._it_dir = os.path.join(self._pira_dir, self._item_name)
item = PiraItem(self._item_name)
item.set_analyzer_dir('/analyzer')
item.set_cubes_dir('/cubes')
item.set_flavors(['dflt'])
item.set_functors_base_path('/functors')
item.set_mode('ct')
InvocationConfig.create_from_kwargs({'config' : 'test/gol_config.json', 'slurm-config': '../inputs/configs/batchsystem_config_001.json'})
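# Both a PIRA config and a Slurm config are required here, since the factory builds local as well as Slurm runners.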
run_opts = CmdlineLinearArgumentMapper({'x': [1]})
item.set_run_options(run_opts)
self._item = item
scl = BatchSystemConfigurationLoader(InvocationConfig.get_instance())
self.slurm_config = scl.get_config()
self.slurm_interface = scl.get_batch_interface()
self._pira_two_cfg.add_item(self._it_dir, item)
self._pira_two_cfg._empty = False # This is usually done in ConfigurationLoader
self._pira_two_adapter = PiraConfigAdapter(self._pira_two_cfg)
def test_init_empty_config(self):
prf = PiraRunnerFactory(PiraConfig())
self.assertIsNotNone(prf)
self.assertTrue(isinstance(prf, PiraRunnerFactory))
prfII = PiraRunnerFactory(PiraConfigII())
self.assertIsNotNone(prf)
self.assertTrue(isinstance(prfII, PiraRunnerFactory))
def test_init_nonempty_config(self):
prf = PiraRunnerFactory(self._pira_two_cfg)
self.assertIsNotNone(prf)
self.assertTrue(isinstance(prf, PiraRunnerFactory))
prfII = PiraRunnerFactory(self._pira_two_adapter)
self.assertIsNotNone(prfII)
self.assertTrue(isinstance(prf, PiraRunnerFactory))
def test_get_simple_local_runner_empty_config(self):
prf = PiraRunnerFactory(PiraConfig())
self.assertIsNotNone(prf)
runner = prf.get_simple_local_runner()
self.assertIsNotNone(runner)
self.assertTrue(isinstance(runner, LocalRunner))
self.assertTrue(runner.has_sink())
def test_get_scalability_runner_empty_config(self):
prf = PiraRunnerFactory(PiraConfig())
self.assertIsNotNone(prf)
ep_cfg = ExtrapConfig('/extrap', 'pre', 'post')
with self.assertRaises(PiraConfigErrorException):
prf.get_scalability_runner(ep_cfg)
prfII = PiraRunnerFactory(PiraConfigII())
self.assertIsNotNone(prfII)
with self.assertRaises(PiraConfigErrorException):
prfII.get_scalability_runner(ep_cfg)
def test_get_scalability_runner_nonempty_config(self):
prfII = PiraRunnerFactory(self._pira_two_cfg)
self.assertIsNotNone(prfII)
ep_cfg = ExtrapConfig('/extrap', 'pre', 'post')
runner = prfII.get_scalability_runner(ep_cfg)
self.assertIsNotNone(runner)
self.assertTrue(isinstance(runner, LocalScalingRunner))
def test_get_scalability_runner_config_adapter(self):
prfII = PiraRunnerFactory(self._pira_two_adapter)
self.assertIsNotNone(prfII)
ep_cfg = ExtrapConfig('/extrap', 'pre', 'post')
runner = prfII.get_scalability_runner(ep_cfg)
self.assertIsNotNone(runner)
self.assertTrue(isinstance(runner, LocalScalingRunner))
def test_get_simple_slurm_runner_empty_config(self):
prf = PiraRunnerFactory(PiraConfig())
self.assertIsNotNone(prf)
runner = prf.get_simple_slurm_runner(self.slurm_config, self.slurm_interface)
self.assertIsNotNone(runner)
self.assertTrue(isinstance(runner, SlurmRunner))
self.assertTrue(runner.has_sink())
def test_get_scalability_slurm_runner_empty_config(self):
prf = PiraRunnerFactory(PiraConfig())
self.assertIsNotNone(prf)
ep_cfg = ExtrapConfig('/extrap', 'pre', 'post')
with self.assertRaises(PiraConfigErrorException):
prf.get_scalability_slurm_runner(self.slurm_config, self.slurm_interface, ep_cfg)
prfII = PiraRunnerFactory(PiraConfigII())
self.assertIsNotNone(prfII)
with self.assertRaises(PiraConfigErrorException):
prfII.get_scalability_runner(ep_cfg)
def test_get_scalability_slurm_runner_nonempty_config(self):
prfII = PiraRunnerFactory(self._pira_two_cfg)
self.assertIsNotNone(prfII)
ep_cfg = ExtrapConfig('/extrap', 'pre', 'post')
runner = prfII.get_scalability_slurm_runner(self.slurm_config, self.slurm_interface, ep_cfg)
self.assertIsNotNone(runner)
self.assertTrue(isinstance(runner, SlurmScalingRunner))
def test_get_slurm_scalability_runner_config_adapter(self):
prfII = PiraRunnerFactory(self._pira_two_adapter)
self.assertIsNotNone(prfII)
ep_cfg = ExtrapConfig('/extrap', 'pre', 'post')
runner = prfII.get_scalability_slurm_runner(self.slurm_config, self.slurm_interface, ep_cfg)
self.assertIsNotNone(runner)
self.assertTrue(isinstance(runner, SlurmScalingRunner))
| 5,687 | 35.696774 | 141 | py |
PIRA | PIRA-master/test/unit/TimeTrackTest.py | """
File: TimeTrackTest.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Tests for the argument mapping
"""
import unittest
import lib.TimeTracking as T
class Dummy:
def __init__(self, arg):
self.val = arg
def func(self):
self.val += 1
return self.val
def func():
return 0
def func1(arg):
return arg + 1
class TestTimeTracking(unittest.TestCase):
def test_create(self):
tracker = T.TimeTracker()
def test_f_track(self):
tracker = T.TimeTracker()
r, time = tracker.f_track('invocation', func)
self.assertGreater(time[0], -1.0)
self.assertGreater(time[1], -1.0)
self.assertEqual(r, 0)
def test_f_track_arg(self):
tracker = T.TimeTracker()
r, time = tracker.f_track('invocation 2', func1, 2)
self.assertGreater(time[0], -1.0)
self.assertGreater(time[1], -1.0)
self.assertEqual(r, 3)
def test_m_track(self):
tracker = T.TimeTracker()
obj = Dummy(1)
r, time = tracker.m_track('obj invocation', obj, 'func')
self.assertGreater(time[0], -1.0)
self.assertGreater(time[1], -1.0)
self.assertEqual(r, 2)
self.assertEqual(obj.val, 2)
if __name__ == '__main__':
unittest.main()
| 1,275 | 21.785714 | 126 | py |
PIRA | PIRA-master/test/unit/ExporterTest.py | """
File: ExporterTest.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Unit test for Exporter module.
"""
import unittest
import lib.Exporter as E
import lib.Measurement as M
import os
class TestCSVExporter(unittest.TestCase):
def test_init_None(self):
with self.assertRaises(RuntimeError) as ex_cm:
exporter = E.CSVExporter(None)
exception = ex_cm.exception
self.assertEqual(str(exception), 'name argument for CSVExport-ctor must not be None')
def test_init(self):
exporter = E.CSVExporter('test exporter')
self.assertIsNotNone(exporter)
self.assertEqual(exporter.get_name(), 'test exporter')
def test_add_new_export_None_arguments(self):
exporter = E.CSVExporter('TE')
with self.assertRaises(RuntimeError) as e_cm:
exporter.add_new_export(None, [1,2])
exception = e_cm.exception
self.assertEqual(str(exception), 'name argument needs to be not None')
with self.assertRaises(RuntimeError) as e_cm:
exporter.add_new_export('key', None)
exception = e_cm.exception
self.assertEqual(str(exception), 'values argument needs to be not None')
def test_add_new_export(self):
exporter = E.CSVExporter('TE')
exporter.add_new_export('a', [1])
self.assertTrue('a' in exporter._exports)
self.assertEqual(exporter._exports['a'], [1])
def test_add_new_export_key_exists(self):
exporter = E.CSVExporter('TE')
exporter.add_new_export('a', [1])
with self.assertRaises(KeyError) as e_cm:
exporter.add_new_export('a', [2])
ex = e_cm.exception
# Not an inconsistency: KeyError renders its message via repr(), unlike most other built-in exceptions.
self.assertEqual(str(ex), "'Key already exists'")
def test_add_export_key_error(self):
exporter = E.CSVExporter('TE')
with self.assertRaises(KeyError) as e_cm:
exporter.add_export('a', [2])
ex = e_cm.exception
self.assertEqual(str(ex), "'a'")
def test_add_export(self):
exporter = E.CSVExporter('TE')
exporter.add_new_export('a', [1])
exporter.add_export('a', [2])
self.assertEqual(exporter._exports['a'], [1, 2])
class TestRunResultExporter(unittest.TestCase):
def test_init(self):
rre = E.RunResultExporter()
self.assertIsNotNone(rre)
def test_add_row(self):
rre = E.RunResultExporter()
rr = M.RunResult(1,1)
rr.add_values(2,2)
rre.add_row("test", rr)
self.assertEqual(len(rre.rows), 1)
self.assertEqual(len(rre.rows[0]), 5)
self.assertEqual(rre.rows[0][0], "test")
self.assertEqual(rre.rows[0][1], 1)
self.assertEqual(rre.rows[0][2], 1)
self.assertEqual(rre.rows[0][3], 2)
self.assertEqual(rre.rows[0][4], 2)
self.assertEqual(rre.width, 5)
def test_export(self):
rre = E.RunResultExporter()
rr = M.RunResult(1, 1)
rr.add_values(2, 2)
rre.add_row("test", rr)
rre.export("test_file")
with open("test_file", "r") as tf:
data = tf.read()
expected_data = '"Type of Run","Accumulated Runtime","Number of Runs","Accumulated Runtime","Number of Runs"\n"test","1","1","2","2"\n'
self.assertEqual(data, expected_data)
os.remove("test_file") | 3,259 | 31.6 | 141 | py |
PIRA | PIRA-master/test/unit/CheckerTest.py | """
File: CheckerTest.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Tests for the Checker-module
"""
import unittest
import lib.Utility as U
import lib.Logging as L
import lib.Checker as C
import lib.ConfigurationLoader as CL
from lib.Configuration import PiraConfig, PiraConfigErrorException, PiraConfigII, PiraItem, PiraConfigAdapter, InvocationConfig
functor_files = [
"/home/pira/build_dir/item1/functors/analyze_item1_ct.py",
"/home/pira/build_dir/item1/functors/clean_item1_ct.py",
"/home/pira/build_dir/item1/functors/no_instr_item1_ct.py",
"/home/pira/build_dir/item1/functors/item1_ct.py",
"/home/pira/build_dir/item1/functors/runner_item1_ct.py",
"/home/pira/build_dir/item2/functors/analyze_item2.py",
"/home/pira/build_dir/item2/functors/clean_item2.py",
"/home/pira/build_dir/item2/functors/no_instr_item2.py",
"/home/pira/build_dir/item2/functors/item2.py",
"/home/pira/build_dir/item2/functors/runner_item2.py",
]
directories_to_create = [
"/home/pira/build_dir/item1/analyzer",
"/home/pira/build_dir/item1/functors",
"/home/pira/build_dir/item2/analyzer",
"/home/pira/build_dir/item2/functors"
]
tempdir = U.get_tempdir()
dep_aw_ins_anal = {
'item1': [tempdir + '/home/pira/build_dir/item1/functors',
tempdir + '/home/pira/build_dir/item1/cubes',
tempdir + '/home/pira/build_dir/item1/analyzer'],
'item2': [tempdir + '/home/pira/build_dir/item2/functors',
tempdir + '/home/pira/build_dir/item2/cubes',
tempdir + '/home/pira/build_dir/item2/analyzer']
}
build_dirs_v2 = {"item1": tempdir + "/home/pira/build_dir/item1",
"item2": tempdir + "/home/pira/build_dir/item2"}
items_v2 = ["item1","item2"]
class CheckerTestCase(unittest.TestCase):
@classmethod
def setUp(self):
self.config_v1= PiraConfig()
self.config_v1.set_build_directories([tempdir + '/home/pira/build_dir'])
self.config_v1.populate_build_dict(self.config_v1.directories)
self.config_v1.set_items(['item1', 'item2'], self.config_v1.directories[0])
self.config_v1.initialize_item_dict(self.config_v1.directories[0], self.config_v1.builds[self.config_v1.directories[0]]['items'])
for build_dir in self.config_v1.directories:
for item in self.config_v1.builds[build_dir]['items']:
self.config_v1.set_item_instrument_analysis(dep_aw_ins_anal[item], build_dir, item)
self.config_v1.set_item_builders(tempdir + '/home/pira/build_dir/item1/functors', build_dir, item)
self.config_v1.set_item_args([], build_dir, item)
self.config_v1.set_item_runner("/" + item + "/runner/functors.dir", build_dir, item)
self.config_v2 = PiraConfigII()
pira_item1 = PiraItem("item1")
pira_item1.set_analyzer_dir(tempdir + "/home/pira/build_dir/item1/analyzer")
pira_item1.set_cubes_dir(tempdir + "/home/pira/build_dir/item1/cubes")
pira_item1.set_flavors(["ct"])
pira_item1.set_functors_base_path(tempdir + "/home/pira/build_dir/item1/functors")
pira_item1.set_mode("CT")
self.config_v2.add_item(tempdir + "/home/pira/build_dir/item1/",pira_item1)
pira_item2 = PiraItem("item2")
pira_item2.set_analyzer_dir(tempdir + "/home/pira/build_dir/item2/analyzer")
pira_item2.set_cubes_dir(tempdir + "/home/pira/build_dir/item2/cubes")
pira_item2.set_flavors([])
pira_item2.set_functors_base_path(tempdir + "/home/pira/build_dir/item2/functors")
pira_item2.set_mode("CT")
self.config_v2.add_item(tempdir + "/home/pira/build_dir/item2/",pira_item2)
self.config_adapter = PiraConfigAdapter(self.config_v2)
self.create_tempfiles(self)
InvocationConfig.create_from_kwargs({'config' : '../inputs/configs/basic_config_005.json'})
def tearDown(self):
self.delete_tempfolders()
def create_tempfiles(self):
for directory in directories_to_create:
U.make_dirs(tempdir + directory)
for filepath in functor_files:
tempfile = open(tempdir + filepath,'a')
tempfile.close()
def delete_tempfolders(self):
U.remove_dir(tempdir + "/home/pira/")
def test_checker_v1_valid_config(self):
InvocationConfig.create_from_kwargs({'config' : 'test/gol_config.json', 'config_version' : 1})
C.Checker.check_configfile_v1(self.config_v1)
def test_checker_v1_general_valid_config(self):
InvocationConfig.create_from_kwargs({'config' : 'test/gol_config.json', 'config_version' : 1})
C.Checker.check_configfile(self.config_v1)
def test_checker_v1_dirs_missing(self):
InvocationConfig.create_from_kwargs({'config' : 'test/gol_config.json', 'config_version' : 1})
for directory in directories_to_create:
U.remove_dir(tempdir + directory)
with self.assertRaises(PiraConfigErrorException): C.Checker.check_configfile_v1(self.config_v1)
self.create_tempfiles()
def test_checker_v2_valid_config(self):
C.Checker.check_configfile_v2(self.config_v2)
def test_checker_v2_general_valid_config(self):
C.Checker.check_configfile(self.config_v2)
def test_checker_v2_adapter_valid_config(self):
C.Checker.check_configfile_v2(self.config_adapter)
def test_checker_v2_functors_missing(self):
for file in functor_files:
U.remove_file(tempdir + file)
with self.assertRaises(PiraConfigErrorException): C.Checker.check_configfile_v2(self.config_v2)
self.create_tempfiles()
def test_checker_v2_dirs_missing(self):
for directory in directories_to_create:
U.remove_dir(tempdir + directory)
with self.assertRaises(PiraConfigErrorException): C.Checker.check_configfile_v2(self.config_v2)
self.create_tempfiles()
def test_check_basic_config_005(self):
cl = CL.SimplifiedConfigurationLoader()
cfg = cl.load_conf()
C.Checker.check_configfile_v2(cfg)
if __name__ == '__main__':
L.get_logger().set_state('info', False)
unittest.main()
| 5,953 | 38.171053 | 133 | py |
PIRA | PIRA-master/test/unit/ArgumentMappingTest.py | """
File: ArgumentMappingTest.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Tests for the argument mapping
"""
import lib.ArgumentMapping as A
import unittest
class TestPiraArgument(unittest.TestCase):
def setUp(self):
self._arg_name = 'arg1'
self._arg_val = 'val_arg1'
self._file_arg = 'file_val'
self.one_arg_pa = A.PiraArgument(self._arg_name, self._arg_val)
self.file_arg_pa = A.PiraArgument(self._arg_name, self._arg_val, self._file_arg)
def test_arg_indexing(self):
self.assertEqual(self.one_arg_pa[0], self._arg_name)
self.assertEqual(self.one_arg_pa[1], self._arg_val)
self.assertRaises(IndexError, self.one_arg_pa.__getitem__, 2)
def test_file_args(self):
self.assertEqual(self.file_arg_pa[0], self._arg_name)
self.assertEqual(self.file_arg_pa[1], self._file_arg)
self.assertRaises(IndexError, self.file_arg_pa.__getitem__, 2)
def test_arg_as_string(self):
self.assertEqual(str(self.one_arg_pa), 'arg1val_arg1')
self.assertEqual(str(self.file_arg_pa), 'arg1val_arg1')
class TestCmdlineLinearArgumentMapper(unittest.TestCase):
def setUp(self):
self.mapper_as_in_cfg = {'mapper': 'Linear',
'argmap': {
'arg1': ['a', 'b', 'c']
}
}
self.mapper_2_params = {'mapper': 'Linear',
'argmap': {
'arg1': ['a', 'b', 'c'],
'arg2': ['x', 'y', 'z']
}
}
self.mapper_diff_sizes = {'mapper': 'Linear',
'argmap': {
'arg1': ['a', 'b', 'c'],
'arg2': ['x', 'y']
}
}
def test_correct_factory(self):
mapper = A.ArgumentMapperFactory.get_mapper(self.mapper_as_in_cfg)
self.assertIsNotNone(mapper)
self.assertIsInstance(mapper, A.CmdlineLinearArgumentMapper)
def test_arg_mapping(self):
mapper = A.ArgumentMapperFactory.get_mapper(self.mapper_as_in_cfg)
expected = [ ('arg1', 'a'),
('arg1', 'b'),
('arg1', 'c') ]
for (exp, mapped) in zip(expected, mapper):
self.assertEqual(exp, tuple(mapped))
def test_mapper_as_string(self):
mapper = A.ArgumentMapperFactory.get_mapper(self.mapper_as_in_cfg)
expected = 'arg1a.arg1b.arg1c.'
m_str = mapper.as_string()
self.assertEqual(expected, m_str)
def test_diff_sizes_except(self):
self.assertRaises(RuntimeError, A.ArgumentMapperFactory.get_mapper, self.mapper_diff_sizes)
def test_linear_2_params(self):
mapper = A.ArgumentMapperFactory.get_mapper(self.mapper_2_params)
expected = [ ('arg1', 'a', [], 'arg2', 'x', []),
('arg1', 'b', [], 'arg2', 'y', []),
('arg1', 'c', [], 'arg2', 'z', []) ]
for (exp, mapped) in zip(expected, mapper):
self.assertEqual(exp, tuple(mapped))
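# Rough sketch of the linear mapping behavior the expectations above encode
# (an assumption for illustration, not the real CmdlineLinearArgumentMapper,
# which additionally interleaves file-argument lists for multi-parameter maps):
def _linear_map_sketch(argmap):
  names = list(argmap.keys())
  for values in zip(*argmap.values()):
    # emit one flat (name, value, name, value, ...) tuple per position
    yield tuple(x for pair in zip(names, values) for x in pair)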
class TestCmdlineCartesianProductArgumentMapper(unittest.TestCase):
def setUp(self):
self.mapper_as_in_cfg = {'mapper': 'CartesianProduct',
'argmap': {
'arg1': ['a'],
'arg2': ['x']
}
}
self.mapper_2_params = {'mapper': 'Linear',
'argmap': {
'arg1': ['a', 'b'],
'arg2': ['x', 'y']
}
}
self.mapper_diff_sizes = {'mapper': 'Linear',
'argmap': {
'arg1': ['a'],
'arg2': ['x', 'y']
}
}
self.mapper_3_params_cartesian = {'mapper': 'CartesianProduct',
'argmap': {
'arg1': ['a'],
'arg2': ['b'],
'arg3': ['c']
}
}
def test_correct_factory(self):
mapper = A.ArgumentMapperFactory.get_mapper(self.mapper_as_in_cfg)
self.assertIsNotNone(mapper)
self.assertIsInstance(mapper, A.CmdlineCartesianProductArgumentMapper)
def test_arg_mapping(self):
mapper = A.ArgumentMapperFactory.get_mapper(self.mapper_as_in_cfg)
expected = [ ('arg1', 'a', 'arg2', 'x'),
('arg2', 'x', 'arg1', 'a') ]
for (exp, mapped) in zip(expected, mapper):
self.assertEqual(exp, mapped)
@unittest.expectedFailure
def test_arg_mapping_3_params(self):
mapper = A.ArgumentMapperFactory.get_mapper(self.mapper_3_params_cartesian)
expected = [ ('arg1', 'a', 'arg2', 'b', 'arg3', 'c'),
('arg1', 'a', 'arg3', 'c', 'arg2', 'b'),
('arg2', 'b', 'arg1', 'a', 'arg3', 'c'),
('arg2', 'b', 'arg3', 'c', 'arg1', 'a'),
('arg3', 'c', 'arg2', 'b', 'arg1', 'a'),
('arg3', 'c', 'arg1', 'a', 'arg2', 'b') ]
for (exp, mapped) in zip(expected, mapper):
self.assertEqual(exp, mapped)
def test_mapper_as_string(self):
mapper = A.ArgumentMapperFactory.get_mapper(self.mapper_as_in_cfg)
expected = 'arg1a.arg2x.'
m_str = mapper.as_string()
self.assertEqual(expected, m_str)
if __name__ == '__main__':
unittest.main()
| 5,713 | 34.7125 | 126 | py |
PIRA | PIRA-master/test/unit/FunctorManagerTest.py | """
File: FunctorManagerTest.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Tests for the functor management
"""
import lib.ConfigurationLoader as C
from lib.Configuration import InvocationConfig
import lib.FunctorManagement as F
import lib.Configuration as CO
import unittest
class TestFunctorManagerConstruction(unittest.TestCase):
"""
These tests consider the construction and correct return of singleton functor managers
"""
def setUp(self):
InvocationConfig.create_from_kwargs({'config' : './input/unit_input_001.json'})
def test_construction_from_None(self):
F.FunctorManager.instance = None
self.assertIsNone(F.FunctorManager.instance)
self.assertRaises(F.FunctorManagementException, F.FunctorManager, None)
def test_construction_from_empty_config(self):
with self.assertRaises(F.FunctorManagementException):
fm = F.FunctorManager(CO.PiraConfig())
with self.assertRaises(F.FunctorManagementException):
fm = F.FunctorManager(CO.PiraConfigII())
with self.assertRaises(F.FunctorManagementException):
fm = F.FunctorManager(CO.PiraConfigAdapter(CO.PiraConfigII()))
def test_construction_from_config(self):
cfg_loader = C.ConfigurationLoader()
fm = F.FunctorManager(cfg_loader.load_conf())
fm.reset()
def test_construction_from_classmethod(self):
cfg_loader = C.ConfigurationLoader()
fm = F.FunctorManager.from_config(cfg_loader.load_conf())
fm.reset()
def test_construction_is_singleton(self):
F.FunctorManager.instance = None
self.assertIsNone(F.FunctorManager.instance)
cfg_loader = C.ConfigurationLoader()
fm = F.FunctorManager.from_config(cfg_loader.load_conf())
fm2 = F.FunctorManager()
self.assertEqual(fm.instance, fm2.instance)
fm.reset()
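# Assumed shape of the singleton pattern exercised above (illustrative sketch,
# not the actual FunctorManager implementation): a class-level 'instance'
# attribute set on first construction and shared afterwards.
class _SingletonSketch:
  instance = None

  @classmethod
  def from_config(cls, cfg):
    if cls.instance is None:
      cls.instance = cls()
      cls.instance.config = cfg
    return cls.instance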
class TestFunctorManager(unittest.TestCase):
"""
  These tests perform basic queries on the FunctorManager to verify that the
  expected filesystem paths are returned for the different flavors
"""
def setUp(self):
InvocationConfig.create_from_kwargs({'config' : './input/unit_input_001.json'})
self.build = '/home/something/top_dir'
self.b_i_01 = '/builder/item01/directory'
self.ia_i_01 = '/ins_anal/directory/for/functors'
self.r_i_01 = '/path/to/runner_functors/item01'
self.i_01 = 'item01'
self.flavor = 'vanilla'
self.cl = C.ConfigurationLoader()
self.cfg = self.cl.load_conf()
self.fm = F.FunctorManager(self.cfg)
def tearDown(self):
self.fm.reset()
def test_get_clean_functor_filename(self):
expected_file_name = 'clean_item01_vanilla'
cl_func_name = self.fm.get_cleaner_name(self.build, self.i_01, self.flavor)
self.assertEqual(cl_func_name, expected_file_name)
def test_get_clean_functor_wholefile(self):
expected_file_name = 'clean_item01_vanilla.py'
cl_file = self.fm.get_cleaner_file(self.build, self.i_01, self.flavor)
self.assertEqual(cl_file, self.b_i_01 + '/' + expected_file_name)
def test_get_build_functor_filename(self):
expected_file_name = 'item01_vanilla'
# TODO Probably want to refactor that. If all the other functors prepend the respective task
# to their file name, we should do this with the build functor as well.
# However, for the time being just refactor the software design.
cl_func_name = self.fm.get_builder_name(self.build, self.i_01, self.flavor)
self.assertEqual(cl_func_name, expected_file_name)
def test_get_build_functor_wholefile(self):
expected_file_name = 'item01_vanilla.py'
cl_file = self.fm.get_builder_file(self.build, self.i_01, self.flavor)
self.assertEqual(cl_file, self.b_i_01 + '/' + expected_file_name)
def test_get_analyze_functor_filename(self):
expected_file_name = 'analyze_item01_vanilla'
cl_func_name = self.fm.get_analyzer_name(self.build, self.i_01, self.flavor)
self.assertEqual(cl_func_name, expected_file_name)
def test_get_analyze_functor_wholefile(self):
expected_file_name = 'analyze_item01_vanilla.py'
cl_file = self.fm.get_analyzer_file(self.build, self.i_01, self.flavor)
self.assertEqual(cl_file, self.ia_i_01 + '/' + expected_file_name)
def test_get_run_functor_filename(self):
expected_file_name = 'runner_item01_vanilla'
# TODO Refactor from 'runner' to 'run' to comply with the overall design
# and prepend-schema.
cl_func_name = self.fm.get_runner_name(self.build, self.i_01, self.flavor)
self.assertEqual(cl_func_name, expected_file_name)
def test_get_run_functor_wholefile(self):
expected_file_name = 'runner_item01_vanilla.py'
cl_file = self.fm.get_runner_file(self.build, self.i_01, self.flavor)
self.assertEqual(cl_file, self.r_i_01 + '/' + expected_file_name)
def test_get_builder(self):
expected_file_name = 'item01_vanilla'
path, name, whole_nm = self.fm.get_builder(self.build, self.i_01, self.flavor)
self.assertEqual(path, self.b_i_01)
self.assertEqual(name, expected_file_name)
self.assertEqual(whole_nm, self.b_i_01 + '/' + expected_file_name + '.py')
def test_get_cleaner(self):
expected_file_name = 'clean_item01_vanilla'
path, name, whole_nm = self.fm.get_cleaner(self.build, self.i_01, self.flavor)
self.assertEqual(path, self.b_i_01)
self.assertEqual(name, expected_file_name)
self.assertEqual(whole_nm, self.b_i_01 + '/' + expected_file_name + '.py')
def test_get_runner(self):
expected_file_name = 'runner_item01_vanilla'
path, name, whole_nm = self.fm.get_runner(self.build, self.i_01, self.flavor)
self.assertEqual(path, self.r_i_01)
self.assertEqual(name, expected_file_name)
self.assertEqual(whole_nm, self.r_i_01 + '/' + expected_file_name + '.py')
def test_get_analyzer(self):
expected_file_name = 'analyze_item01_vanilla'
path, name, whole_nm = self.fm.get_analyzer(self.build, self.i_01, self.flavor)
self.assertEqual(path, self.ia_i_01)
self.assertEqual(name, expected_file_name)
self.assertEqual(whole_nm, self.ia_i_01 + '/' + expected_file_name + '.py')
class FunctorManagerFromConfig(unittest.TestCase):
def setUp(self):
self.base_path = '../inputs/configs'
self.cfg001 = 'basic_config_001.json'
self.cfg002 = 'basic_config_002.json'
self.cfg003 = 'basic_config_003.json'
self.cfg004 = 'basic_config_004.json'
self.scl = C.SimplifiedConfigurationLoader()
self.fm = None
def tearDown(self):
self.fm.reset()
def get_filename(self, filename):
return self.base_path + '/' + filename
@unittest.skip('This requires the correct implementation of PiraConfiguration.is_valid()')
def test_get_invalid_path(self):
InvocationConfig.create_from_kwargs({'config' : self.get_filename(self.cfg003)})
self.assertRaises(CO.PiraConfigErrorException, F.FunctorManager, self.scl.load_conf())
def test_get_valid_path(self):
InvocationConfig.create_from_kwargs({'config': self.get_filename(self.cfg004)})
self.fm = F.FunctorManager(self.scl.load_conf())
def test_get_runner_functor(self):
InvocationConfig.create_from_kwargs({'config': self.get_filename(self.cfg004)})
self.fm = F.FunctorManager(self.scl.load_conf())
def test_get_builder_functor(self):
InvocationConfig.create_from_kwargs({'config': self.get_filename(self.cfg004)})
self.fm = F.FunctorManager(self.scl.load_conf())
def test_get_analyzer_functor(self):
InvocationConfig.create_from_kwargs({'config': self.get_filename(self.cfg004)})
self.fm = F.FunctorManager(self.scl.load_conf())
def test_get_builder_command(self):
InvocationConfig.create_from_kwargs({'config': self.get_filename(self.cfg004)})
self.fm = F.FunctorManager(self.scl.load_conf())
def test_get_runner_command(self):
InvocationConfig.create_from_kwargs({'config': self.get_filename(self.cfg004)})
self.fm = F.FunctorManager(self.scl.load_conf())
def test_get_analyzer_command(self):
InvocationConfig.create_from_kwargs({'config': self.get_filename(self.cfg004)})
self.fm = F.FunctorManager(self.scl.load_conf())
if __name__ == "__main__":
unittest.main()
| 8,195 | 38.786408 | 126 | py |
PIRA | PIRA-master/test/unit/BatchSystemGeneratorTest.py | """
File: BatchSystemGeneratorTest.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Tests for the batch system generator module.
"""
import unittest
from lib.BatchSystemGenerator import *
from lib.BatchSystemGenerator import _Module
class ModuleTests(unittest.TestCase):
"""
Testcases for the Module class of slurm configuration module.
"""
def setUp(self) -> None:
self.m1 = _Module(name="gcc", version="10.2")
self.m2 = _Module(name="make", version="10.2")
self.m3 = _Module(name="cmake", version="10.2")
self.m4 = _Module(name="llvm", version="10.2")
self.m5 = _Module(name="cuda", version="10.2", depends_on=["gcc/10.2"])
self.m6 = _Module(name="openmpi", version="10.2")
self.slurm_config = SlurmConfig(uses_module_system=True)
self.gen = SlurmGenerator(self.slurm_config)
def tearDown(self) -> None:
del self.m1
del self.m2
del self.m3
del self.m4
del self.m5
del self.m6
del self.slurm_config
del self.gen
def test_module_dependency1(self):
self.gen.add_module("gcc", depends_on=["cuda"])
with self.assertRaises(RuntimeError):
with self.assertRaises(ModuleDependencyConflict):
# should assert because there is a circular dependency (cuda <-> gcc)
self.gen.add_module("cuda", depends_on=["gcc"])
def test_module_dependency2(self):
self.gen.add_module("gcc", version="10.2", depends_on=["cuda/11.2"])
with self.assertRaises(RuntimeError):
with self.assertRaises(ModuleDependencyConflict):
# should assert because there is a circular dependency (cuda <-> gcc <-> cmake)
self.gen.add_module("cuda", version="11.2", depends_on=["gcc/10.2"])
def test_module_order1(self):
self.gen.add_module("cuda", depends_on=["gcc"])
self.gen.add_module("gcc")
res = self.gen.sbatch(active=False, load_modules=True)
self.assertIn("module load gcc; module load cuda", res)
def test_module_order2(self):
self.gen.add_module("cuda", version="10.2", depends_on=["gcc/11.2"])
self.gen.add_module("gcc", version="11.2")
res = self.gen.sbatch(active=False, load_modules=True)
self.assertIn("module load gcc/11.2; module load cuda/10.2", res)
def test_module_order3(self):
    # Unlike the pairwise dependency check, this can detect circular imports
    # among more than two modules. It tries to sort the modules so that they do
    # not conflict with each other; if that fails, a circular import is likely.
self.gen.add_module("cuda", depends_on=["gcc"])
self.gen.add_module("gcc", depends_on=["cmake"])
self.gen.add_module("cmake", depends_on=["cuda"])
with self.assertRaises(ModuleDependencyConflict):
self.gen.sbatch(active=False, load_modules=True)
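# Illustrative sketch of the dependency sorting these tests rely on (assumed
# algorithm, not the actual SlurmGenerator code): repeatedly emit modules whose
# dependencies were already emitted; if a full pass makes no progress, the
# dependencies are circular and no conflict-free order exists.
def _toposort_modules_sketch(modules):
  # modules: dict mapping module name -> list of dependency names
  ordered, remaining = [], dict(modules)
  while remaining:
    ready = [m for m, deps in remaining.items() if all(d in ordered for d in deps)]
    if not ready:
      raise RuntimeError('Modules could not be sorted in a way they do not conflict')
    for m in ready:
      ordered.append(m)
      del remaining[m]
  return ordered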
class BatchSystemGeneratorTests(unittest.TestCase):
"""
Testcases for the slurm configuration class of the slurm configuration module.
"""
def __init__(
self,
*args,
**kwargs
):
self.test_file = "test.sh"
self.slurm_script_file = "exampleslurmscript.sh"
self.job_name = "examplejobname"
self.std_out_path = "./examplejoboutput/out"
self.std_err_path = "./examplejoboutput/err"
self.time_str = "00:10:00"
self.in_minutes = 10
self.mem_per_cpu = 3800
self.number_of_tasks = 1
self.number_of_cores_per_task = 1
self.cpu_frequency_str = "High-High"
self.partition = "pira"
self.reservation = "pira"
self.account = "pira"
self.job_array_start = 1
self.job_array_end = 5
self.job_array_step = 1
self.exclusive = True
self.wait = False
self.dependencies = None
self.mail_types = [MailType.FAIL, MailType.BEGIN]
self.mail_address = "test@test.com"
self.uses_module_system = True
self.shell = "/bin/bash"
self.purge_modules_at_start = True
self.force_sequential = False
    # each entry should be a dict of {"name": name, "version": "10.2", "depends_on": [{"name": ..., "version": ...}]}
    # ("version" and "depends_on" are optional)
self.modules = []
# list of strings
self.commands = []
# expected module sorting order
self.expected_module_order = []
# whether it is expected this raises a "ModuleDependencyConflict: Modules could not be sorted in a way
# they do not conflict each other"
self.expected_module_error = False
super().__init__(*args, **kwargs)
def setUp(self) -> None:
self.slurm_config = SlurmConfig(
slurm_script_file=self.slurm_script_file,
job_name=self.job_name,
std_out_path=self.std_out_path,
std_err_path=self.std_err_path,
time_str=self.time_str,
mem_per_cpu=self.mem_per_cpu,
number_of_tasks=self.number_of_tasks,
number_of_cores_per_task=self.number_of_cores_per_task,
cpu_frequency_str=self.cpu_frequency_str,
partition=self.partition,
reservation=self.reservation,
account=self.account,
job_array_start=self.job_array_start,
job_array_end=self.job_array_end,
job_array_step=self.job_array_step,
exclusive=self.exclusive,
wait=self.wait,
dependencies=self.dependencies,
mail_types=self.mail_types,
mail_address=self.mail_address,
uses_module_system=self.uses_module_system,
shell=self.shell,
purge_modules_at_start=self.purge_modules_at_start,
modules=self.modules,
force_sequential=self.force_sequential
)
self.gen = SlurmGenerator(self.slurm_config)
# adding modules
for m in self.modules:
version = None
if "version" in m:
version = m["version"]
dependencies = None
if "depends_on" in m:
dependencies = []
for d in m["depends_on"]:
dep = d["name"]
if "version" in d:
dep = dep + "/" + d["version"]
dependencies.append(dep)
self.gen.add_module(m["name"], version, dependencies)
# adding commands
for c in self.commands:
self.gen.add_command(c)
def tearDown(self) -> None:
del self.slurm_config
del self.gen
# remove test script again
U.remove_file("test.sh")
def test_write_slurm_script(self):
# test target - produces file
self.gen.write_slurm_script(self.test_file, load_modules=True)
# asserts
with open(self.test_file, "r") as f:
content = f.readlines()
self.assertIn(f"#SBATCH --job-name={self.job_name}\n", content)
if self.job_array_start is not None and self.job_array_end is not None:
self.assertIn(f"#SBATCH --output={self.std_out_path}.%A_%a\n", content)
self.assertIn(f"#SBATCH --error={self.std_err_path}.%A_%a\n", content)
else:
self.assertIn(f"#SBATCH --output={self.std_out_path}.%j\n", content)
self.assertIn(f"#SBATCH --error={self.std_err_path}.%j\n", content)
self.assertIn(f"#SBATCH --time={self.time_str}\n", content)
self.assertIn(f"#SBATCH --mem-per-cpu={self.mem_per_cpu}\n", content)
self.assertIn(f"#SBATCH --ntasks={self.number_of_tasks}\n", content)
self.assertIn(f"#SBATCH --cpus-per-task={self.number_of_cores_per_task}\n", content)
self.assertIn(f"#SBATCH --cpu-freq={self.cpu_frequency_str}\n", content)
self.assertIn(f"#SBATCH --partition={self.partition}\n", content)
self.assertIn(f"#SBATCH --reservation={self.reservation}\n", content)
self.assertIn(f"#SBATCH --account={self.account}\n", content)
if self.job_array_start is not None and self.job_array_end is not None:
self.assertIn(f"#SBATCH --array={self.job_array_start}-{self.job_array_end}:{self.job_array_step}"
f"{'%1' if self.force_sequential else ''}\n", content)
if self.exclusive:
self.assertIn(f"#SBATCH --exclusive\n", content)
else:
self.assertNotIn(f"#SBATCH --exclusive\n", content)
if not self.dependencies:
self.assertNotIn(f"#SBATCH --dependency={self.dependencies}\n", content)
else:
self.assertIn(f"#SBATCH --dependency={self.dependencies}\n", content)
mts = ",".join(self.mail_types)
self.assertIn(f"#SBATCH --mail-type={mts}\n", content)
self.assertIn(f"#SBATCH --mail-user={self.mail_address}\n", content)
self.assertIn(f"#!{self.shell}\n", content)
if self.uses_module_system and self.purge_modules_at_start:
self.assertIn(f"module purge\n", content)
# modules
for m in self.modules:
mod = m["name"]
if "version" in m:
mod = mod + "/" + m["version"]
self.assertIn(f"module load {mod}\n", content)
# commands
for c in self.commands:
self.assertIn(f"{c}\n", content)
def test_sbatch_passive_no_script(self):
# test target
result = self.gen.sbatch(active=False, load_modules=True)
# asserts
self.assertIn("sbatch", result)
self.assertIn(f"--job-name={self.job_name}", result)
if self.job_array_start is not None and self.job_array_end is not None:
self.assertIn(f"--output={self.std_out_path}.%A_%a", result)
self.assertIn(f"--error={self.std_err_path}.%A_%a", result)
else:
self.assertIn(f"--output={self.std_out_path}.%j", result)
self.assertIn(f"--error={self.std_err_path}.%j", result)
self.assertIn(f"--time={self.time_str}", result)
self.assertIn(f"--mem-per-cpu={self.mem_per_cpu}", result)
self.assertIn(f"--ntasks={self.number_of_tasks}", result)
self.assertIn(f"--cpus-per-task={self.number_of_cores_per_task}", result)
self.assertIn(f"--cpu-freq={self.cpu_frequency_str}", result)
self.assertIn(f"--partition={self.partition}", result)
self.assertIn(f"--reservation={self.reservation}", result)
self.assertIn(f"--account={self.account}", result)
if self.job_array_start is not None and self.job_array_end is not None:
self.assertIn(f"--array={self.job_array_start}-{self.job_array_end}:{self.job_array_step}"
f"{'%1' if self.force_sequential else ''}",
result)
if self.exclusive:
self.assertIn(f"--exclusive", result)
else:
self.assertNotIn(f"--exclusive", result)
if not self.dependencies:
self.assertNotIn(f"--dependency={self.dependencies}", result)
else:
self.assertIn(f"--dependency={self.dependencies}", result)
mts = ",".join(self.mail_types)
self.assertIn(f"--mail-type={mts}", result)
self.assertIn(f"--mail-user={self.mail_address}", result)
if self.uses_module_system and self.purge_modules_at_start:
self.assertIn(f"module purge", result)
# modules
for m in self.modules:
mod = m["name"]
if "version" in m:
mod = mod + "/" + m["version"]
self.assertIn(f"module load {mod};", result)
# commands
for c in self.commands:
if c == self.commands[-1]:
# for last command: without semicolon
self.assertIn(f"{c}", result)
else:
self.assertIn(f"{c};", result)
def test_sbatch_passive_script(self):
# test target
result = self.gen.sbatch(script_path=self.test_file, active=False, load_modules=True)
# asserts
self.assertEqual(f"sbatch {self.test_file}", result)
with open(self.test_file, "r") as f:
content = f.readlines()
self.assertIn(f"#SBATCH --job-name={self.job_name}\n", content)
if self.job_array_start is not None and self.job_array_end is not None:
self.assertIn(f"#SBATCH --output={self.std_out_path}.%A_%a\n", content)
self.assertIn(f"#SBATCH --error={self.std_err_path}.%A_%a\n", content)
else:
self.assertIn(f"#SBATCH --output={self.std_out_path}.%j\n", content)
self.assertIn(f"#SBATCH --error={self.std_err_path}.%j\n", content)
self.assertIn(f"#SBATCH --time={self.time_str}\n", content)
self.assertIn(f"#SBATCH --mem-per-cpu={self.mem_per_cpu}\n", content)
self.assertIn(f"#SBATCH --ntasks={self.number_of_tasks}\n", content)
self.assertIn(f"#SBATCH --cpus-per-task={self.number_of_cores_per_task}\n", content)
self.assertIn(f"#SBATCH --cpu-freq={self.cpu_frequency_str}\n", content)
self.assertIn(f"#SBATCH --partition={self.partition}\n", content)
self.assertIn(f"#SBATCH --reservation={self.reservation}\n", content)
self.assertIn(f"#SBATCH --account={self.account}\n", content)
if self.job_array_start is not None and self.job_array_end is not None:
self.assertIn(f"#SBATCH --array={self.job_array_start}-{self.job_array_end}:{self.job_array_step}"
f"{'%1' if self.force_sequential else ''}\n", content)
if self.exclusive:
self.assertIn(f"#SBATCH --exclusive\n", content)
else:
self.assertNotIn(f"#SBATCH --exclusive\n", content)
if not self.dependencies:
self.assertNotIn(f"#SBATCH --dependency={self.dependencies}\n", content)
else:
self.assertIn(f"#SBATCH --dependency={self.dependencies}\n", content)
mts = ",".join(self.mail_types)
self.assertIn(f"#SBATCH --mail-type={mts}\n", content)
self.assertIn(f"#SBATCH --mail-user={self.mail_address}\n", content)
self.assertIn(f"#!{self.shell}\n", content)
if self.uses_module_system and self.purge_modules_at_start:
self.assertIn(f"module purge\n", content)
# modules
for m in self.modules:
mod = m["name"]
if "version" in m:
mod = mod + "/" + m["version"]
self.assertIn(f"module load {mod}\n", content)
# commands
for c in self.commands:
self.assertIn(f"{c}\n", content)
os.remove(self.test_file)
def test_module_order_slurm_script(self):
# target
self.gen.write_slurm_script(self.test_file, load_modules=True)
# asserts
with open(self.test_file, "r") as f:
module_lines = [line for line in f.readlines() if "module" in line]
if self.uses_module_system:
if self.purge_modules_at_start:
self.assertEqual("module purge\n", module_lines[0])
module_lines = module_lines[1:]
for i in range(len(module_lines)):
self.assertEqual(f"module load {self.expected_module_order[i]}\n", module_lines[i])
else:
self.assertEqual(module_lines, [])
def test_module_order_sbatch_passive_script(self):
if self.expected_module_error:
with self.assertRaises(ModuleDependencyConflict):
self.gen.sbatch(script_path=self.test_file, active=False, load_modules=True)
else:
result = self.gen.sbatch(script_path=self.test_file, active=False, load_modules=True)
# assert
self.assertEqual(f"sbatch {self.test_file}", result)
with open(self.test_file, "r") as f:
module_lines = [line for line in f.readlines() if "module" in line]
if self.uses_module_system:
if self.purge_modules_at_start:
self.assertEqual("module purge\n", module_lines[0])
module_lines = module_lines[1:]
for i in range(len(module_lines)):
self.assertEqual(f"module load {self.expected_module_order[i]}\n", module_lines[i])
else:
self.assertEqual(module_lines, [])
def test_module_order_sbatch_passive_no_script(self):
if self.expected_module_error:
with self.assertRaises(ModuleDependencyConflict):
self.gen.sbatch(active=False, load_modules=True)
else:
result = self.gen.sbatch(active=False, load_modules=True)
# asserts
self.assertIn("sbatch", result)
module_lines = [m.strip() for m in result.split(";") if "module" in m]
if self.uses_module_system:
module_lines[0] = "module" + module_lines[0].split("module")[1].rstrip()
if self.purge_modules_at_start:
self.assertEqual("module purge", module_lines[0])
module_lines = module_lines[1:]
for i in range(len(module_lines)):
self.assertEqual(f"module load {self.expected_module_order[i]}", module_lines[i])
else:
self.assertEqual(module_lines, [])
def test_add_modules(self):
"""
    setUp already uses add_module; this tests the add_modules wrapper.
"""
self.gen.clear_modules()
self.gen.add_modules()
for mod_is, mod_exp in zip(self.gen.modules, self.modules):
self.assertEqual(mod_is.name, mod_exp["name"])
if "version" in mod_exp:
self.assertEqual(mod_is.version, mod_exp["version"])
if "depends_on" in mod_exp:
for dep_is, dep_exp in zip(mod_is.depends_on, mod_exp["depends_on"]):
if "version" in dep_exp:
self.assertEqual(dep_is, f"{dep_exp['name']}/{dep_exp['version']}")
else:
self.assertEqual(dep_is, dep_exp['name'])
def test_clear_modules(self):
self.gen.clear_modules()
self.assertListEqual(self.gen.modules, [])
def test_clear_commands(self):
self.gen.clear_commands()
self.assertListEqual(self.gen.commands, [])
def test_to_slurm_args(self):
self.gen.to_slurm_options()
if self.account:
self.assertEqual(self.account, self.gen.slurm_options["--account"])
if self.reservation:
self.assertEqual(self.reservation, self.gen.slurm_options["--reservation"])
if self.partition:
self.assertEqual(self.partition, self.gen.slurm_options["--partition"])
if self.job_name:
self.assertEqual(self.job_name, self.gen.slurm_options["--job-name"])
if self.std_out_path:
if self.job_array_start is not None and self.job_array_end is not None:
self.assertEqual(self.gen.slurm_options["--output"], f"{self.std_out_path}.%A_%a")
else:
self.assertEqual(self.gen.slurm_options["--output"], f"{self.std_out_path}.%j")
if self.std_err_path:
if self.job_array_start is not None and self.job_array_end is not None:
self.assertEqual(self.gen.slurm_options["--error"], f"{self.std_err_path}.%A_%a")
else:
self.assertEqual(self.gen.slurm_options["--error"], f"{self.std_err_path}.%j")
if self.time_str:
self.assertEqual(self.gen.slurm_options["--time"], self.time_str)
if self.mem_per_cpu:
self.assertEqual(self.gen.slurm_options["--mem-per-cpu"], self.mem_per_cpu)
if self.number_of_tasks:
self.assertEqual(self.gen.slurm_options["--ntasks"], self.number_of_tasks)
if self.number_of_cores_per_task:
self.assertEqual(self.gen.slurm_options["--cpus-per-task"], self.number_of_cores_per_task)
if self.exclusive:
self.assertEqual(self.gen.slurm_options["--exclusive"], None)
if self.wait:
self.assertEqual(self.gen.slurm_options["--wait"], None)
if self.job_array_start is not None and self.job_array_end is not None:
self.assertEqual(self.gen.slurm_options["--array"], f"{self.job_array_start}-{self.job_array_end}:"
f"{self.job_array_step}"
f"{'%1' if self.force_sequential else ''}")
if self.cpu_frequency_str:
self.assertEqual(self.gen.slurm_options["--cpu-freq"], self.cpu_frequency_str)
if self.dependencies:
self.assertEqual(self.gen.slurm_options["--dependency"], self.dependencies)
if self.mail_address:
self.assertEqual(self.gen.slurm_options["--mail-user"], self.mail_address)
if self.mail_types:
self.assertEqual(self.gen.slurm_options["--mail-type"], ",".join(self.mail_types))
def test_to_arg_strings(self):
res = self.gen.to_arg_strings()
if self.account:
self.assertIn(f"--account={self.account}", res)
if self.reservation:
self.assertIn(f"--reservation={self.reservation}", res)
if self.partition:
self.assertIn(f"--partition={self.partition}", res)
if self.job_name:
self.assertIn(f"--job-name={self.job_name}", res)
if self.std_out_path:
if self.job_array_start is not None and self.job_array_end is not None:
self.assertIn(f"--output={self.std_out_path}.%A_%a", res)
else:
self.assertIn(f"--output={self.std_out_path}.%j", res)
if self.std_err_path:
if self.job_array_start is not None and self.job_array_end is not None:
self.assertIn(f"--error={self.std_err_path}.%A_%a", res)
else:
self.assertIn(f"--error={self.std_err_path}.%j", res)
if self.time_str:
self.assertIn(f"--time={self.time_str}", res)
if self.mem_per_cpu:
self.assertIn(f"--mem-per-cpu={self.mem_per_cpu}", res)
if self.number_of_tasks:
self.assertIn(f"--ntasks={self.number_of_tasks}", res)
if self.number_of_cores_per_task:
self.assertIn(f"--cpus-per-task={self.number_of_cores_per_task}", res)
if self.exclusive:
self.assertIn(f"--exclusive", res)
if self.wait:
self.assertIn(f"--wait", res)
if self.job_array_start is not None and self.job_array_end is not None:
self.assertIn(f"--array={self.job_array_start}-{self.job_array_end}:{self.job_array_step}"
f"{'%1' if self.force_sequential else ''}", res)
if self.cpu_frequency_str:
self.assertIn(f"--cpu-freq={self.cpu_frequency_str}", res)
if self.dependencies:
self.assertIn(f"--dependency={self.dependencies}", res)
if self.mail_address:
self.assertIn(f"--mail-user={self.mail_address}", res)
if self.mail_types:
self.assertIn(f"--mail-type={','.join(self.mail_types)}", res)
def test_get_pyslurm_args(self):
"""
Test for get_pyslurm_args.
"""
res = self.gen.get_pyslurm_args()
if self.account:
# "|" is the union operator: So we add account to res
# which will change nothing if it is already there.
# So this test will pass, if the account is in res,
# fail otherwise (cause it will add account and res will
# be more then the res dict and not equal)
self.assertEqual(res, res | {"account": self.account})
if self.reservation:
self.assertEqual(res, res | {"reservation": self.reservation})
if self.partition:
self.assertEqual(res, res | {"partition": self.partition})
if self.job_name:
self.assertEqual(res, res | {"job_name": self.job_name})
if self.std_out_path:
if self.job_array_start is not None and self.job_array_end is not None:
self.assertEqual(res, res | {"output": f"{self.std_out_path}.%A_%a"})
else:
self.assertEqual(res, res | {"output": f"{self.std_out_path}.%j"})
if self.std_err_path:
if self.job_array_start is not None and self.job_array_end is not None:
self.assertEqual(res, res | {"error": f"{self.std_err_path}.%A_%a"})
else:
self.assertEqual(res, res | {"error": f"{self.std_err_path}.%j"})
if self.time_str:
# first real pyslurm difference: time -> time limit
self.assertEqual(res, res | {"time_limit": self.in_minutes})
if self.mem_per_cpu:
self.assertEqual(res, res | {"mem_per_cpu": self.mem_per_cpu})
if self.number_of_tasks:
self.assertEqual(res, res | {"ntasks": self.number_of_tasks})
if self.number_of_cores_per_task:
self.assertEqual(res, res | {"cpus_per_task": self.number_of_cores_per_task})
if self.exclusive:
# exclusive should not be in it
self.assertNotEqual(res, res | {"exclusive": None})
if self.wait:
# wait should not be in pyslurm args
self.assertNotEqual(res, res | {"wait": True})
if self.job_array_start is not None and self.job_array_end is not None:
# second difference: array -> array_inx
inx = []
for i in range(int(self.job_array_start), int(self.job_array_end)+1, int(self.job_array_step)):
inx.append(str(i))
inx = ",".join(inx)
self.assertEqual(res, res | {"array_inx": inx})
if self.cpu_frequency_str:
self.assertEqual(res, res | {"cpu_freq_min": self.cpu_frequency_str.split("-")[0]})
self.assertEqual(res, res | {"cpu_freq_max": self.cpu_frequency_str.split("-")[1]})
if self.dependencies:
self.assertEqual(res, res | {"dependency": self.dependencies})
if self.mail_address:
self.assertEqual(res, res | {"mail-user": self.mail_address})
if self.mail_types:
self.assertEqual(res, res | {"mail-type": ','.join(self.mail_types)})
if self.commands:
self.assertEqual(res, res | {"wrap": ";".join(self.commands)})
def test_get_slurm_cmd_line_args(self):
"""
    Test for get_slurm_cmd_line_args. Same checks as test_to_arg_strings,
    but assertIn searches within a string instead of a list.
"""
res = self.gen.get_slurm_cmd_line_args()
if self.account:
self.assertIn(f"--account={self.account}", res)
if self.reservation:
self.assertIn(f"--reservation={self.reservation}", res)
if self.partition:
self.assertIn(f"--partition={self.partition}", res)
if self.job_name:
self.assertIn(f"--job-name={self.job_name}", res)
if self.std_out_path:
if self.job_array_start is not None and self.job_array_end is not None:
self.assertIn(f"--output={self.std_out_path}.%A_%a", res)
else:
self.assertIn(f"--output={self.std_out_path}.%j", res)
if self.std_err_path:
if self.job_array_start is not None and self.job_array_end is not None:
self.assertIn(f"--error={self.std_err_path}.%A_%a", res)
else:
self.assertIn(f"--error={self.std_err_path}.%j", res)
if self.time_str:
self.assertIn(f"--time={self.time_str}", res)
if self.mem_per_cpu:
self.assertIn(f"--mem-per-cpu={self.mem_per_cpu}", res)
if self.number_of_tasks:
self.assertIn(f"--ntasks={self.number_of_tasks}", res)
if self.number_of_cores_per_task:
self.assertIn(f"--cpus-per-task={self.number_of_cores_per_task}", res)
if self.exclusive:
self.assertIn(f"--exclusive", res)
if self.wait:
self.assertIn(f"--wait", res)
if self.job_array_start is not None and self.job_array_end is not None:
self.assertIn(f"--array={self.job_array_start}-{self.job_array_end}:{self.job_array_step}"
f"{'%1' if self.force_sequential else ''}", res)
if self.cpu_frequency_str:
self.assertIn(f"--cpu-freq={self.cpu_frequency_str}", res)
if self.dependencies:
self.assertIn(f"--dependency={self.dependencies}", res)
if self.mail_address:
self.assertIn(f"--mail-user={self.mail_address}", res)
if self.mail_types:
self.assertIn(f"--mail-type={','.join(self.mail_types)}", res)
# Derive the class above into multiple SLURM config variants so that all tests run against each
class BatchSystemGeneratorTestsMinimal(BatchSystemGeneratorTests):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# no module system
self.uses_module_system = False
self.purge_modules_at_start = False
self.modules = []
self.expected_module_order = []
self.expected_module_error = False
# no jobarray
self.job_array_start = None
self.job_array_end = None
self.job_array_step = None
# no commands
self.commands = []
class BatchSystemGeneratorTestsMaximal(BatchSystemGeneratorTests):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# another shell
self.shell = "/bin/fish"
# other user detail (with some special chars)
self.partition = "!§$%&/()="
self.reservation = "VJTZg mg fjt&/"
self.account = "234567890"
self.wait = True
# other mail details
self.mail_address = "complicatedtest§$%&/()@mail/(/(&&%%%.de"
self.mail_types = [MailType.BEGIN, MailType.END, MailType.ALL, MailType.FAIL, MailType.NONE]
# other hardware settings
self.number_of_tasks = 96
self.number_of_cores_per_task = 96
self.mem_per_cpu = 4321
self.cpu_frequency_str = "Medium-Medium"
self.time_str = "00:59:00"
self.in_minutes = 59
self.exclusive = True
self.dependencies = "afterok:123434"
# other out and err paths
self.std_out_path = "/home/piratest/piratest/a/b12/78-98/test.out"
self.std_err_path = "/home/piratest/piratest/t/c21/98-12/test.err"
# module system
self.uses_module_system = True
self.purge_modules_at_start = True
self.use_set_u = True
self.modules = [
{"name": "gcc"},
{"name": "make", "version": "9.7", "depends_on": [{"name": "cuda", "version": "10.2"}]},
{"name": "cmake"},
{"name": "cuda", "version": "10.2", "depends_on": [{"name": "gcc"}]},
]
self.expected_module_order = ["gcc", "cmake", "cuda/10.2", "make/9.7"]
self.expected_module_error = False
# commands
self.commands = [
"a", "b", "c", "aslkdjalkdj", "12334576876", "§$%&/()(/&", "/usr/bin/time"
]
self.job_array_start = 1
self.job_array_end = 5
self.job_array_step = 1
self.force_sequential = True
class BatchSystemGeneratorTestsModuleOrder1(BatchSystemGeneratorTests):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# module system
self.uses_module_system = False
self.purge_modules_at_start = True
self.modules = []
self.expected_module_order = []
self.expected_module_error = False
    # everything else: minimal
self.job_array_start = None
self.job_array_end = None
self.job_array_step = None
self.commands = []
class BatchSystemGeneratorTestsModuleOrder2(BatchSystemGeneratorTests):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# module system
self.uses_module_system = True
self.purge_modules_at_start = True
self.modules = []
self.expected_module_order = []
self.expected_module_error = False
    # everything else: minimal
self.job_array_start = None
self.job_array_end = None
self.job_array_step = None
self.commands = []
class BatchSystemGeneratorTestsModuleOrder3(BatchSystemGeneratorTests):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# module system
self.uses_module_system = True
self.purge_modules_at_start = False
self.modules = [
{"name": "gcc"},
{"name": "cuda", "version": "10.2", "depends_on": [{"name": "gcc"}]},
{"name": "cmake"},
{"name": "make", "version": "9.7", "depends_on": [{"name": "cuda"}]}
]
self.expected_module_order = ["gcc", "cmake", "cuda/10.2", "make/9.7"]
self.expected_module_error = False
    # everything else: minimal
self.job_array_start = None
self.job_array_end = None
self.job_array_step = None
self.commands = []
class BatchSystemGeneratorTestsJobarray(BatchSystemGeneratorTests):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# module system: minimal
self.uses_module_system = False
self.purge_modules_at_start = False
self.modules = []
self.expected_module_order = []
self.expected_module_error = False
# job array:
self.job_array_start = 0
self.job_array_end = 10
self.job_array_step = 1
# commands minimal
self.commands = []
class BatchSystemGeneratorTestsJobarray2(BatchSystemGeneratorTests):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# module system: minimal
self.uses_module_system = False
self.purge_modules_at_start = False
self.modules = []
self.expected_module_order = []
self.expected_module_error = False
# job array:
self.job_array_start = 0
self.job_array_end = 10
self.job_array_step = 2
# commands minimal
self.commands = []
class BatchSystemGeneratorTestsPySlurmArgs(BatchSystemGeneratorTests):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# set time and in_minutes to test the get_pyslurm_args time conversion
self.time_str = "42"
self.in_minutes = 42
class BatchSystemGeneratorTestsPySlurmArgs2(BatchSystemGeneratorTests):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# set time and in_minutes to test the get_pyslurm_args time conversion
self.time_str = "42:42"
self.in_minutes = 43
class BatchSystemGeneratorTestsPySlurmArgs3(BatchSystemGeneratorTests):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# set time and in_minutes to test the get_pyslurm_args time conversion
self.time_str = "42:12:42"
self.in_minutes = 2533
class BatchSystemGeneratorTestsPySlurmArgs4(BatchSystemGeneratorTests):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# set time and in_minutes to test the get_pyslurm_args time conversion
self.time_str = "2-12"
self.in_minutes = 3600
class BatchSystemGeneratorTestsPySlurmArgs5(BatchSystemGeneratorTests):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# set time and in_minutes to test the get_pyslurm_args time conversion
self.time_str = "1-09:07"
self.in_minutes = 1987
class BatchSystemGeneratorTestsPySlurmArgs6(BatchSystemGeneratorTests):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# set time and in_minutes to test the get_pyslurm_args time conversion
self.time_str = "1-9:7:51"
self.in_minutes = 1988
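# Sketch of the time-string-to-minutes conversion the expectations above encode
# (assumed logic following the sbatch time formats M, M:S, H:M:S, D-H, D-H:M and
# D-H:M:S, with seconds rounded up; not the actual generator code):
import math

def _slurm_time_to_minutes_sketch(time_str):
  days, h, m, s = 0, 0, 0, 0
  if '-' in time_str:
    day_part, _, time_str = time_str.partition('-')
    days = int(day_part)
    h, m, s = ([int(p) for p in time_str.split(':')] + [0, 0])[:3]
  else:
    parts = [int(p) for p in time_str.split(':')]
    if len(parts) == 3:
      h, m, s = parts
    else:
      m, s = (parts + [0])[:2]
  return days * 1440 + h * 60 + m + math.ceil(s / 60)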
| 33,153 | 40.236318 | 126 | py |
PIRA | PIRA-master/test/unit/BuilderTest.py | """
File: BuilderTest.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Unit test for the Builder class.
"""
import unittest
import lib.Builder as B
import lib.Configuration as C
import lib.ConfigurationLoader as CO
class BuilderTest(unittest.TestCase):
def setUp(self):
C.InvocationConfig.create_from_kwargs({'config' : '../inputs/configs/basic_config_005.json'})
ld = CO.SimplifiedConfigurationLoader()
self.cfg = ld.load_conf()
def test_init(self):
tc = C.TargetConfig(self.cfg.get_place('/tmp'), '/tmp', 'test_item', 'ct', 'asdf')
builder = B.Builder(tc, True)
self.assertIsNotNone(builder)
self.assertIsNone(builder.error)
self.assertIsNone(builder.instrumentation_file)
self.assertTrue(builder.build_instr)
self.assertEqual(builder.directory, '/tmp')
def test_init_tc_none(self):
with self.assertRaises(B.BuilderException) as cm_b:
builder = B.Builder(None, True)
b_exc = cm_b.exception
self.assertEqual(str(b_exc), 'Builder::ctor: Target Configuration was None')
@unittest.skip('Builder::set_up mainly changes directories. How to test?')
def test_set_up(self):
self.assertFalse(True)
def test_construct_pira_kwargs_fail_instr(self):
tc = C.TargetConfig(self.cfg.get_place('/tmp'), '/tmp', 'test_item', 'ct', 'asdf')
builder = B.Builder(tc, True, '/tmp/instr_file')
self.assertIsNotNone(builder)
self.assertIsNotNone(builder.instrumentation_file)
with self.assertRaises(B.BuilderException) as cm_b:
p_kwargs = builder.construct_pira_kwargs()
exc = cm_b.exception
self.assertEqual(str(exc), 'Should not construct non-instrument kwargs in instrumentation mode.')
def test_construct_pira_kwargs(self):
tc = C.TargetConfig(self.cfg.get_place('/tmp'), '/tmp', 'test_item', 'ct', 'asdf')
builder = B.Builder(tc, False, '/tmp/instr_file')
self.assertIsNotNone(builder)
self.assertIsNotNone(builder.instrumentation_file)
p_kwargs = builder.construct_pira_kwargs()
self.assertEqual(p_kwargs['CC'], '\"clang\"')
self.assertEqual(p_kwargs['CXX'], '\"clang++\"')
self.assertEqual(p_kwargs['PIRANAME'], 'pira.built.exe')
self.assertEqual(p_kwargs['NUMPROCS'], 8)
@unittest.skip('Implement this test, when fully switched to internal Score-P')
def test_construct_pira_instr_kwargs(self):
tc = C.TargetConfig(self.cfg.get_place('/tmp'), '/tmp', 'test_item', 'ct', 'asdf')
builder = B.Builder(tc, True, '/tmp/instr_file')
self.assertIsNotNone(builder)
self.assertIsNotNone(builder.instrumentation_file)
p_kwargs = builder.construct_pira_instr_kwargs()
self.assertEqual(p_kwargs['CC'],'\"clang -finstrument-functions -finstrument-functions-whitelist-inputfile=/tmp/instr_file\"')
self.assertEqual(p_kwargs['CXX'],'\"clang++ -finstrument-functions -finstrument-functions-whitelist-inputfile=/tmp/instr_file\"')
# self.assertEqual(p_kwargs['CLFLAGS'], '\" scorep.init.o ')
# self.assertEqual(p_kwargs['CXXLFLAGS'], )
self.assertEqual(p_kwargs['PIRANAME'], 'pira.built.exe')
self.assertEqual(p_kwargs['NUMPROCS'], 8)
self.assertEqual(p_kwargs['filter-file'], '/tmp/instr_file')
  def test_construct_pira_instr_kwargs_non_instr_mode(self):
tc = C.TargetConfig(self.cfg.get_place('/tmp'), '/tmp', 'test_item', 'ct', 'asdf')
builder = B.Builder(tc, False)
self.assertIsNotNone(builder)
with self.assertRaises(B.BuilderException) as cm_b:
p_kwargs = builder.construct_pira_instr_kwargs()
exc = cm_b.exception
self.assertEqual(str(exc), 'Should not construct instrument kwargs in non-instrumentation mode.')
@unittest.skip('This actually calls the functor.')
def test_build_flavors(self):
pass
| 3,815 | 39.595745 | 133 | py |
PIRA | PIRA-master/test/unit/DatabaseTest.py | """
File: DatabaseTest.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Tests for the database module
"""
import lib.Database as D
import lib.tables as T
import unittest
import os
class TestDatabaseBasic(unittest.TestCase):
"""Tests the data base. Maybe"""
def setUp(self):
D.DBManager.instance = None
def test_create_db(self):
self.assertIsNone(D.DBManager.instance)
dbm = D.DBManager('test.sqlite')
self.assertIsNotNone(dbm)
self.assertIsNotNone(dbm.instance.conn)
def test_create_cursor(self):
dbm = D.DBManager('test.sqlite')
dbm.create_cursor()
self.assertIsNotNone(dbm.instance.cursor)
def test_create_db_fail(self):
    self.assertRaises(D.DBException, D.DBManager, None) #XXX Why does this not raise?
def test_fail_create_table_wo_cursor(self):
dbm = D.DBManager('test.sqlite')
self.assertRaises(D.DBException, D.DBManager.instance.create_table, T.create_items_table)
class TestDatabaseManip(unittest.TestCase):
""" Tests the manipulating functions of the DB implementation """
@classmethod
def tearDownClass(cls):
os.remove('test.sqlite')
def setUp(self):
D.DBManager.instance = None
self.dbm = D.DBManager('test.sqlite')
self.dbm.create_cursor()
def test_create_app_table(self):
self.dbm.create_table(T.create_application_table)
# XXX Add actual asserts
def test_create_build_table(self):
self.dbm.create_table(T.create_builds_table)
# XXX Add actual asserts
def test_create_items_table(self):
self.dbm.create_table(T.create_items_table)
# XXX Add actual asserts
def test_create_experiment_table(self):
self.dbm.create_table(T.create_experiment_table)
# XXX Add actual asserts
if __name__ == '__main__':
unittest.main()
| 1,863 | 25.253521 | 126 | py |
PIRA | PIRA-master/test/unit/AnalyzerTest.py | """
File: AnalyzerTest.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Tests for the Analyzer module.
"""
import lib.Analyzer as A
import lib.FunctorManagement as F
import lib.ConfigurationLoader as C
import lib.Utility as U
from lib.Configuration import PiraConfig, PiraConfigII, PiraConfigAdapter, PiraItem, TargetConfig, InvocationConfig, InstrumentConfig
from lib.ArgumentMapping import CmdlineLinearArgumentMapper
from lib.ProfileSink import ProfileSinkBase
import unittest
import os
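# Minimal stub sink for the tests below: it only records the arguments handed
# to process(), so assertions can check what the Analyzer forwarded.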
class TestProfileSink(ProfileSinkBase):
def __init__(self):
super().__init__()
self._tc = None
self._ic = None
def process(self, exp_dir: str, target_config: TargetConfig, instr_config: InstrumentConfig):
self._sink_target = exp_dir
self._tc = target_config
self._ic = instr_config
def has_config_output(self):
return False
class TestAnalyzer(unittest.TestCase):
def setUp(self):
# Pira I configuration (we probably drop the support anyway...)
self._p_cfg = PiraConfig()
# Pira II configuration and adapter
self._pira_two_cfg = PiraConfigII()
# get runtime folder
self.pira_dir = U.get_default_pira_dir()
# insert user runtime folder into test config
self.test_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../inputs/configs/basic_config_005.json')
data = None
with open(self.test_config, 'r') as file:
data = file.read()
data = data.replace('/tmp', self.pira_dir)
with open(self.test_config, 'w') as file:
file.write(data)
self._it_dir = self.pira_dir
item = PiraItem(os.path.join(self.pira_dir, 'test_item'))
item.set_analyzer_dir('/analyzer')
item.set_cubes_dir('/cubes')
item.set_flavors(['dflt'])
item.set_functors_base_path('/functors')
item.set_mode('ct')
InvocationConfig.create_from_kwargs({'config' : '../inputs/configs/basic_config_005.json'})
run_opts = CmdlineLinearArgumentMapper({'x': [1]})
item.set_run_options(run_opts)
self._item = item
self._pira_two_cfg.add_item(self._it_dir, item)
self._pira_two_cfg._empty = False # This is usually done in ConfigurationLoader
self._pira_two_adapter = PiraConfigAdapter(self._pira_two_cfg)
def tearDown(self):
# reset test config
data = None
with open(self.test_config, 'r') as file:
data = file.read()
data = data.replace(self.pira_dir, '/tmp')
with open(self.test_config, 'w') as file:
file.write(data)
def test_empty_pira_config(self):
with self.assertRaises(A.PiraAnalyzerException):
analyzer = A.Analyzer(PiraConfig())
def test_empty_pira_configII(self):
with self.assertRaises(A.PiraAnalyzerException):
analyzer = A.Analyzer(PiraConfigII())
def test_empty_pira_config_adapter(self):
with self.assertRaises(A.PiraAnalyzerException):
analyzer = A.Analyzer(PiraConfigAdapter(PiraConfigII()))
def test_pira_configII(self):
analyzer = A.Analyzer(self._pira_two_cfg)
self.assertIsNotNone(analyzer)
def test_pira_config_adapter(self):
analyzer = A.Analyzer(self._pira_two_adapter)
self.assertIsNotNone(analyzer)
def test_config_empty_sink(self):
analyzer = A.Analyzer(self._pira_two_cfg)
tc = TargetConfig(self._it_dir, self._it_dir, self._it_dir, 'dflt', 'asdf')
with self.assertRaises(RuntimeError):
analyzer.analyze(tc, 0, True)
def test_empty_target_config(self):
analyzer = A.Analyzer(self._pira_two_cfg)
with self.assertRaises(RuntimeError):
analyzer.analyze(None, 0, True)
def test_run_analyzer_command(self):
analyzer = A.Analyzer(self._pira_two_cfg)
with self.assertRaises(Exception):
analyzer.run_analyzer_command('some/command','analyzer/dir','ct','benchmark','exp/dir',0,'cfg/file',False)
def test_run_analyzer_command_no_instr(self):
analyzer = A.Analyzer(self._pira_two_cfg)
with self.assertRaises(Exception):
analyzer.run_analyzer_command_noInstr('some/command','analyzer/dir','ct','benchmark')
def test_analyze_local(self):
ld = C.SimplifiedConfigurationLoader()
cfg = ld.load_conf()
analyzer = A.Analyzer(cfg)
fm = F.FunctorManager(cfg)
a_f = fm.get_or_load_functor(self.pira_dir, 'test_item', 'ct', 'analyze')
self.assertIsNotNone(a_f)
self.assertTrue(a_f.get_method()['passive'])
self.assertEqual(a_f.get_it(), 0)
tc = TargetConfig(cfg.get_place(self.pira_dir), self.pira_dir, 'test_item', 'ct', 'asdf')
with self.assertRaises(RuntimeError) as assert_cm:
analyzer.analyze(tc, 0, True)
rt_err = assert_cm.exception
self.assertEqual(str(rt_err), 'Analyzer::analyze: Profile Sink in Analyzer not set!')
analyzer.set_profile_sink(TestProfileSink())
analyzer.analyze(tc, 0, True)
self.assertEqual(a_f.get_it(), 1)
@unittest.skip('Skip the test of the slurm Analyzer as we do not have any implementation for now.')
def test_analyze_slurm(self):
pass | 5,037 | 34.230769 | 133 | py |
PIRA | PIRA-master/test/unit/UtilityTest.py | """
File: UtilityTest.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description: Tests for the utility module
"""
import os.path
import unittest
import lib.Utility as U
import lib.Logging as L
class TestUtility(unittest.TestCase):
def test_check_provided_directory(self):
self.assertTrue(U.check_provided_directory('/home'))
self.assertFalse(U.check_provided_directory('/glibberish/asdf'))
def test_check_file(self):
self.assertTrue(U.is_file('/bin/sh'))
self.assertFalse(U.is_file('/bin/ushsdnsdhh'))
def test_shell_dry_run(self):
command = 'echo "Hello world!"'
expected_out = '[debug] Utility::shell: DRY RUN SHELL CALL: ' + command
out, t = U.shell(command, dry=True)
lm = L.get_logger().get_last_msg()
self.assertEqual(lm, expected_out)
self.assertEqual(t, 1.0)
self.assertEqual(out, '')
def test_shell_time_invoc(self):
command = 'echo "Hello World!"'
expected_out = 'Hello World!\n'
out, t = U.shell(command, time_invoc=True)
self.assertEqual(out, expected_out)
self.assertGreater(t, -1.0) # XXX This is already a little fishy!
def test_shell_invoc(self):
command = 'echo "Hello World!"'
expected_out = 'Hello World!\n'
out, t = U.shell(command, time_invoc=False)
self.assertEqual(out, expected_out)
self.assertEqual(t, -1.0) # XXX This is already a little fishy!
def test_concat_a_b_with_sep_all_empty(self):
a = ''
b = ''
sep = ''
r = U.concat_a_b_with_sep(a, b, sep)
self.assertEqual(r, '')
def test_concat_a_b_with_sep_empty(self):
a = 'a'
b = ''
sep = ''
r = U.concat_a_b_with_sep(a, b, sep)
self.assertEqual(r, 'a')
b = 'a'
a = ''
r = U.concat_a_b_with_sep(a, b, sep)
self.assertEqual(r, 'a')
def test_concat_a_b_with_sep(self):
a = 'a'
b = 'b'
sep = '_'
r = U.concat_a_b_with_sep(a, b, sep)
self.assertEqual(r, 'a_b')
a = 'aaaa'
b = ''
r = U.concat_a_b_with_sep(a, b, sep)
self.assertEqual(r, 'aaaa_')
def test_is_valid_file(self):
file_name = '/work/scratch/j_lehr/temp1-a'
res = U.is_valid_file_name(file_name)
self.assertTrue(res)
def test_is_valid_file_cube_pattern(self):
file_name = '/work/scratch/j_lehr/_preparation_/hpcg-1-test_run-1'
res = U.is_valid_file_name(file_name)
self.assertTrue(res)
def test_is_valid_file_false(self):
file_name = '/work\\scratch/j_lehr/temp1-%a'
res = U.is_valid_file_name(file_name)
self.assertFalse(res)
def test_is_valid_file_long(self):
file_name = '/work/scratch/j_lehr/temp1-a/_asd-tes-12-_2-/asd/nul'
res = U.is_valid_file_name(file_name)
self.assertTrue(res)
def test_is_valid_file_dot(self):
file_name = '/work+tch/j.lehr/temp1-a'
res = U.is_valid_file_name(file_name)
self.assertFalse(res)
def test_is_valid_file_plus(self):
file_name = '/work+tch/j_lehr/temp1-a'
res = U.is_valid_file_name(file_name)
self.assertFalse(res)
def test_is_valid_file_whitespace(self):
file_name = '/work+tch/j_leh r/temp1-a'
res = U.is_valid_file_name(file_name)
self.assertFalse(res)
def test_get_tempdir(self):
    tempdir = U.get_tempdir()
    self.assertIsNotNone(tempdir)
def test_make_dir(self):
U.make_dir("/home")
def test_remove_dir(self):
U.make_dirs(U.get_tempdir() + "/rm_dir_test")
U.remove_dir(U.get_tempdir() + "/rm_dir_test")
def test_default_config(self):
U.get_default_config_file()
def test_get_pira_dir(self):
path = os.path.dirname(__file__)
path = "/".join(path.split("/")[:-2])
self.assertEqual(path, U.get_pira_code_dir())
def test_get_default_slurm_config_dir(self):
self.assertEqual(f"{U.get_default_pira_dir()}/batchsystem.json", U.get_default_slurm_config_path())
def test_remove_file_with_pattern(self):
U.write_file(f"{os.path.dirname(__file__)}/testa.txt", "Test")
U.write_file(f"{os.path.dirname(__file__)}/testb.txt", "Test2")
U.write_file(f"{os.path.dirname(__file__)}/Atestc.txt", "Test3")
U.remove_file_with_pattern(os.path.dirname(__file__), "test[a-c].txt")
self.assertFalse(U.check_file(f"{os.path.dirname(__file__)}/testa.txt"))
self.assertFalse(U.check_file(f"{os.path.dirname(__file__)}/testb.txt"))
self.assertTrue(U.check_file(f"{os.path.dirname(__file__)}/Atestc.txt"))
U.remove_file(f"{os.path.dirname(__file__)}/Atestc.txt")
def test_json_to_canonic(self):
json_loads = {
"a": "astring",
"b": 12,
"c": None,
"d": {
"e": "innerstr",
"f": None
},
"g": [
{"a": 1},
{"b": 2}
]
}
self.assertEqual(U.json_to_canonic(json_loads), json_loads)
if __name__ == '__main__':
unittest.main()
| 4,811 | 28.341463 | 126 | py |
PIRA | PIRA-master/test/integration/check.py | #!/usr/bin/env python3
"""
File: check.py
License: Part of the PIRA project. Licensed under BSD 3 clause license. See LICENSE.txt file at https://github.com/tudasc/pira
Description:
This script is used to check the output of the integration tests against expectations defined in a file.
Call this script from the run-script of each integration test. (Usage information: `check.py -h`)
The expectations file is required to be a JSON file with the following structure:
```
[
  {
    "iteration": 0,
    "expect": ["foo", "foobar"],
    "may-expect": [".*bar.*"],
    "never-expect": ["evil"]
  },
  {
    "iteration": 1,
    "expect": ["foo"],
    "may-expect": ["foobar", ".*bar.*"],
    "never-expect": ["evil", "some_function"]
  }
]
```
The expectations follow these semantics:
- `expect`: Fails if at least one of the functions is *not* present in the iteration's instrumentation.
- `may-expect`: Attention! This field contains regexes! Instrumented functions matching any of the regexes do not provoke a failure.
  The goal is to avoid long lists of uninteresting functions.
- Instrumented functions which are neither listed in `expect` nor match a regex in `may-expect` provoke a failure.
- `never-expect`: Can be used to explicitly state functions which should be absent from the instrumentation.
- `never-expect` is "stronger" than `may-expect`. If a function is listed in `never-expect`, it provokes a failure, regardless of a possible match with `may-expect`.
"""
import argparse
import json
import os
import re
def prepare_instrumentation_file(lines):
""" Prepare a Score-P instrumenation list by removing prefixes, etc. """
lines = filter(lambda line: "SCOREP_" not in line, lines)
lines = map(lambda line: line.replace("INCLUDE", ""), lines)
lines = map(lambda line: line.strip(), lines)
lines = filter(lambda line: line != "", lines)
return list(lines)
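# For illustration (hypothetical filter-list content): the lines
#   ['SCOREP_REGION_NAMES_BEGIN', 'INCLUDE foo', '  INCLUDE bar ', '']
# reduce to ['foo', 'bar'] after the filtering and stripping above.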
def matches_any_regex(line, regexes):
for regex in regexes:
if re.search(regex, line):
return True
return False
class ExpectationTriple:
""" Represents the triple of expected, may-expect and never-expected for a single iteration. """
def __init__(self, entry):
self.iteration = entry["iteration"]
self.expect = entry["expect"]
self.may_expect = entry["may-expect"]
self.never_expect = entry["never-expect"]
def check_lines(self, lines, verbose=False):
""" Check a given instrumentation list against expectations. """
result = True
if verbose:
print(f"Iteration {self.iteration}")
print(f"\tactual instrumentation: {lines}")
print(f"\texpected instrumentation: {self.expect}")
print(f"\tmaybe-expected instrumentation: {self.may_expect}")
print(f"\tnever-expected instrumentation: {self.never_expect}")
conflict = set(self.expect) & set(self.never_expect)
if conflict:
print(
f"Problem in expectations file. The following functions are both expected and never-expected: {conflict}"
)
return False
# test whether all expected functions are present
expected_but_missing = set(self.expect) - set(lines)
if expected_but_missing:
print(
f"Iteration {self.iteration}: The following functions were expected to be present in the instrumenation, but were not: {expected_but_missing}"
)
result = False
remaining = set(lines) - set(self.expect)
# test for functions which have been marked as explicitly not expected
explicitly_not_expected = set(remaining) & set(self.never_expect)
if explicitly_not_expected:
print(
f"Iteration {self.iteration}: The following functions were instrumented, but explicitly not expected: {explicitly_not_expected}"
)
result = False
# remove all functions from remaining which match an expression in may-expect
remaining = list(filter(lambda x: not matches_any_regex(x, self.may_expect), remaining))
    # test for any further functions which are instrumented, but unexpected
remaining = set(remaining) - explicitly_not_expected
if remaining:
print(
f"Iteration {self.iteration}: The following functions were instrumented, but not (may-) expected: {remaining}"
)
result = False
return result
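# Semantics sketch (hypothetical data): with expect=['foo'], may_expect=['bar.*']
# and never_expect=['evil'], the instrumentation ['foo', 'barista'] passes, while
# ['foo', 'evil'] fails (never-expected) and ['barista'] fails ('foo' is missing).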
def main():
# parse cli arguments
parser = argparse.ArgumentParser(
description="Check instrumentation output of PIRA integration tests.")
parser.add_argument('instr_dir_path',
metavar='dir',
help="Path to directory containing the filter lists produced by PIRA.")
parser.add_argument('expected_path',
metavar='expected',
help="Path to JSON file describing the expected output.")
parser.add_argument(
'benchmark_name',
metavar='benchmark',
help=
"Benchmark name as in the filter list files. Most likely;: \"application_name\"_\"flavour\"")
parser.add_argument('-v',
'--verbose',
action='store_true',
help="Print actual and expected instrumentation before comparing.")
args = parser.parse_args()
# populate expectations data structure
expectations = {}
with open(args.expected_path, "r") as f:
for entry in json.load(f):
expectations[entry["iteration"]] = ExpectationTriple(entry)
# iterate over expectations
result = True
for i, exp in expectations.items():
instr_file_name = "instrumented-" + args.benchmark_name + "_it-" + str(i) + ".txt"
instr_file_path = os.path.join(args.instr_dir_path, instr_file_name)
try:
with open(instr_file_path, "r") as f:
lines = prepare_instrumentation_file(f.readlines())
result &= exp.check_lines(lines, verbose=args.verbose)
except OSError as err:
print(f"Error opening instrumentation file for iteration {i}: {err}")
result = False
if result:
print("All checks passed.")
  # exit with an error code iff there have been violated expectations
exit(not result)
if __name__ == '__main__':
main()
| 6,314 | 36.366864 | 169 | py |
PIRA | PIRA-master/test/integration/AMG2013_Slurm/functors/clean_amg_ct_mpi.py |
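# PIRA functor interface (sketch): get_method() declares whether the functor is
# passive (returns the shell command PIRA should run) or active (performs the
# step itself when invoked); PIRA then calls passive()/active() accordingly.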
def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make clean'
def active(benchmark, **kwargs):
pass
| 163 | 12.666667 | 43 | py |
PIRA | PIRA-master/test/integration/AMG2013_Slurm/functors/no_instr_amg_ct_mpi.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make CC="OMPI_CC=clang mpicc" -j'
def active(benchmark, **kwargs):
pass
| 184 | 15.818182 | 43 | py |
PIRA | PIRA-master/test/integration/AMG2013_Slurm/functors/amg_ct_mpi.py |
def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make CC="OMPI_CC=clang scorep --instrument-filter=' + kwargs['filter-file'] + ' mpicc" -j'
def active(benchmark, **kwargs):
pass
| 241 | 21 | 100 | py |
PIRA | PIRA-master/test/integration/AMG2013_Slurm/functors/runner_amg_ct_mpi.py |
def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'cd test && ' + kwargs['LD_PRELOAD'] + ' mpirun --allow-run-as-root -oversubscribe -np 8 ./amg2013 -pooldist 1 -r ' + str(kwargs['args'][1]) + ' ' + str(kwargs['args'][1]) + ' ' + str(kwargs['args'][1]) + ' -P 1 1 1 -printstats'
def active(benchmark, **kwargs):
pass
| 380 | 30.75 | 238 | py |
PIRA | PIRA-master/test/integration/AMG2013_Slurm/functors/analyze_amg_ct_mpi.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'pgis_pira'
def active(benchmark, **kwargs):
pass
| 161 | 13.727273 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLifePiraVersion1/functors/analyze_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def pre(**kwargs):
pass
def passive(benchmark, **kwargs):
return 'pgis_pira'
def post(**kwargs):
pass
def active(benchmark, **kwargs):
pass
| 218 | 10.526316 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLifePiraVersion1/functors/clean_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make clean'
def active(benchmark, **kwargs):
pass
| 162 | 13.818182 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLifePiraVersion1/functors/gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'CXX="scorep --instrument-filter=' + kwargs['filter-file'] + ' clang++" make gol'
def active(benchmark, **kwargs):
pass
| 231 | 20.090909 | 90 | py |
PIRA | PIRA-master/test/integration/GameOfLifePiraVersion1/functors/runner_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return './gol ' + kwargs['args'][1]
def active(benchmark, **kwargs):
pass
| 178 | 15.272727 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLifePiraVersion1/functors/no_instr_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'CXX=clang++ make gol'
def active(benchmark, **kwargs):
pass
| 172 | 14.727273 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLife_heuristic/functors/analyze_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def pre(**kwargs):
pass
def passive(benchmark, **kwargs):
return 'pgis_pira --metacg-format 2 --heuristic-selection fp_and_mem_ops --cuttoff-selection unique_median'
def post(**kwargs):
pass
def active(benchmark, **kwargs):
pass
| 307 | 15.210526 | 109 | py |
PIRA | PIRA-master/test/integration/GameOfLife_heuristic/functors/clean_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make clean'
def active(benchmark, **kwargs):
pass
| 162 | 13.818182 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLife_heuristic/functors/gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'CXX="scorep --instrument-filter=' + kwargs['filter-file'] + ' clang++" make gol'
def active(benchmark, **kwargs):
pass
| 231 | 20.090909 | 90 | py |
PIRA | PIRA-master/test/integration/GameOfLife_heuristic/functors/runner_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return './gol ' + kwargs['args'][1]
def active(benchmark, **kwargs):
pass
| 178 | 15.272727 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLife_heuristic/functors/no_instr_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'CXX=clang++ make gol'
def active(benchmark, **kwargs):
pass
| 172 | 14.727273 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLifePiraVersion1_Slurm/functors/analyze_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def pre(**kwargs):
pass
def passive(benchmark, **kwargs):
return 'pgis_pira'
def post(**kwargs):
pass
def active(benchmark, **kwargs):
pass
| 218 | 10.526316 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLifePiraVersion1_Slurm/functors/clean_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make clean'
def active(benchmark, **kwargs):
pass
| 162 | 13.818182 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLifePiraVersion1_Slurm/functors/gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'CXX="scorep --instrument-filter=' + kwargs['filter-file'] + ' clang++" make gol'
def active(benchmark, **kwargs):
pass
| 231 | 20.090909 | 90 | py |
PIRA | PIRA-master/test/integration/GameOfLifePiraVersion1_Slurm/functors/runner_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return './gol ' + kwargs['args'][1]
def active(benchmark, **kwargs):
pass
| 178 | 15.272727 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLifePiraVersion1_Slurm/functors/no_instr_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'CXX=clang++ make gol'
def active(benchmark, **kwargs):
pass
| 172 | 14.727273 | 43 | py |
PIRA | PIRA-master/test/integration/AMG2013/functors/clean_amg_ct_mpi.py |
def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make clean'
def active(benchmark, **kwargs):
pass
| 163 | 12.666667 | 43 | py |
PIRA | PIRA-master/test/integration/AMG2013/functors/no_instr_amg_ct_mpi.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make CC="OMPI_CC=clang mpicc" -j'
def active(benchmark, **kwargs):
pass
| 184 | 15.818182 | 43 | py |
PIRA | PIRA-master/test/integration/AMG2013/functors/amg_ct_mpi.py |
def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make CC="OMPI_CC=clang scorep --instrument-filter=' + kwargs['filter-file'] + ' mpicc" -j'
def active(benchmark, **kwargs):
pass
| 241 | 21 | 100 | py |
PIRA | PIRA-master/test/integration/AMG2013/functors/runner_amg_ct_mpi.py |
def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'cd test && ' + kwargs['LD_PRELOAD'] + ' mpirun --allow-run-as-root -oversubscribe -np 8 ./amg2013 -pooldist 1 -r ' + str(kwargs['args'][1]) + ' ' + str(kwargs['args'][1]) + ' ' + str(kwargs['args'][1]) + ' -P 1 1 1 -printstats'
def active(benchmark, **kwargs):
pass
| 380 | 30.75 | 238 | py |
PIRA | PIRA-master/test/integration/AMG2013/functors/analyze_amg_ct_mpi.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'pgis_pira'
def active(benchmark, **kwargs):
pass
| 161 | 13.727273 | 43 | py |
PIRA | PIRA-master/test/integration/Kripke/functors/runner_kripke_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return kwargs['LD_PRELOAD'] + ' mpirun -c 8 ./bin/kripke.exe --procs 2,2,2 --groups ' + kwargs['args'][1]
def active(benchmark, **kwargs):
pass
| 254 | 22.181818 | 109 | py |
PIRA | PIRA-master/test/integration/Kripke/functors/no_instr_kripke_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make CXX_WRAP="mpicxx" -j'
def active(benchmark, **kwargs):
pass
| 183 | 15.727273 | 45 | py |
PIRA | PIRA-master/test/integration/Kripke/functors/analyze_kripke_ct.py | def get_method():
return {'passive': True, 'active': False}
def pre(**kwargs):
pass
def passive(benchmark, **kwargs):
return 'pgis_pira'
def post(**kwargs):
pass
def active(benchmark, **kwargs):
pass
| 228 | 11.052632 | 45 | py |
PIRA | PIRA-master/test/integration/Kripke/functors/clean_kripke_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make clean'
def active(benchmark, **kwargs):
pass
| 168 | 14.363636 | 45 | py |
PIRA | PIRA-master/test/integration/Kripke/functors/kripke_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make OMPI_CXX=clang++ CXX_WRAP="scorep --instrument-filter=' + kwargs['filter-file'] + ' mpicxx" -j'
def active(benchmark, **kwargs):
pass
| 256 | 24.7 | 112 | py |
PIRA | PIRA-master/test/integration/LoadImbalance_Slurm/functors/imbalance_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make CC="OMPI_CC=clang scorep --instrument-filter=' + kwargs['filter-file'] + ' mpicc"'
def active(benchmark, **kwargs):
pass
| 238 | 20.727273 | 97 | py |
PIRA | PIRA-master/test/integration/LoadImbalance_Slurm/functors/analyze_imbalance_ct.py | def get_method():
return {'passive': True, 'active': False}
def pre(**kwargs):
pass
def passive(benchmark, **kwargs):
return 'pgis_pira'
def post(**kwargs):
pass
def active(benchmark, **kwargs):
pass
| 218 | 10.526316 | 43 | py |
PIRA | PIRA-master/test/integration/LoadImbalance_Slurm/functors/runner_imbalance_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return kwargs['LD_PRELOAD'] + ' mpirun -np 8 imbalance-static.out'
# return kwargs['LD_PRELOAD'] + ' mpirun -np 8 imbalance-dynamic.out'
def active(benchmark, **kwargs):
pass
| 280 | 24.545455 | 71 | py |
PIRA | PIRA-master/test/integration/LoadImbalance_Slurm/functors/clean_imbalance_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make clean'
def active(benchmark, **kwargs):
pass
| 162 | 13.818182 | 43 | py |
PIRA | PIRA-master/test/integration/LoadImbalance_Slurm/functors/no_instr_imbalance_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make CC="OMPI_CC=clang mpicc"'
def active(benchmark, **kwargs):
pass
| 181 | 15.545455 | 43 | py |
PIRA | PIRA-master/test/integration/Kripke_Slurm/functors/runner_kripke_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return kwargs['LD_PRELOAD'] + ' mpirun -c 8 ./bin/kripke.exe --procs 2,2,2 --groups ' + kwargs['args'][1]
def active(benchmark, **kwargs):
pass
| 254 | 22.181818 | 109 | py |
PIRA | PIRA-master/test/integration/Kripke_Slurm/functors/no_instr_kripke_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make CXX_WRAP="mpicxx" -j'
def active(benchmark, **kwargs):
pass
| 183 | 15.727273 | 45 | py |
PIRA | PIRA-master/test/integration/Kripke_Slurm/functors/analyze_kripke_ct.py | def get_method():
return {'passive': True, 'active': False}
def pre(**kwargs):
pass
def passive(benchmark, **kwargs):
return 'pgis_pira'
def post(**kwargs):
pass
def active(benchmark, **kwargs):
pass
| 228 | 11.052632 | 45 | py |
PIRA | PIRA-master/test/integration/Kripke_Slurm/functors/clean_kripke_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make clean'
def active(benchmark, **kwargs):
pass
| 168 | 14.363636 | 45 | py |
PIRA | PIRA-master/test/integration/Kripke_Slurm/functors/kripke_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make OMPI_CXX=clang++ CXX_WRAP="scorep --instrument-filter=' + kwargs['filter-file'] + ' mpicxx" -j'
def active(benchmark, **kwargs):
pass
| 256 | 24.7 | 112 | py |
PIRA | PIRA-master/test/integration/GameOfLife_hybrid_filter/functors/analyze_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def pre(**kwargs):
pass
def passive(benchmark, **kwargs):
return 'pgis_pira'
def post(**kwargs):
pass
def active(benchmark, **kwargs):
pass
| 218 | 10.526316 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLife_hybrid_filter/functors/clean_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make clean'
def active(benchmark, **kwargs):
pass
| 162 | 13.818182 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLife_hybrid_filter/functors/gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'CXX="scorep --instrument-filter=' + kwargs['filter-file'] + ' clang++" make gol'
def active(benchmark, **kwargs):
pass
| 231 | 20.090909 | 90 | py |
PIRA | PIRA-master/test/integration/GameOfLife_hybrid_filter/functors/runner_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return './gol ' + kwargs['args'][1]
def active(benchmark, **kwargs):
pass
| 178 | 15.272727 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLife_hybrid_filter/functors/no_instr_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'CXX=clang++ make gol'
def active(benchmark, **kwargs):
pass
| 172 | 14.727273 | 43 | py |
PIRA | PIRA-master/test/integration/LULESH/functors/analyze_lulesh_ct.py | def get_method():
return {'passive': True, 'active': False}
def pre(**kwargs):
pass
def passive(benchmark, **kwargs):
return 'pgis_pira'
def post(**kwargs):
pass
def active(benchmark, **kwargs):
pass
| 228 | 11.052632 | 45 | py |
PIRA | PIRA-master/test/integration/LULESH/functors/clean_lulesh_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make clean'
def active(benchmark, **kwargs):
pass
| 168 | 14.363636 | 45 | py |
PIRA | PIRA-master/test/integration/LULESH/functors/lulesh_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make CXXFLAGS="$LULESH_CXXFLAGS" CXX="OMPI_CXX=clang++ scorep --instrument-filter=' + kwargs['filter-file'] + ' mpicxx" -j'
def active(benchmark, **kwargs):
pass
| 279 | 27 | 135 | py |
PIRA | PIRA-master/test/integration/LULESH/functors/runner_lulesh_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return kwargs['LD_PRELOAD'] + ' mpirun -c 8 ./lulesh2.0 -b 1'
def active(benchmark, **kwargs):
pass
| 210 | 18.181818 | 65 | py |
PIRA | PIRA-master/test/integration/LULESH/functors/no_instr_lulesh_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make CXXFLAGS="$LULESH_CXXFLAGS" CXX="mpicxx" -j'
def active(benchmark, **kwargs):
pass
| 206 | 17.818182 | 61 | py |
PIRA | PIRA-master/test/integration/LULESH_Slurm/functors/analyze_lulesh_ct.py | def get_method():
return {'passive': True, 'active': False}
def pre(**kwargs):
pass
def passive(benchmark, **kwargs):
return 'pgis_pira'
def post(**kwargs):
pass
def active(benchmark, **kwargs):
pass
| 228 | 11.052632 | 45 | py |
PIRA | PIRA-master/test/integration/LULESH_Slurm/functors/clean_lulesh_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make clean'
def active(benchmark, **kwargs):
pass
| 168 | 14.363636 | 45 | py |
PIRA | PIRA-master/test/integration/LULESH_Slurm/functors/lulesh_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make CXXFLAGS="$LULESH_CXXFLAGS" CXX="OMPI_CXX=clang++ scorep --instrument-filter=' + kwargs['filter-file'] + ' mpicxx" -j'
def active(benchmark, **kwargs):
pass
| 279 | 27 | 135 | py |
PIRA | PIRA-master/test/integration/LULESH_Slurm/functors/runner_lulesh_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return kwargs['LD_PRELOAD'] + ' mpirun -c 8 ./lulesh2.0 -b 1'
def active(benchmark, **kwargs):
pass
| 210 | 18.181818 | 65 | py |
PIRA | PIRA-master/test/integration/LULESH_Slurm/functors/no_instr_lulesh_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make CXXFLAGS="$LULESH_CXXFLAGS" CXX="mpicxx" -j'
def active(benchmark, **kwargs):
pass
| 206 | 17.818182 | 61 | py |
PIRA | PIRA-master/test/integration/GameOfLife_hybrid_filter_Slurm/functors/analyze_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def pre(**kwargs):
pass
def passive(benchmark, **kwargs):
return 'pgis_pira'
def post(**kwargs):
pass
def active(benchmark, **kwargs):
pass
| 218 | 10.526316 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLife_hybrid_filter_Slurm/functors/clean_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make clean'
def active(benchmark, **kwargs):
pass
| 162 | 13.818182 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLife_hybrid_filter_Slurm/functors/gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'CXX="scorep --instrument-filter=' + kwargs['filter-file'] + ' clang++" make gol'
def active(benchmark, **kwargs):
pass
| 231 | 20.090909 | 90 | py |
PIRA | PIRA-master/test/integration/GameOfLife_hybrid_filter_Slurm/functors/runner_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return './gol ' + kwargs['args'][1]
def active(benchmark, **kwargs):
pass
| 178 | 15.272727 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLife_hybrid_filter_Slurm/functors/no_instr_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'CXX=clang++ make gol'
def active(benchmark, **kwargs):
pass
| 172 | 14.727273 | 43 | py |
PIRA | PIRA-master/test/integration/LoadImbalance/functors/imbalance_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make CC="OMPI_CC=clang scorep --instrument-filter=' + kwargs['filter-file'] + ' mpicc"'
def active(benchmark, **kwargs):
pass
| 238 | 20.727273 | 97 | py |
PIRA | PIRA-master/test/integration/LoadImbalance/functors/analyze_imbalance_ct.py | def get_method():
return {'passive': True, 'active': False}
def pre(**kwargs):
pass
def passive(benchmark, **kwargs):
return 'pgis_pira'
def post(**kwargs):
pass
def active(benchmark, **kwargs):
pass
| 218 | 10.526316 | 43 | py |
PIRA | PIRA-master/test/integration/LoadImbalance/functors/runner_imbalance_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return kwargs['LD_PRELOAD'] + ' mpirun -np 8 imbalance-static.out'
# return kwargs['LD_PRELOAD'] + ' mpirun -np 8 imbalance-dynamic.out'
def active(benchmark, **kwargs):
pass
| 280 | 24.545455 | 71 | py |
PIRA | PIRA-master/test/integration/LoadImbalance/functors/clean_imbalance_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make clean'
def active(benchmark, **kwargs):
pass
| 162 | 13.818182 | 43 | py |
PIRA | PIRA-master/test/integration/LoadImbalance/functors/no_instr_imbalance_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make CC="OMPI_CC=clang mpicc"'
def active(benchmark, **kwargs):
pass
| 181 | 15.545455 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLife_Slurm/functors/analyze_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def pre(**kwargs):
pass
def passive(benchmark, **kwargs):
return 'pgis_pira'
def post(**kwargs):
pass
def active(benchmark, **kwargs):
pass
| 218 | 10.526316 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLife_Slurm/functors/clean_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make clean'
def active(benchmark, **kwargs):
pass
| 162 | 13.818182 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLife_Slurm/functors/gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'CXX="scorep --instrument-filter=' + kwargs['filter-file'] + ' clang++" make gol'
def active(benchmark, **kwargs):
pass
| 231 | 20.090909 | 90 | py |
PIRA | PIRA-master/test/integration/GameOfLife_Slurm/functors/runner_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return './gol ' + kwargs['args'][1]
def active(benchmark, **kwargs):
pass
| 178 | 15.272727 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLife_Slurm/functors/no_instr_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'CXX=clang++ make gol'
def active(benchmark, **kwargs):
pass
| 172 | 14.727273 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLife_heuristic_Slurm/functors/analyze_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def pre(**kwargs):
pass
def passive(benchmark, **kwargs):
return 'pgis_pira --heuristic-selection fp_and_mem_ops --cuttoff-selection unique_median'
def post(**kwargs):
pass
def active(benchmark, **kwargs):
pass
| 289 | 14.263158 | 91 | py |
PIRA | PIRA-master/test/integration/GameOfLife_heuristic_Slurm/functors/clean_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make clean'
def active(benchmark, **kwargs):
pass
| 162 | 13.818182 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLife_heuristic_Slurm/functors/gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'CXX="scorep --instrument-filter=' + kwargs['filter-file'] + ' clang++" make gol'
def active(benchmark, **kwargs):
pass
| 231 | 20.090909 | 90 | py |
PIRA | PIRA-master/test/integration/GameOfLife_heuristic_Slurm/functors/runner_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return './gol ' + kwargs['args'][1]
def active(benchmark, **kwargs):
pass
| 178 | 15.272727 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLife_heuristic_Slurm/functors/no_instr_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'CXX=clang++ make gol'
def active(benchmark, **kwargs):
pass
| 172 | 14.727273 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLife/functors/analyze_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def pre(**kwargs):
pass
def passive(benchmark, **kwargs):
return 'pgis_pira'
def post(**kwargs):
pass
def active(benchmark, **kwargs):
pass
| 218 | 10.526316 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLife/functors/clean_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'make clean'
def active(benchmark, **kwargs):
pass
| 162 | 13.818182 | 43 | py |
PIRA | PIRA-master/test/integration/GameOfLife/functors/gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return 'CXX="scorep --instrument-filter=' + kwargs['filter-file'] + ' clang++" make gol'
def active(benchmark, **kwargs):
pass
| 231 | 20.090909 | 90 | py |
PIRA | PIRA-master/test/integration/GameOfLife/functors/runner_gol_ct.py | def get_method():
return {'passive': True, 'active': False}
def passive(benchmark, **kwargs):
return './gol ' + kwargs['args'][1]
def active(benchmark, **kwargs):
pass
| 178 | 15.272727 | 43 | py |