repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
gunpowder | gunpowder-master/gunpowder/nodes/hdf5like_write_base.py | from .batch_filter import BatchFilter
from gunpowder.batch_request import BatchRequest
from gunpowder.coordinate import Coordinate
from gunpowder.roi import Roi
import logging
import os
import warnings
logger = logging.getLogger(__name__)
class Hdf5LikeWrite(BatchFilter):
    """Assemble arrays of passing batches in one HDF5-like container.

    This is useful to store chunks produced by :class:`Scan` on disk without
    keeping the larger array in memory. The ROIs of the passing arrays will be
    used to determine the position where to store the data in the dataset.

    Args:

        dataset_names (``dict``, :class:`ArrayKey` -> ``string``):

            A dictionary from array keys to names of the datasets to store them
            in.

        output_dir (``string``):

            The directory to save the container. Will be created, if it does
            not exist.

        output_filename (``string``):

            The output filename of the container. Will be created, if it does
            not exist, otherwise data is overwritten in the existing container.

        compression_type (``string`` or ``int``):

            Compression strategy. Legal values are ``gzip``, ``szip``,
            ``lzf``. If an integer between 1 and 10, this indicates ``gzip``
            compression level.

        dataset_dtypes (``dict``, :class:`ArrayKey` -> data type):

            A dictionary from array keys to datatype (eg. ``np.int8``). If
            given, arrays are stored using this type. The original arrays
            within the pipeline remain unchanged.
    """

    def __init__(
        self,
        dataset_names,
        output_dir=".",
        output_filename="output.hdf",
        compression_type=None,
        dataset_dtypes=None,
    ):
        # fixed typo in the user-facing message ("depricated" -> "deprecated")
        warnings.warn(
            "HDF5LikeWrite is deprecated and will soon be removed in v2.0",
            DeprecationWarning,
        )
        self.dataset_names = dataset_names
        self.output_dir = output_dir
        self.output_filename = output_filename
        self.compression_type = compression_type
        if dataset_dtypes is None:
            self.dataset_dtypes = {}
        else:
            self.dataset_dtypes = dataset_dtypes

        # world-unit offset of each output dataset, filled in lazily by
        # init_datasets() on the first batch
        self.dataset_offsets = {}

    def setup(self):
        for key in self.dataset_names.keys():
            self.updates(key, self.spec[key])
        self.enable_autoskip()

    def prepare(self, request):
        # depend on exactly the arrays we are asked to write
        deps = BatchRequest()
        for key in self.dataset_names.keys():
            deps[key] = request[key]
        return deps

    def _open_file(self, filename):
        raise NotImplementedError("Only implemented in subclasses")

    def _get_voxel_size(self, dataset):
        return Coordinate(dataset.attrs["resolution"])

    def _get_offset(self, dataset):
        return Coordinate(dataset.attrs["offset"])

    def _set_voxel_size(self, dataset, voxel_size):
        dataset.attrs["resolution"] = voxel_size

    def _set_offset(self, dataset, offset):
        dataset.attrs["offset"] = offset

    def init_datasets(self, batch):
        """Create (or re-open) the output datasets, using the first batch to
        determine shape and dtype, and record each dataset's world offset in
        ``self.dataset_offsets``."""

        filename = os.path.join(self.output_dir, self.output_filename)
        logger.debug("Initializing container %s", filename)

        # previously a bare ``try: ... except: pass`` around os.makedirs,
        # which also swallowed unrelated errors (e.g. permission problems)
        os.makedirs(self.output_dir, exist_ok=True)

        for array_key, dataset_name in self.dataset_names.items():
            logger.debug("Initializing dataset for %s", array_key)

            assert array_key in self.spec, (
                "Asked to store %s, but is not provided upstream." % array_key
            )
            assert array_key in batch.arrays, (
                "Asked to store %s, but is not part of batch." % array_key
            )

            array = batch.arrays[array_key]
            dims = array.spec.roi.dims
            batch_shape = array.data.shape

            with self._open_file(filename) as data_file:
                # if a dataset already exists, read its meta-information (if
                # present)
                if dataset_name in data_file:
                    offset = self._get_offset(data_file[dataset_name]) or Coordinate(
                        (0,) * dims
                    )
                else:
                    provided_roi = self.spec[array_key].roi
                    if provided_roi is None:
                        raise RuntimeError(
                            "Dataset %s does not exist in %s, and no ROI is "
                            "provided for %s. I don't know how to initialize "
                            "the dataset." % (dataset_name, filename, array_key)
                        )

                    offset = provided_roi.offset
                    voxel_size = array.spec.voxel_size
                    data_shape = provided_roi.shape // voxel_size

                    logger.debug("Shape in voxels: %s", data_shape)

                    # add channel dimensions (if present)
                    data_shape = batch_shape[:-dims] + data_shape

                    logger.debug("Shape with channel dimensions: %s", data_shape)

                    if array_key in self.dataset_dtypes:
                        dtype = self.dataset_dtypes[array_key]
                    else:
                        dtype = batch.arrays[array_key].data.dtype

                    logger.debug(
                        "create_dataset: %s, %s, %s, %s, offset=%s, resolution=%s",
                        dataset_name,
                        data_shape,
                        self.compression_type,
                        dtype,
                        offset,
                        voxel_size,
                    )
                    dataset = data_file.create_dataset(
                        name=dataset_name,
                        shape=data_shape,
                        compression=self.compression_type,
                        dtype=dtype,
                    )
                    self._set_offset(dataset, offset)
                    self._set_voxel_size(dataset, voxel_size)

                logger.debug(
                    "%s (%s in %s) has offset %s",
                    array_key,
                    dataset_name,
                    filename,
                    offset,
                )
                self.dataset_offsets[array_key] = offset

    def process(self, batch, request):
        filename = os.path.join(self.output_dir, self.output_filename)

        # lazily initialize the output datasets on the first batch
        if not self.dataset_offsets:
            self.init_datasets(batch)

        with self._open_file(filename) as data_file:
            for array_key, dataset_name in self.dataset_names.items():
                dataset = data_file[dataset_name]

                array_roi = batch.arrays[array_key].spec.roi
                voxel_size = self.spec[array_key].voxel_size
                dims = array_roi.dims
                # leading (non-spatial) channel dimensions are written in full
                channel_slices = (slice(None),) * max(0, len(dataset.shape) - dims)

                dataset_roi = Roi(
                    self.dataset_offsets[array_key],
                    Coordinate(dataset.shape[-dims:]) * voxel_size,
                )
                common_roi = array_roi.intersect(dataset_roi)

                if common_roi.empty:
                    # logger.warn is deprecated; also use lazy %-style args
                    # instead of eager string formatting
                    logger.warning(
                        "array %s with ROI %s lies outside of dataset ROI %s, "
                        "skipping writing",
                        array_key,
                        array_roi,
                        dataset_roi,
                    )
                    continue

                dataset_voxel_roi = (
                    common_roi - self.dataset_offsets[array_key]
                ) // voxel_size
                dataset_voxel_slices = dataset_voxel_roi.to_slices()
                array_voxel_roi = (common_roi - array_roi.offset) // voxel_size
                array_voxel_slices = array_voxel_roi.to_slices()

                logger.debug(
                    "writing %s to voxel coordinates %s",
                    array_key,
                    dataset_voxel_roi,
                )

                data = batch.arrays[array_key].data[channel_slices + array_voxel_slices]
                dataset[channel_slices + dataset_voxel_slices] = data
| 8,112 | 35.21875 | 88 | py |
gunpowder | gunpowder-master/gunpowder/nodes/zarr_source.py | from gunpowder.ext import ZarrFile
from gunpowder.batch import Batch
from gunpowder.coordinate import Coordinate
from gunpowder.profiling import Timing
from gunpowder.roi import Roi
from gunpowder.array import Array
from gunpowder.array_spec import ArraySpec
from .batch_provider import BatchProvider
from zarr._storage.store import BaseStore
from zarr import N5Store, N5FSStore
import numpy as np
from collections.abc import MutableMapping
from typing import Union
import warnings
import logging
logger = logging.getLogger(__name__)
class ZarrSource(BatchProvider):
    """A `zarr <https://github.com/zarr-developers/zarr>`_ data source.

    Provides arrays from zarr datasets. If the attribute ``resolution`` is set
    in a zarr dataset, it will be used as the array's ``voxel_size``. If the
    attribute ``offset`` is set in a dataset, it will be used as the offset of
    the :class:`Roi` for this array. It is assumed that the offset is given in
    world units.

    Args:

        store (``string``, ``zarr.BaseStore``):

            A zarr store or path to a zarr directory or zip file.

        datasets (``dict``, :class:`ArrayKey` -> ``string``):

            Dictionary of array keys to dataset names that this source offers.

        array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`, optional):

            An optional dictionary of array keys to array specs to overwrite
            the array specs automatically determined from the data file. This
            is useful to set a missing ``voxel_size``, for example. Only fields
            that are not ``None`` in the given :class:`ArraySpec` will be used.

        channels_first (``bool``, optional):

            Specifies the ordering of the dimensions of the HDF5-like data source.
            If channels_first is set (default), then the input shape is expected
            to be (channels, spatial dimensions). This is recommended because of
            better performance. If channels_first is set to false, then the input
            data is read in channels_last manner and converted to channels_first.
    """

    def __init__(
        self,
        store: Union[BaseStore, MutableMapping, str] = None,
        datasets=None,
        array_specs=None,
        channels_first=True,
        filename=None,
    ):
        # datasets is not really optional, this is for backwards compatibility
        # only
        assert datasets is not None, "Argument 'datasets' has to be provided"

        if filename is not None:
            warnings.warn(
                "Argument 'filename' will be replaced in v2.0, " "use 'store' instead",
                DeprecationWarning,
            )
            assert store is None, "If 'store' is given, 'filename' has to be None"
            store = filename

        self.store = store
        if array_specs is None:
            self.array_specs = {}
        else:
            self.array_specs = array_specs
        self.channels_first = channels_first
        self.datasets = datasets

    def _get_voxel_size(self, dataset):
        # returns None if the dataset carries no resolution metadata
        if "resolution" not in dataset.attrs:
            return None

        if self._rev_metadata():
            # N5 stores metadata in reversed axis order
            return Coordinate(dataset.attrs["resolution"][::-1])
        else:
            return Coordinate(dataset.attrs["resolution"])

    def _get_offset(self, dataset):
        # returns None if the dataset carries no offset metadata
        if "offset" not in dataset.attrs:
            return None

        if self._rev_metadata():
            return Coordinate(dataset.attrs["offset"][::-1])
        else:
            return Coordinate(dataset.attrs["offset"])

    def _rev_metadata(self):
        # N5 backends store metadata in reversed axis order
        with ZarrFile(self.store, mode="a") as store:
            return isinstance(store, N5Store) or isinstance(store, N5FSStore)

    def _open_file(self, store):
        return ZarrFile(store, mode="r")

    def setup(self):
        with self._open_file(self.store) as data_file:
            for array_key, ds_name in self.datasets.items():
                if ds_name not in data_file:
                    raise RuntimeError("%s not in %s" % (ds_name, self.store))

                spec = self.__read_spec(array_key, data_file, ds_name)
                self.provides(array_key, spec)

    def provide(self, request):
        timing = Timing(self)
        timing.start()

        batch = Batch()

        with self._open_file(self.store) as data_file:
            for array_key, request_spec in request.array_specs.items():
                logger.debug("Reading %s in %s...", array_key, request_spec.roi)

                voxel_size = self.spec[array_key].voxel_size

                # scale request roi to voxel units
                dataset_roi = request_spec.roi / voxel_size

                # shift request roi into dataset
                dataset_roi = dataset_roi - self.spec[array_key].roi.offset / voxel_size

                # create array spec
                array_spec = self.spec[array_key].copy()
                array_spec.roi = request_spec.roi

                # add array to batch
                batch.arrays[array_key] = Array(
                    self.__read(data_file, self.datasets[array_key], dataset_roi),
                    array_spec,
                )

        logger.debug("done")

        timing.stop()
        batch.profiling_stats.add(timing)

        return batch

    def __read_spec(self, array_key, data_file, ds_name):
        """Derive the :class:`ArraySpec` for one dataset, combining
        user-provided specs with metadata found in the file."""

        dataset = data_file[ds_name]

        if array_key in self.array_specs:
            spec = self.array_specs[array_key].copy()
        else:
            spec = ArraySpec()

        if spec.voxel_size is None:
            voxel_size = self._get_voxel_size(dataset)
            if voxel_size is None:
                voxel_size = Coordinate((1,) * len(dataset.shape))
                # bug fix: this warning previously logged spec.voxel_size,
                # which is still None at this point; log the fallback value
                # that is actually used
                logger.warning(
                    "WARNING: File %s does not contain resolution information "
                    "for %s (dataset %s), voxel size has been set to %s. This "
                    "might not be what you want.",
                    self.store,
                    array_key,
                    ds_name,
                    voxel_size,
                )
            spec.voxel_size = voxel_size

        self.ndims = len(spec.voxel_size)

        if spec.roi is None:
            offset = self._get_offset(dataset)
            if offset is None:
                offset = Coordinate((0,) * self.ndims)

            if self.channels_first:
                shape = Coordinate(dataset.shape[-self.ndims :])
            else:
                shape = Coordinate(dataset.shape[: self.ndims])

            spec.roi = Roi(offset, shape * spec.voxel_size)

        if spec.dtype is not None:
            assert spec.dtype == dataset.dtype, (
                "dtype %s provided in array_specs for %s, "
                "but differs from dataset %s dtype %s"
                % (self.array_specs[array_key].dtype, array_key, ds_name, dataset.dtype)
            )
        else:
            spec.dtype = dataset.dtype

        if spec.interpolatable is None:
            # np.float128 is not available on all platforms (e.g. Windows);
            # guard the attribute lookup so this node still works there
            interpolatable_dtypes = [
                np.float32,
                np.float64,
                np.uint8,  # assuming this is not used for labels
            ]
            if hasattr(np, "float128"):
                interpolatable_dtypes.append(np.float128)
            spec.interpolatable = spec.dtype in interpolatable_dtypes
            logger.warning(
                "WARNING: You didn't set 'interpolatable' for %s "
                "(dataset %s). Based on the dtype %s, it has been "
                "set to %s. This might not be what you want.",
                array_key,
                ds_name,
                spec.dtype,
                spec.interpolatable,
            )

        return spec

    def __read(self, data_file, ds_name, roi):
        # number of leading/trailing channel dimensions
        c = len(data_file[ds_name].shape) - self.ndims

        if self.channels_first:
            array = np.asarray(data_file[ds_name][(slice(None),) * c + roi.to_slices()])
        else:
            array = np.asarray(data_file[ds_name][roi.to_slices() + (slice(None),) * c])
            # move trailing channel dimensions to the front
            array = np.transpose(
                array, axes=[i + self.ndims for i in range(c)] + list(range(self.ndims))
            )

        return array

    def name(self):
        return super().name() + f"[{self.store}]"
| 8,175 | 33.066667 | 88 | py |
gunpowder | gunpowder-master/gunpowder/nodes/balance_labels.py | from .batch_filter import BatchFilter
from gunpowder.array import Array
from gunpowder.batch_request import BatchRequest
from gunpowder.batch import Batch
from collections.abc import Iterable
import itertools
import logging
import numpy as np
logger = logging.getLogger(__name__)
class BalanceLabels(BatchFilter):
    """Creates a scale array to balance the loss between class labels.

    Note that this only balances loss weights per-batch and does not accumulate
    statistics about class balance across batches.

    Args:

        labels (:class:`ArrayKey`):

            An array containing binary or integer labels.

        scales (:class:`ArrayKey`):

            An array with scales to be created. This new array will have the
            same ROI and resolution as ``labels``.

        mask (:class:`ArrayKey`, optional):

            An optional mask (or list of masks) to consider for balancing.
            Every voxel marked with a 0 will not contribute to the scaling and
            will have a scale of 0 in ``scales``.

        slab (``tuple`` of ``int``, optional):

            A shape specification to perform the balancing in slabs of this
            size. -1 can be used to refer to the actual size of the label
            array. For example, a slab of::

                (2, -1, -1, -1)

            will perform the balancing for each slice ``[0:2,:]``,
            ``[2:4,:]``, ... individually.

        num_classes (``int``, optional):

            The number of classes. Labels will be expected to be in the
            interval [0, ``num_classes``). Defaults to 2 for binary
            classification.

        clipmin (``float``, optional):

            Clip class fraction to clipmin when calculating class weights.
            Defaults to 0.05. Set to None if you do not want to clip min values.

        clipmax (``float``, optional):

            Clip class fraction to clipmax when calculating class weights.
            Defaults to 0.95. Set to None, if you do not want to clip max
            values.
    """

    def __init__(
        self,
        labels,
        scales,
        mask=None,
        slab=None,
        num_classes=2,
        clipmin=0.05,
        clipmax=0.95,
    ):
        self.labels = labels
        self.scales = scales
        # normalize the mask argument to a list of mask keys: none, a single
        # key, or an iterable of keys are all accepted
        if mask is None:
            self.masks = []
        elif not isinstance(mask, Iterable):
            self.masks = [mask]
        else:
            self.masks = mask

        self.slab = slab
        self.num_classes = num_classes
        self.clipmin = clipmin
        self.clipmax = clipmax

    def setup(self):
        assert self.labels in self.spec, (
            "Asked to balance labels %s, which are not provided." % self.labels
        )

        for mask in self.masks:
            assert mask in self.spec, (
                "Asked to apply mask %s to balance labels, but mask is not "
                "provided." % mask
            )

        # the scales array mirrors the labels array, but holds float weights
        spec = self.spec[self.labels].copy()
        spec.dtype = np.float32
        self.provides(self.scales, spec)
        self.enable_autoskip()

    def prepare(self, request):
        # labels (and masks) are needed in the same ROI as the requested scales
        deps = BatchRequest()
        deps[self.labels] = request[self.scales]
        for mask in self.masks:
            deps[mask] = request[self.scales]
        return deps

    def process(self, batch, request):
        labels = batch.arrays[self.labels]

        assert len(np.unique(labels.data)) <= self.num_classes, (
            "Found more unique labels than classes in %s." % self.labels
        )
        assert 0 <= np.min(labels.data) < self.num_classes, (
            "Labels %s are not in [0, num_classes)." % self.labels
        )
        assert 0 <= np.max(labels.data) < self.num_classes, (
            "Labels %s are not in [0, num_classes)." % self.labels
        )

        # initialize error scale with 1s
        error_scale = np.ones(labels.data.shape, dtype=np.float32)

        # set error_scale to 0 in masked-out areas
        for key in self.masks:
            mask = batch.arrays[key]
            assert (
                labels.data.shape == mask.data.shape
            ), "Shape of mask %s %s does not match %s %s" % (
                mask,
                mask.data.shape,
                self.labels,
                labels.data.shape,
            )
            error_scale *= mask.data

        if not self.slab:
            slab = error_scale.shape
        else:
            # slab with -1 replaced by shape
            slab = tuple(
                m if s == -1 else s for m, s in zip(error_scale.shape, self.slab)
            )

        # iterate over the array in slabs and balance each slab independently
        # (error_scale is updated in place by __balance)
        slab_ranges = (range(0, m, s) for m, s in zip(error_scale.shape, slab))
        for start in itertools.product(*slab_ranges):
            slices = tuple(
                slice(start[d], start[d] + slab[d]) for d in range(len(slab))
            )
            self.__balance(labels.data[slices], error_scale[slices])

        spec = self.spec[self.scales].copy()
        spec.roi = labels.spec.roi
        outputs = Batch()
        outputs[self.scales] = Array(error_scale, spec)
        return outputs

    def __balance(self, labels, scale):
        # modifies `scale` in place: each masked-in voxel is multiplied by the
        # inverse (clipped) frequency of its class within this slab
        labels = labels.astype(np.int64)

        # in the masked-in area, compute the fraction of per-class samples
        masked_in = scale.sum()
        classes, counts = np.unique(labels[np.nonzero(scale)], return_counts=True)
        fracs = (
            counts.astype(float) / masked_in if masked_in > 0 else np.zeros(counts.size)
        )
        if self.clipmin is not None or self.clipmax is not None:
            # in-place clip to keep weights bounded for very rare/common classes
            np.clip(fracs, self.clipmin, self.clipmax, fracs)

        # compute the class weights
        w_sparse = 1.0 / float(self.num_classes) / fracs
        w = np.zeros(self.num_classes)
        w[classes] = w_sparse

        # scale the masked-in scale with the class weights
        scale *= np.take(w, labels)
| 5,890 | 31.016304 | 88 | py |
gunpowder | gunpowder-master/gunpowder/nodes/generic_train.py | import glob
import logging
import multiprocessing
import re
import time
from gunpowder.nodes.batch_filter import BatchFilter
from gunpowder.producer_pool import ProducerPool, WorkersDied, NoResult
from gunpowder.array import ArrayKey
from gunpowder.array_spec import ArraySpec
from gunpowder.batch_request import BatchRequest
logger = logging.getLogger(__name__)
class TrainProcessDied(Exception):
    """Raised when the spawned training subprocess terminated unexpectedly."""
class GenericTrain(BatchFilter):
    """Generic train node to perform one training iteration for each batch that
    passes through. This node alone does nothing and should be subclassed for
    concrete implementations.

    Args:

        inputs (dict): Dictionary from names of input layers in the network to
            :class:``ArrayKey`` or batch attribute name as string.

        outputs (dict): Dictionary from the names of output layers in the
            network to :class:``ArrayKey``. New arrays will be generated by
            this node for each entry (if requested downstream).

        gradients (dict): Dictionary from the names of output layers in the
            network to :class:``ArrayKey``. New arrays containing the
            gradient of an output with respect to the loss will be generated by
            this node for each entry (if requested downstream).

        array_specs (dict, optional): An optional dictionary of
            :class:`ArrayKey` to :class:`ArraySpec` to set the array specs
            generated arrays (``outputs`` and ``gradients``). This is useful
            to set the ``voxel_size``, for example, if they differ from the
            voxel size of the input arrays. Only fields that are not ``None``
            in the given :class:`ArraySpec` will be used.

        spawn_subprocess (bool, optional): Whether to run the ``train_step`` in
            a separate process. Default is false.
    """

    def __init__(
        self, inputs, outputs, gradients, array_specs=None, spawn_subprocess=False
    ):
        self.initialized = False

        self.inputs = inputs
        self.outputs = outputs
        self.gradients = gradients
        self.array_specs = {} if array_specs is None else array_specs
        self.spawn_subprocess = spawn_subprocess

        # arrays that this node generates itself (outputs and gradients)
        self.provided_arrays = list(self.outputs.values()) + list(
            self.gradients.values()
        )

    def setup(self):
        # get common voxel size of inputs, or None if they differ
        common_voxel_size = None
        for key in self.inputs.values():
            if not isinstance(key, ArrayKey):
                continue
            if self.spec[key].nonspatial:
                continue
            voxel_size = self.spec[key].voxel_size
            if common_voxel_size is None:
                common_voxel_size = voxel_size
            elif common_voxel_size != voxel_size:
                common_voxel_size = None
                break

        # announce provided outputs
        for key in self.provided_arrays:
            if key in self.array_specs:
                spec = self.array_specs[key].copy()
            else:
                spec = ArraySpec()

            if spec.voxel_size is None and not spec.nonspatial:
                assert common_voxel_size is not None, (
                    "There is no common voxel size of the inputs, and no "
                    "ArraySpec has been given for %s that defines "
                    "voxel_size." % key
                )
                spec.voxel_size = common_voxel_size

            if spec.interpolatable is None:
                # default for predictions
                spec.interpolatable = False

            self.provides(key, spec)

        if self.spawn_subprocess:
            # start training as a producer pool, so that we can gracefully exit if
            # anything goes wrong
            self.worker = ProducerPool([self.__produce_train_batch], queue_size=1)
            self.batch_in = multiprocessing.Queue(maxsize=1)
            self.worker.start()
        else:
            self.start()
            self.initialized = True

    def prepare(self, request):
        # depend on all declared network inputs
        deps = BatchRequest()
        for key in self.inputs.values():
            deps[key] = request[key]
        return deps

    def teardown(self):
        if self.spawn_subprocess:
            # signal "stop"
            self.batch_in.put((None, None))
            try:
                self.worker.get(timeout=2)
            except NoResult:
                pass
            self.worker.stop()
        else:
            self.stop()

    def process(self, batch, request):
        start = time.time()

        if self.spawn_subprocess:
            # hand the batch to the worker process and wait for the result
            self.batch_in.put((batch, request))
            try:
                out = self.worker.get()
            except WorkersDied:
                raise TrainProcessDied()

            # copy the requested generated arrays back into this batch
            for array_key in self.provided_arrays:
                if array_key in request:
                    batch.arrays[array_key] = out.arrays[array_key]

            batch.loss = out.loss
            batch.iteration = out.iteration
        else:
            self.train_step(batch, request)

        time_of_iteration = time.time() - start

        logger.info(
            "Train process: iteration=%d loss=%f time=%f",
            batch.iteration,
            batch.loss,
            time_of_iteration,
        )

    def start(self):
        """To be implemented in subclasses.

        This method will be called before the first call to :func:`train_step`,
        from the same process that :func:`train_step` will be called from. Use
        this to initialize your solver and training hardware.
        """
        pass

    def train_step(self, batch, request):
        """To be implemented in subclasses.

        In this method, an implementation should perform one training iteration
        on the given batch. ``batch.loss`` and ``batch.iteration`` should be
        set. Output arrays should be created according to the given request
        and added to ``batch``."""
        raise NotImplementedError(
            "Class %s does not implement 'train_step'" % self.name()
        )

    def stop(self):
        """To be implemented in subclasses.

        This method will be called after the last call to :func:`train_step`,
        from the same process that :func:`train_step` will be called from. Use
        this to tear down your solver and free training hardware.
        """
        pass

    def _checkpoint_name(self, basename, iteration):
        return basename + "_checkpoint_" + "%i" % iteration

    def _get_latest_checkpoint(self, basename):
        # natural sort so that e.g. checkpoint_10 sorts after checkpoint_9
        def atoi(text):
            return int(text) if text.isdigit() else text

        def natural_keys(text):
            return [atoi(c) for c in re.split(r"(\d+)", text)]

        checkpoints = glob.glob(basename + "_checkpoint_*")
        checkpoints.sort(key=natural_keys)

        if len(checkpoints) > 0:
            checkpoint = checkpoints[-1]
            iteration = int(checkpoint.split("_")[-1])
            return checkpoint, iteration

        return None, 0

    def __produce_train_batch(self):
        """Process one train batch (runs in the worker subprocess)."""

        # lazy initialization so that start() runs in the worker process
        if not self.initialized:
            self.start()
            self.initialized = True

        batch, request = self.batch_in.get()

        # stop signal
        if batch is None:
            self.stop()
            return None

        self.train_step(batch, request)

        return batch
| 7,446 | 31.662281 | 82 | py |
gunpowder | gunpowder-master/gunpowder/nodes/graph_source.py | import logging
import networkx as nx
import numpy as np
from gunpowder.batch import Batch
from gunpowder.graph import Graph
from gunpowder.graph_spec import GraphSpec
from gunpowder.nodes.batch_provider import BatchProvider
from gunpowder.profiling import Timing
logger = logging.getLogger(__name__)
class GraphSource(BatchProvider):
    """Creates a gunpowder graph source from a daisy graph provider.

    Queries for graphs from a given Roi will only return edges completely
    contained within the Roi - edges that cross the boundary will not be
    included.

    Arguments:

        graph_provider (:class:`daisy.SharedGraphProvider`):

            A daisy graph provider to read the graph from.
            Can be backed by MongoDB or any other implemented backend.

        graph (:class:`GraphKey`):

            The key of the graph to create

        graph_spec (:class:`GraphSpec`, optional):

            An optional :class:`GraphSpec` containing a roi and optionally
            whether the graph is directed. The default is to have an unbounded
            roi and detect directedness from the graph_provider.
    """

    def __init__(self, graph_provider, graph, graph_spec=None):
        self.graph_provider = graph_provider
        self.graph = graph
        self.graph_spec = graph_spec

    def setup(self):
        if self.graph_spec is not None:
            roi = self.graph_spec.roi
            if self.graph_spec.directed is not None:
                # a user-provided directedness must agree with the provider's
                assert self.graph_spec.directed == self.graph_provider.directed
        else:
            roi = None
        spec = GraphSpec(roi=roi, directed=self.graph_provider.directed)
        self.provides(self.graph, spec)

    def provide(self, request):
        timing = Timing(self)
        timing.start()

        batch = Batch()
        roi = request[self.graph].roi.copy()
        graph = GraphSource.create_gp_graph_from_daisy(self.graph_provider, roi)
        batch.graphs[self.graph] = graph

        timing.stop()
        batch.profiling_stats.add(timing)
        return batch

    @staticmethod
    def create_gp_graph_from_daisy(graph_provider, roi):
        """A static method to convert a daisy graph into a gunpowder graph.

        Only includes edges if both endpoints are within the roi.

        Arguments:

            graph_provider (:class:`daisy.SharedGraphProvider`)

                A daisy graph provider to read the graph from

            roi (:class:`Roi`):

                The roi in which to read the graph

        Returns:

            An instance of :class:`Graph` containing the nodes and edges read
            from the daisy graph provider in the given roi.
        """
        logger.debug("Creating gunpowder graph from daisy graph provider")
        daisy_graph = graph_provider[roi]
        logger.debug("%d nodes found in roi %s", len(daisy_graph), roi)

        spec = GraphSpec(roi=roi, directed=daisy_graph.is_directed())

        # hoisted out of the loop: the position attribute is a property of the
        # provider and does not change per node
        position_attribute = graph_provider.position_attribute

        # nodes without the position attribute cannot be placed in space and
        # are removed below
        dangling_nodes = []
        for node, data in daisy_graph.nodes(data=True):
            # isinstance instead of type(...) == list: idiomatic, and also
            # accepts list subclasses
            if isinstance(position_attribute, list):
                # one attribute per coordinate dimension
                if position_attribute[0] not in data:
                    dangling_nodes.append(node)
                    continue
                location = np.array(
                    [data[attr] for attr in position_attribute], dtype=np.float32
                )
            else:
                # a single attribute holding the full coordinate
                if position_attribute not in data:
                    dangling_nodes.append(node)
                    continue
                location = np.array(data[position_attribute], dtype=np.float32)
            data["location"] = location
            data["id"] = node

        logger.debug("Dangling nodes: %s", dangling_nodes)
        for n in dangling_nodes:
            daisy_graph.remove_node(n)

        # convert to a plain networkx graph before handing off to gunpowder
        if daisy_graph.is_directed():
            pure_nx_graph = nx.DiGraph()
        else:
            pure_nx_graph = nx.Graph()

        pure_nx_graph.update(daisy_graph)
        return Graph.from_nx_graph(pure_nx_graph, spec)
| 4,059 | 34.929204 | 81 | py |
gunpowder | gunpowder-master/gunpowder/nodes/random_provider.py | import copy
import numpy as np
from gunpowder.array import Array
from gunpowder.array_spec import ArraySpec
from .batch_provider import BatchProvider
class RandomProvider(BatchProvider):
    """Relays each request to one upstream provider, picked at random::

        (a + b + c) + RandomProvider()

    builds a provider that forwards every request to ``a``, ``b``, or ``c``,
    chosen randomly per request. Array and point keys of ``a``, ``b``, and
    ``c`` should be the same.

    Args:

        probabilities (1-D array-like, optional):

            Selection weights for the upstream providers, given in the same
            order in which they were added. They do not need to be normalized.
            Default is ``None``, corresponding to equal probabilities.

        random_provider_key (``ArrayKey``):

            If provided, this node will store the index of the chosen random
            provider in a nonspatial array.
    """

    def __init__(self, probabilities=None, random_provider_key=None):
        self.probabilities = probabilities
        self.random_provider_key = random_provider_key

        # normalize the weights so that they sum to 1
        if self.probabilities is not None:
            total = np.sum(probabilities)
            self.probabilities = [float(p) / total for p in self.probabilities]

    def setup(self):
        self.enable_placeholders()

        upstream = self.get_upstream_providers()
        assert (
            len(upstream) > 0
        ), "at least one batch provider must be added to the RandomProvider"
        if self.probabilities is not None:
            assert len(upstream) == len(self.probabilities), (
                "if probabilities are specified, they "
                "need to be given for each batch "
                "provider added to the RandomProvider"
            )

        # advertise only the keys that every upstream provider offers
        common_spec = None
        for provider in upstream:
            if common_spec is None:
                common_spec = copy.deepcopy(provider.spec)
                continue
            missing = [
                key for key, _ in list(common_spec.items()) if key not in provider.spec
            ]
            for key in missing:
                del common_spec[key]

        for key, spec in common_spec.items():
            self.provides(key, spec)

        if self.random_provider_key is not None:
            self.provides(self.random_provider_key, ArraySpec(nonspatial=True))

    def provide(self, request):
        # the random seed is set in provide rather than prepare, since this
        # node is a provider and not a batch filter
        np.random.seed(request.random_seed)

        if self.random_provider_key is not None:
            del request[self.random_provider_key]

        index = np.random.choice(
            range(len(self.get_upstream_providers())), p=self.probabilities
        )
        chosen = self.get_upstream_providers()[index]
        batch = chosen.request_batch(request)

        if self.random_provider_key is not None:
            batch[self.random_provider_key] = Array(
                np.array(index), ArraySpec(nonspatial=True)
            )

        return batch
| 3,171 | 34.244444 | 83 | py |
gunpowder | gunpowder-master/gunpowder/nodes/random_location.py | import math
import logging
from random import random, randint, choices, seed
import itertools
import numpy as np
from scipy.spatial import cKDTree
from skimage.transform import integral_image, integrate
from gunpowder.batch_request import BatchRequest
from gunpowder.coordinate import Coordinate
from gunpowder.roi import Roi
from gunpowder.array import Array
from gunpowder.array_spec import ArraySpec
from .batch_filter import BatchFilter
logger = logging.getLogger(__name__)
class RandomLocation(BatchFilter):
"""Choses a batch at a random location in the bounding box of the upstream
provider.
The random location is chosen such that the batch request ROI lies entirely
inside the provider's ROI.
If ``min_masked`` and ``mask`` are set, only batches are returned that have
at least the given ratio of masked-in voxels. This is in general faster
than using the :class:`Reject` node, at the expense of storing an integral
array of the complete mask.
If ``ensure_nonempty`` is set to a :class:`GraphKey`, only batches are
returned that have at least one point of this point collection within the
requested ROI.
Additional tests for randomly picked locations can be implemented by
subclassing and overwriting of :func:`accepts`. This method takes the
randomly shifted request that meets all previous criteria (like
``min_masked`` and ``ensure_nonempty``) and should return ``True`` if the
request is acceptable.
Args:
min_masked (``float``, optional):
If non-zero, require that the random sample contains at least that
ratio of masked-in voxels.
mask (:class:`ArrayKey`, optional):
The array to use for mask checks.
ensure_nonempty (:class:`GraphKey`, optional):
Ensures that when finding a random location, a request for
``ensure_nonempty`` will contain at least one point.
p_nonempty (``float``, optional):
If ``ensure_nonempty`` is set, it defines the probability that a
request for ``ensure_nonempty`` will contain at least one point.
Default value is 1.0.
ensure_centered (``bool``, optional):
if ``ensure_nonempty`` is set, ``ensure_centered`` guarantees
that the center voxel of the roi contains a point.
point_balance_radius (``int``):
if ``ensure_nonempty`` is set, ``point_balance_radius`` defines
a radius s.t. for every point `p` in ``ensure_nonempty``, the
probability of picking p is inversely related to the number of
other points within a distance of ``point_balance_radius`` to p.
This helps avoid oversampling of dense regions of the graph, and
undersampling of sparse regions.
random_shift_key (``ArrayKey`` optional):
if ``random_shift_key`` is not None, this node will populate
that key with a nonspatial array containing the random shift
used for each request. This can be useful for snapshot iterations
if you want to figure out where that snapshot came from.
"""
def __init__(
self,
min_masked=0,
mask=None,
ensure_nonempty=None,
p_nonempty=1.0,
ensure_centered=None,
point_balance_radius=1,
random_shift_key=None,
):
self.min_masked = min_masked
self.mask = mask
self.mask_spec = None
self.mask_integral = None
self.ensure_nonempty = ensure_nonempty
self.points = None
self.p_nonempty = p_nonempty
self.upstream_spec = None
self.random_shift = None
self.ensure_centered = ensure_centered
self.point_balance_radius = point_balance_radius
self.random_shift_key = random_shift_key
    def setup(self):
        """Query upstream once and cache everything needed for fast rejection
        sampling: an integral image of the mask (if ``min_masked`` is used)
        and a kd-tree over all points (if ``ensure_nonempty`` is used). Also
        removes all ROI limits from the provided specs, since this node can
        shift requests anywhere within the upstream ROIs."""
        upstream = self.get_upstream_provider()
        self.upstream_spec = upstream.spec
        if self.mask and self.min_masked > 0:
            assert self.mask in self.upstream_spec, (
                "Upstream provider does not have %s" % self.mask
            )
            self.mask_spec = self.upstream_spec.array_specs[self.mask]
            logger.info("requesting complete mask...")
            mask_request = BatchRequest({self.mask: self.mask_spec})
            mask_batch = upstream.request_batch(mask_request)
            logger.info("allocating mask integral array...")
            mask_data = mask_batch.arrays[self.mask].data
            # pick the smallest unsigned dtype that can hold the largest
            # possible integral value (i.e., the total number of voxels)
            mask_integral_dtype = np.uint64
            logger.debug("mask size is %s", mask_data.size)
            if mask_data.size < 2**32:
                mask_integral_dtype = np.uint32
            if mask_data.size < 2**16:
                mask_integral_dtype = np.uint16
            logger.debug("chose %s as integral array dtype", mask_integral_dtype)
            # the integral image allows O(1) evaluation of the masked-in
            # ratio of any candidate ROI later (see __is_min_masked)
            self.mask_integral = np.array(mask_data > 0, dtype=mask_integral_dtype)
            self.mask_integral = integral_image(self.mask_integral).astype(
                mask_integral_dtype
            )
        if self.ensure_nonempty:
            assert self.ensure_nonempty in self.upstream_spec, (
                "Upstream provider does not have %s" % self.ensure_nonempty
            )
            graph_spec = self.upstream_spec.graph_specs[self.ensure_nonempty]
            logger.info("requesting all %s points...", self.ensure_nonempty)
            nonempty_request = BatchRequest({self.ensure_nonempty: graph_spec})
            nonempty_batch = upstream.request_batch(nonempty_request)
            # kd-tree over all point locations for fast spatial queries
            self.points = cKDTree(
                [p.location for p in nonempty_batch[self.ensure_nonempty].nodes]
            )
            # weight each point inversely by its number of neighbors within
            # point_balance_radius, so dense clusters are not oversampled
            point_counts = self.points.query_ball_point(
                [p.location for p in nonempty_batch[self.ensure_nonempty].nodes],
                r=self.point_balance_radius,
            )
            weights = [1 / len(point_count) for point_count in point_counts]
            self.cumulative_weights = list(itertools.accumulate(weights))
            logger.debug("retrieved %d points", len(self.points.data))
        # clear bounding boxes of all provided arrays and points --
        # RandomLocation does not have limits (offsets are ignored)
        for key, spec in self.spec.items():
            if spec.roi is not None:
                spec.roi.shape = Coordinate((None,) * spec.roi.dims)
                self.updates(key, spec)
        # provide randomness if asked for
        if self.random_shift_key is not None:
            self.provides(self.random_shift_key, ArraySpec(nonspatial=True))
    def prepare(self, request):
        """Choose a random shift for this request and apply it in-place, so
        that upstream providers see the shifted ROIs."""
        # make the choice reproducible per request
        seed(request.random_seed)
        logger.debug("request: %s", request.array_specs)
        logger.debug("my spec: %s", self.spec)
        # find the least common multiple of all requested voxel sizes, so
        # that a shift keeps every array aligned with its voxel grid
        if request.array_specs.keys():
            lcm_voxel_size = self.spec.get_lcm_voxel_size(request.array_specs.keys())
        else:
            lcm_voxel_size = Coordinate((1,) * request.get_total_roi().dims)
        shift_roi = self.__get_possible_shifts(request, lcm_voxel_size)
        if request.array_specs.keys():
            # restrict candidate shifts to multiples of the lcm voxel size
            shift_roi = shift_roi.snap_to_grid(lcm_voxel_size, mode="shrink")
            lcm_shift_roi = shift_roi / lcm_voxel_size
            logger.debug(
                "restricting random locations to multiples of voxel size %s",
                lcm_voxel_size,
            )
        else:
            lcm_shift_roi = shift_roi
        assert not lcm_shift_roi.unbounded, (
            "Can not pick a random location, intersection of upstream ROIs is "
            "unbounded."
        )
        assert not lcm_shift_roi.empty, (
            "Can not satisfy batch request, no location covers all requested " "ROIs."
        )
        random_shift = self.__select_random_shift(
            request, lcm_shift_roi, lcm_voxel_size
        )
        # remember the shift so process() can undo it on the batch
        self.random_shift = random_shift
        self.__shift_request(request, random_shift)
        return request
def process(self, batch, request):
if self.random_shift_key is not None:
batch[self.random_shift_key] = Array(
np.array(self.random_shift),
ArraySpec(nonspatial=True),
)
# reset ROIs to request
for array_key, spec in request.array_specs.items():
batch.arrays[array_key].spec.roi = spec.roi
for graph_key, spec in request.graph_specs.items():
batch.graphs[graph_key].spec.roi = spec.roi
# change shift point locations to lie within roi
for graph_key in request.graph_specs.keys():
batch.graphs[graph_key].shift(-self.random_shift)
def accepts(self, request):
"""Should return True if the randomly chosen location is acceptable
(besided meeting other criteria like ``min_masked`` and/or
``ensure_nonempty``). Subclasses can overwrite this method to implement
additional tests for acceptable locations."""
return True
    def __get_possible_shifts(self, request, voxel_size):
        """Compute the ROI (in world units) of all shifts for which every
        requested ROI stays inside its upstream-provided ROI."""
        total_shift_roi = None
        for key, spec in request.items():
            if spec.roi is None:
                continue
            request_roi = spec.roi
            provided_roi = self.upstream_spec[key].roi
            # all valid shifts for this key: start where provided begins
            # relative to the request, shrink the far end by the request
            # size minus one voxel
            shift_roi = provided_roi.shift(-request_roi.begin).grow(
                (0,) * request_roi.dims, -(request_roi.shape - voxel_size)
            )
            if total_shift_roi is None:
                total_shift_roi = shift_roi
            else:
                # intersect with the constraints of the other keys
                if shift_roi != total_shift_roi:
                    total_shift_roi = total_shift_roi.intersect(shift_roi)
        logger.debug("valid shifts for request in " + str(total_shift_roi))
        return total_shift_roi
    def __select_random_shift(self, request, lcm_shift_roi, lcm_voxel_size):
        """Rejection-sample a shift until it passes the ``min_masked`` check
        and the user-overridable ``accepts`` test."""
        # with probability p_nonempty, force the shift to contain a point
        ensure_points = self.ensure_nonempty is not None and random() <= self.p_nonempty
        while True:
            if ensure_points:
                random_shift = self.__select_random_location_with_points(
                    request, lcm_shift_roi, lcm_voxel_size
                )
            else:
                random_shift = self.__select_random_location(
                    lcm_shift_roi, lcm_voxel_size
                )
            logger.debug("random shift: " + str(random_shift))
            if not self.__is_min_masked(random_shift, request):
                logger.debug("random location does not meet 'min_masked' criterium")
                continue
            if not self.__accepts(random_shift, request):
                logger.debug("random location does not meet user-provided criterium")
                continue
            return random_shift
    def __is_min_masked(self, random_shift, request):
        """Check whether the shifted mask ROI contains at least
        ``min_masked`` ratio of masked-in voxels, using the integral image
        computed in setup() for O(1) evaluation."""
        if not self.mask or self.min_masked == 0:
            return True
        # get randomly chosen mask ROI
        request_mask_roi = request.array_specs[self.mask].roi
        request_mask_roi = request_mask_roi.shift(random_shift)
        # get coordinates inside mask array
        mask_voxel_size = self.spec[self.mask].voxel_size
        request_mask_roi_in_array = request_mask_roi / mask_voxel_size
        request_mask_roi_in_array -= self.mask_spec.roi.offset / mask_voxel_size
        # get number of masked-in voxels
        num_masked_in = integrate(
            self.mask_integral,
            [request_mask_roi_in_array.begin],
            [
                request_mask_roi_in_array.end
                - Coordinate((1,) * self.mask_integral.ndim)
            ],
        )[0]
        mask_ratio = float(num_masked_in) / request_mask_roi_in_array.size
        logger.debug("mask ratio is %f", mask_ratio)
        return mask_ratio >= self.min_masked
def __accepts(self, random_shift, request):
# create a shifted copy of the request
shifted_request = request.copy()
self.__shift_request(shifted_request, random_shift)
return self.accepts(shifted_request)
def __shift_request(self, request, shift):
# shift request ROIs
for specs_type in [request.array_specs, request.graph_specs]:
for key, spec in specs_type.items():
if spec.roi is None:
continue
roi = spec.roi.shift(shift)
specs_type[key].roi = roi
def __select_random_location_with_points(
self, request, lcm_shift_roi, lcm_voxel_size
):
request_points = request.graph_specs.get(self.ensure_nonempty)
if request_points is None:
total_roi = request.get_total_roi()
logger.warning(
f"Requesting non empty {self.ensure_nonempty}, however {self.ensure_nonempty} "
f"has not been requested. Falling back on using the total roi of the "
f"request {total_roi} for {self.ensure_nonempty}."
)
request_points_roi = total_roi
else:
request_points_roi = request_points.roi
while True:
# How to pick shifts that ensure that a randomly chosen point is
# contained in the request ROI:
#
#
# request point
# [---------) .
# 0 +10 17
#
# least shifted to contain point
# [---------)
# 8 +10
# ==
# point-request.begin-request.shape+1
#
# most shifted to contain point:
# [---------)
# 17 +10
# ==
# point-request.begin
#
# all possible shifts
# [---------)
# 8 +10
# ==
# point-request.begin-request.shape+1
# ==
# request.shape
# pick a random point
point = choices(self.points.data, cum_weights=self.cumulative_weights)[0]
logger.debug("select random point at %s", point)
# get the lcm voxel that contains this point
lcm_location = Coordinate(point / lcm_voxel_size)
logger.debug("belongs to lcm voxel %s", lcm_location)
# align the point request ROI with lcm voxel grid
lcm_roi = request_points_roi.snap_to_grid(
lcm_voxel_size,
mode="shrink")
lcm_roi = lcm_roi / lcm_voxel_size
logger.debug("Point request ROI: %s", request_points_roi)
logger.debug("Point request lcm ROI shape: %s", lcm_roi.shape)
# get all possible starting points of lcm_roi.shape that contain
# lcm_location
if self.ensure_centered:
lcm_shift_roi_begin = (
lcm_location
- lcm_roi.begin
- lcm_roi.shape / 2
+ Coordinate((1,) * len(lcm_location))
)
lcm_shift_roi_shape = Coordinate((1,) * len(lcm_location))
else:
lcm_shift_roi_begin = (
lcm_location
- lcm_roi.begin
- lcm_roi.shape
+ Coordinate((1,) * len(lcm_location))
)
lcm_shift_roi_shape = lcm_roi.shape
lcm_point_shift_roi = Roi(lcm_shift_roi_begin, lcm_shift_roi_shape)
logger.debug("lcm point shift roi: %s", lcm_point_shift_roi)
# intersect with total shift ROI
if not lcm_point_shift_roi.intersects(lcm_shift_roi):
logger.debug(
"reject random shift, random point %s shift ROI %s does "
"not intersect total shift ROI %s",
point,
lcm_point_shift_roi,
lcm_shift_roi,
)
continue
lcm_point_shift_roi = lcm_point_shift_roi.intersect(lcm_shift_roi)
# select a random shift from all possible shifts
random_shift = self.__select_random_location(
lcm_point_shift_roi, lcm_voxel_size
)
logger.debug("random shift: %s", random_shift)
# count all points inside the shifted ROI
points = self.__get_points_in_roi(request_points_roi.shift(random_shift))
assert (
point in points
), "Requested batch to contain point %s, but got points " "%s" % (
point,
points,
)
num_points = len(points)
return random_shift
def __select_random_location(self, lcm_shift_roi, lcm_voxel_size):
# select a random point inside ROI
random_shift = Coordinate(
randint(begin, end - 1)
for begin, end in zip(lcm_shift_roi.begin, lcm_shift_roi.end)
)
random_shift *= lcm_voxel_size
return random_shift
def __get_points_in_roi(self, roi):
points = []
center = roi.center
radius = math.ceil(float(max(roi.shape)) / 2)
candidates = self.points.query_ball_point(center, radius, p=np.inf)
for i in candidates:
if roi.contains(self.points.data[i]):
points.append(self.points.data[i])
return np.array(points)
| 17,580 | 36.486141 | 95 | py |
gunpowder | gunpowder-master/gunpowder/nodes/defect_augment.py | import logging
import random
import numpy as np
# imports for deformed slice
from skimage.draw import line
from scipy.ndimage.measurements import label
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.morphology import binary_dilation
from gunpowder.batch_request import BatchRequest
from gunpowder.coordinate import Coordinate
from .batch_filter import BatchFilter
logger = logging.getLogger(__name__)
class DefectAugment(BatchFilter):
    """Augment intensity arrays section-wise with artifacts like missing
    sections, low-contrast sections, by blending in artifacts drawn from a
    separate source, or by deforming a section.

    Args:

        intensities (:class:`ArrayKey`):

            The key of the array of intensities to modify.

        prob_missing(``float``):
        prob_low_contrast(``float``):
        prob_artifact(``float``):
        prob_deform(``float``):

            Probabilities of having a missing section, low-contrast section, an
            artifact (see param ``artifact_source``) or a deformed slice. The
            sum should not exceed 1. Values in missing sections will be set to
            0.

        contrast_scale (``float``, optional):

            By how much to scale the intensities for a low-contrast section,
            used if ``prob_low_contrast`` > 0.

        artifact_source (class:`BatchProvider`, optional):

            A gunpowder batch provider that delivers intensities (via
            :class:`ArrayKey` ``artifacts``) and an alpha mask (via
            :class:`ArrayKey` ``artifacts_mask``), used if ``prob_artifact`` > 0.

        artifacts(:class:`ArrayKey`, optional):

            The key to query ``artifact_source`` for to get the intensities
            of the artifacts.

        artifacts_mask(:class:`ArrayKey`, optional):

            The key to query ``artifact_source`` for to get the alpha mask
            of the artifacts to blend them with ``intensities``.

        deformation_strength (``int``, optional):

            Strength of the slice deformation in voxels, used if
            ``prob_deform`` > 0. The deformation models a fold by shifting the
            section contents towards a randomly oriented line in the section.
            The line itself will be drawn with a value of 0.

        axis (``int``, optional):

            Along which axis sections are cut.
    """

    def __init__(
        self,
        intensities,
        prob_missing=0.05,
        prob_low_contrast=0.05,
        prob_artifact=0.0,
        prob_deform=0.0,
        contrast_scale=0.1,
        artifact_source=None,
        artifacts=None,
        artifacts_mask=None,
        deformation_strength=20,
        axis=0,
    ):
        self.intensities = intensities
        self.prob_missing = prob_missing
        self.prob_low_contrast = prob_low_contrast
        self.prob_artifact = prob_artifact
        self.prob_deform = prob_deform
        self.contrast_scale = contrast_scale
        self.artifact_source = artifact_source
        self.artifacts = artifacts
        self.artifacts_mask = artifacts_mask
        self.deformation_strength = deformation_strength
        self.axis = axis

    def setup(self):
        if self.artifact_source is not None:
            self.artifact_source.setup()

    def teardown(self):
        if self.artifact_source is not None:
            self.artifact_source.teardown()

    # send roi request to data-source upstream
    def prepare(self, request):
        """Decide per section which augmentation (if any) to apply, and grow
        the upstream request when a deformed slice needs extra context.

        Returns the :class:`BatchRequest` of upstream dependencies."""
        random.seed(request.random_seed)
        deps = BatchRequest()

        # we prepare the augmentations, by determining which slices
        # will be augmented by which method
        # If one of the slices is augmented with 'deform',
        # we prepare these trafos already
        # and request a bigger roi from upstream

        # cumulative probability thresholds: a uniform sample in [0, 1) is
        # tested against these in increasing order
        prob_missing_threshold = self.prob_missing
        prob_low_contrast_threshold = prob_missing_threshold + self.prob_low_contrast
        prob_artifact_threshold = prob_low_contrast_threshold + self.prob_artifact
        prob_deform_slice = prob_artifact_threshold + self.prob_deform

        spec = request[self.intensities].copy()
        roi = spec.roi
        logger.debug("downstream request ROI is %s" % roi)
        raw_voxel_size = self.spec[self.intensities].voxel_size

        # store the mapping slice to augmentation type in a dict
        self.slice_to_augmentation = {}
        # store the transformations for deform slice
        self.deform_slice_transformations = {}
        for c in range((roi / raw_voxel_size).shape[self.axis]):
            r = random.random()

            if r < prob_missing_threshold:
                logger.debug("Zero-out " + str(c))
                self.slice_to_augmentation[c] = "zero_out"

            elif r < prob_low_contrast_threshold:
                logger.debug("Lower contrast " + str(c))
                self.slice_to_augmentation[c] = "lower_contrast"

            elif r < prob_artifact_threshold:
                logger.debug("Add artifact " + str(c))
                self.slice_to_augmentation[c] = "artifact"

            elif r < prob_deform_slice:
                logger.debug("Add deformed slice " + str(c))
                self.slice_to_augmentation[c] = "deformed_slice"
                # get the shape of a single slice
                slice_shape = (roi / raw_voxel_size).shape
                slice_shape = slice_shape[: self.axis] + slice_shape[self.axis + 1 :]
                self.deform_slice_transformations[c] = self.__prepare_deform_slice(
                    slice_shape
                )

        # prepare transformation and
        # request bigger upstream roi for deformed slice
        if "deformed_slice" in self.slice_to_augmentation.values():
            # create roi sufficiently large to feed deformation
            logger.debug("before growth: %s" % spec.roi)
            growth = Coordinate(
                tuple(
                    0
                    if d == self.axis
                    else raw_voxel_size[d] * self.deformation_strength
                    for d in range(spec.roi.dims)
                )
            )
            logger.debug("growing request by %s" % str(growth))
            source_roi = roi.grow(growth, growth)

            # update request ROI to get all voxels necessary to perfrom
            # transformation
            spec.roi = source_roi
            logger.debug("upstream request roi is %s" % spec.roi)

        deps[self.intensities] = spec
        # FIX: the dependencies were built but never returned; without the
        # return, BatchFilter falls back to deprecated implicit behavior
        return deps

    def process(self, batch, request):
        """Apply the augmentations chosen in prepare() to each section, then
        crop the array back to the originally requested ROI if a deformation
        required extra context."""
        assert batch.get_total_roi().dims == 3, "defectaugment works on 3d batches only"

        raw = batch.arrays[self.intensities]
        raw_voxel_size = self.spec[self.intensities].voxel_size

        for c, augmentation_type in self.slice_to_augmentation.items():
            # selects the section c along self.axis, all voxels otherwise
            section_selector = tuple(
                slice(None if d != self.axis else c, None if d != self.axis else c + 1)
                for d in range(raw.spec.roi.dims)
            )

            if augmentation_type == "zero_out":
                raw.data[section_selector] = 0

            # FIX: must match the key written by prepare()
            # ("lower_contrast"); the former "low_contrast" comparison could
            # never be true, silently disabling this augmentation
            elif augmentation_type == "lower_contrast":
                section = raw.data[section_selector]

                # scale intensities towards the section mean
                mean = section.mean()
                section -= mean
                section *= self.contrast_scale
                section += mean

                raw.data[section_selector] = section

            elif augmentation_type == "artifact":
                section = raw.data[section_selector]
                alpha_voxel_size = self.artifact_source.spec[
                    self.artifacts_mask
                ].voxel_size

                assert raw_voxel_size == alpha_voxel_size, (
                    "Can only alpha blend RAW with "
                    "ALPHA_MASK if both have the same "
                    "voxel size"
                )

                artifact_request = BatchRequest()
                artifact_request.add(
                    self.artifacts,
                    Coordinate(section.shape) * raw_voxel_size,
                    voxel_size=raw_voxel_size,
                )
                artifact_request.add(
                    self.artifacts_mask,
                    Coordinate(section.shape) * alpha_voxel_size,
                    voxel_size=raw_voxel_size,
                )
                logger.debug("Requesting artifact batch %s", artifact_request)

                artifact_batch = self.artifact_source.request_batch(artifact_request)
                artifact_alpha = artifact_batch.arrays[self.artifacts_mask].data
                artifact_raw = artifact_batch.arrays[self.artifacts].data

                assert artifact_alpha.dtype == np.float32
                assert artifact_alpha.min() >= 0.0
                assert artifact_alpha.max() <= 1.0

                # alpha-blend the artifact into the section
                raw.data[section_selector] = (
                    section * (1.0 - artifact_alpha) + artifact_raw * artifact_alpha
                )

            elif augmentation_type == "deformed_slice":
                section = raw.data[section_selector].squeeze()

                # set interpolation to cubic, spec interploatable is true, else to 0
                interpolation = 3 if self.spec[self.intensities].interpolatable else 0

                # load the deformation fields that were prepared for this slice
                flow_x, flow_y, line_mask = self.deform_slice_transformations[c]

                # apply the deformation fields
                shape = section.shape
                section = map_coordinates(
                    section, (flow_y, flow_x), mode="constant", order=interpolation
                ).reshape(shape)

                # things can get smaller than 0 at the boundary, so we clip
                section = np.clip(section, 0.0, 1.0)

                # zero-out data below the line mask
                section[line_mask] = 0.0

                raw.data[section_selector] = section

        # in case we needed to change the ROI due to a deformation augment,
        # restore original ROI and crop the array data
        if "deformed_slice" in self.slice_to_augmentation.values():
            old_roi = request[self.intensities].roi
            logger.debug("resetting roi to %s" % old_roi)
            crop = tuple(
                slice(None)
                if d == self.axis
                else slice(self.deformation_strength, -self.deformation_strength)
                for d in range(raw.spec.roi.dims)
            )
            raw.data = raw.data[crop]
            raw.spec.roi = old_roi

    def __prepare_deform_slice(self, slice_shape):
        """Build the deformation (flow fields and line mask) for one slice of
        shape ``slice_shape``, modelling a fold along a random line."""
        # grow slice shape by 2 x deformation strength
        grow_by = 2 * self.deformation_strength
        shape = (slice_shape[0] + grow_by, slice_shape[1] + grow_by)

        # randomly choose fixed x or fixed y with p = 1/2
        fixed_x = random.random() < 0.5
        if fixed_x:
            x0, y0 = 0, np.random.randint(1, shape[1] - 2)
            x1, y1 = shape[0] - 1, np.random.randint(1, shape[1] - 2)
        else:
            x0, y0 = np.random.randint(1, shape[0] - 2), 0
            x1, y1 = np.random.randint(1, shape[0] - 2), shape[1] - 1

        ## generate the mask of the line that should be blacked out
        line_mask = np.zeros(shape, dtype="bool")
        rr, cc = line(x0, y0, x1, y1)
        line_mask[rr, cc] = 1

        # generate vectorfield pointing towards the line to compress the image
        # first we get the unit vector representing the line
        line_vector = np.array([x1 - x0, y1 - y0], dtype="float32")
        line_vector /= np.linalg.norm(line_vector)
        # next, we generate the normal to the line
        normal_vector = np.zeros_like(line_vector)
        normal_vector[0] = -line_vector[1]
        normal_vector[1] = line_vector[0]

        # make meshgrid
        x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
        # generate the vector field
        flow_x, flow_y = np.zeros(shape), np.zeros(shape)

        # find the 2 components where coordinates are bigger / smaller than the line
        # to apply normal vector in the correct direction
        components, n_components = label(np.logical_not(line_mask).view("uint8"))
        assert n_components == 2, "%i" % n_components
        neg_val = components[0, 0] if fixed_x else components[-1, -1]
        pos_val = components[-1, -1] if fixed_x else components[0, 0]

        flow_x[components == pos_val] = self.deformation_strength * normal_vector[1]
        flow_y[components == pos_val] = self.deformation_strength * normal_vector[0]
        flow_x[components == neg_val] = -self.deformation_strength * normal_vector[1]
        flow_y[components == neg_val] = -self.deformation_strength * normal_vector[0]

        # generate the flow fields
        flow_x, flow_y = (x + flow_x).reshape(-1, 1), (y + flow_y).reshape(-1, 1)

        # dilate the line mask
        line_mask = binary_dilation(line_mask, iterations=10)

        return flow_x, flow_y, line_mask
| 13,030 | 38.36858 | 88 | py |
gunpowder | gunpowder-master/gunpowder/nodes/hdf5like_source_base.py | from gunpowder.batch import Batch
from gunpowder.coordinate import Coordinate
from gunpowder.profiling import Timing
from gunpowder.roi import Roi
from gunpowder.array import Array
from gunpowder.array_spec import ArraySpec
from .batch_provider import BatchProvider
import logging
import numpy as np
import warnings
logger = logging.getLogger(__name__)
class Hdf5LikeSource(BatchProvider):
    """An HDF5-like data source.

    Provides arrays from datasets accessed with an h5py-like API for each array
    key given. If the attribute ``resolution`` is set in a dataset, it will be
    used as the array's ``voxel_size``. If the attribute ``offset`` is set in a
    dataset, it will be used as the offset of the :class:`Roi` for this array.
    It is assumed that the offset is given in world units.

    Args:

        filename (``string``):

            The input file.

        datasets (``dict``, :class:`ArrayKey` -> ``string``):

            Dictionary of array keys to dataset names that this source offers.

        array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`, optional):

            An optional dictionary of array keys to array specs to overwrite
            the array specs automatically determined from the data file. This
            is useful to set a missing ``voxel_size``, for example. Only fields
            that are not ``None`` in the given :class:`ArraySpec` will be used.

        channels_first (``bool``, optional):

            Specifies the ordering of the dimensions of the HDF5-like data source.
            If channels_first is set (default), then the input shape is expected
            to be (channels, spatial dimensions). This is recommended due to
            better performance. If channels_first is set to false, then the input
            data is read in channels_last manner and converted to channels_first.
    """

    def __init__(self, filename, datasets, array_specs=None, channels_first=True):
        warnings.warn(
            "HDF5LikeSource is depricated and will soon be removed in v2.0",
            DeprecationWarning,
        )
        self.filename = filename
        self.datasets = datasets

        if array_specs is None:
            self.array_specs = {}
        else:
            self.array_specs = array_specs

        self.channels_first = channels_first

        # number of spatial dimensions
        self.ndims = None

    def _open_file(self, filename):
        raise NotImplementedError("Only implemented in subclasses")

    def setup(self):
        """Derive and announce an :class:`ArraySpec` for every dataset."""
        with self._open_file(self.filename) as data_file:
            for array_key, ds_name in self.datasets.items():
                if ds_name not in data_file:
                    raise RuntimeError("%s not in %s" % (ds_name, self.filename))

                spec = self.__read_spec(array_key, data_file, ds_name)

                self.provides(array_key, spec)

    def provide(self, request):
        """Read the requested ROI of every requested array from the file."""
        timing = Timing(self)
        timing.start()

        batch = Batch()

        with self._open_file(self.filename) as data_file:
            for array_key, request_spec in request.array_specs.items():
                logger.debug("Reading %s in %s...", array_key, request_spec.roi)

                voxel_size = self.spec[array_key].voxel_size

                # scale request roi to voxel units
                dataset_roi = request_spec.roi / voxel_size

                # shift request roi into dataset
                dataset_roi = dataset_roi - self.spec[array_key].roi.offset / voxel_size

                # create array spec
                array_spec = self.spec[array_key].copy()
                array_spec.roi = request_spec.roi

                # add array to batch
                batch.arrays[array_key] = Array(
                    self.__read(data_file, self.datasets[array_key], dataset_roi),
                    array_spec,
                )

        logger.debug("done")

        timing.stop()
        batch.profiling_stats.add(timing)

        return batch

    def _get_voxel_size(self, dataset):
        try:
            return Coordinate(dataset.attrs["resolution"])
        except Exception:  # todo: make specific when z5py supports it
            return None

    def _get_offset(self, dataset):
        try:
            return Coordinate(dataset.attrs["offset"])
        except Exception:  # todo: make specific when z5py supports it
            return None

    def __read_spec(self, array_key, data_file, ds_name):
        """Build the :class:`ArraySpec` for one dataset, filling in any
        fields not provided by the user from the dataset's metadata."""
        dataset = data_file[ds_name]

        if array_key in self.array_specs:
            spec = self.array_specs[array_key].copy()
        else:
            spec = ArraySpec()

        if spec.voxel_size is None:
            voxel_size = self._get_voxel_size(dataset)
            if voxel_size is None:
                voxel_size = Coordinate((1,) * len(dataset.shape))
                # FIX: log the voxel size that is actually used; the
                # previous code logged spec.voxel_size, which is still None
                # at this point
                logger.warning(
                    "WARNING: File %s does not contain resolution information "
                    "for %s (dataset %s), voxel size has been set to %s. This "
                    "might not be what you want.",
                    self.filename,
                    array_key,
                    ds_name,
                    voxel_size,
                )
            spec.voxel_size = voxel_size

        self.ndims = len(spec.voxel_size)

        if spec.roi is None:
            offset = self._get_offset(dataset)
            if offset is None:
                offset = Coordinate((0,) * self.ndims)

            if self.channels_first:
                shape = Coordinate(dataset.shape[-self.ndims :])
            else:
                shape = Coordinate(dataset.shape[: self.ndims])

            spec.roi = Roi(offset, shape * spec.voxel_size)

        if spec.dtype is not None:
            assert spec.dtype == dataset.dtype, (
                "dtype %s provided in array_specs for %s, "
                "but differs from dataset %s dtype %s"
                % (self.array_specs[array_key].dtype, array_key, ds_name, dataset.dtype)
            )
        else:
            spec.dtype = dataset.dtype

        if spec.interpolatable is None:
            # FIX: np.float128 is not available on all platforms (e.g.
            # Windows); guard the lookup instead of failing with an
            # AttributeError
            interpolatable_dtypes = [
                np.float32,
                np.float64,
                np.uint8,  # assuming this is not used for labels
            ]
            if hasattr(np, "float128"):
                interpolatable_dtypes.append(np.float128)
            spec.interpolatable = spec.dtype in interpolatable_dtypes
            logger.warning(
                "WARNING: You didn't set 'interpolatable' for %s "
                "(dataset %s). Based on the dtype %s, it has been "
                "set to %s. This might not be what you want.",
                array_key,
                ds_name,
                spec.dtype,
                spec.interpolatable,
            )

        return spec

    def __read(self, data_file, ds_name, roi):
        """Read ``roi`` (in voxel units) from a dataset and return it in
        channels-first order."""
        c = len(data_file[ds_name].shape) - self.ndims

        if self.channels_first:
            array = np.asarray(data_file[ds_name][(slice(None),) * c + roi.to_slices()])
        else:
            array = np.asarray(data_file[ds_name][roi.to_slices() + (slice(None),) * c])
            # move trailing channel dimensions to the front
            array = np.transpose(
                array, axes=[i + self.ndims for i in range(c)] + list(range(self.ndims))
            )

        return array

    def name(self):
        return super().name() + f"[{self.filename}]"
| 7,296 | 33.747619 | 88 | py |
gunpowder | gunpowder-master/gunpowder/nodes/zarr_write.py | from collections.abc import MutableMapping
from typing import Union
from zarr._storage.store import BaseStore
from zarr import N5FSStore, N5Store
from .batch_filter import BatchFilter
from gunpowder.batch_request import BatchRequest
from gunpowder.coordinate import Coordinate
from gunpowder.roi import Roi
from gunpowder.coordinate import Coordinate
from gunpowder.ext import ZarrFile
import logging
import warnings
logger = logging.getLogger(__name__)
class ZarrWrite(BatchFilter):
    """Assemble arrays of passing batches in one zarr container. This is useful
    to store chunks produced by :class:`Scan` on disk without keeping the
    larger array in memory. The ROIs of the passing arrays will be used to
    determine the position where to store the data in the dataset.

    Args:

        dataset_names (``dict``, :class:`ArrayKey` -> ``string``):

            A dictionary from array keys to names of the datasets to store them
            in.

        store (``string`` or ``BaseStore``):

            The directory to save the zarr container. Will be created, if it does
            not exist. (``output_dir``/``output_filename`` are deprecated
            aliases that are combined into a store path.)

        compression_type (``string`` or ``int``):

            Compression strategy.  Legal values are ``gzip``, ``szip``,
            ``lzf``. If an integer between 1 and 10, this indicates ``gzip``
            compression level.

        dataset_dtypes (``dict``, :class:`ArrayKey` -> data type):

            A dictionary from array keys to datatype (eg. ``np.int8``). If
            given, arrays are stored using this type. The original arrays
            within the pipeline remain unchanged.
    """

    def __init__(
        self,
        dataset_names,
        output_dir=".",
        output_filename="output.hdf",
        compression_type=None,
        dataset_dtypes=None,
        store: Union[BaseStore, MutableMapping, str] = None,
    ):
        self.store = store if store is not None else f"{output_dir}/{output_filename}"
        if store is None:
            warnings.warn(
                "Argument 'output_dir' and `output_filename` will be replaced in v2.0, "
                "use 'store' instead",
                DeprecationWarning,
            )
        self.dataset_names = dataset_names
        self.compression_type = compression_type
        if dataset_dtypes is None:
            self.dataset_dtypes = {}
        else:
            self.dataset_dtypes = dataset_dtypes

        # world-unit offset of each target dataset, filled in lazily by
        # init_datasets() on the first batch
        self.dataset_offsets = {}

    def _get_voxel_size(self, dataset):
        if "resolution" not in dataset.attrs:
            return None

        if self._rev_metadata():
            return Coordinate(dataset.attrs["resolution"][::-1])
        else:
            return Coordinate(dataset.attrs["resolution"])

    def _get_offset(self, dataset):
        if "offset" not in dataset.attrs:
            return None

        if self._rev_metadata():
            return Coordinate(dataset.attrs["offset"][::-1])
        else:
            return Coordinate(dataset.attrs["offset"])

    def _set_voxel_size(self, dataset, voxel_size):
        if self._rev_metadata():
            dataset.attrs["resolution"] = voxel_size[::-1]
        else:
            dataset.attrs["resolution"] = voxel_size

    def _set_offset(self, dataset, offset):
        if self._rev_metadata():
            dataset.attrs["offset"] = offset[::-1]
        else:
            dataset.attrs["offset"] = offset

    def _rev_metadata(self):
        # N5 stores metadata in reversed axis order
        with ZarrFile(self.store, mode="a") as store:
            return isinstance(store, N5Store) or isinstance(store, N5FSStore)

    def _open_file(self, store):
        return ZarrFile(store, mode="a")

    def setup(self):
        for key in self.dataset_names.keys():
            self.updates(key, self.spec[key])
        self.enable_autoskip()

    def prepare(self, request):
        deps = BatchRequest()
        for key in self.dataset_names.keys():
            deps[key] = request[key]
        return deps

    def init_datasets(self, batch):
        """Create (or re-open) the target datasets, using the first batch to
        determine dtypes and channel dimensions."""
        with self._open_file(self.store) as data_file:
            for array_key, dataset_name in self.dataset_names.items():
                logger.debug("Initializing dataset for %s", array_key)

                assert array_key in self.spec, (
                    "Asked to store %s, but is not provided upstream." % array_key
                )
                assert array_key in batch.arrays, (
                    "Asked to store %s, but is not part of batch." % array_key
                )

                array = batch.arrays[array_key]
                dims = array.spec.roi.dims
                batch_shape = array.data.shape

                # if a dataset already exists, read its meta-information (if
                # present)
                if dataset_name in data_file:
                    offset = self._get_offset(data_file[dataset_name]) or Coordinate(
                        (0,) * dims
                    )

                else:
                    provided_roi = self.spec[array_key].roi

                    if provided_roi is None:
                        raise RuntimeError(
                            "Dataset %s does not exist in %s, and no ROI is "
                            "provided for %s. I don't know how to initialize "
                            "the dataset." % (dataset_name, self.store, array_key)
                        )

                    offset = provided_roi.offset
                    voxel_size = array.spec.voxel_size
                    data_shape = provided_roi.shape // voxel_size

                    logger.debug("Shape in voxels: %s", data_shape)

                    # add channel dimensions (if present)
                    data_shape = batch_shape[:-dims] + data_shape

                    logger.debug("Shape with channel dimensions: %s", data_shape)

                    if array_key in self.dataset_dtypes:
                        dtype = self.dataset_dtypes[array_key]
                    else:
                        dtype = batch.arrays[array_key].data.dtype

                    logger.debug(
                        "create_dataset: %s, %s, %s, %s, offset=%s, resolution=%s",
                        dataset_name,
                        data_shape,
                        self.compression_type,
                        dtype,
                        offset,
                        voxel_size,
                    )

                    dataset = data_file.create_dataset(
                        name=dataset_name,
                        shape=data_shape,
                        compression=self.compression_type,
                        dtype=dtype,
                    )

                    self._set_offset(dataset, offset)
                    self._set_voxel_size(dataset, voxel_size)

                logger.debug(
                    "%s (%s in %s) has offset %s",
                    array_key,
                    dataset_name,
                    self.store,
                    offset,
                )
                self.dataset_offsets[array_key] = offset

    def process(self, batch, request):
        """Write the intersection of each passing array with its target
        dataset; arrays entirely outside the dataset ROI are skipped."""
        if not self.dataset_offsets:
            self.init_datasets(batch)

        with self._open_file(self.store) as data_file:
            for array_key, dataset_name in self.dataset_names.items():
                dataset = data_file[dataset_name]

                array_roi = batch.arrays[array_key].spec.roi
                voxel_size = self.spec[array_key].voxel_size
                dims = array_roi.dims
                channel_slices = (slice(None),) * max(0, len(dataset.shape) - dims)

                dataset_roi = Roi(
                    self.dataset_offsets[array_key],
                    Coordinate(dataset.shape[-dims:]) * voxel_size,
                )
                common_roi = array_roi.intersect(dataset_roi)

                if common_roi.empty:
                    # FIX: logger.warn is a deprecated alias of
                    # logger.warning; also use lazy %-args formatting
                    logger.warning(
                        "array %s with ROI %s lies outside of dataset ROI %s, "
                        "skipping writing",
                        array_key,
                        array_roi,
                        dataset_roi,
                    )
                    continue

                dataset_voxel_roi = (
                    common_roi - self.dataset_offsets[array_key]
                ) // voxel_size
                dataset_voxel_slices = dataset_voxel_roi.to_slices()
                array_voxel_roi = (common_roi - array_roi.offset) // voxel_size
                array_voxel_slices = array_voxel_roi.to_slices()

                logger.debug(
                    "writing %s to voxel coordinates %s"
                    % (array_key, dataset_voxel_roi)
                )

                data = batch.arrays[array_key].data[channel_slices + array_voxel_slices]
                dataset[channel_slices + dataset_voxel_slices] = data
| 8,757 | 35.798319 | 88 | py |
gunpowder | gunpowder-master/gunpowder/nodes/specified_location.py | from random import randrange
from random import choice, seed
import logging
import numpy as np
from gunpowder.coordinate import Coordinate
from gunpowder.batch_request import BatchRequest
from .batch_filter import BatchFilter
logger = logging.getLogger(__name__)
class SpecifiedLocation(BatchFilter):
    """Choses a batch at a location from the list provided at init, making sure
    it is in the bounding box of the upstream provider.

    Locations should be given in world units.

    Locations will be chosen in order or at random from the list depending on
    the ``choose_randomly`` parameter.

    If a location requires a shift outside the bounding box of any upstream
    provider, the module will skip that location with a warning.

    Args:

        locations (``list`` of locations):

            Locations to center batches around.

        choose_randomly (``bool``):

            Defines whether locations should be picked in order or at random
            from the list.

        extra_data (``list`` of array-like):

            A list of data that will be passed along with the arrays provided
            by this node. This data will be appended as an attribute to the
            dataset so it must be a data format compatible with hdf5.

        jitter (``tuple`` of int):

            How far to allow the point to shift in each direction.
            Default is None, which places the point in the center.
            Chooses uniformly from [loc - jitter, loc + jitter] in each
            direction.
    """

    def __init__(self, locations, choose_randomly=False, extra_data=None, jitter=None):
        self.coordinates = locations
        self.choose_randomly = choose_randomly
        self.jitter = jitter
        # index of the most recently used location; -1 means "none used yet"
        self.loc_i = -1
        self.upstream_spec = None
        # world-unit shift currently applied to the request, set in prepare()
        self.specified_shift = None

        if extra_data is not None:
            assert len(extra_data) == len(locations), (
                "extra_data (%d) should match the length of specified locations (%d)"
                % (len(extra_data), len(locations))
            )
        self.extra_data = extra_data

    def setup(self):
        self.upstream_spec = self.get_upstream_provider().spec

        # clear bounding boxes of all provided arrays and points --
        # SpecifiedLocation does know its locations at setup (checks on the fly)
        for key, spec in self.spec.items():
            spec.roi.shape = (None,) * spec.roi.dims
            self.updates(key, spec)

    def prepare(self, request):
        """Pick the next valid location and shift all requested ROIs to it."""
        # seed both RNGs so batches are reproducible for a given request seed
        seed(request.random_seed)
        np.random.seed(request.random_seed)
        lcm_voxel_size = self.spec.get_lcm_voxel_size(request.array_specs.keys())

        # shift to center: the chosen location becomes the center of the
        # total requested ROI
        total_roi = request.get_total_roi()
        request_center = total_roi.shape / 2 + total_roi.offset

        self.specified_shift = self._get_next_shift(request_center, lcm_voxel_size)
        # keep drawing locations until the shifted request fits upstream
        while not self.__check_shift(request):
            logger.warning(
                "Location %s (shift %s) skipped"
                % (self.coordinates[self.loc_i], self.specified_shift)
            )
            self.specified_shift = self._get_next_shift(request_center, lcm_voxel_size)

        # Set shift for all requests
        for specs_type in [request.array_specs, request.graph_specs]:
            for key, spec in specs_type.items():
                roi = spec.roi.shift(self.specified_shift)
                specs_type[key].roi = roi

        logger.debug(
            "{}'th ({}) shift selected: {}".format(
                self.loc_i, self.coordinates[self.loc_i], self.specified_shift
            )
        )

        deps = request
        return deps

    def process(self, batch, request):
        """Undo the shift so downstream nodes see the originally requested ROIs."""
        # reset ROIs to request
        for array_key, spec in request.array_specs.items():
            batch.arrays[array_key].spec.roi = spec.roi
            if self.extra_data is not None:
                batch.arrays[array_key].attrs[
                    "specified_location_extra_data"
                ] = self.extra_data[self.loc_i]

        for graph_key, spec in request.graph_specs.items():
            batch.points[graph_key].spec.roi = spec.roi

        # change shift point locations to lie within roi
        for graph_key in request.graph_specs.keys():
            batch.points[graph_key].shift(-self.specified_shift)

    def _get_next_shift(self, center_shift, voxel_size):
        # gets next coordinate from list
        if self.choose_randomly:
            self.loc_i = randrange(len(self.coordinates))
        else:
            self.loc_i += 1
            if self.loc_i >= len(self.coordinates):
                self.loc_i = 0
                logger.warning("Ran out of specified locations, looping list")
        next_shift = Coordinate(self.coordinates[self.loc_i]) - center_shift

        if self.jitter is not None:
            rnd = []
            for i in range(len(self.jitter)):
                rnd.append(np.random.randint(-self.jitter[i], self.jitter[i] + 1))
            next_shift += Coordinate(rnd)
        logger.debug("Shift before rounding: %s" % str(next_shift))
        # make sure shift is a multiple of voxel size (round to nearest)
        next_shift = Coordinate(
            [
                int(vs * round(float(shift) / vs))
                for vs, shift in zip(voxel_size, next_shift)
            ]
        )
        logger.debug("Shift after rounding: %s" % str(next_shift))
        return next_shift

    def __check_shift(self, request):
        # True iff every requested ROI, shifted by specified_shift, still lies
        # inside what the upstream provider can serve
        for key, spec in request.items():
            request_roi = spec.roi
            if key in self.upstream_spec:
                provided_roi = self.upstream_spec[key].roi
            else:
                raise Exception("Requested %s, but upstream does not provide it." % key)
            shifted_roi = request_roi.shift(self.specified_shift)
            if not provided_roi.contains(shifted_roi):
                logger.warning(
                    "Provided roi %s for key %s does not contain shifted roi %s"
                    % (provided_roi, key, shifted_roi)
                )
                return False
        return True
| 6,145 | 35.802395 | 88 | py |
gunpowder | gunpowder-master/gunpowder/nodes/__init__.py | from __future__ import absolute_import
from .add_affinities import AddAffinities
from .astype import AsType
from .balance_labels import BalanceLabels
from .batch_filter import BatchFilter
from .batch_provider import BatchProvider
from .crop import Crop
from .csv_points_source import CsvPointsSource
from .daisy_request_blocks import DaisyRequestBlocks
from .defect_augment import DefectAugment
from .downsample import DownSample
from .dvid_source import DvidSource
from .deform_augment import DeformAugment
from .elastic_augment import ElasticAugment
from .exclude_labels import ExcludeLabels
from .graph_source import GraphSource
from .grow_boundary import GrowBoundary
from .hdf5_source import Hdf5Source
from .hdf5_write import Hdf5Write
from .intensity_augment import IntensityAugment
from .intensity_scale_shift import IntensityScaleShift
from .iterate_locations import IterateLocations
from .klb_source import KlbSource
from .merge_provider import MergeProvider
from .noise_augment import NoiseAugment
from .normalize import Normalize
from .pad import Pad
from .precache import PreCache
from .print_profiling_stats import PrintProfilingStats
from .random_location import RandomLocation
from .random_provider import RandomProvider
from .rasterize_graph import RasterizationSettings, RasterizeGraph
from .reject import Reject
from .renumber_connected_components import RenumberConnectedComponents
from .resample import Resample
from .scan import Scan
from .shift_augment import ShiftAugment
from .simple_augment import SimpleAugment
from .snapshot import Snapshot
from .specified_location import SpecifiedLocation
from .squeeze import Squeeze
from .stack import Stack
from .unsqueeze import Unsqueeze
from .upsample import UpSample
from .zarr_source import ZarrSource
from .zarr_write import ZarrWrite
| 1,808 | 36.6875 | 70 | py |
gunpowder | gunpowder-master/gunpowder/nodes/stack.py | from .batch_filter import BatchFilter
from gunpowder.array import Array
from gunpowder.batch import Batch
from gunpowder.profiling import Timing
import numpy as np
class Stack(BatchFilter):
    """Request several batches and stack them together, introducing a new
    (leading) dimension for each array. This is useful to create batches with
    several samples, and it only makes sense if there is a source of
    randomness upstream.

    Only arrays are stacked; point sets are taken verbatim from the first
    upstream batch.

    Args:

        num_repetitions (``int``):

            How many upstream batches to stack.
    """

    def __init__(self, num_repetitions):
        self.num_repetitions = num_repetitions

    def provide(self, request):
        # fetch all upstream batches first, so that only the stacking itself
        # is accounted for in this node's timing
        upstream = self.get_upstream_provider()
        batches = [
            upstream.request_batch(request) for _ in range(self.num_repetitions)
        ]

        timing = Timing(self)
        timing.start()

        stacked = Batch()
        for upstream_batch in batches:
            stacked.profiling_stats.merge_with(upstream_batch.profiling_stats)

        # stack every requested array along a new leading axis; the spec of
        # the first batch is reused for the stacked array
        for key in request.array_specs.keys():
            stacked_data = np.stack([b[key].data for b in batches])
            stacked[key] = Array(stacked_data, batches[0][key].spec.copy())

        # point sets are not stacked: copy them from the first batch requested
        for key in request.points_specs.keys():
            stacked[key] = batches[0][key]

        timing.stop()
        stacked.profiling_stats.add(timing)

        return stacked
| 1,519 | 28.803922 | 79 | py |
gunpowder | gunpowder-master/gunpowder/nodes/intensity_scale_shift.py | from .batch_filter import BatchFilter
class IntensityScaleShift(BatchFilter):
    """Apply an affine intensity transform ``data * scale + shift`` to one
    array of each passing batch.

    Args:

        array (:class:`ArrayKey`):

            The key of the array to modify.

        scale (``float``):
        shift (``float``):

            The scale and shift to apply to ``array``.
    """

    def __init__(self, array, scale, shift):
        self.array = array
        self.scale = scale
        self.shift = shift

    def process(self, batch, request):
        # nothing to do if the array is not part of this batch
        if self.array not in batch.arrays:
            return

        target = batch.arrays[self.array]
        target.data = target.data * self.scale + self.shift
| 690 | 22.033333 | 75 | py |
gunpowder | gunpowder-master/gunpowder/nodes/hdf5_write.py | from .hdf5like_write_base import Hdf5LikeWrite
from gunpowder.ext import h5py
import os
class Hdf5Write(Hdf5LikeWrite):
    """Assemble arrays of passing batches in a single HDF5 file. Useful to
    store chunks produced by :class:`Scan` on disk without keeping the full
    array in memory. The ROIs of passing arrays determine where in the
    datasets the data is written.

    Args:

        dataset_names (``dict``, :class:`ArrayKey` -> ``string``):

            A dictionary from array keys to names of the datasets to store
            them in.

        output_dir (``string``):

            The directory to save the HDF5 file. Will be created, if it does
            not exist.

        output_filename (``string``):

            The output filename of the container. Will be created, if it does
            not exist, otherwise data is overwritten in the existing
            container.

        compression_type (``string`` or ``int``):

            Compression strategy. Legal values are ``gzip``, ``szip``,
            ``lzf``. If an integer between 1 and 10, this indicates ``gzip``
            compression level.

        dataset_dtypes (``dict``, :class:`ArrayKey` -> data type):

            A dictionary from array keys to datatype (eg. ``np.int8``). If
            given, arrays are stored using this type. The original arrays
            within the pipeline remain unchanged.
    """

    def _open_file(self, filename):
        # reopen an existing file read/write so previously written chunks are
        # preserved; otherwise create a fresh file
        mode = "r+" if os.path.exists(filename) else "w"
        return h5py.File(filename, mode)
| 1,607 | 33.212766 | 79 | py |
gunpowder | gunpowder-master/gunpowder/nodes/batch_filter.py | import copy
import logging
from .batch_provider import BatchProvider
from gunpowder.batch_request import BatchRequest
from gunpowder.profiling import Timing
logger = logging.getLogger(__name__)
class BatchFilterError(Exception):
    """Error raised by a :class:`BatchFilter`, carrying a reference to the
    offending node so the message can name it."""

    def __init__(self, batch_filter, msg):
        self.batch_filter = batch_filter
        self.msg = msg

    def __str__(self):
        # prefix the message with the name of the failing node
        return "Error in " + self.batch_filter.name() + ": " + self.msg
class BatchFilter(BatchProvider):
    """Convenience wrapper for :class:`BatchProviders<BatchProvider>` with
    exactly one input provider.

    By default, a node of this class will expose the same :class:`ProviderSpec`
    as the upstream provider. You can modify the provider spec by calling
    :func:`provides` and :func:`updates` in :func:`setup`.

    Subclasses need to implement at least :func:`process` to modify a passed
    batch (downstream). Optionally, the following methods can be implemented:

        :func:`setup`

            Initialize this filter. Called after setup of the DAG. All upstream
            providers will be set up already.

        :func:`teardown`

            Destruct this filter, free resources, stop worker processes.

        :func:`prepare`

            Prepare for a batch request. Always called before each
            :func:`process`. Used to communicate dependencies.
    """

    @property
    def remove_placeholders(self):
        # default to False for subclasses that never set _remove_placeholders
        if not hasattr(self, "_remove_placeholders"):
            return False
        return self._remove_placeholders

    def get_upstream_provider(self):
        # BatchFilters are single-input by definition; fail loudly otherwise
        if len(self.get_upstream_providers()) != 1:
            raise BatchFilterError(
                self,
                "BatchFilters need to have exactly one upstream provider, "
                f"this one has {len(self.get_upstream_providers())}: "
                f"({[b.name() for b in self.get_upstream_providers()]}",
            )
        return self.get_upstream_providers()[0]

    def updates(self, key, spec):
        """Update an output provided by this :class:`BatchFilter`.

        Implementations should call this in their :func:`setup` method, which
        will be called when the pipeline is build.

        Args:

            key (:class:`ArrayKey` or :class:`GraphKey`):

                The array or point set key this filter updates.

            spec (:class:`ArraySpec` or :class:`GraphSpec`):

                The updated spec of the array or point set.
        """
        if key not in self.spec:
            raise BatchFilterError(
                self,
                f"BatchFilter {self} is trying to change the spec for {key}, "
                f"but {key} is not provided upstream. Upstream offers: "
                f"{self.get_upstream_provider().spec}",
            )
        self.spec[key] = copy.deepcopy(spec)
        self.updated_items.append(key)

        logger.debug("%s updates %s with %s" % (self.name(), key, spec))

    def enable_autoskip(self, skip=True):
        """Enable automatic skipping of this :class:`BatchFilter`, based on
        given :func:`updates` and :func:`provides` calls. Has to be called in
        :func:`setup`.

        By default, :class:`BatchFilters<BatchFilter>` are not skipped
        automatically, regardless of what they update or provide. If autskip is
        enabled, :class:`BatchFilters<BatchFilter>` will only be run if the
        request contains at least one key reported earlier with
        :func:`updates` or :func:`provides`.
        """
        self._autoskip_enabled = skip

    def _init_spec(self):
        # default for BatchFilters is to provide the same as upstream
        if not hasattr(self, "_spec") or self._spec is None:
            if len(self.get_upstream_providers()) != 0:
                self._spec = copy.deepcopy(self.get_upstream_provider().spec)
            else:
                self._spec = None

    def internal_teardown(self):
        # reset cached state so the pipeline can be rebuilt, then delegate to
        # the subclass's teardown()
        logger.debug("Resetting spec of %s", self.name())
        self._spec = None
        self._updated_items = []

        self.teardown()

    @property
    def updated_items(self):
        """Get a list of the keys that are updated by this `BatchFilter`.

        This list is only available after the pipeline has been build. Before
        that, it is empty.
        """
        if not hasattr(self, "_updated_items"):
            self._updated_items = []
        return self._updated_items

    @property
    def autoskip_enabled(self):
        # set by enable_autoskip(); defaults to False
        if not hasattr(self, "_autoskip_enabled"):
            self._autoskip_enabled = False
        return self._autoskip_enabled

    def provide(self, request):
        # Orchestrates one request/batch cycle: compute dependencies via
        # prepare(), forward the (possibly reduced) request upstream, run
        # process() on the cropped batch, then merge and crop the result to
        # the downstream request. Timing of prepare and process is recorded
        # separately in the batch's profiling stats.
        skip = self.__can_skip(request)

        timing_prepare = Timing(self, "prepare")
        timing_prepare.start()

        downstream_request = request.copy()

        if not skip:
            dependencies = self.prepare(request)
            if isinstance(dependencies, BatchRequest):
                upstream_request = request.update_with(dependencies)
            elif dependencies is None:
                # None means "this node depends on the full request"
                upstream_request = request.copy()
            else:
                raise BatchFilterError(
                    self,
                    f"This BatchFilter returned a {type(dependencies)}! "
                    "Supported return types are: `BatchRequest` containing your exact "
                    "dependencies or `None`, indicating a dependency on the full request.",
                )
            self.remove_provided(upstream_request)
        else:
            upstream_request = request.copy()
            self.remove_provided(upstream_request)

        timing_prepare.stop()

        batch = self.get_upstream_provider().request_batch(upstream_request)

        timing_process = Timing(self, "process")
        timing_process.start()

        if not skip:
            if dependencies is not None:
                dependencies.remove_placeholders()
                # hand process() only what it declared as dependencies
                node_batch = batch.crop(dependencies)
            else:
                node_batch = batch
            downstream_request.remove_placeholders()
            processed_batch = self.process(node_batch, downstream_request)
            if processed_batch is None:
                # process() modified node_batch in place
                processed_batch = node_batch
            batch = batch.merge(processed_batch, merge_profiling_stats=False).crop(
                downstream_request
            )

        timing_process.stop()

        batch.profiling_stats.add(timing_prepare)
        batch.profiling_stats.add(timing_process)

        return batch

    def __can_skip(self, request):
        """Check if this filter needs to be run for the given request."""
        if not self.autoskip_enabled:
            return False

        for key, spec in request.items():
            if spec.placeholder:
                continue
            if key in self.provided_items:
                return False
            if key in self.updated_items:
                return False

        return True

    def setup(self):
        """To be implemented in subclasses.

        Called during initialization of the DAG. Callees can assume that all
        upstream providers are set up already.

        In setup, call :func:`provides` or :func:`updates` to announce the
        arrays and points provided or changed by this node.
        """
        pass

    def prepare(self, request):
        """To be implemented in subclasses.

        Prepare for a batch request. Should return a :class:`BatchRequest` of
        needed dependencies. If None is returned, it will be assumed that all
        of request is needed.
        """
        return None

    def process(self, batch, request):
        """To be implemented in subclasses.

        Filter a batch, will be called after :func:`prepare`. Should return a
        :class:`Batch` containing modified Arrays and Graphs. Keys in the returned
        batch will replace the associated data in the original batch. If None is
        returned it is assumed that the batch has been modified in place. ``request``
        is the same as passed to :func:`prepare`, provided for convenience.

        Args:

            batch (:class:`Batch`):

                The batch received from upstream to be modified by this node.

            request (:class:`BatchRequest`):

                The request this node received. The updated batch should meet
                this request.
        """
        raise BatchFilterError(self, "does not implement 'process'")
| 8,452 | 32.677291 | 91 | py |
gunpowder | gunpowder-master/gunpowder/nodes/downsample.py | from .batch_filter import BatchFilter
from gunpowder.array import ArrayKey, Array
from gunpowder.batch_request import BatchRequest
from gunpowder.batch import Batch
import logging
import numbers
from funlib.geometry import Coordinate
logger = logging.getLogger(__name__)
class DownSample(BatchFilter):
    """Downsample arrays in a batch by given factors, using strided slicing.

    Args:

        source (:class:`ArrayKey`):

            The key of the array to downsample.

        factor (``int`` or ``tuple`` of ``int``):

            The factor to downsample with.

        target (:class:`ArrayKey`):

            The key of the array to store the downsampled ``source``.
    """

    def __init__(self, source, factor, target):
        assert isinstance(source, ArrayKey)
        assert isinstance(target, ArrayKey)
        assert isinstance(factor, numbers.Number) or isinstance(
            factor, tuple
        ), "Scaling factor should be a number or a tuple of numbers."

        self.source = source
        if isinstance(factor, numbers.Number):
            self.factor = factor
        else:
            self.factor = Coordinate(factor)
        self.target = target

    def setup(self):
        # the target has the same spec as the source, but a coarser voxel grid
        downsampled_spec = self.spec[self.source].copy()
        downsampled_spec.voxel_size *= self.factor
        self.provides(self.target, downsampled_spec)
        self.enable_autoskip()

    def prepare(self, request):
        # the source is needed over exactly the ROI requested for the target
        upstream_dependencies = BatchRequest()
        upstream_dependencies[self.source] = request[self.target]
        return upstream_dependencies

    def process(self, batch, request):
        source_array = batch.arrays[self.source]
        data = source_array.data
        spatial_dims = source_array.spec.roi.dims
        channel_dims = len(data.shape) - spatial_dims

        # one stride per spatial dimension; a scalar factor applies to all
        if isinstance(self.factor, tuple):
            steps = self.factor
        else:
            steps = (self.factor,) * spatial_dims
        slices = (slice(None, None),) * channel_dims + tuple(
            slice(None, None, step) for step in steps
        )

        logger.debug("downsampling %s with %s", self.source, slices)

        downsampled = data[slices]

        out_spec = self.spec[self.target].copy()
        out_spec.roi = request[self.target].roi

        outputs = Batch()
        outputs.arrays[self.target] = Array(downsampled, out_spec)
        return outputs
| 2,387 | 28.481481 | 83 | py |
gunpowder | gunpowder-master/gunpowder/nodes/hdf5_source.py | from gunpowder.ext import h5py
from .hdf5like_source_base import Hdf5LikeSource
class Hdf5Source(Hdf5LikeSource):
    """An HDF5 data source.

    Provides arrays from HDF5 datasets. If the attribute ``resolution`` is set
    in a HDF5 dataset, it will be used as the array's ``voxel_size``. If the
    attribute ``offset`` is set in a dataset, it will be used as the offset of
    the :class:`Roi` for this array. It is assumed that the offset is given in
    world units.

    Args:

        filename (``string``):

            The HDF5 file.

        datasets (``dict``, :class:`ArrayKey` -> ``string``):

            Dictionary of array keys to dataset names that this source offers.

        array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`, optional):

            An optional dictionary of array keys to array specs to overwrite
            the array specs automatically determined from the data file. This
            is useful to set a missing ``voxel_size``, for example. Only fields
            that are not ``None`` in the given :class:`ArraySpec` will be used.

        channels_first (``bool``, optional):

            Specifies the ordering of the dimensions of the HDF5-like data source.
            If channels_first is set (default), then the input shape is expected
            to be (channels, spatial dimensions). This is recommended because of
            better performance. If channels_first is set to false, then the input
            data is read in channels_last manner and converted to channels_first.
    """

    def _open_file(self, filename):
        # a source only reads, so the file is opened read-only
        return h5py.File(filename, "r")
| 1,631 | 37.857143 | 82 | py |
gunpowder | gunpowder-master/gunpowder/nodes/upsample.py | from .batch_filter import BatchFilter
from gunpowder.coordinate import Coordinate
from gunpowder.array import ArrayKey, Array
from gunpowder.array_spec import ArraySpec
from gunpowder.batch_request import BatchRequest
from gunpowder.batch import Batch
import logging
import numbers
import numpy as np
logger = logging.getLogger(__name__)
class UpSample(BatchFilter):
    """Upsample arrays in a batch by given factors, repeating each voxel.

    Args:

        source (:class:`ArrayKey`):

            The key of the array to upsample.

        factor (``int`` or ``Coordinate``):

            The factor to upsample with.

        target (:class:`ArrayKey`):

            The key of the array to store the upsampled ``source``.
    """

    def __init__(self, source, factor, target):
        assert isinstance(source, ArrayKey)
        assert isinstance(target, ArrayKey)
        assert isinstance(factor, numbers.Number) or isinstance(
            factor, Coordinate
        ), "Scaling factor should be a number or a Coordinate."

        self.source = source
        self.factor = factor
        self.target = target

    def setup(self):
        spec = self.spec[self.source].copy()

        # a scalar factor is broadcast to all spatial dimensions
        if not isinstance(self.factor, Coordinate):
            self.factor = Coordinate((self.factor,) * spec.roi.dims)

        # the upsampled voxel size must stay integer in every dimension
        assert spec.voxel_size % self.factor == (0,) * len(
            spec.voxel_size
        ), "voxel size of upsampled volume is not integer: %s/%s = %s" % (
            spec.voxel_size,
            self.factor,
            tuple(v / f for v, f in zip(spec.voxel_size, self.factor)),
        )
        spec.voxel_size /= self.factor
        self.provides(self.target, spec)

    def prepare(self, request):
        deps = BatchRequest()

        # NOTE(review): this bare `return` yields None, which BatchFilter
        # interprets as "depends on the full request", not as the empty `deps`
        # built above — presumably intentional when the target is not
        # requested; confirm before changing.
        if self.target not in request:
            return

        logger.debug("preparing upsampling of " + str(self.source))

        upstream_voxel_size = self.spec[self.source].voxel_size

        # grow the requested ROI outward so it aligns with the coarser
        # upstream voxel grid
        request_roi = request[self.target].roi.snap_to_grid(
            upstream_voxel_size, mode="grow"
        )
        logger.debug("request ROI is %s" % request_roi)

        # add or merge to batch request
        deps[self.source] = ArraySpec(roi=request_roi)

        return deps

    def process(self, batch, request):
        outputs = Batch()

        if self.target not in request:
            return

        input_roi = batch.arrays[self.source].spec.roi
        request_roi = request[self.target].roi

        assert input_roi.contains(request_roi)

        # upsample by repeating each voxel `f` times along each spatial axis
        # (negative axis indices leave leading channel dimensions untouched)
        logger.debug("upsampling %s with %s", self.source, self.factor)

        crop = batch.arrays[self.source]
        data = crop.data
        for d, f in enumerate(self.factor):
            data = np.repeat(data, f, axis=-self.factor.dims + d)

        # create output array, then crop back to the exact requested ROI
        spec = self.spec[self.target].copy()
        spec.roi = input_roi
        outputs.arrays[self.target] = Array(data, spec).crop(request_roi)
        return outputs
| 2,967 | 27.266667 | 74 | py |
gunpowder | gunpowder-master/gunpowder/nodes/normalize.py | import logging
import copy
import numpy as np
from gunpowder.batch_request import BatchRequest
from .batch_filter import BatchFilter
logger = logging.getLogger(__name__)
class Normalize(BatchFilter):
    """Normalize the values of an array to be floats between 0 and 1, based on
    the type of the array.

    Args:

        array (:class:`ArrayKey`):

            The key of the array to modify.

        factor (scalar, optional):

            The factor to use. If not given, a factor is chosen based on the
            ``dtype`` of the array (e.g., ``np.uint8`` would result in a factor
            of ``1.0/255``).

        dtype (data-type, optional):

            The datatype of the normalized array. Defaults to ``np.float32``.
    """

    def __init__(self, array, factor=None, dtype=np.float32):
        self.array = array
        self.factor = factor
        self.dtype = dtype

    def setup(self):
        self.enable_autoskip()
        updated_spec = copy.deepcopy(self.spec[self.array])
        updated_spec.dtype = self.dtype
        self.updates(self.array, updated_spec)

    def prepare(self, request):
        deps = BatchRequest()
        deps[self.array] = request[self.array]
        # accept whatever dtype upstream provides; we convert in process()
        deps[self.array].dtype = None
        return deps

    def process(self, batch, request):
        if self.array not in batch.arrays:
            return

        array = batch.arrays[self.array]
        array.spec.dtype = self.dtype

        factor = self.factor
        if factor is None:
            logger.debug(
                "automatically normalizing %s with dtype=%s",
                self.array,
                array.data.dtype,
            )
            factor = self._infer_factor(array.data)

        logger.debug("scaling %s with %f", self.array, factor)
        array.data = array.data.astype(self.dtype) * factor

    def _infer_factor(self, data):
        """Pick a normalization factor from the dtype of ``data``."""
        if data.dtype == np.uint8:
            return 1.0 / 255
        if data.dtype == np.uint16:
            return 1.0 / (256 * 256 - 1)
        if data.dtype == np.float32:
            # float data is expected to already be normalized
            assert data.min() >= 0 and data.max() <= 1, (
                "Values are float but not in [0,1], I don't know how "
                "to normalize. Please provide a factor."
            )
            return 1.0
        raise RuntimeError(
            "Automatic normalization for "
            + str(data.dtype)
            + " not implemented, please "
            "provide a factor."
        )
| 2,525 | 28.717647 | 79 | py |
gunpowder | gunpowder-master/gunpowder/nodes/pad.py | import logging
import numpy as np
from .batch_filter import BatchFilter
from gunpowder.array import ArrayKey
from gunpowder.roi import Roi
from gunpowder.coordinate import Coordinate
from gunpowder.batch_request import BatchRequest
logger = logging.getLogger(__name__)
class Pad(BatchFilter):
    """Add a constant intensity padding around arrays of another batch
    provider. This is useful if your requested batches can be larger than what
    your source provides.

    Args:

        key (:class:`ArrayKey` or :class:`GraphKey`):

            The array or points set to pad.

        size (:class:`Coordinate` or ``None``):

            The padding to be added. If None, an infinite padding is added. If
            a coordinate, this amount will be added to the ROI in the positive
            and negative direction.

        value (scalar or ``None``):

            The value to report inside the padding. If not given, 0 is used.
            Only used for :class:`Array<Arrays>`.
    """

    def __init__(self, key, size, value=None):
        self.key = key
        self.size = size
        self.value = value

    def setup(self):
        self.enable_autoskip()

        assert self.key in self.spec, (
            "Asked to pad %s, but is not provided upstream." % self.key
        )
        assert self.spec[self.key].roi is not None, (
            "Asked to pad %s, but upstream provider doesn't have a ROI for "
            "it." % self.key
        )

        # grow the provided ROI by the padding; a None size means the padded
        # ROI is unbounded in every dimension
        spec = self.spec[self.key].copy()
        if self.size is not None:
            spec.roi = spec.roi.grow(self.size, self.size)
        else:
            spec.roi.shape = Coordinate((None,) * spec.roi.dims)
        self.updates(self.key, spec)

    def prepare(self, request):
        """Clip the request for ``self.key`` to what upstream can provide.

        Note that ``request`` is modified in place; the remembered original
        ROI is restored in :func:`process` by padding.
        """
        upstream_spec = self.get_upstream_provider().spec

        logger.debug("request: %s" % request)
        logger.debug("upstream spec: %s" % upstream_spec)

        # TODO: remove this?
        if self.key not in request:
            return

        roi = request[self.key].roi.copy()

        # change request to fit into upstream spec
        request[self.key].roi = roi.intersect(upstream_spec[self.key].roi)

        if request[self.key].roi.empty:
            logger.warning(
                "Requested %s ROI %s lies entirely outside of upstream " "ROI %s.",
                self.key,
                roi,
                upstream_spec[self.key].roi,
            )

            # ensure a valid request by asking for empty ROI
            request[self.key].roi = Roi(
                upstream_spec[self.key].roi.offset,
                (0,) * upstream_spec[self.key].roi.dims,
            )

        logger.debug("new request: %s" % request)

        deps = BatchRequest()
        deps[self.key] = request[self.key]
        return deps

    def process(self, batch, request):
        if self.key not in request:
            return

        # restore requested batch size and ROI
        if isinstance(self.key, ArrayKey):
            array = batch.arrays[self.key]
            array.data = self.__expand(
                array.data,
                array.spec.roi / array.spec.voxel_size,
                request[self.key].roi / array.spec.voxel_size,
                self.value if self.value else 0,
            )
            array.spec.roi = request[self.key].roi
        else:
            # graphs only need their ROI restored, no data padding
            points = batch.graphs[self.key]
            points.spec.roi = request[self.key].roi

    def __expand(self, a, from_roi, to_roi, value):
        """from_roi and to_roi should be in voxels."""
        logger.debug(
            "expanding array of shape %s from %s to %s", str(a.shape), from_roi, to_roi
        )

        # leading (non-spatial) channel dimensions are kept as-is
        num_channels = len(a.shape) - from_roi.dims
        channel_shapes = a.shape[:num_channels]

        b = np.zeros(channel_shapes + to_roi.shape, dtype=a.dtype)
        if value != 0:
            b[:] = value

        # copy the original data into its position within the padded array
        shift = -to_roi.offset
        logger.debug("shifting 'from' by " + str(shift))
        a_in_b = from_roi.shift(shift).to_slices()

        logger.debug("target shape is " + str(b.shape))
        logger.debug("target slice is " + str(a_in_b))

        b[(slice(None),) * num_channels + a_in_b] = a

        return b
| 4,216 | 29.781022 | 87 | py |
gunpowder | gunpowder-master/gunpowder/nodes/precache.py | import copy
import logging
import multiprocessing
import time
import random
from .batch_filter import BatchFilter
from gunpowder.profiling import Timing
from gunpowder.producer_pool import ProducerPool
from collections import deque
logger = logging.getLogger(__name__)
class WorkersDiedException(Exception):
    # NOTE(review): no raise site is visible in this file — presumably raised
    # when the pre-cache worker processes terminate unexpectedly; confirm in
    # gunpowder.producer_pool before relying on it.
    pass
class PreCache(BatchFilter):
    """Pre-cache repeated equal batch requests. For the first of a series of
    equal batch request, a set of workers is spawned to pre-cache the batches
    in parallel processes. This way, subsequent requests can be served quickly.

    A note on changing the requests sent to `PreCache`.
    Given requests A and B, if requests are sent in the sequence:
    A, ..., A, B, A, ..., A, B, A, ...
    Precache will build a Queue of batches that satisfy A, and handle requests
    B on demand. This prevents `PreCache` from discarding the queue on every
    SnapshotRequest.
    However if B request replace A as the most common request, i.e.:
    A, A, A, ..., A, B, B, B, ...,
    `PreCache` will discard the A queue and build a B queue after it has seen
    more B requests than A requests out of the last 5 requests.

    This node only makes sense if:

    1. Incoming batch requests are repeatedly the same.
    2. There is a source of randomness in upstream nodes.

    Args:

        cache_size (``int``):

            How many batches to hold at most in the cache.

        num_workers (``int``):

            How many processes to spawn to fill the cache.
    """

    def __init__(self, cache_size=50, num_workers=20):
        self.current_request = None
        self.workers = None  # ProducerPool filling the cache, started lazily
        self.cache_size = cache_size
        self.num_workers = num_workers

        # keep track of recent requests
        self.last_5 = deque(
            [
                None,
            ]
            * 5,
            maxlen=5,
        )

    def teardown(self):
        if self.workers is not None:
            self.workers.stop()

    def provide(self, request):
        timing = Timing(self)
        timing.start()

        # update recent requests
        self.last_5.popleft()
        self.last_5.append(request)

        if request != self.current_request:
            # count how often the cached request vs. the new request appeared
            # among the last five requests
            current_count = sum(
                [
                    recent_request == self.current_request
                    for recent_request in self.last_5
                ]
            )
            new_count = sum(
                [recent_request == request for recent_request in self.last_5]
            )
            if new_count > current_count or self.current_request is None:
                # the new request is now dominant: rebuild the cache for it
                if self.workers is not None:
                    logger.info("new request received, stopping current workers...")
                    self.workers.stop()

                self.current_request = copy.deepcopy(request)

                logger.info(
                    "starting new set of workers (%s, cache size %s)...",
                    self.num_workers,
                    self.cache_size,
                )
                self.workers = ProducerPool(
                    [self._run_worker for _ in range(self.num_workers)],
                    queue_size=self.cache_size,
                )
                self.workers.start()

                logger.debug("getting batch from queue...")
                batch = self.workers.get()
                timing.stop()
                batch.profiling_stats.add(timing)
            else:
                # minority request: serve it directly without touching the cache
                logger.debug("Resolving new request sequentially")
                batch = self.get_upstream_provider().request_batch(request)

                timing.stop()
                batch.profiling_stats.add(timing)
        else:
            logger.debug("getting batch from queue...")
            batch = self.workers.get()
            timing.stop()
            batch.profiling_stats.add(timing)

        return batch

    def _run_worker(self):
        request = copy.deepcopy(self.current_request)
        # Note that using a precache node breaks determinism in batches received
        # since we do not keep a mapping of the order in which random seeds were
        # used, and the order in which the corresponding batch gets returned.
        request._random_seed = random.randint(0, 2**32)
        return self.get_upstream_provider().request_batch(request)
| 4,336 | 30.889706 | 96 | py |
gunpowder | gunpowder-master/gunpowder/nodes/print_profiling_stats.py | import logging
from .batch_filter import BatchFilter
from gunpowder.profiling import Timing, TimingSummary, ProfilingStats
logger = logging.getLogger(__name__)
class PrintProfilingStats(BatchFilter):
    """Print profiling information about nodes upstream of this node in the DAG.
    The output also includes a ``TOTAL`` section, which shows the wall-time
    spent in the upstream and downstream passes. For the downstream pass, this
    information is not available in the first iteration, since the request-batch
    cycle is not completed, yet.
    Args:
        every (``int``):
            Collect statistics about that many batch requests and show min,
            max, mean, and median runtimes.
    """

    def __init__(self, every=1):
        self.every = every
        self.n = 0
        self.accumulated_stats = ProfilingStats()
        self.__upstream_timing = Timing(self)
        self.__upstream_timing_summary = TimingSummary()
        self.__downstream_timing = Timing(self)
        self.__downstream_timing_summary = TimingSummary()

    def prepare(self, request):
        """Stop the downstream clock and start the upstream one."""
        self.__downstream_timing.stop()
        # skip the first one, where we don't know how much time we spent
        # downstream
        if self.__downstream_timing.elapsed() > 0:
            self.__downstream_timing_summary.add(self.__downstream_timing)
            self.__downstream_timing = Timing(self)
        self.__upstream_timing.start()
        deps = request
        return deps

    def process(self, batch, request):
        """Accumulate the batch's profiling stats; print a table every ``self.every`` batches."""
        self.__upstream_timing.stop()
        self.__upstream_timing_summary.add(self.__upstream_timing)
        self.__upstream_timing = Timing(self)
        self.__downstream_timing.start()
        self.n += 1
        print_stats = self.n % self.every == 0
        self.accumulated_stats.merge_with(batch.profiling_stats)
        if not print_stats:
            return
        stats = "\n"
        stats += "Profiling Stats\n"
        stats += "===============\n"
        stats += "\n"
        stats += "NODE".ljust(20)
        stats += "METHOD".ljust(10)
        stats += "COUNTS".ljust(10)
        stats += "MIN".ljust(10)
        stats += "MAX".ljust(10)
        stats += "MEAN".ljust(10)
        stats += "MEDIAN".ljust(10)
        stats += "\n"
        summaries = list(self.accumulated_stats.get_timing_summaries().items())
        # sort by (node_name, method_name); method_name can be None, which is
        # not orderable against str in Python 3 (and a plain sort() would also
        # fall through to comparing the summary objects on key ties), so sort
        # on an explicit key with None mapped to ""
        summaries.sort(key=lambda item: (item[0][0], item[0][1] or ""))
        for (node_name, method_name), summary in summaries:
            if summary.counts() > 0:
                stats += node_name[:19].ljust(20)
                stats += (
                    method_name[:19].ljust(10) if method_name is not None else " " * 10
                )
                stats += self.__format_summary_row(summary)
        stats += "\n"
        stats += "TOTAL"
        stats += "\n"
        for phase, summary in zip(
            ["upstream", "downstream"],
            [self.__upstream_timing_summary, self.__downstream_timing_summary],
        ):
            if summary.counts() > 0:
                stats += phase[:19].ljust(30)
                stats += self.__format_summary_row(summary)
        stats += "\n"
        logger.info(stats)
        # reset summaries
        self.accumulated_stats = ProfilingStats()
        self.__upstream_timing_summary = TimingSummary()
        self.__downstream_timing_summary = TimingSummary()

    @staticmethod
    def __format_summary_row(summary):
        """Format the COUNTS/MIN/MAX/MEAN/MEDIAN columns of one summary as a table row."""
        row = ("%d" % summary.counts())[:9].ljust(10)
        row += ("%.2f" % summary.min())[:9].ljust(10)
        row += ("%.2f" % summary.max())[:9].ljust(10)
        row += ("%.2f" % summary.mean())[:9].ljust(10)
        row += ("%.2f" % summary.median())[:9].ljust(10)
        row += "\n"
        return row
| 3,948 | 33.043103 | 87 | py |
gunpowder | gunpowder-master/gunpowder/nodes/shift_augment.py | from __future__ import print_function, division
import logging
import numpy as np
import random
from gunpowder.roi import Roi
from gunpowder.coordinate import Coordinate
from gunpowder.batch_request import BatchRequest
from .batch_filter import BatchFilter
logger = logging.getLogger(__name__)
class ShiftAugment(BatchFilter):
    """Randomly shift sections along one axis to simulate alignment jitter.

    With probability ``prob_slip`` a single section along ``shift_axis`` is
    shifted independently; with probability ``prob_shift`` a section and all
    following sections are shifted together. Per-dimension shift amounts are
    drawn from normal distributions with standard deviation ``sigma`` (scalar
    or per-dimension sequence; the entry for ``shift_axis`` itself must be
    zero).
    """

    def __init__(self, prob_slip=0, prob_shift=0, sigma=0, shift_axis=0):
        self.prob_slip = prob_slip
        self.prob_shift = prob_shift
        self.sigma = sigma
        self.shift_axis = shift_axis
        # the following are computed per request in prepare()
        self.ndim = None
        self.shift_sigmas = None
        self.shift_array = None
        self.lcm_voxel_size = None

    def prepare(self, request):
        """Draw the global shift array and grow the upstream ROIs accordingly."""
        # NOTE(review): only Python's random module is seeded here;
        # np.random.normal in construct_global_shift_array is not, so shifts
        # are not fully reproducible from the request seed alone -- confirm
        random.seed(request.random_seed)
        self.ndim = request.get_total_roi().dims
        assert self.shift_axis in range(self.ndim)
        try:
            # sigma given per dimension
            self.shift_sigmas = tuple(self.sigma)
        except TypeError:
            # scalar sigma: same value for all dimensions except shift_axis
            self.shift_sigmas = [float(self.sigma)] * self.ndim
            self.shift_sigmas[self.shift_axis] = 0.0
            self.shift_sigmas = tuple(self.shift_sigmas)
        assert len(self.shift_sigmas) == self.ndim
        assert self.shift_sigmas[self.shift_axis] == 0.0
        # at least one dimension must actually be shifted
        has_nonzero = False
        for sigma in self.shift_sigmas:
            if sigma != 0.0:
                has_nonzero = True
                break
        assert has_nonzero
        if not request.array_specs:
            raise ValueError(
                "Request passed to Jitter node must contain at least one array key. "
                + "Check to make sure that Jitter node is not upstream of a RandomLocation node "
                + "with an ensure_nonempty argument."
            )
        self.lcm_voxel_size = self.spec.get_lcm_voxel_size(
            array_keys=request.array_specs.keys()
        )
        assert self.lcm_voxel_size
        roi_shape = request.get_total_roi().shape
        assert (
            roi_shape // self.lcm_voxel_size * self.lcm_voxel_size == roi_shape
        ), "total roi shape {} must be divisible by least common voxel size {}".format(
            roi_shape, self.lcm_voxel_size
        )
        # one shift per section (in lcm-voxel units) along the shift axis
        roi_shape_adjusted = roi_shape // self.lcm_voxel_size
        shift_axis_len = roi_shape_adjusted[self.shift_axis]
        self.shift_array = self.construct_global_shift_array(
            shift_axis_len,
            self.shift_sigmas,
            self.prob_slip,
            self.prob_shift,
            self.lcm_voxel_size,
        )
        # grow each requested ROI so the shifted data still covers it
        for key, spec in request.items():
            sub_shift_array = self.get_sub_shift_array(
                request.get_total_roi(),
                spec.roi,
                self.shift_array,
                self.shift_axis,
                self.lcm_voxel_size,
            )
            updated_roi = self.compute_upstream_roi(spec.roi, sub_shift_array)
            spec.roi.offset = updated_roi.offset
            spec.roi.shape = updated_roi.shape
            request[key] = spec
        deps = request
        return deps

    def process(self, batch, request):
        """Apply the prepared shifts to all arrays and graphs, cropping to the request."""
        for array_key, array in batch.arrays.items():
            sub_shift_array = self.get_sub_shift_array(
                request.get_total_roi(),
                array.spec.roi,
                self.shift_array,
                self.shift_axis,
                self.lcm_voxel_size,
            )
            array.data = self.shift_and_crop(
                array.data,
                request[array_key].roi.shape,
                sub_shift_array,
                array.spec.voxel_size,
            )
            array.spec.roi = request[array_key].roi
            assert (
                request[array_key].roi.shape
                == Coordinate(array.data.shape) * self.lcm_voxel_size
            ), "request roi shape {} is not the same as generated array shape {}".format(
                request[array_key].roi.shape, array.data.shape
            )
            batch[array_key] = array
        for points_key, points in batch.graphs.items():
            sub_shift_array = self.get_sub_shift_array(
                request.get_total_roi(),
                points.spec.roi,
                self.shift_array,
                self.shift_axis,
                self.lcm_voxel_size,
            )
            points = self.shift_points(
                points,
                request[points_key].roi,
                sub_shift_array,
                self.shift_axis,
                self.lcm_voxel_size,
            )
            batch[points_key] = points

    def shift_and_crop(self, arr, roi_shape, sub_shift_array, voxel_size):
        """Shift an array received from upstream and crop it to the target downstream region
        :param arr: an array of upstream data to be shifted and cropped
        :param roi_shape: the shape of the downstream ROI
        :param sub_shift_array: the cropped section of the global shift array that applies to this specific request
        :param voxel_size: the voxel sizes of the data in the array
        :return: an array of shape roi_shape that contains the array to be passed downstream
        """
        array_shift_axis_len = arr.shape[self.shift_axis]
        sub_shift_array_len = len(sub_shift_array)
        assert (
            array_shift_axis_len % sub_shift_array_len == 0
        ), "array shift axis length {} is not divisible by the sub_shift_array length {}".format(
            arr.shape[self.shift_axis], sub_shift_array.shape[0]
        )
        # number of array sections covered by one shift-array row
        voxel_ratio = array_shift_axis_len // sub_shift_array_len
        # assumption: each sub shift array element divides evenly by the voxel size
        rescaled_sub_shift_array = sub_shift_array // np.array(voxel_size, dtype=int)
        max_shift = rescaled_sub_shift_array.max(axis=0)
        batch = arr.copy()
        # bring the shift axis to the front so we can iterate over sections
        batch_view = np.moveaxis(batch, self.shift_axis, 0)
        for index, plane in enumerate(batch_view):
            adjusted_index = index // voxel_ratio
            # shift relative to the maximum so cropping from 0 is valid
            shift = rescaled_sub_shift_array[adjusted_index, :] - max_shift
            shift = np.delete(shift, self.shift_axis, axis=0)
            assert len(shift) == plane.ndim
            plane = np.roll(plane, shift, axis=tuple(range(len(shift))))
            batch_view[index] = plane
        adjusted_roi_shape = Coordinate(roi_shape) // Coordinate(voxel_size)
        sl = tuple(slice(0, adjusted_roi_shape[index]) for index in range(self.ndim))
        return batch[sl]

    @staticmethod
    def shift_points(points, request_roi, sub_shift_array, shift_axis, lcm_voxel_size):
        """Shift a set of points received from upstream and crop out those not the the target downstream region
        :param points: the points from upstream
        :param request_roi: the downstream ROI
        :param sub_shift_array: the cropped section of the global shift array that applies to this specific request
        :param shift_axis: the axis to perform the shift along
        :param lcm_voxel_size: the least common voxel size for the arrays in the request
        :return a Graph object with the updated point locations and ROI
        """
        nodes = list(points.nodes)
        spec = points.spec
        shift_axis_start_pos = spec.roi.offset[shift_axis]
        for node in nodes:
            loc = node.location
            shift_axis_position = loc[shift_axis]
            # row of the sub shift array that applies to this point
            shift_array_index = int(
                (shift_axis_position - shift_axis_start_pos)
                // lcm_voxel_size[shift_axis]
            )
            assert shift_array_index >= 0
            shift = Coordinate(sub_shift_array[shift_array_index])
            loc += shift
            # drop points shifted outside of the requested ROI
            if not request_roi.contains(loc):
                points.remove_node(node)
        points.spec.roi = request_roi
        return points

    @staticmethod
    def get_sub_shift_array(
        total_roi, item_roi, shift_array, shift_axis, lcm_voxel_size
    ):
        """Slices the global shift array to return the sub-shift array to shift an item in the request
        :param total_roi: the total roi of the request
        :param item_roi: the roi of the item (array or points) being shifted
        :param shift_array: the shift array for the total_roi
        :param shift_axis: the axis along which we are shifting
        :param lcm_voxel_size: the least common voxel size for the arrays in the request
        :return: the portion of the global shift array that should be used to shift the item
        """
        item_offset_from_total = item_roi.offset - total_roi.offset
        offset_in_shift_axis = (
            item_offset_from_total[shift_axis] // lcm_voxel_size[shift_axis]
        )
        len_in_shift_axis = item_roi.shape[shift_axis] // lcm_voxel_size[shift_axis]
        return shift_array[
            offset_in_shift_axis : offset_in_shift_axis + len_in_shift_axis
        ]

    @staticmethod
    def construct_global_shift_array(
        shift_axis_len, shift_sigmas, prob_slip, prob_shift, lcm_voxel_size
    ):
        """Sets the attribute variable self.shift_array
        :param shift_axis_len: the length of the shift axis
        :param shift_sigmas: the sigma to generate the normal distribution of shift amounts in each direction
        :param prob_slip: the probability of the slice shifting independently of all other slices
        :param prob_shift: the probability of the slice and all following slices shifting
        :param lcm_voxel_size: the least common voxel size of all the arrays in the request
        :return: the shift_array for the total_roi
        """
        # each row is one slice along shift axis
        shift_array = np.zeros(shape=(shift_axis_len, len(shift_sigmas)), dtype=int)
        base_shift = np.zeros(shape=len(shift_sigmas), dtype=int)
        assert prob_slip + prob_shift <= 1
        for shift_axis_position in range(shift_axis_len):
            r = random.random()
            # draw a shift per dimension, rounded to whole voxels
            slip = np.array(
                [
                    np.random.normal(scale=sigma / lcm_voxel_size[dimension])
                    for dimension, sigma in enumerate(shift_sigmas)
                ]
            )
            slip = np.rint(slip).astype(int)
            slip = slip * np.array(lcm_voxel_size, dtype=int)
            if r <= prob_slip:
                # slip: this section shifts alone, base stays unchanged
                shift_array[shift_axis_position] = base_shift + slip
            elif r <= prob_slip + prob_shift:
                # shift: this and all following sections shift together
                base_shift += slip
                shift_array[shift_axis_position] = base_shift
            else:
                shift_array[shift_axis_position] = base_shift
        return shift_array

    @staticmethod
    def compute_upstream_roi(request_roi, sub_shift_array):
        """Compute the ROI to pass upstream for a specific item (array or points) in a request
        :param request_roi: the downstream ROI passed to the Jitter node
        :param sub_shift_array: the portion of the global shift array that should be used to shift the item
        :return: the expanded ROI to pass upstream
        """
        max_shift = Coordinate(sub_shift_array.max(axis=0))
        min_shift = Coordinate(sub_shift_array.min(axis=0))
        downstream_offset = request_roi.offset
        # grow the ROI by the extreme shifts so all shifted data is available
        upstream_offset = downstream_offset - max_shift
        upstream_shape = request_roi.shape + max_shift - min_shift
        return Roi(offset=upstream_offset, shape=upstream_shape)
| 11,372 | 39.187279 | 115 | py |
gunpowder | gunpowder-master/gunpowder/contrib/__init__.py | from __future__ import absolute_import
from .nodes import *
| 61 | 14.5 | 38 | py |
gunpowder | gunpowder-master/gunpowder/contrib/nodes/add_vector_map.py | import copy
import logging
import numpy as np
from scipy.spatial import KDTree
from gunpowder.array import Array
from gunpowder.array_spec import ArraySpec
from gunpowder.batch_request import BatchRequest
from gunpowder.coordinate import Coordinate
from gunpowder.morphology import enlarge_binary_map
from gunpowder.nodes.batch_filter import BatchFilter
from gunpowder.graph_spec import GraphSpec
logger = logging.getLogger(__name__)
class AddVectorMap(BatchFilter):
    def __init__(
        self,
        src_and_trg_points,
        voxel_sizes,
        radius_phys,
        partner_criterion,
        stayinside_array_keys=None,
        pad_for_partners=(0, 0, 0),
    ):
        """Creates a vector map of shape [dim_vector, [shape_of_array]] (e.g. [3, 50,50,50] for an array of
        shape (50,50,50)) where every voxel which is close to a any source point location has a vector which points to
        one of the source point location's target location.
        Close to a point location in src_point includes all voxels which
            1) lie within distance radius_phys of the considered src point location
            2) (if stayinside_array_keys is not None), lie within the same segment as the src location in the
            mask provided in stayinside_array_keys.
        The partner_criterion decides to which target location of the source point location that the vector of a
        voxel points (the different criterions are described below).
        Args:
            src_and_trg_points (dict): Dictionary from :class:``ArrayKey`` of the vector map to be created
                                        to a tuple (:class:``GraphKey`` of the source points, :class:``GraphKey``
                                        of the target points) which define the source and target points.
            voxel_sizes (dict): Dictionary from
                :class:``ArrayKey`` of the vector
                map to be created to a
                :class:`Coordinate` for the voxel
                size of the array.
            stayinside_array_keys (dict): Dictionary from :class:``ArrayKey`` of the vector map to be created to
                                        :class:``ArrayKey`` of the stayinside_array.
                                        The stayinside_array is assumed to contain discrete objects labeled with
                                        different object ids. The object id at the specific source location is used
                                        to restrict the region where vectors are created around a source location.
                                        Voxels that are located outside of this object are set to zero.
                                        If stayinside_array_keys is None, all the voxels within distance
                                        radius_phys to the source location receive a vector.
            pad_for_partners (tuple): n-dim tuple which defines padding of trg_points request in all dimensions
                                    (in world units).
                                    This might be used s.t. also partner locations which lie within the padded
                                    region, hence slightly outside of the vector map's roi, are considered.
            radius_phys (int): Radius (in world units) to restrict region where vectors are created around
                                a source location.
            partner_criterion(str): 'min_distance' or 'all'
                                    'min_distance': the vectors of all the voxels around a source location
                                    point to the same target location, namely the location which has the
                                    minimal distance to the considered source location.
                                    'all': all partner locations of a given source location are considered.
                                    The region around a source location is split up into (num_partners)-subregions
                                    where each voxel points to the target location for which this subregion
                                    is the closest.
        """
        self.array_to_src_trg_points = src_and_trg_points
        self.voxel_sizes = voxel_sizes
        self.array_keys_to_stayinside_array_keys = stayinside_array_keys
        self.pad_for_partners = pad_for_partners
        self.radius_phys = radius_phys
        self.partner_criterion = partner_criterion

    def setup(self):
        """Declare the provided vector-map arrays, with ROIs shrunk by the partner padding."""
        for (
            array_key,
            (src_points_key, trg_points_key),
        ) in self.array_to_src_trg_points.items():
            for points_key in [src_points_key, trg_points_key]:
                assert (
                    points_key in self.spec
                ), "Asked for {} in AddVectorMap from {}, where {} is not provided.".format(
                    array_key, points_key, points_key
                )
            neg_pad_for_partners = Coordinate(
                (self.pad_for_partners * np.asarray([-1])).tolist()
            )
            self.provides(
                array_key,
                ArraySpec(
                    roi=self.spec[src_points_key].roi.grow(
                        neg_pad_for_partners, neg_pad_for_partners
                    ),
                    voxel_size=self.voxel_sizes[array_key],
                    interpolatable=False,
                    dtype=np.float32,
                ),
            )
        self.enable_autoskip()

    def prepare(self, request):
        """Request source/target points (and optional stayinside masks) for each requested vector map."""
        deps = BatchRequest()
        for (
            array_key,
            (src_points_key, trg_points_key),
        ) in self.array_to_src_trg_points.items():
            if array_key in request:
                # increase or set request for points to be array roi + padding for partners outside roi for target points
                deps[src_points_key] = GraphSpec(request[array_key].roi)
                padded_roi = request[array_key].roi.grow(
                    (self.pad_for_partners), (self.pad_for_partners)
                )
                deps[trg_points_key] = GraphSpec(padded_roi)
        # stayinside masks are optional: the constructor default is None and
        # __get_mask already handles that case, so guard before iterating
        if self.array_keys_to_stayinside_array_keys is not None:
            for (
                array_key,
                stayinside_array_key,
            ) in self.array_keys_to_stayinside_array_keys.items():
                if array_key in request:
                    deps[stayinside_array_key] = copy.deepcopy(request[array_key])
        return deps

    def process(self, batch, request):
        # create vector map and add it to batch
        for (
            array_key,
            (src_points_key, trg_points_key),
        ) in self.array_to_src_trg_points.items():
            if array_key in request:
                vector_map = self.__get_vector_map(
                    batch=batch, request=request, vector_map_array_key=array_key
                )
                spec = self.spec[array_key].copy()
                spec.roi = request[array_key].roi
                batch.arrays[array_key] = Array(data=vector_map, spec=spec)

    def __get_vector_map(self, batch, request, vector_map_array_key):
        """Compute the vector map array for one requested array key."""
        src_points_key, trg_points_key = self.array_to_src_trg_points[
            vector_map_array_key
        ]
        dim_vectors = len(request[vector_map_array_key].roi.shape)
        voxel_size_vm = self.voxel_sizes[vector_map_array_key]
        offset_vector_map_phys = request[vector_map_array_key].roi.offset
        vector_map_total = np.zeros(
            (dim_vectors,) + (request[vector_map_array_key].roi.shape // voxel_size_vm),
            dtype=np.float32,
        )
        if batch.graphs[src_points_key].num_vertices() == 0:
            return vector_map_total
        for node in batch.graphs[src_points_key].nodes:
            if request[vector_map_array_key].roi.contains(Coordinate(node.location)):
                # get all partner locations which should be considered
                relevant_partner_loc = self.__get_relevant_partner_locations(
                    batch, node, trg_points_key
                )
                if len(relevant_partner_loc) > 0:
                    # get locations where to set vectors around source location
                    # (look only at region close to src location (to avoid np.nonzero() over entire array))
                    mask = self.__get_mask(
                        batch, request, vector_map_array_key, node.location
                    )
                    offset_vx_considered_mask = [
                        (
                            (
                                node.location[dim]
                                - self.radius_phys
                                - offset_vector_map_phys[dim]
                            )
                            // voxel_size_vm[dim]
                        )
                        for dim in range(dim_vectors)
                    ]
                    clipped_offset_vx_considered_mask = np.clip(
                        offset_vx_considered_mask, a_min=0, a_max=np.inf
                    )
                    slices = tuple(
                        slice(
                            int(np.max((0, offset_vx_considered_mask[dim]))),
                            int(
                                np.min(
                                    (
                                        offset_vx_considered_mask[dim]
                                        + (2 * self.radius_phys // voxel_size_vm[dim]),
                                        ((mask.shape[dim])),
                                    )
                                )
                            ),
                        )
                        for dim in range(dim_vectors)
                    )
                    considered_region_mask = mask[slices]
                    locations_to_fill_vx = np.reshape(
                        np.nonzero(considered_region_mask), [dim_vectors, -1]
                    ).T
                    locations_to_fill_abs_phys = (
                        (
                            (locations_to_fill_vx + clipped_offset_vx_considered_mask)
                            * voxel_size_vm
                        )
                        + offset_vector_map_phys
                    ).tolist()
                    # check for target node with largest distance first and add vector pointing to it to vector_map_total
                    num_src_vectors_per_trg_loc = len(
                        locations_to_fill_abs_phys
                    ) // len(relevant_partner_loc)
                    if num_src_vectors_per_trg_loc > 0:
                        dist_to_locs = {}
                        for phys_loc in relevant_partner_loc:
                            dist_to_locs[
                                np.linalg.norm(node.location - phys_loc)
                            ] = phys_loc
                        for nr, dist in enumerate(
                            reversed(np.sort(list(dist_to_locs.keys())))
                        ):
                            trg_loc_abs_phys = dist_to_locs[dist]
                            kdtree_locs_vector_map = KDTree(locations_to_fill_abs_phys)
                            if nr == len(relevant_partner_loc) - 1:
                                # last target gets all remaining voxels
                                num_src_vectors_per_trg_loc = len(
                                    locations_to_fill_abs_phys
                                )
                            distances, ids = kdtree_locs_vector_map.query(
                                trg_loc_abs_phys, k=num_src_vectors_per_trg_loc
                            )
                            # query with k=1 returns a scalar, wrap for iteration
                            try:
                                len(ids)
                            except TypeError:
                                ids = [ids]
                            for src_voxel_id in ids:
                                # remove node from list which are taken as neighbors of THIS target location
                                neighbor_loc_abs_phys = kdtree_locs_vector_map.data[
                                    src_voxel_id
                                ]
                                locations_to_fill_abs_phys.remove(
                                    neighbor_loc_abs_phys.tolist()
                                )
                                # get vector for THIS neighbor to THIS target location, get its location and place it
                                vector = trg_loc_abs_phys - neighbor_loc_abs_phys
                                neighbor_loc_shifted_vx = (
                                    neighbor_loc_abs_phys - offset_vector_map_phys
                                ) // voxel_size_vm
                                for dim in range(dim_vectors):
                                    vector_map_total[dim][
                                        tuple([int(l)] for l in neighbor_loc_shifted_vx)
                                    ] = vector[dim]
        return vector_map_total

    def __get_relevant_partner_locations(self, batch, node, trg_points_key):
        # criterions: 'min_distance' or 'all'
        # get all partner locations
        all_partners_locations = []
        for partner_id in node.attrs["partner_ids"]:
            if batch.graphs[trg_points_key].contains(partner_id):
                all_partners_locations.append(
                    batch.graphs[trg_points_key].node(partner_id).location
                )
        # if only one partner location, return this one for any given criterion
        if len(all_partners_locations) <= 1:
            return all_partners_locations
        # return all partner locations
        elif self.partner_criterion == "all":
            return all_partners_locations
        # return partner with minimal euclidean distance to src_location
        elif self.partner_criterion == "min_distance":
            min_distance, stored_pos = np.inf, []
            for partner_loc in all_partners_locations:
                distance = np.linalg.norm(partner_loc - node.location)
                if distance < min_distance:
                    min_distance = distance.copy()
                    stored_pos = partner_loc.copy()
            return [stored_pos]

    def __get_mask(self, batch, request, vector_map_array_key, src_location):
        """create binary mask encoding where to place vectors for in region around considered src_location"""
        voxel_size = self.voxel_sizes[vector_map_array_key]
        offset_bm_phys = request[vector_map_array_key].roi.offset
        shape_bm_array_vx = request[vector_map_array_key].roi.shape // voxel_size
        binary_map = np.zeros(shape_bm_array_vx, dtype="uint8")
        if self.array_keys_to_stayinside_array_keys is None:
            mask = np.ones_like(binary_map)
        else:
            stayinside_array_key = self.array_keys_to_stayinside_array_keys[
                vector_map_array_key
            ]
            mask = batch.arrays[stayinside_array_key].data
        # NOTE(review): tuple comparison of shapes is lexicographic, not
        # element-wise -- confirm masks are only ever uniformly larger
        if mask.shape > binary_map.shape:
            # assumption: binary map is centered in the mask array
            padding = (np.asarray(mask.shape) - np.asarray(binary_map.shape)) / 2.0
            # slice bounds must be plain ints and the multi-dim index a tuple
            # (numpy rejects float bounds and lists of slices)
            slices = tuple(
                slice(int(np.floor(pad)), -int(np.ceil(pad))) for pad in padding
            )
            mask = mask[slices]
        binary_map_total = np.zeros_like(binary_map)
        shifted_loc = src_location - np.asarray(offset_bm_phys)
        shifted_loc = shifted_loc.astype(np.int32) // voxel_size
        object_id = mask[tuple([loc] for loc in shifted_loc)][
            0
        ]  # 0 index, otherwise numpy array with single number
        binary_map[tuple([loc] for loc in shifted_loc)] = 1
        binary_map = enlarge_binary_map(
            binary_map, radius=self.radius_phys, voxel_size=voxel_size
        )
        # restrict the blob to the object the source location lies in
        binary_map[mask != object_id] = 0
        binary_map_total += binary_map
        binary_map.fill(0)
        binary_map_total[binary_map_total != 0] = 1
        return binary_map_total
| 16,212 | 48.279635 | 122 | py |
gunpowder | gunpowder-master/gunpowder/contrib/nodes/add_gt_mask_exclusive_zone.py | import copy
import logging
import numpy as np
from scipy import ndimage
from gunpowder.nodes.batch_filter import BatchFilter
from gunpowder.array import Array, ArrayKeys
from gunpowder.nodes.rasterize_graph import RasterizationSettings
from gunpowder.morphology import enlarge_binary_map
logger = logging.getLogger(__name__)
class AddGtMaskExclusiveZone(BatchFilter):
    """Create ExclusizeZone mask for a binary map in batch and add it as
    array to batch.
    An ExclusiveZone mask is a bianry mask [0,1] where locations which lie
    within a given distance to the ON (=1) regions (surrounding the ON regions)
    of the given binary map are set to 0, whereas all the others are set to 1.
    Args:
        EZ_masks_to_binary_map(dict, :class:``ArrayKey``->:class:``ArrayKey``):
            Arrays of exclusive zones (keys of dict) to create for which
            binary mask (values of dict).
        gaussian_sigma_for_zone(float, optional): Defines extend of exclusive
            zone around ON region in binary map. Defaults to 1.
        rasterization_setting(:class:``RasterizationSettings``, optional): Which
            rasterization setting to use.
    """

    def __init__(
        self,
        EZ_masks_to_binary_map,
        gaussian_sigma_for_zone=1,
        rasterization_setting=None,
    ):
        self.EZ_masks_to_binary_map = EZ_masks_to_binary_map
        self.gaussian_sigma_for_zone = gaussian_sigma_for_zone
        if rasterization_setting is None:
            self.rasterization_setting = RasterizationSettings()
        else:
            self.rasterization_setting = rasterization_setting
        self.skip_next = False

    def setup(self):
        """Advertise one EZ mask array per binary map provided upstream."""
        self.upstream_spec = self.get_upstream_provider().get_spec()
        self.spec = copy.deepcopy(self.upstream_spec)
        for EZ_mask_type, binary_map_type in self.EZ_masks_to_binary_map.items():
            if binary_map_type in self.upstream_spec.arrays:
                self.spec.arrays[EZ_mask_type] = self.spec.arrays[binary_map_type]

    def get_spec(self):
        return self.spec

    def prepare(self, request):
        """Record which EZ masks to create and strip them from the upstream request."""
        self.EZ_masks_to_create = []
        for EZ_mask_type, binary_map_type in self.EZ_masks_to_binary_map.items():
            # do nothing if binary mask to create EZ mask around is not requested as well
            if EZ_mask_type in request.arrays:
                # assert that binary mask for which EZ mask is created for is requested
                assert (
                    binary_map_type in request.arrays
                ), "ExclusiveZone Mask for {}, can only be created if {} also requested.".format(
                    EZ_mask_type, binary_map_type
                )
                # assert that ROI of EZ lies within ROI of binary mask
                assert request.arrays[binary_map_type].contains(
                    request.arrays[EZ_mask_type]
                ), "EZ mask for {} requested for ROI outside of source's ({}) ROI.".format(
                    EZ_mask_type, binary_map_type
                )
                self.EZ_masks_to_create.append(EZ_mask_type)
                del request.arrays[EZ_mask_type]
        if len(self.EZ_masks_to_create) == 0:
            # logger.warn is a deprecated alias of logger.warning
            logger.warning("no ExclusiveZone Masks requested, will do nothing")
            self.skip_next = True

    def process(self, batch, request):
        # do nothing if no gt binary maps were requested
        if self.skip_next:
            self.skip_next = False
            return
        for EZ_mask_type in self.EZ_masks_to_create:
            binary_map_type = self.EZ_masks_to_binary_map[EZ_mask_type]
            binary_map = batch.arrays[binary_map_type].data
            resolution = batch.arrays[binary_map_type].resolution
            EZ_mask = self.__get_exclusivezone_mask(
                binary_map,
                shape_EZ_mask=request.arrays[EZ_mask_type].get_shape(),
                resolution=resolution,
            )
            batch.arrays[EZ_mask_type] = Array(
                data=EZ_mask, roi=request.arrays[EZ_mask_type], resolution=resolution
            )

    def __get_exclusivezone_mask(self, binary_map, shape_EZ_mask, resolution=None):
        """Exclusive zone surrounds every synapse. Created by enlarging the ON regions of given binary map
        with different gaussian filter, make it binary and subtract the original binary map from it
        """
        shape_diff = np.asarray(binary_map.shape - np.asarray(shape_EZ_mask))
        # NOTE(review): the full size difference is trimmed from *both* sides
        # here; verify this matches the intended centering of the EZ mask
        # (numpy requires a tuple, not a list, of slices for multi-dim indexing)
        slices = tuple(
            slice(diff, shape - diff)
            for diff, shape in zip(shape_diff, binary_map.shape)
        )
        relevant_binary_map = binary_map[slices]
        BM_enlarged_binary = enlarge_binary_map(
            relevant_binary_map,
            marker_size_voxel=self.rasterization_setting.marker_size_voxel,
            voxel_size=resolution,
            marker_size_physical=self.rasterization_setting.marker_size_physical,
        )
        # ON inside the original blob or outside the enlarged blob; OFF in the
        # ring between them (the exclusive zone)
        exclusive_zone = np.ones_like(BM_enlarged_binary) - (
            BM_enlarged_binary - relevant_binary_map
        )
        return exclusive_zone
| 5,113 | 38.953125 | 106 | py |
gunpowder | gunpowder-master/gunpowder/contrib/nodes/add_blobs_from_points.py | import logging
import itertools
import numpy as np
from gunpowder.array import Array
from gunpowder.nodes.batch_filter import BatchFilter
logger = logging.getLogger(__name__)
class AddBlobsFromPoints(BatchFilter):
    """Add an array with blobs at locations given by a specified points
    collection. The blobs are also restricted to stay within the same class in
    the restrictive_mask array that corresponds to the center voxel of the
    blob.
    Args:
        blob_settings(dict):
            Where each desired output blob map should have it's own entry
            consisting of the following format:
            `blob_name` : dict (
                'points_key' : Desired point type to use for blob locations
                'output_array_key': Desired array type name for output map
                'output_array_dtype': Desired array dtype name for output map
                'radius': Desired radius of blobs, since blobs are restricted
                by the restricting mask, this radius should be thought of as
                the maximum radius of the blobs.
                'output_voxel_size': voxel_size of output array. Voxel size of
                restrictive mask
                'restrictive_mask_key': Array type of restrictive mask
                'id_mapper': Functor (class with a __call__ function) that can
                take an ID and map it to some other value. This class should
                also have a 'make_map' method that will be called at the
                beggining of each process step and given all ids in all arrays
                to be processed for that batch.
            )
        This is an example blob_setting for presynaptic blobs in the cremi
        dataset::
            add_blob_data = {
                'PRESYN': {
                    'points_key': PointsTypes.PRESYN,
                    'output_array_key': ArrayTypes.PRESYN_BLOB,
                    'output_array_dtype': 'int64',
                    'radius': 60,
                    'output_voxel_size': voxel_size,
                    'restrictive_mask_key': ArrayTypes.GT_LABELS,
                    'max_desired_overlap': 0.05
                }
            }
    """

    def __init__(self, blob_settings):
        self.blob_settings = blob_settings
        # attach a precomputed sphere stamp to every blob setting
        for blob_name, settings in self.blob_settings.items():
            blob_settings[blob_name]["blob_placer"] = BlobPlacer(
                radius=settings["radius"],
                voxel_size=settings["output_voxel_size"],
                dtype=settings["output_array_dtype"],
            )

    def setup(self):
        for blob_name, settings in self.blob_settings.items():
            self.provides(
                settings["output_array_key"],
                self.spec[settings["restrictive_mask_key"]],
            )

    def prepare(self, request):
        """Grow the request with the points and mask each requested blob map needs."""
        # iterate over a snapshot: entries may be deleted while looping, which
        # would raise RuntimeError on a live dict view
        for blob_name, settings in list(self.blob_settings.items()):
            array_key = settings["output_array_key"]
            if array_key in request:
                points_key = settings["points_key"]
                request_roi = request[array_key].roi
                # If point is not already requested, add to request
                if points_key not in request.points_specs:
                    request.add(points_key, request_roi.shape)
                else:
                    request[points_key].roi = request[points_key].roi.union(request_roi)
                # Get correct size for restrictive_mask_key
                restrictive_mask_key = settings["restrictive_mask_key"]
                if restrictive_mask_key not in request.array_specs:
                    request.add(restrictive_mask_key, request_roi.shape)
                else:
                    request[restrictive_mask_key].roi = request[
                        restrictive_mask_key
                    ].roi.union(request_roi)
            else:
                # do nothing if no blobs of this type were requested
                logger.warning(
                    "%s output array type for %s never requested. \
                                Deleting entry..."
                    % (settings["output_array_key"], blob_name)
                )
                del self.blob_settings[blob_name]

    def process(self, batch, request):
        """Rasterize one blob map per setting and crop the batch to the request."""
        # check arrays and gather all IDs and synapse IDs
        all_points = {}
        all_synapse_ids = {}
        for blob_name, settings in self.blob_settings.items():
            # Unpack settings
            points_key = settings["points_key"]
            restrictive_mask_key = settings["restrictive_mask_key"]
            # Make sure both the necesary point types and arrays are present
            assert points_key in batch.points, (
                "Upstream does not provide required point type\
            : %s"
                % points_key
            )
            assert restrictive_mask_key in batch.arrays, (
                "Upstream does not provide required \
            array type: %s"
                % restrictive_mask_key
            )
            # Get point data
            points = batch.points[points_key]
            # If point doesn't have it's corresponding partner, delete it
            if (
                "partner_points" in settings.keys()
                and settings["partner_points"] is not None
            ):
                partner_points = batch.points[settings["partner_points"]]
                synapse_ids = []
                # iterate over a snapshot: points are deleted inside the loop
                for point_id, point in list(points.data.items()):
                    if not point.partner_ids[0] in partner_points.data.keys():
                        logger.warning(
                            "Point %s has no partner. Deleting..." % point_id
                        )
                        del points.data[point_id]
                    else:
                        synapse_ids.append(point.synapse_id)
                all_synapse_ids[points_key] = synapse_ids
            all_points[points_key] = points
        for blob_name, settings in self.blob_settings.items():
            # Unpack settings
            points_key = settings["points_key"]
            array_key = settings["output_array_key"]
            voxel_size = settings["output_voxel_size"]
            restrictive_mask_key = settings["restrictive_mask_key"]
            restrictive_mask = batch.arrays[restrictive_mask_key].crop(
                request[array_key].roi
            )
            id_mapper = settings["id_mapper"]
            dtype = settings["output_array_dtype"]
            if id_mapper is not None:
                id_mapper.make_map(all_points)
            # Initialize output array; integer division so the shape is a
            # valid integer array shape (true division yields floats)
            shape_array = np.asarray(request[array_key].roi.shape) // voxel_size
            blob_map = np.zeros(shape_array, dtype=dtype)
            # Get point data
            points = batch.points[points_key]
            offset = np.asarray(points.spec.roi.offset)
            for point_id, point_data in points.data.items():
                voxel_location = np.round(
                    ((point_data.location - offset) / (voxel_size))
                ).astype("int32")
                synapse_id = point_data.synapse_id
                # if mapping exists, do it
                if id_mapper is not None:
                    synapse_id = id_mapper(synapse_id)
                settings["blob_placer"].place(
                    blob_map, voxel_location, synapse_id, restrictive_mask.data
                )
            # Provide array
            batch.arrays[array_key] = Array(blob_map, spec=request[array_key].copy())
            batch.arrays[array_key].spec.dtype = dtype
            # add id_mapping to attributes
            if id_mapper is not None:
                id_map_list = np.array(list(id_mapper.get_map().items()))
                batch.arrays[array_key].attrs["id_mapping"] = id_map_list
            # materialize the keys view so the attribute survives the batch
            batch.arrays[array_key].attrs["point_ids"] = list(points.data.keys())
            batch.arrays[array_key].attrs["synapse_ids"] = all_synapse_ids[points_key]
        # Crop all other requests
        for array_key, array in request.array_specs.items():
            batch.arrays[array_key] = batch.arrays[array_key].crop(array.roi)
        for points_key, points in request.points_specs.items():
            # restrict the points' ROI to the requested ROI (the former
            # chained assignment replaced the Points object with a Roi)
            batch.points[points_key].spec.roi = points.roi
class BlobPlacer:
    """Places spherical synapse blobs into label arrays from location data.

    A binary sphere template is precomputed once in ``__init__`` and stamped
    into a target array by :func:`place`, restricted by a mask.

    Args:
        radius: int - the desired radius of synaptic blobs, in physical units.
        voxel_size: array, list, tuple - voxel size in physical units.
        dtype: numpy dtype of the sphere template (default ``"uint64"``).
    """

    def __init__(self, radius, voxel_size, dtype="uint64"):
        self.voxel_size = voxel_size
        if isinstance(self.voxel_size, (list, tuple)):
            self.voxel_size = np.asarray(self.voxel_size)

        # Radius in voxels, per dimension. Cast to int so it can be used for
        # shapes, ranges and slicing below: under Python 3, '/' is true
        # division and would yield floats, breaking np.zeros() and range().
        self.radius = (radius / self.voxel_size).astype(int)

        # cube circumscribing the sphere; sphere voxels are set to 1
        self.sphere_map = np.zeros(self.radius * 2, dtype=dtype)
        self.center = (np.asarray(self.sphere_map.shape)) / 2

        ranges = [range(0, 2 * r) for r in self.radius]
        for index in np.asarray(list(itertools.product(*ranges))):
            # if physical distance to the center is within radius, place a 1
            if np.linalg.norm((self.center - index) * self.voxel_size) <= radius:
                self.sphere_map[tuple(index)] = 1

        # total number of voxels inside the sphere template
        self.sphere_voxel_array = np.sum(self.sphere_map, axis=(0, 1, 2))

    def place(self, matrix, location, marker, mask):
        """Places a synapse blob.

        Args:
            matrix: np array - target array to place the blob into.
            location: np array - voxel location where to place the blob.
            marker: int - the ID used to mark this particular synapse in the
                matrix.
            mask: 3D np array - when placing a blob, the mask is sampled at
                the center location and the blob is only placed on the
                intersection where the mask has the same ID. Usually used to
                restrict synaptic blobs inside their respective cells (using
                a segmentation).

        Returns:
            ``(matrix, True)`` if the blob was placed, ``(matrix, False)``
            if the circumscribing cube did not fit inside ``matrix``.
        """
        # Calculate cube circumscribing the sphere to place
        start = location - self.radius
        end = location + self.radius

        # check if sphere fits in matrix
        if np.all(start >= 0) and np.all(np.asarray(matrix.shape) - end >= 0):
            # calculate actual synapse shape from intersection between sphere
            # and restrictive mask (voxels sharing the mask ID at the center)
            restricting_label = mask[location[0], location[1], location[2]]
            restricting_mask = (
                mask[start[0] : end[0], start[1] : end[1], start[2] : end[2]]
                == restricting_label
            )
            shape = self.sphere_map * restricting_mask

            # place shape in chosen layer
            matrix[start[0] : end[0], start[1] : end[1], start[2] : end[2]] += (
                shape * marker
            )
            return matrix, True

        logger.warning("Location %s out of bounds" % (location))
        return matrix, False
| 11,060 | 38.503571 | 98 | py |
gunpowder | gunpowder-master/gunpowder/contrib/nodes/zero_out_const_sections.py | from gunpowder.nodes.batch_filter import BatchFilter
class ZeroOutConstSections(BatchFilter):
    """Set every z-section that has only constant values to 0.

    This handles blank (missing) sections in a less invasive way: instead of
    leaving them at -1 (which is "black", the lowest possible input to the
    CNN), 0 ("gray") might be easier to ignore.

    Apply this filter after all other intensity manipulations are done.
    """

    def __init__(self, intensities):
        self.intensities = intensities

    def process(self, batch, request):
        assert batch.get_total_roi().dims == 3, "This filter only works on 3D data."

        raw = batch.arrays[self.intensities]
        voxel_size = self.spec[self.intensities].voxel_size
        num_sections = (raw.spec.roi / voxel_size).get_shape()[0]

        for section in range(num_sections):
            data = raw.data[section]
            # constant section <=> min equals max
            if data.min() == data.max():
                raw.data[section] = 0
| 940 | 32.607143 | 84 | py |
gunpowder | gunpowder-master/gunpowder/contrib/nodes/add_boundary_distance_gradients.py | import logging
import numpy as np
from gunpowder.array import Array
from gunpowder.batch_request import BatchRequest
from gunpowder.nodes.batch_filter import BatchFilter
from numpy.lib.stride_tricks import as_strided
from scipy.ndimage.morphology import distance_transform_edt
logger = logging.getLogger(__name__)
class AddBoundaryDistanceGradients(BatchFilter):
    """Add an array with vectors pointing away from the closest boundary.

    The vectors are the spacial gradients of the distance transform, i.e., the
    distance to the boundary between labels or the background label (0).

    Args:

        label_array_key(:class:``ArrayKey``): The array to read the labels
            from.

        gradient_array_key(:class:``ArrayKey``): The array to generate
            containing the gradients.

        distance_array_key(:class:``ArrayKey``, optional): The array to
            generate containing the values of the distance transform.

        boundary_array_key(:class:``ArrayKey``, optional): The array to
            generate containing a boundary labeling. Note this array will be
            doubled as it encodes boundaries between voxels.

        normalize(string, optional): ``None``, ``'l1'``, or ``'l2'``. Specifies
            if and how to normalize the gradients.

        scale(string, optional): ``None`` or ``exp``. If ``exp``, distance
            gradients will be scaled by ``beta*e**(-d*alpha)``, where ``d`` is
            the distance to the boundary.

        scale_args(tuple, optional): For ``exp`` a tuple with the values of
            ``alpha`` and ``beta``.
    """

    def __init__(
        self,
        label_array_key,
        gradient_array_key,
        distance_array_key=None,
        boundary_array_key=None,
        normalize=None,
        scale=None,
        scale_args=None,
    ):
        self.label_array_key = label_array_key
        self.gradient_array_key = gradient_array_key
        self.distance_array_key = distance_array_key
        self.boundary_array_key = boundary_array_key
        self.normalize = normalize
        self.scale = scale
        self.scale_args = scale_args

    def setup(self):
        """Declare the generated arrays on top of the upstream provider."""

        assert self.label_array_key in self.spec, (
            "Upstream does not provide %s needed by "
            "AddBoundaryDistanceGradients" % self.label_array_key
        )

        spec = self.spec[self.label_array_key].copy()
        spec.dtype = np.float32
        self.provides(self.gradient_array_key, spec)
        if self.distance_array_key is not None:
            self.provides(self.distance_array_key, spec)
        if self.boundary_array_key is not None:
            # boundaries live between voxels, so the boundary array has half
            # the voxel size (double resolution)
            spec.voxel_size /= 2
            self.provides(self.boundary_array_key, spec)
        self.enable_autoskip()

    def prepare(self, request):
        """Request the labels in the same ROI as the requested gradients."""

        deps = BatchRequest()
        deps[self.label_array_key] = request[self.gradient_array_key]
        return deps

    def process(self, batch, request):
        if not self.gradient_array_key in request:
            return

        labels = batch.arrays[self.label_array_key].data
        voxel_size = self.spec[self.label_array_key].voxel_size

        # get boundaries between label regions (double-resolution boolean map)
        boundaries = self.__find_boundaries(labels)

        # mark boundaries with 0 (not 1)
        boundaries = 1.0 - boundaries

        if np.sum(boundaries == 0) == 0:
            # no boundary -- no distance to compute
            distances = np.zeros(labels.shape, dtype=np.float32)
        else:
            # get distances (voxel_size/2 because image is doubled)
            distances = distance_transform_edt(
                boundaries, sampling=tuple(float(v) / 2 for v in voxel_size)
            )
            distances = distances.astype(np.float32)

            # restore original shape (take every second voxel)
            downsample = (slice(None, None, 2),) * len(voxel_size)
            distances = distances[downsample]

            # set distances in background to 0
            distances[labels == 0] = 0

        # spatial gradients of the distance transform, in physical units
        gradients = np.asarray(np.gradient(distances, *voxel_size))

        # set gradients on background voxels to 0
        for d in range(len(voxel_size)):
            gradients[d, labels == 0] = 0

        if self.normalize is not None:
            self.__normalize(gradients, self.normalize)

        if self.scale is not None:
            self.__scale(gradients, distances, self.scale, self.scale_args)

        spec = self.spec[self.gradient_array_key].copy()
        spec.roi = request[self.gradient_array_key].roi
        batch.arrays[self.gradient_array_key] = Array(gradients, spec)

        if self.distance_array_key is not None and self.distance_array_key in request:
            batch.arrays[self.distance_array_key] = Array(distances, spec)

        if self.boundary_array_key is not None and self.boundary_array_key in request:
            # add one more face at each dimension, as boundary map has shape
            # 2*s - 1 of original shape s
            grown = np.ones(tuple(s + 1 for s in boundaries.shape))
            grown[tuple(slice(0, s) for s in boundaries.shape)] = boundaries
            spec.voxel_size = voxel_size / 2
            logger.debug("voxel size of boundary array: %s", spec.voxel_size)
            batch.arrays[self.boundary_array_key] = Array(grown, spec)

    def __find_boundaries(self, labels):
        """Compute a double-resolution (2*s - 1 per dimension) boolean map
        that is True between neighboring voxels whose labels differ."""

        # labels: 1 1 1 1 0 0 2 2 2 2 3 3       n
        # shift : 1 1 1 1 0 0 2 2 2 2 3         n - 1
        # diff  : 0 0 0 1 0 1 0 0 0 1 0         n - 1
        # bound.: 00000001000100000001000      2n - 1

        logger.debug("computing boundaries for %s", labels.shape)

        dims = len(labels.shape)
        in_shape = labels.shape
        out_shape = tuple(2 * s - 1 for s in in_shape)
        out_slices = tuple(slice(0, s) for s in out_shape)  # (unused)

        boundaries = np.zeros(out_shape, dtype=bool)

        logger.debug("boundaries shape is %s", boundaries.shape)

        for d in range(dims):
            logger.debug("processing dimension %d", d)

            # neighboring voxels along dimension d...
            shift_p = [slice(None)] * dims
            shift_p[d] = slice(1, in_shape[d])

            shift_n = [slice(None)] * dims
            shift_n[d] = slice(0, in_shape[d] - 1)

            # ...differ where a boundary crosses between them
            diff = (labels[tuple(shift_p)] - labels[tuple(shift_n)]) != 0

            logger.debug("diff shape is %s", diff.shape)

            # write diff to the odd positions along d of the doubled map
            target = [slice(None, None, 2)] * dims
            target[d] = slice(1, out_shape[d], 2)

            logger.debug("target slices are %s", target)

            boundaries[tuple(target)] = diff

        return boundaries

    def __normalize(self, gradients, norm):
        """Normalize gradient vectors in-place by their L1 or L2 norm."""

        dims = gradients.shape[0]

        if norm == "l1":
            factors = sum([np.abs(gradients[d]) for d in range(dims)])
        elif norm == "l2":
            factors = np.sqrt(sum([np.square(gradients[d]) for d in range(dims)]))
        else:
            raise RuntimeError("norm %s not supported" % norm)

        # avoid division by (near) zero norms
        factors[factors < 1e-5] = 1
        gradients /= factors

    def __scale(self, gradients, distances, scale, scale_args):
        """Scale gradients in-place by ``beta*e**(-distance*alpha)`` for
        ``scale == 'exp'``."""

        dims = gradients.shape[0]

        if scale == "exp":
            alpha, beta = self.scale_args
            factors = np.exp(-distances * alpha) * beta
            gradients *= factors
| 7,250 | 34.544118 | 86 | py |
gunpowder | gunpowder-master/gunpowder/contrib/nodes/add_nonsymmetric_affinities.py | import copy
import logging
import numpy as np
import pdb
from gunpowder.array import Array
from gunpowder.nodes.batch_filter import BatchFilter
logger = logging.getLogger(__name__)
class AddNonsymmetricAffinities(BatchFilter):
    """Computes non-symmetric affinities between two arrays.

    For every offset vector in ``affinity_vectors``, two affinity maps are
    produced by a voxel-wise AND between one array and a crop of the other
    array shifted by the (negated) vector:

    * ``affinity_array_key_1``: affinities of ``array_key_2`` onto
      ``array_key_1``
    * ``affinity_array_key_2``: affinities of ``array_key_1`` onto
      ``array_key_2``

    The upstream requests for both input arrays are grown by the maximal
    offset so that the shifted crops are available.
    """

    def __init__(
        self,
        affinity_vectors,
        array_key_1,
        array_key_2,
        affinity_array_key_1,
        affinity_array_key_2,
    ):
        self.array_key_1 = array_key_1
        self.array_key_2 = array_key_2
        self.affinity_array_key_1 = affinity_array_key_1
        self.affinity_array_key_2 = affinity_array_key_2
        self.affinity_vectors = affinity_vectors

    def setup(self):
        assert self.array_key_1 in self.spec, (
            "Upstream does not provide %s needed by \
            AddNonsymmetricAffinities"
            % self.array_key_1
        )
        assert self.array_key_2 in self.spec, (
            "Upstream does not provide %s needed by \
            AddNonsymmetricAffinities"
            % self.array_key_2
        )

        voxel_size = self.spec[self.array_key_1].voxel_size
        self.upstream_spec = self.get_upstream_provider().spec
        self.upstream_roi = self.upstream_spec.get_total_roi()

        # get maximum offset in each dimension, rounded up to whole voxels;
        # this is how much the requested input ROIs have to grow upstream
        self.padding = np.max(np.abs(self.affinity_vectors), axis=0)
        self.padding = tuple(round_to_voxel_size(self.padding, voxel_size))
        logger.debug("padding neg: %s" % np.asarray(self.padding))

        spec = self.spec[self.array_key_1].copy()
        self.provides(self.affinity_array_key_1, spec)
        self.provides(self.affinity_array_key_2, spec)
        self.enable_autoskip()

    def prepare(self, request):
        """Grows the ROIs of both input arrays by the padding."""

        array_1_roi = request[self.array_key_1].roi
        logger.debug("downstream %s request: " % self.array_key_1 + str(array_1_roi))
        array_2_roi = request[self.array_key_2].roi
        logger.debug("downstream %s request: " % self.array_key_2 + str(array_2_roi))

        # grow labels ROI to accomodate padding TODO: vol 2
        array_1_roi = array_1_roi.grow(self.padding, self.padding)
        array_2_roi = array_2_roi.grow(self.padding, self.padding)
        request[self.array_key_1].roi = array_1_roi
        request[self.array_key_2].roi = array_2_roi

        logger.debug("upstream %s request: " % self.array_key_1 + str(array_1_roi))
        logger.debug("upstream %s request: " % self.array_key_2 + str(array_2_roi))

    def process(self, batch, request):
        full_vol1 = batch.arrays[self.array_key_1]
        full_vol2 = batch.arrays[self.array_key_2]

        # Both input arrays should match in dtype and voxel size
        assert (
            full_vol1.spec.dtype == full_vol2.spec.dtype
        ), "data type of array 1(%s) and array 2(%s) should match" % (
            full_vol1.spec.dtype,
            full_vol2.spec.dtype,
        )
        # fixed: this message used to (wrongly) claim a data type mismatch
        assert (
            full_vol1.spec.voxel_size == full_vol2.spec.voxel_size
        ), "voxel size of array 1(%s) and array 2(%s) should match" % (
            full_vol1.spec.voxel_size,
            full_vol2.spec.voxel_size,
        )

        logger.debug("computing ground-truth affinities from labels")

        # Calculate affinities 1: from vol2 onto vol1

        # Initialize affinity map
        request_vol = request[self.affinity_array_key_1]
        affinity_map = np.zeros(
            (len(self.affinity_vectors),)
            + tuple(request_vol.roi.shape / request_vol.voxel_size),
            dtype=full_vol1.spec.dtype,
        )

        # calculate affinities
        vol1 = full_vol1.crop(request_vol.roi)
        for i, vector in enumerate(self.affinity_vectors):
            vol2 = full_vol2.crop(request_vol.roi.shift(tuple(-vector)))
            affinity_map[i, :, :, :] = np.bitwise_and(vol1.data, vol2.data)

        batch.arrays[self.affinity_array_key_1] = Array(
            affinity_map, spec=request[self.affinity_array_key_1].copy()
        )
        batch.arrays[self.affinity_array_key_1].attrs[
            "affinity_vectors"
        ] = self.affinity_vectors

        # Calculate affinities 2: from vol1 onto vol2

        # Initialize affinity map
        request_vol = request[self.affinity_array_key_2]
        affinity_map = np.zeros(
            (len(self.affinity_vectors),)
            + tuple(request_vol.roi.shape / request_vol.voxel_size),
            dtype=full_vol1.spec.dtype,
        )

        # calculate affinities
        vol2 = full_vol2.crop(request_vol.roi)
        for i, vector in enumerate(self.affinity_vectors):
            vol1 = full_vol1.crop(request_vol.roi.shift(tuple(vector)))
            affinity_map[i, :, :, :] = np.bitwise_and(vol1.data, vol2.data)

        batch.arrays[self.affinity_array_key_2] = Array(
            affinity_map, spec=request[self.affinity_array_key_2].copy()
        )
        batch.arrays[self.affinity_array_key_2].attrs[
            "affinity_vectors"
        ] = self.affinity_vectors

        # Crop all other requests
        for array_key, array in request.array_specs.items():
            batch.arrays[array_key] = batch.arrays[array_key].crop(array.roi)

        for points_key, points in request.points_specs.items():
            # fixed: a chained assignment here used to replace the points
            # object in the batch with the requested Roi; only the points'
            # spec ROI should be restricted to the request
            batch.points[points_key].spec.roi = points.roi
def round_to_voxel_size(shape, voxel_size):
    """Round ``shape`` up to the next multiple of ``voxel_size``, per
    dimension, and return the result as an int32 numpy array."""
    vs = np.asarray(voxel_size, dtype=float)
    rounded = np.ceil(np.divide(shape, vs)) * vs
    return np.asarray(rounded, dtype="int32")
| 5,551 | 34.139241 | 85 | py |
gunpowder | gunpowder-master/gunpowder/contrib/nodes/hdf5_points_source.py | import copy
import logging
import numpy as np
from gunpowder.batch import Batch
from gunpowder.coordinate import Coordinate
from gunpowder.ext import h5py
from gunpowder.graph import GraphKey, Graph
from gunpowder.graph_spec import GraphSpec
from gunpowder.profiling import Timing
from gunpowder.roi import Roi
from gunpowder.nodes.batch_provider import BatchProvider
logger = logging.getLogger(__name__)
class Hdf5PointsSource(BatchProvider):
    """An HDF5 data source for :class:``Graph``. Currently only supports a
    specific case where graphs represent pre- and post-synaptic markers.

    NOTE(review): this class references several names that are not imported
    in this module (``PointsSpec``, ``PointsKeys``, ``Points``,
    ``PreSynPoint``, ``PostSynPoint``); it appears to predate the Graph API
    (whose ``GraphKey``/``GraphSpec`` are imported above) -- confirm before
    use.

    Args:

        filename (string): The HDF5 file.

        datasets (dict): Dictionary of :class:``GraphKey`` -> dataset names
            that this source offers.

        rois (dict): Dictionary of :class:``GraphKey`` -> :class:``Roi`` to
            set the ROI for each point set provided by this source.
    """

    def __init__(self, filename, datasets, rois):
        self.filename = filename
        self.datasets = datasets
        self.rois = rois
        # never updated within this class
        self.ndims = None

    def setup(self):
        # verify all offered datasets exist, then declare their specs
        hdf_file = h5py.File(self.filename, "r")

        for points_key, ds_name in self.datasets.items():
            if ds_name not in hdf_file:
                raise RuntimeError("%s not in %s" % (ds_name, self.filename))

            # NOTE(review): PointsSpec is not defined/imported in this module
            spec = PointsSpec()
            spec.roi = self.rois[points_key]
            self.provides(points_key, spec)

        hdf_file.close()

    def provide(self, request):
        timing = Timing(self)
        timing.start()

        batch = Batch()

        with h5py.File(self.filename, "r") as hdf_file:
            # if pre and postsynaptic locations required, their id
            # SynapseLocation dictionaries should be created together s.t. ids
            # are unique and allow to find partner locations
            if (
                PointsKeys.PRESYN in request.points_specs
                or PointsKeys.POSTSYN in request.points_specs
            ):
                # NOTE(review): assumes PRESYN and POSTSYN are requested
                # together; requesting only one of them raises a KeyError here
                assert (
                    request.points_specs[PointsKeys.PRESYN].roi
                    == request.points_specs[PointsKeys.POSTSYN].roi
                )

                # Cremi specific, ROI offset corresponds to offset present in the
                # synapse location relative to the raw data.
                dataset_offset = self.spec[PointsKeys.PRESYN].roi.offset
                presyn_points, postsyn_points = self.__get_syn_points(
                    roi=request.points_specs[PointsKeys.PRESYN].roi,
                    syn_file=hdf_file,
                    dataset_offset=dataset_offset,
                )

            for points_key, request_spec in request.points_specs.items():
                logger.debug("Reading %s in %s...", points_key, request_spec.roi)
                # NOTE(review): for any key other than PRESYN/POSTSYN this
                # raises a NameError, as both dicts are evaluated here
                id_to_point = {
                    PointsKeys.PRESYN: presyn_points,
                    PointsKeys.POSTSYN: postsyn_points,
                }[points_key]

                points_spec = self.spec[points_key].copy()
                points_spec.roi = request_spec.roi
                batch.points[points_key] = Points(data=id_to_point, spec=points_spec)

        logger.debug("done")

        timing.stop()
        batch.profiling_stats.add(timing)

        return batch

    def __get_syn_points(self, roi, syn_file, dataset_offset=None):
        """Collect pre- and post-synaptic points within ``roi`` from the
        CREMI-style annotation tables in ``syn_file``.

        Returns two dicts, node id -> point, one for pre- and one for
        post-synaptic sites.
        """

        presyn_points_dict, postsyn_points_dict = {}, {}
        presyn_node_ids = syn_file["annotations/presynaptic_site/partners"][
            :, 0
        ].tolist()
        postsyn_node_ids = syn_file["annotations/presynaptic_site/partners"][
            :, 1
        ].tolist()

        for node_nr, node_id in enumerate(syn_file["annotations/ids"]):
            location = syn_file["annotations/locations"][node_nr]
            if dataset_offset is not None:
                logging.debug(
                    "adding global offset to points %i %i %i"
                    % (dataset_offset[0], dataset_offset[1], dataset_offset[2])
                )
                location += dataset_offset

            # cremi synapse locations are in physical space
            if roi.contains(Coordinate(location)):
                if node_id in presyn_node_ids:
                    kind = "PreSyn"
                    assert syn_file["annotations/types"][node_nr] == "presynaptic_site"
                    # node_id is a numpy scalar, so '==' against the plain
                    # python list broadcasts element-wise
                    syn_id = int(np.where(presyn_node_ids == node_id)[0])
                    partner_node_id = postsyn_node_ids[syn_id]
                elif node_id in postsyn_node_ids:
                    kind = "PostSyn"
                    assert syn_file["annotations/types"][node_nr] == "postsynaptic_site"
                    syn_id = int(np.where(postsyn_node_ids == node_id)[0])
                    partner_node_id = presyn_node_ids[syn_id]
                else:
                    raise Exception("Node id neither pre- no post-synaptic")

                partners_ids = [int(partner_node_id)]
                location_id = int(node_id)

                # annotations commented as unsure are flagged in the props
                props = {}
                if node_id in syn_file["annotations/comments/target_ids"]:
                    props = {"unsure": True}

                # create synpaseLocation & add to dict
                if kind == "PreSyn":
                    # NOTE(review): PreSynPoint is not defined/imported here
                    syn_point = PreSynPoint(
                        location=location,
                        location_id=location_id,
                        synapse_id=syn_id,
                        partner_ids=partners_ids,
                        props=props,
                    )
                    presyn_points_dict[int(node_id)] = copy.deepcopy(syn_point)
                elif kind == "PostSyn":
                    # NOTE(review): PostSynPoint is not defined/imported here
                    syn_point = PostSynPoint(
                        location=location,
                        location_id=location_id,
                        synapse_id=syn_id,
                        partner_ids=partners_ids,
                        props=props,
                    )
                    postsyn_points_dict[int(node_id)] = copy.deepcopy(syn_point)

        return presyn_points_dict, postsyn_points_dict

    def __repr__(self):
        return self.filename
| 6,123 | 36.570552 | 88 | py |
gunpowder | gunpowder-master/gunpowder/contrib/nodes/prepare_malis.py | import copy
import logging
import numpy as np
from gunpowder.array import Array
from gunpowder.batch_request import BatchRequest
from gunpowder.nodes.batch_filter import BatchFilter
logger = logging.getLogger(__name__)
class PrepareMalis(BatchFilter):
    """Creates a component label array needed for two-phase malis training.

    Args:

        labels_array_key(:class:`ArrayKey`): The label array to use.

        malis_comp_array_key(:class:`ArrayKey`): The malis component array
            to generate.

        ignore_array_key(:class:`ArrayKey`, optional): An ignore mask to
            use.
    """

    def __init__(self, labels_array_key, malis_comp_array_key, ignore_array_key=None):
        self.labels_array_key = labels_array_key
        self.malis_comp_array_key = malis_comp_array_key
        self.ignore_array_key = ignore_array_key

    def setup(self):
        # the component array has the same spec as the labels
        spec = self.spec[self.labels_array_key].copy()
        self.provides(self.malis_comp_array_key, spec)
        self.enable_autoskip()

    def prepare(self, request):
        # request labels (and, if configured, the ignore mask) in the ROI of
        # the requested labels
        deps = BatchRequest()
        deps[self.labels_array_key] = copy.deepcopy(request[self.labels_array_key])
        if self.ignore_array_key is not None:
            deps[self.ignore_array_key] = copy.deepcopy(request[self.labels_array_key])
        return deps

    def process(self, batch, request):
        labels = batch.arrays[self.labels_array_key].data

        # positive pass: the labels as they are
        pos_pass = labels

        # negative pass: masked-out voxels are assigned a fresh, unused
        # foreground id, so only edges from outside regions contribute loss
        # there
        if self.ignore_array_key is not None:
            unused_id = labels.max() + 1
            neg_pass = np.array(labels)
            neg_pass[batch.arrays[self.ignore_array_key].data == 0] = unused_id
        else:
            neg_pass = pos_pass

        spec = self.spec[self.malis_comp_array_key].copy()
        spec.roi = request[self.labels_array_key].roi
        batch.arrays[self.malis_comp_array_key] = Array(
            np.array([neg_pass, pos_pass]), spec
        )

        # GT affinities need no equivalent update: they are all 0 in the
        # masked area (the masked area is assumed to be set to background in
        # batch.gt).
        #
        # Negative pass: affinities inside GT regions are set to 1, affinities
        # in the masked area stay as predicted. The masked area belongs to the
        # one foreground region introduced above, and since loss is only
        # counted on edges connecting different labels, loss in the masked-out
        # area comes only from outside regions.
        #
        # Positive pass: all affinities outside GT regions are set to 0, so
        # there is no loss in the masked-out area.
| 2,699 | 33.177215 | 87 | py |
gunpowder | gunpowder-master/gunpowder/contrib/nodes/__init__.py | from __future__ import absolute_import
from .add_blobs_from_points import AddBlobsFromPoints
from .add_boundary_distance_gradients import AddBoundaryDistanceGradients
from .add_gt_mask_exclusive_zone import AddGtMaskExclusiveZone
from .add_nonsymmetric_affinities import AddNonsymmetricAffinities
from .add_vector_map import AddVectorMap
from .dvid_partner_annotation_source import DvidPartnerAnnotationSource
from .hdf5_points_source import Hdf5PointsSource
from .prepare_malis import PrepareMalis
from .zero_out_const_sections import ZeroOutConstSections
| 558 | 45.583333 | 73 | py |
gunpowder | gunpowder-master/gunpowder/contrib/nodes/dvid_partner_annotation_source.py | import distutils.util
import numpy as np
import logging
import requests
from copy import deepcopy
from gunpowder.batch import Batch
from gunpowder.nodes.batch_provider import BatchProvider
from gunpowder.graph import GraphKey, Graph
from gunpowder.graph_spec import GraphSpec
from gunpowder.profiling import Timing
from gunpowder.graph import Node
logger = logging.getLogger(__name__)
class DvidPartnerAnnoationSourceReadException(Exception):
    """Error type for DVID partner annotation source read failures.

    NOTE(review): not raised anywhere in this module.
    """

    pass
class MaskNotProvidedException(Exception):
    """Error type for a missing mask.

    NOTE(review): not raised anywhere in this module.
    """

    pass
# TODO: This seems broken. There is code involving a voxel size, but points
# don't have voxel sizes
class DvidPartnerAnnotationSource(BatchProvider):
    """Batch provider reading pre-/post-synaptic partner annotations from a
    DVID server.

    NOTE(review): this class looks partially broken (see the module-level
    TODO): ``__read_syn_points`` reads ``self.points_voxel_size``, which is
    never initialized, and ``GraphKey.PRESYN``/``GraphKey.POSTSYN`` class
    attributes are not declared on :class:`GraphKey` in this code base --
    confirm before use.

    :param hostname: hostname for DVID server
    :type hostname: str
    :param port: port for DVID server
    :type port: int
    :param uuid: UUID of node on DVID server
    :type uuid: str
    :param datasets: dict {GraphKey: DVID data instance}
    """

    def __init__(self, hostname, port, uuid, datasets=None, rois=None):
        self.hostname = hostname
        self.port = port
        self.url = "http://{}:{}".format(self.hostname, self.port)
        self.uuid = uuid
        self.datasets = datasets if datasets is not None else {}
        self.rois = rois if rois is not None else {}
        self.node_service = None
        self.dims = 0

    def setup(self):
        # declare every offered point set with its configured ROI
        for points_key, points_name in self.datasets.items():
            # fixed: this used to read 'self.points_rois', which is never
            # set; the ROIs are stored in 'self.rois' (see __init__)
            self.provides(points_key, GraphSpec(roi=self.rois[points_key]))
        logger.info("DvidPartnerAnnotationSource.spec:\n{}".format(self.spec))

    def provide(self, request):
        timing = Timing(self)
        timing.start()

        batch = Batch()

        # if pre and postsynaptic locations requested, their id : SynapseLocation dictionaries should be created
        # together s.t. the ids are unique and allow to find partner locations
        if GraphKey.PRESYN in request.points or GraphKey.POSTSYN in request.points:
            try:  # either both have the same roi, or only one of them is requested
                assert (
                    request.points[GraphKey.PRESYN] == request.points[GraphKey.POSTSYN]
                )
            except AssertionError:
                assert (
                    GraphKey.PRESYN not in request.points
                    or GraphKey.POSTSYN not in request.points
                )
            if GraphKey.PRESYN in request.points:
                presyn_points, postsyn_points = self.__read_syn_points(
                    roi=request.points[GraphKey.PRESYN]
                )
            elif GraphKey.POSTSYN in request.points:
                presyn_points, postsyn_points = self.__read_syn_points(
                    roi=request.points[GraphKey.POSTSYN]
                )

        for points_key, roi in request.points.items():
            # check if requested points can be provided
            if points_key not in self.spec:
                raise RuntimeError(
                    "Asked for %s which this source does not provide" % points_key
                )
            # check if request roi lies within provided roi
            if not self.spec[points_key].roi.contains(roi):
                raise RuntimeError(
                    "%s's ROI %s outside of my ROI %s"
                    % (points_key, roi, self.spec[points_key].roi)
                )

            logger.debug("Reading %s in %s..." % (points_key, roi))
            id_to_point = {
                GraphKey.PRESYN: presyn_points,
                GraphKey.POSTSYN: postsyn_points,
            }[points_key]
            batch.points[points_key] = Graph(data=id_to_point, spec=GraphSpec(roi=roi))

        logger.debug("done")

        timing.stop()
        batch.profiling_stats.add(timing)

        return batch

    def __load_json_annotations(
        self, array_shape_voxel, array_offset_voxel, array_name
    ):
        """Fetch annotation elements in the given voxel region from the DVID
        REST API and return them as parsed JSON (a list; empty if none)."""
        url = (
            "http://"
            + str(self.hostname)
            + ":"
            + str(self.port)
            + "/api/node/"
            + str(self.uuid)
            + "/"
            + str(array_name)
            + "/elements/{}_{}_{}/{}_{}_{}".format(
                array_shape_voxel[2],
                array_shape_voxel[1],
                array_shape_voxel[0],
                array_offset_voxel[2],
                array_offset_voxel[1],
                array_offset_voxel[0],
            )
        )
        annotations_file = requests.get(url)
        json_annotations = annotations_file.json()
        if json_annotations is None:
            json_annotations = []  # create empty_dummy_json_annotations
            # raise Exception ('No synapses found in region defined by array_offset {} and array_shape {}'.format(array_offset, array_shape))
        return json_annotations

    def __read_syn_points(self, roi):
        """read json file from dvid source, in json format to create for every location given"""

        # NOTE(review): self.points_voxel_size is never initialized in this
        # class; confirm the intended source of these voxel sizes
        if GraphKey.PRESYN in self.points_voxel_size:
            voxel_size = self.points_voxel_size[GraphKey.PRESYN]
        elif GraphKey.POSTSYN in self.points_voxel_size:
            voxel_size = self.points_voxel_size[GraphKey.POSTSYN]

        syn_file_json = self.__load_json_annotations(
            array_shape_voxel=roi.shape // voxel_size,
            array_offset_voxel=roi.offset // voxel_size,
            array_name=self.datasets[GraphKey.PRESYN],
        )

        presyn_points_dict, postsyn_points_dict = {}, {}
        location_to_location_id_dict, location_id_to_partner_locations = {}, {}
        for node_nr, node in enumerate(syn_file_json):
            # collect information
            kind = str(node["Kind"])
            location = (
                np.asarray((node["Pos"][2], node["Pos"][1], node["Pos"][0]))
                * voxel_size
            )
            location_id = int(node_nr)
            # some synapses are wrongly annotated in dvid source, have 'Tag': null ???, they are skipped
            try:
                syn_id = int(node["Tags"][0][3:])
            except Exception:
                continue
            location_to_location_id_dict[str(location)] = location_id

            partner_locations = []
            try:
                for relation in node["Rels"]:
                    partner_locations.append(
                        (
                            np.asarray(
                                [
                                    relation["To"][2],
                                    relation["To"][1],
                                    relation["To"][0],
                                ]
                            )
                        )
                        * voxel_size
                    )
            except Exception:
                partner_locations = []
            location_id_to_partner_locations[int(node_nr)] = partner_locations

            # check if property given, not always given
            props = {}
            if "conf" in node["Prop"]:
                props["conf"] = float(node["Prop"]["conf"])
            if "agent" in node["Prop"]:
                props["agent"] = str(node["Prop"]["agent"])
            if "flagged" in node["Prop"]:
                str_value_flagged = str(node["Prop"]["flagged"])
                props["flagged"] = bool(distutils.util.strtobool(str_value_flagged))
            if "multi" in node["Prop"]:
                str_value_multi = str(node["Prop"]["multi"])
                props["multi"] = bool(distutils.util.strtobool(str_value_multi))

            # create synPoint with information collected so far (partner_ids not completed yet)
            if kind == "PreSyn":
                syn_point = Node(
                    location=location,
                    location_id=location_id,
                    synapse_id=syn_id,
                    partner_ids=[],
                    props=props,
                )
                presyn_points_dict[int(node_nr)] = deepcopy(syn_point)
            elif kind == "PostSyn":
                # fixed: this used to call the undefined name 'syn_(...)';
                # construct a Node mirroring the PreSyn branch
                syn_point = Node(
                    location=location,
                    location_id=location_id,
                    synapse_id=syn_id,
                    partner_ids=[],
                    props=props,
                )
                postsyn_points_dict[int(node_nr)] = deepcopy(syn_point)

        # add partner ids
        last_node_nr = len(syn_file_json) - 1
        for current_syn_point_id in location_id_to_partner_locations.keys():
            all_partner_ids = []
            for partner_loc in location_id_to_partner_locations[current_syn_point_id]:
                # fixed: dict.has_key() was removed in Python 3; use 'in'
                if str(partner_loc) in location_to_location_id_dict:
                    all_partner_ids.append(
                        int(location_to_location_id_dict[str(partner_loc)])
                    )
                else:
                    # partner lies outside the read region: assign a fresh id
                    # after the last known node
                    last_node_nr = last_node_nr + 1
                    assert str(partner_loc) not in location_to_location_id_dict
                    all_partner_ids.append(int(last_node_nr))

            if current_syn_point_id in presyn_points_dict:
                presyn_points_dict[current_syn_point_id].partner_ids = all_partner_ids
            elif current_syn_point_id in postsyn_points_dict:
                postsyn_points_dict[current_syn_point_id].partner_ids = all_partner_ids
            else:
                raise Exception("current syn_point id not found in any dictionary")

        return presyn_points_dict, postsyn_points_dict

    def __repr__(self):
        # fixed: previously referenced the undefined names
        # 'self.array_names' and 'ArrayKeys'
        return "DvidPartnerAnnotationSource(hostname={}, port={}, uuid={}, datasets={})".format(
            self.hostname,
            self.port,
            self.uuid,
            self.datasets,
        )
| 9,773 | 37.031128 | 141 | py |
gunpowder | gunpowder-master/gunpowder/ext/zarr_file.py | from collections.abc import MutableMapping
from typing import Union
import zarr
from zarr._storage.store import BaseStore
class ZarrFile:
    """Context manager around ``zarr.open``, analogous to ``h5py.File``."""

    def __init__(self, store: Union[BaseStore, MutableMapping, str], mode="a"):
        # opening is deferred to __enter__
        self.store = store
        self.mode = mode

    def __enter__(self):
        return zarr.open(self.store, mode=self.mode)

    def __exit__(self, *args):
        # zarr containers need no explicit close
        pass
| 463 | 22.2 | 79 | py |
gunpowder | gunpowder-master/gunpowder/ext/__init__.py | from __future__ import print_function
import logging
import traceback
import sys
logger = logging.getLogger(__name__)
class NoSuchModule(object):
    """Placeholder for an optional module that failed to import.

    Must be instantiated inside the ``except`` block handling the import
    error: the constructor captures the currently handled exception, and any
    attribute access on the placeholder re-raises it.
    """

    def __init__(self, name):
        self.__name = name
        exc_type, exc_value, exc_tb = sys.exc_info()
        self.__traceback_str = traceback.format_tb(exc_tb)
        self.__exception = exc_type(exc_value)

    def __getattr__(self, item):
        raise self.__exception
try:
import dvision
except ImportError as e:
dvision = NoSuchModule("dvision")
try:
import h5py
except ImportError as e:
h5py = NoSuchModule("h5py")
try:
import pyklb
except ImportError as e:
pyklb = NoSuchModule("pyklb")
try:
import tensorflow
except ImportError as e:
tensorflow = NoSuchModule("tensorflow")
try:
import torch
except ImportError as e:
torch = NoSuchModule("torch")
try:
import tensorboardX
except ImportError as e:
tensorboardX = NoSuchModule("tensorboardX")
try:
import malis
except ImportError as e:
malis = NoSuchModule("malis")
try:
import augment
except ImportError as e:
augment = NoSuchModule("augment")
try:
import zarr
from .zarr_file import ZarrFile
except ImportError as e:
zarr = NoSuchModule("zarr")
ZarrFile = None
try:
import daisy
except ImportError as e:
daisy = NoSuchModule("daisy")
try:
import jax
except ImportError as e:
jax = NoSuchModule("jax")
try:
import jax.numpy as jnp
except ImportError as e:
jnp = NoSuchModule("jnp")
try:
import haiku
except ImportError as e:
haiku = NoSuchModule("haiku")
try:
import optax
except ImportError as e:
optax = NoSuchModule("optax")
| 1,676 | 17.228261 | 69 | py |
gunpowder | gunpowder-master/gunpowder/tensorflow/local_server.py | import logging
import multiprocessing
import ctypes
from gunpowder.ext import tensorflow as tf
from gunpowder.freezable import Freezable
logger = logging.getLogger(__name__)
class LocalServer(Freezable):
    """Wrapper around ``tf.train.Server`` to create a local server on-demand.

    This class is necessary because tensorflow's GPU support should not be
    initialized before forking processes (the CUDA driver needs to be
    initialized in each process separately, not in the main process and then
    forked). Creating a ``tf.train.Server`` initializes GPU support, however.
    With this wrapper, server creating can be delayed until a GPU process
    creates a ``tf.Session``::

        session = tf.Session(target=LocalServer.get_target())
    """

    # process-shared buffer holding the server target string; 256 spaces is
    # the sentinel for "no server started yet"
    __target = multiprocessing.Array(ctypes.c_char, b" " * 256)
    __server = None

    @staticmethod
    def get_target():
        """Get the target string of this tensorflow server to connect a
        ``tf.Session()``. This will start the server, if it is not running
        already.
        """

        with LocalServer.__target.get_lock():
            target = LocalServer.__target.value

            if target == b" " * 256:
                # sentinel still present -> create the server now
                logger.info("Creating local tensorflow server")
                LocalServer.__server = tf.train.Server.create_local_server()
                target = LocalServer.__server.target
                if not isinstance(target, bytes):
                    # the shared array stores bytes; tf may return str
                    target = target.encode("ascii")
                logger.info("Server running at %s", target)
            else:
                logger.info("Server already running at %s", target)

            LocalServer.__target.value = target

        return target
| 1,711 | 33.938776 | 77 | py |
gunpowder | gunpowder-master/gunpowder/tensorflow/__init__.py | from __future__ import absolute_import
from .nodes import *
from .local_server import LocalServer
| 99 | 19 | 38 | py |
gunpowder | gunpowder-master/gunpowder/tensorflow/nodes/__init__.py | from __future__ import absolute_import
from .train import Train
from .predict import Predict
| 94 | 18 | 38 | py |
gunpowder | gunpowder-master/gunpowder/tensorflow/nodes/predict.py | import ctypes
import logging
import multiprocessing as mp
import numpy as np
from functools import reduce
from gunpowder.array import ArrayKey, Array
from gunpowder.ext import tensorflow as tf
from gunpowder.nodes.generic_predict import GenericPredict
from gunpowder.tensorflow.local_server import LocalServer
from operator import mul
logger = logging.getLogger(__name__)
class Predict(GenericPredict):
    """Tensorflow implementation of :class:`gunpowder.nodes.Predict`.

    Args:

        checkpoint (``string``):

            Basename of a tensorflow checkpoint storing the tensorflow graph
            and associated tensor values and metadata, as created by
            :class:`gunpowder.nodes.Train`, for example.

        inputs (``dict``, ``string`` -> :class:`ArrayKey`):

            Dictionary from the names of input tensors in the network to
            array keys.

        outputs (``dict``, ``string`` -> :class:`ArrayKey`):

            Dictionary from the names of output tensors in the network to array
            keys. New arrays will be generated by this node for each entry (if
            requested downstream).

        array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`, optional):

            Used to set the specs of generated arrays (``outputs``). This is
            useful to set the ``voxel_size``, for example, if they differ from
            the voxel size of the input arrays. Only fields that are not
            ``None`` in the given :class:`ArraySpec` will be used.

        graph: (``string``, optional):

            An optional path to a tensorflow computation graph that should be
            used for prediction. The checkpoint is used to restore the values
            of matching variable names in the graph. Note that the graph
            specified here can differ from the one associated to the
            checkpoint.

        skip_empty (``bool``, optional):

            Skip prediction, if all inputs are empty (contain only 0). In this
            case, outputs are simply set to 0.

        max_shared_memory (``int``, optional):

            The maximal amount of shared memory in bytes to allocate to send
            batches to the GPU processes. Defaults to 1GB.
    """

    def __init__(
        self,
        checkpoint,
        inputs,
        outputs,
        array_specs=None,
        graph=None,
        skip_empty=False,
        max_shared_memory=1024 * 1024 * 1024,
    ):
        super(Predict, self).__init__(inputs, outputs, array_specs)
        self.checkpoint = checkpoint
        self.meta_graph = graph
        self.session = None
        self.graph = None
        self.skip_empty = skip_empty

        self.manager = mp.Manager()
        self.max_shared_memory = max_shared_memory
        # layout of the shared buffers, name -> (byte offset, size, shape,
        # dtype); stored in manager dicts so late-joining workers can map the
        # raw memory the same way
        self.shared_input_array_config = self.manager.dict()
        self.shared_output_array_config = self.manager.dict()
        self.shared_input_arrays = {}
        self.shared_output_arrays = {}
        # max_shared_memory is documented (and asserted below) in bytes, so
        # allocate c_byte elements; the previous c_float allocation reserved
        # four times the requested amount
        self.shared_input_memory = mp.RawArray(ctypes.c_byte, self.max_shared_memory)
        self.shared_output_memory = mp.RawArray(ctypes.c_byte, self.max_shared_memory)

        # hand-shake primitives: send_lock serializes writers of the input
        # buffer, receive_lock serializes readers of the output buffer, the
        # events sequence one round-trip with the predict process
        self.send_lock = mp.Lock()
        self.receive_lock = mp.Lock()
        self.predict_process_initialized = mp.Event()
        self.worker_sent_inputs = mp.Event()
        self.predict_received_inputs = mp.Event()
        self.predict_sent_outputs = mp.Event()

        self.predict_process = mp.Process(target=self.__predict)
        self.predict_process_crashed = mp.Value("i", False)
        self.predict_process.start()
        self.predict_process_initialized.wait()

    def predict(self, batch, request):
        """Send ``batch`` through shared memory to the background predict
        process and store the requested outputs in ``batch``."""

        if not self.shared_output_arrays:
            # the predict process has published the output layout by now
            self.__init_shared_output_arrays()

        if self.skip_empty:
            can_skip = True
            for array_key in self.inputs.values():
                if batch[array_key].data.any():
                    can_skip = False
                    break

            if can_skip:
                logger.info("Skipping batch %i (all inputs are 0)", batch.id)

                # fill all outputs with zeros instead of predicting
                for name, array_key in self.outputs.items():
                    shape = self.shared_output_arrays[name].shape
                    dtype = self.shared_output_arrays[name].dtype

                    spec = self.spec[array_key].copy()
                    spec.roi = request[array_key].roi.copy()

                    batch.arrays[array_key] = Array(np.zeros(shape, dtype=dtype), spec)

                return

        logger.debug("predicting in batch %i", batch.id)

        output_tensors = self.__collect_outputs(request)
        input_data = self.__collect_provided_inputs(batch)

        # only one worker at a time may write to the input buffer
        self.send_lock.acquire()

        if not self.shared_input_arrays:
            if not self.shared_input_array_config:
                # the first worker to submit a batch defines the input layout
                self.__create_shared_input_array_config(batch, request)
            self.__init_shared_input_arrays()

        self.__write_inputs_to_shared(input_data)
        self.worker_sent_inputs.set()

        # only one worker at a time may read from the output buffer
        self.receive_lock.acquire()
        self.predict_received_inputs.wait()
        self.__check_background_process([self.receive_lock, self.send_lock])
        self.predict_received_inputs.clear()
        # inputs were consumed, the next worker may write
        self.send_lock.release()

        self.predict_sent_outputs.wait()
        self.predict_sent_outputs.clear()

        output_data = self.__read_outputs_from_shared(output_tensors)
        self.receive_lock.release()

        for array_key in output_tensors:
            spec = self.spec[array_key].copy()
            spec.roi = request[array_key].roi
            batch.arrays[array_key] = Array(output_data[array_key], spec)

        logger.debug("predicted in batch %i", batch.id)

    def __predict(self):
        """The background predict process."""
        try:
            # TODO: is the server still needed?
            target = LocalServer.get_target()
            logger.info("Initializing tf session, connecting to %s...", target)
            self.graph = tf.Graph()
            self.session = tf.Session(target=target, graph=self.graph)
            with self.graph.as_default():
                self.__read_checkpoint()

            if not self.shared_output_arrays:
                if not self.shared_output_array_config:
                    self.__create_shared_output_array_config()
                self.__init_shared_output_arrays()

            # from now on it is safe to access the shared array configuration
            self.predict_process_initialized.set()

            # loop predict
            while True:
                # wait for inputs
                self.worker_sent_inputs.wait()
                self.worker_sent_inputs.clear()
                if not self.shared_input_arrays:
                    self.__init_shared_input_arrays()
                # read inputs
                input_data = self.__read_inputs_from_shared()
                self.predict_received_inputs.set()
                # compute outputs
                output_data = self.session.run(
                    {t: t for t in self.outputs.keys()}, feed_dict=input_data
                )
                # write outputs
                self.__write_outputs_to_shared(output_data)
                self.predict_sent_outputs.set()

        except Exception as e:
            self.predict_process_crashed.value = True
            # release locks and events so waiting workers fail fast instead of
            # dead-locking
            self.predict_process_initialized.set()
            self.worker_sent_inputs.clear()
            self.predict_received_inputs.set()
            self.predict_sent_outputs.set()
            raise e

    def teardown(self):
        """Stop the background predict process."""
        self.predict_process.terminate()
        self.predict_process.join()

    def __check_background_process(self, locks=None):
        """Raise if the predict process died, releasing ``locks`` first so
        other workers are not blocked forever."""
        # use None instead of a mutable default argument
        if self.predict_process_crashed.value:
            # release all locks before raising exception
            for lock in locks or []:
                lock.release()
            raise RuntimeError("Background process died.")

    def __read_checkpoint(self):
        """Import the (meta-)graph and restore variables from the
        checkpoint."""
        # read the graph associated to the checkpoint
        if self.meta_graph is None:
            meta_graph_file = self.checkpoint + ".meta"
        # read alternative, custom graph
        else:
            meta_graph_file = self.meta_graph

        logger.info(
            "Reading graph from %s and weights from %s...",
            meta_graph_file,
            self.checkpoint,
        )

        saver = tf.train.import_meta_graph(meta_graph_file, clear_devices=True)
        # restore variables from checkpoint
        saver.restore(self.session, self.checkpoint)

    def __collect_outputs(self, request=None):
        """Get a dict:

            array key: tensor name

        If request is not None, return only outputs that are in request.
        """
        array_outputs = {}

        for tensor_name, array_key in self.outputs.items():
            if request is None or array_key in request:
                array_outputs[array_key] = tensor_name

        return array_outputs

    def __collect_provided_inputs(self, batch):
        """Get a dict:

            tensor name: ndarray
        """
        inputs = {}

        for input_name, input_key in self.inputs.items():
            if isinstance(input_key, ArrayKey):
                if input_key in batch.arrays:
                    inputs[input_name] = batch.arrays[input_key].data
                else:
                    # logger.warn is a deprecated alias of warning
                    logger.warning(
                        "batch does not contain %s, input %s will not " "be set",
                        input_key,
                        input_name,
                    )
            elif isinstance(input_key, np.ndarray):
                inputs[input_name] = input_key
            elif isinstance(input_key, str):
                inputs[input_name] = getattr(batch, input_key)
            else:
                raise Exception(
                    "Unknown network input key {}, can't be given to "
                    "network".format(input_key)
                )

        return inputs

    def __create_shared_input_array_config(self, batch, request):
        """Store the shared array config in a shared dictionary. Should be run
        once by the first worker to submit a batch."""
        begin = 0
        for name, array_key in self.inputs.items():
            shape = batch[array_key].data.shape
            size = reduce(mul, shape, 1)
            dtype = batch[array_key].data.dtype
            self.shared_input_array_config[name] = (begin, size, shape, dtype)
            # offsets are in bytes
            begin += size * np.dtype(dtype).itemsize

        assert (
            begin <= self.max_shared_memory
        ), "The input arrays exceed the max_shared_memory"

    def __create_shared_output_array_config(self):
        """To be called by predict process."""
        begin = 0
        for name, array_key in self.outputs.items():
            tensor = self.graph.get_tensor_by_name(name)
            shape = tensor.get_shape().as_list()
            size = reduce(mul, shape, 1)
            dtype = tensor.dtype.as_numpy_dtype
            self.shared_output_array_config[name] = (begin, size, tuple(shape), dtype)
            # offsets are in bytes
            begin += size * np.dtype(dtype).itemsize

        assert (
            begin <= self.max_shared_memory
        ), "The output arrays exceed the max_shared_memory"

    def __init_shared_input_arrays(self):
        """Assign the shared memory to numpy arrays."""
        for name, (begin, size, shape, dtype) in self.shared_input_array_config.items():
            self.shared_input_arrays[name] = np.frombuffer(
                self.shared_input_memory, dtype=dtype, offset=begin, count=size
            ).reshape(shape)

    def __init_shared_output_arrays(self):
        """Assign the shared memory to numpy arrays."""
        for name, (
            begin,
            size,
            shape,
            dtype,
        ) in self.shared_output_array_config.items():
            self.shared_output_arrays[name] = np.frombuffer(
                self.shared_output_memory, dtype=dtype, offset=begin, count=size
            ).reshape(shape)

    def __write_inputs_to_shared(self, input_data):
        for tensor_name, data in input_data.items():
            self.shared_input_arrays[tensor_name][:] = data

    def __read_inputs_from_shared(self):
        # copy, so the buffer can be reused while the session runs
        return {
            tensor_name: self.shared_input_arrays[tensor_name].copy()
            for tensor_name in self.inputs.keys()
        }

    def __write_outputs_to_shared(self, output_data):
        for tensor_name, data in output_data.items():
            self.shared_output_arrays[tensor_name][:] = data

    def __read_outputs_from_shared(self, output_tensors):
        # copy, so the buffer can be reused for the next batch
        return {
            array_key: self.shared_output_arrays[tensor_name].copy()
            for array_key, tensor_name in output_tensors.items()
        }
| 12,847 | 33.818428 | 88 | py |
gunpowder | gunpowder-master/gunpowder/tensorflow/nodes/train.py | import logging
import os
import numpy as np
from gunpowder.array import ArrayKey, Array
from gunpowder.ext import tensorflow as tf
from gunpowder.nodes.generic_train import GenericTrain
from gunpowder.tensorflow.local_server import LocalServer
logger = logging.getLogger(__name__)
class Train(GenericTrain):
    """Tensorflow implementation of :class:`gunpowder.nodes.Train`.

    Args:

        graph (``string``):

            Filename of a tensorflow meta-graph storing the tensorflow graph
            containing an optimizer. A meta-graph file can be created by
            running::

                # create tensorflow graph
                ...

                # store it
                tf.train.export_meta_graph(filename='...')

        optimizer (``string`` or function):

            Either the name of the tensorflow operator performing a training
            iteration, or a function that, given the graph of the meta-graph
            file, adds a custom loss and optimizer.

            If a function is given, it should return a tuple ``(loss,
            optimizer)`` of a tensor and an operator representing the loss and
            the optimizer, respectively. In this case, parameter ``loss``
            should be ``None``.

            Example::

                def add_custom_optimizer(graph):

                    # get the output of your graph
                    output = graph.get_tensor_by_name('...')

                    # create your custom loss
                    loss = custom_loss(output)

                    # add an optimizer of your choice
                    optimizer = tf.train.AdamOptimizer().minimize(loss)

                    return (loss, optimizer)

        loss (``string`` or ``None``):

            The name of the tensorflow tensor containing the loss, or ``None``
            if ``optimizer`` is a function.

        inputs (``dict``, ``string`` -> :class:`ArrayKey`):

            Dictionary from the names of input tensors in the network to
            array keys.

        outputs (``dict``, ``string`` -> :class:`ArrayKey`):

            Dictionary from the names of output tensors in the network to array
            keys. New arrays will be generated by this node for each entry (if
            requested downstream).

        gradients (``dict``, ``string`` -> :class:`ArrayKey`):

            Dictionary from the names of output tensors in the network to
            array keys. New arrays containing the gradient of an output with
            respect to the loss will be generated by this node for each entry
            (if requested downstream).

        summary (``string`` or
                 ``dict``, ``string`` -> (``string`` (tensor name), freq),
                 optional):

            The name of the tensorflow tensor containing the tensorboard
            summaries or dictionary for different subcategories of summaires
            (key: string, value: tuple with tensor/op name and frequency,
            of evaluation).

        array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`, optional):

            Used to set the specs of generated arrays (``outputs``). This is
            useful to set the ``voxel_size``, for example, if they differ from
            the voxel size of the input arrays. Only fields that are not
            ``None`` in the given :class:`ArraySpec` will be used.

        save_every (``int``, optional):

            After how many iterations to create a checkpoint to store the
            learnt weights.

        log_dir (``string``, optional):

            Directory for saving tensorboard summaries.

        log_every (``int``, optional):

            After how many iterations to write out tensorboard summaries.
    """

    def __init__(
        self,
        graph,
        optimizer,
        loss,
        inputs,
        outputs,
        gradients,
        summary=None,
        array_specs=None,
        save_every=2000,
        log_dir="./",
        log_every=1,
    ):
        super(Train, self).__init__(
            inputs, outputs, gradients, array_specs, spawn_subprocess=False
        )
        self.meta_graph_filename = graph
        self.optimizer_func = None
        self.optimizer_loss_names = None
        self.optimizer = None
        self.loss = None
        self.summary = summary
        self.session = None
        self.tf_gradient = {}
        self.graph = None
        self.basic_saver = None
        self.full_saver = None
        self.save_every = save_every
        self.iteration = None
        self.iteration_increment = None
        self.summary_saver = None
        self.log_dir = log_dir
        self.log_every = log_every

        # a string names the optimizer op in the graph, a callable adds a
        # custom loss/optimizer in start()
        if isinstance(optimizer, str):
            self.optimizer_loss_names = (optimizer, loss)
        else:
            self.optimizer_func = optimizer

        # at least for some versions of tensorflow, the checkpoint name has to
        # start with a . if it is a relative path
        if not os.path.isabs(self.meta_graph_filename):
            self.meta_graph_filename = os.path.join(".", self.meta_graph_filename)

    def start(self):
        """Create the session, import the meta-graph, and resolve tensors."""
        target = LocalServer.get_target()
        logger.info("Initializing tf session, connecting to %s...", target)

        self.graph = tf.Graph()
        self.session = tf.Session(target=target, graph=self.graph)

        with self.graph.as_default():
            self.__read_meta_graph()

        if self.summary is not None:
            self.summary_saver = tf.summary.FileWriter(self.log_dir, self.graph)

        if self.optimizer_func is None:
            # get actual operations/tensors from names
            self.optimizer = self.graph.get_operation_by_name(
                self.optimizer_loss_names[0]
            )
            self.loss = self.graph.get_tensor_by_name(self.optimizer_loss_names[1])

        # add symbolic gradients
        for tensor_name in self.gradients:
            tensor = self.graph.get_tensor_by_name(tensor_name)
            self.tf_gradient[tensor_name] = tf.gradients(self.loss, [tensor])[0]

    def train_step(self, batch, request):
        """Run one training iteration and copy requested outputs, gradients,
        loss, and iteration number into ``batch``."""
        array_outputs = self.__collect_requested_outputs(request)
        inputs = self.__collect_provided_inputs(batch)

        to_compute = {
            "optimizer": self.optimizer,
            "loss": self.loss,
            "iteration": self.iteration_increment,
        }
        to_compute.update(array_outputs)

        # compute outputs, gradients, and update variables
        if isinstance(self.summary, str):
            to_compute["summaries"] = self.summary
        elif isinstance(self.summary, dict):
            for k, (v, f) in self.summary.items():
                if int(self.current_step + 1) % f == 0:
                    to_compute[k] = v

        outputs = self.session.run(to_compute, feed_dict=inputs)

        for array_key in array_outputs:
            spec = self.spec[array_key].copy()
            spec.roi = request[array_key].roi
            batch.arrays[array_key] = Array(outputs[array_key], spec)

        batch.loss = outputs["loss"]
        batch.iteration = outputs["iteration"][0]
        self.current_step = batch.iteration

        if self.summary is not None:
            # fix: a str summary off the log interval previously fell through
            # to the dict branch and crashed on str.items()
            if isinstance(self.summary, str):
                if batch.iteration % self.log_every == 0 or batch.iteration == 1:
                    self.summary_saver.add_summary(
                        outputs["summaries"], batch.iteration
                    )
            else:
                for k, (_, f) in self.summary.items():
                    if int(self.current_step) % f == 0:
                        self.summary_saver.add_summary(outputs[k], batch.iteration)

        if batch.iteration % self.save_every == 0:
            checkpoint_name = (
                self.meta_graph_filename + "_checkpoint_%i" % batch.iteration
            )

            logger.info("Creating checkpoint %s", checkpoint_name)

            self.full_saver.save(self.session, checkpoint_name)

    def stop(self):
        """Close the session and release the graph."""
        if self.session is not None:
            self.optimizer = None
            self.loss = None
            if self.summary is not None:
                self.summary_saver.close()
            self.session.close()

            self.graph = None
            self.session = None

    def __read_meta_graph(self):
        """Import the meta-graph, add gunpowder bookkeeping variables, and
        restore the most recent checkpoint (if any)."""
        logger.info("Reading meta-graph...")

        # read the original meta-graph
        tf.train.import_meta_graph(
            self.meta_graph_filename + ".meta", clear_devices=True
        )

        # add custom gunpowder variables
        with tf.variable_scope("gunpowder"):
            self.iteration = tf.get_variable(
                "iteration", shape=1, initializer=tf.zeros_initializer, trainable=False
            )
            self.iteration_increment = tf.assign(self.iteration, self.iteration + 1)

        # Until now, only variables have been added to the graph that are part
        # of every checkpoint. We create a 'basic_saver' for only those
        # variables.
        self.basic_saver = tf.train.Saver(max_to_keep=None)

        # Add custom optimizer and loss, if requested. This potentially adds
        # more variables, not covered by the basic_saver.
        if self.optimizer_func is not None:
            loss, optimizer = self.optimizer_func(self.graph)
            self.loss = loss
            self.optimizer = optimizer

        # We create a 'full_saver' including those variables.
        self.full_saver = tf.train.Saver(max_to_keep=None)

        # find most recent checkpoint
        checkpoint_dir = os.path.dirname(self.meta_graph_filename)
        checkpoint = tf.train.latest_checkpoint(checkpoint_dir)

        if checkpoint:
            try:
                # Try to restore the graph, including the custom optimizer
                # state (if a custom optimizer was used).
                self.__restore_graph(checkpoint, restore_full=True)

            except tf.errors.NotFoundError:
                # If that failed, we just transitioned from an earlier training
                # without the custom optimizer. In this case, restore only the
                # variables of the original meta-graph and 'gunpowder'
                # variables. Custom optimizer variables will be default
                # initialized.
                logger.info("Checkpoint did not contain custom optimizer " "variables")
                self.__restore_graph(checkpoint, restore_full=False)
        else:
            logger.info("No checkpoint found")

            # initialize all variables
            self.session.run(tf.global_variables_initializer())
            # fix: without a checkpoint, current_step was never initialized
            # and train_step crashed with dict summaries on the first batch
            self.current_step = self.session.run(self.iteration)

    def __restore_graph(self, checkpoint, restore_full):
        """Restore variables from ``checkpoint``, either all of them or only
        the basic (non-custom-optimizer) ones."""
        logger.info("Restoring model from %s", checkpoint)

        if restore_full:
            logger.info("...using a saver for all variables")
            self.full_saver.restore(self.session, checkpoint)

        else:
            # initialize all variables, such that non-basic variables are
            # initialized
            self.session.run(tf.global_variables_initializer())

            logger.info("...using a saver for basic variables only")
            self.basic_saver.restore(self.session, checkpoint)
        self.current_step = self.session.run(self.iteration)

    def __collect_requested_outputs(self, request):
        """Map requested array keys to the tensor names (or gradient tensors)
        to fetch."""
        array_outputs = {}

        for output_name, array_key in self.outputs.items():
            if array_key in request:
                array_outputs[array_key] = output_name

        for output_name, array_key in self.gradients.items():
            if array_key in request:
                array_outputs[array_key] = self.tf_gradient[output_name]

        return array_outputs

    def __collect_provided_inputs(self, batch):
        """Build the feed dict from batch arrays, constants, and batch
        attributes."""
        inputs = {}

        for input_name, input_key in self.inputs.items():
            if isinstance(input_key, ArrayKey):
                if input_key in batch.arrays:
                    inputs[input_name] = batch.arrays[input_key].data
                else:
                    # logger.warn is a deprecated alias of warning
                    logger.warning(
                        "batch does not contain %s, input %s will not " "be set",
                        input_key,
                        input_name,
                    )
            elif isinstance(input_key, np.ndarray):
                inputs[input_name] = input_key
            elif isinstance(input_key, str):
                inputs[input_name] = getattr(batch, input_key)
            else:
                raise Exception(
                    "Unknown network input key {}, can't be given to "
                    "network".format(input_key)
                )

        return inputs
| 12,704 | 35.93314 | 87 | py |
gunpowder | gunpowder-master/examples/cremi/mknet.py | from gunpowder.zoo.tensorflow import unet, conv_pass
import tensorflow as tf
import json
def create_network(input_shape, name):
    """Build a 3D U-Net affinity-prediction graph and export it.

    Writes two files: ``<name>.meta`` (the tensorflow meta-graph, including
    loss and optimizer) and ``<name>_config.json`` (tensor names plus input
    and output shapes, consumed by the train and predict scripts).

    Args:

        input_shape (tuple of int):

            Shape (depth, height, width) of the raw input placeholder, in
            voxels.

        name (string):

            Basename for the exported meta-graph and config files.
    """

    tf.reset_default_graph()

    # create a placeholder for the 3D raw input tensor
    raw = tf.placeholder(tf.float32, shape=input_shape)

    # create a U-Net
    raw_batched = tf.reshape(raw, (1, 1) + input_shape)
    unet_output = unet(raw_batched, 6, 4, [[1,3,3],[1,3,3],[1,3,3]])

    # add a convolution layer to create 3 output maps representing affinities
    # in z, y, and x
    pred_affs_batched = conv_pass(
        unet_output,
        kernel_size=1,
        num_fmaps=3,
        num_repetitions=1,
        activation='sigmoid')

    # get the shape of the output
    output_shape_batched = pred_affs_batched.get_shape().as_list()
    output_shape = output_shape_batched[1:] # strip the batch dimension

    # the 4D output tensor (3, depth, height, width)
    pred_affs = tf.reshape(pred_affs_batched, output_shape)

    # create a placeholder for the corresponding ground-truth affinities
    gt_affs = tf.placeholder(tf.float32, shape=output_shape)

    # create a placeholder for per-voxel loss weights
    loss_weights = tf.placeholder(
        tf.float32,
        shape=output_shape)

    # compute the loss as the weighted mean squared error between the
    # predicted and the ground-truth affinities
    loss = tf.losses.mean_squared_error(
        gt_affs,
        pred_affs,
        loss_weights)

    # use the Adam optimizer to minimize the loss
    opt = tf.train.AdamOptimizer(
        learning_rate=0.5e-4,
        beta1=0.95,
        beta2=0.999,
        epsilon=1e-8)
    optimizer = opt.minimize(loss)

    # store the network in a meta-graph file
    tf.train.export_meta_graph(filename=name + '.meta')

    # store network configuration for use in train and predict scripts
    # (output_shape[1:] drops the leading channel dimension of 3)
    config = {
        'raw': raw.name,
        'pred_affs': pred_affs.name,
        'gt_affs': gt_affs.name,
        'loss_weights': loss_weights.name,
        'loss': loss.name,
        'optimizer': optimizer.name,
        'input_shape': input_shape,
        'output_shape': output_shape[1:]
    }
    with open(name + '_config.json', 'w') as f:
        json.dump(config, f)
if __name__ == "__main__":

    # create a network for training
    create_network((84, 268, 268), 'train_net')
    # create a larger network for faster prediction (weights are shared, only
    # the input/output sizes differ)
    create_network((120, 322, 322), 'test_net')
| 2,432 | 29.797468 | 77 | py |
gunpowder | gunpowder-master/examples/cremi/predict.py | from __future__ import print_function
import gunpowder as gp
import json
def predict(iteration):
    """Predict affinities on CREMI sample A with the checkpoint stored after
    the given training ``iteration``, scanning the whole volume chunk-wise
    and writing the result to ``predictions_sample_A.hdf``."""

    ##################
    # DECLARE ARRAYS #
    ##################

    # raw intensities
    raw = gp.ArrayKey('RAW')

    # the predicted affinities
    pred_affs = gp.ArrayKey('PRED_AFFS')

    ####################
    # DECLARE REQUESTS #
    ####################

    with open('test_net_config.json', 'r') as f:
        net_config = json.load(f)

    # get the input and output size in world units (nm, in this case)
    voxel_size = gp.Coordinate((40, 4, 4))
    input_size = gp.Coordinate(net_config['input_shape'])*voxel_size
    output_size = gp.Coordinate(net_config['output_shape'])*voxel_size
    # amount the valid output is smaller than the input on each side
    context = input_size - output_size

    # formulate the request for what a batch should contain
    request = gp.BatchRequest()
    request.add(raw, input_size)
    request.add(pred_affs, output_size)

    #############################
    # ASSEMBLE TESTING PIPELINE #
    #############################

    source = gp.Hdf5Source(
        'sample_A_padded_20160501.hdf',
        datasets = {
            raw: 'volumes/raw'
        })

    # get the ROI provided for raw (we need it later to calculate the ROI in
    # which we can make predictions)
    with gp.build(source):
        raw_roi = source.spec[raw].roi

    pipeline = (

        # read from HDF5 file
        source +

        # convert raw to float in [0, 1]
        gp.Normalize(raw) +

        # perform one training iteration for each passing batch (here we use
        # the tensor names earlier stored in train_net.config)
        gp.tensorflow.Predict(
            graph='test_net.meta',
            checkpoint='train_net_checkpoint_%d'%iteration,
            inputs={
                net_config['raw']: raw
            },
            outputs={
                net_config['pred_affs']: pred_affs
            },
            array_specs={
                pred_affs: gp.ArraySpec(roi=raw_roi.grow(-context, -context))
            }) +

        # store all passing batches in the same HDF5 file
        gp.Hdf5Write(
            {
                raw: '/volumes/raw',
                pred_affs: '/volumes/pred_affs',
            },
            output_filename='predictions_sample_A.hdf',
            compression_type='gzip'
        ) +

        # show a summary of time spend in each node every 10 iterations
        gp.PrintProfilingStats(every=10) +

        # iterate over the whole dataset in a scanning fashion, emitting
        # requests that match the size of the network
        gp.Scan(reference=request)
    )

    with gp.build(pipeline):
        # request an empty batch from Scan to trigger scanning of the dataset
        # without keeping the complete dataset in memory
        pipeline.request_batch(gp.BatchRequest())
if __name__ == "__main__":
    # use the checkpoint written after 200000 training iterations
    predict(200000)
| 2,863 | 28.22449 | 77 | py |
gunpowder | gunpowder-master/examples/cremi/train.py | from __future__ import print_function
import gunpowder as gp
import json
import math
import logging
logging.basicConfig(level=logging.INFO)
def train(iterations):
    """Train the affinity-prediction network for ``iterations`` batches on
    CREMI samples A, B, and C, periodically writing snapshots and
    checkpoints."""

    ##################
    # DECLARE ARRAYS #
    ##################

    # raw intensities
    raw = gp.ArrayKey('RAW')

    # objects labelled with unique IDs
    gt_labels = gp.ArrayKey('LABELS')

    # array of per-voxel affinities to direct neighbors
    gt_affs= gp.ArrayKey('AFFINITIES')

    # weights to use to balance the loss
    loss_weights = gp.ArrayKey('LOSS_WEIGHTS')

    # the predicted affinities
    pred_affs = gp.ArrayKey('PRED_AFFS')

    # the gradient of the loss wrt to the predicted affinities
    pred_affs_gradients = gp.ArrayKey('PRED_AFFS_GRADIENTS')

    ####################
    # DECLARE REQUESTS #
    ####################

    with open('train_net_config.json', 'r') as f:
        net_config = json.load(f)

    # get the input and output size in world units (nm, in this case)
    voxel_size = gp.Coordinate((40, 4, 4))
    input_size = gp.Coordinate(net_config['input_shape'])*voxel_size
    output_size = gp.Coordinate(net_config['output_shape'])*voxel_size

    # formulate the request for what a batch should (at least) contain
    request = gp.BatchRequest()
    request.add(raw, input_size)
    request.add(gt_affs, output_size)
    request.add(loss_weights, output_size)

    # when we make a snapshot for inspection (see below), we also want to
    # request the predicted affinities and gradients of the loss wrt the
    # affinities
    snapshot_request = gp.BatchRequest()
    snapshot_request[pred_affs] = request[gt_affs]
    snapshot_request[pred_affs_gradients] = request[gt_affs]

    ##############################
    # ASSEMBLE TRAINING PIPELINE #
    ##############################

    pipeline = (

        # a tuple of sources, one for each sample (A, B, and C) provided by the
        # CREMI challenge
        tuple(

            # read batches from the HDF5 file
            gp.Hdf5Source(
                'sample_'+s+'_padded_20160501.hdf',
                datasets = {
                    raw: 'volumes/raw',
                    gt_labels: 'volumes/labels/neuron_ids'
                }
            ) +

            # convert raw to float in [0, 1]
            gp.Normalize(raw) +

            # choose a random location for each requested batch
            gp.RandomLocation()

            for s in ['A', 'B', 'C']
        ) +

        # choose a random source (i.e., sample) from the above
        gp.RandomProvider() +

        # elastically deform the batch
        gp.ElasticAugment(
            [4,40,40],
            [0,2,2],
            [0,math.pi/2.0],
            prob_slip=0.05,
            prob_shift=0.05,
            max_misalign=25) +

        # apply transpose and mirror augmentations
        gp.SimpleAugment(transpose_only=[1, 2]) +

        # scale and shift the intensity of the raw array
        gp.IntensityAugment(
            raw,
            scale_min=0.9,
            scale_max=1.1,
            shift_min=-0.1,
            shift_max=0.1,
            z_section_wise=True) +

        # grow a boundary between labels
        gp.GrowBoundary(
            gt_labels,
            steps=3,
            only_xy=True) +

        # convert labels into affinities between voxels
        gp.AddAffinities(
            [[-1, 0, 0], [0, -1, 0], [0, 0, -1]],
            gt_labels,
            gt_affs) +

        # create a weight array that balances positive and negative samples in
        # the affinity array
        gp.BalanceLabels(
            gt_affs,
            loss_weights) +

        # pre-cache batches from the point upstream
        gp.PreCache(
            cache_size=10,
            num_workers=5) +

        # perform one training iteration for each passing batch (here we use
        # the tensor names earlier stored in train_net.config)
        gp.tensorflow.Train(
            'train_net',
            net_config['optimizer'],
            net_config['loss'],
            inputs={
                net_config['raw']: raw,
                net_config['gt_affs']: gt_affs,
                net_config['loss_weights']: loss_weights
            },
            outputs={
                net_config['pred_affs']: pred_affs
            },
            gradients={
                net_config['pred_affs']: pred_affs_gradients
            },
            save_every=1) +

        # save the passing batch as an HDF5 file for inspection
        gp.Snapshot(
            {
                raw: '/volumes/raw',
                gt_labels: '/volumes/labels/neuron_ids',
                gt_affs: '/volumes/labels/affs',
                pred_affs: '/volumes/pred_affs',
                pred_affs_gradients: '/volumes/pred_affs_gradients'
            },
            output_dir='snapshots',
            output_filename='batch_{iteration}.hdf',
            every=100,
            additional_request=snapshot_request,
            compression_type='gzip') +

        # show a summary of time spend in each node every 10 iterations
        gp.PrintProfilingStats(every=10)
    )

    #########
    # TRAIN #
    #########
    print("Training for", iterations, "iterations")

    with gp.build(pipeline):
        for i in range(iterations):
            pipeline.request_batch(request)

    print("Finished")
if __name__ == "__main__":
    # train for 200k iterations when run as a script
    train(200000)
| 5,395 | 28.167568 | 79 | py |
gunpowder | gunpowder-master/tests/conftest.py | import pytest
import multiprocessing as mp
# cannot parametrize unittest.TestCase. We should test both
# fork and spawn but I'm not sure how to.
# @pytest.fixture(params=["fork", "spawn"], autouse=True)
@pytest.fixture(autouse=True)
def context(monkeypatch):
    """Force multiprocessing primitives onto the 'spawn' start method for
    every test, by patching them on the ``multiprocessing`` module."""
    spawn_ctx = mp.get_context("spawn")
    for attr in ("Queue", "Process", "Event", "Value"):
        monkeypatch.setattr(mp, attr, getattr(spawn_ctx, attr))
| 492 | 29.8125 | 59 | py |
gunpowder | gunpowder-master/tests/__init__.py | 0 | 0 | 0 | py | |
gunpowder | gunpowder-master/tests/cases/add_vector_map.py | import unittest
from .provider_test import ProviderTest
from gunpowder import (
ArrayKeys,
ArraySpec,
GraphSpec,
Roi,
Array,
GraphKeys,
GraphKey,
Batch,
BatchProvider,
Graph,
Node,
Coordinate,
ArrayKey,
BatchRequest,
build,
)
from gunpowder.contrib import AddVectorMap
from copy import deepcopy
import itertools
import numpy as np
class AddVectorMapTestSource(BatchProvider):
    """Synthetic :class:`BatchProvider` used to test ``AddVectorMap``.

    Provides a raw array whose voxels encode their position, a label array
    with three constant regions, and randomly placed pre-/post-synaptic point
    pairs.
    """

    def setup(self):
        """Declare the arrays and graphs this source provides."""
        for identifier in [ArrayKeys.RAW, ArrayKeys.GT_LABELS]:
            self.provides(
                identifier,
                ArraySpec(
                    roi=Roi((1000, 1000, 1000), (400, 400, 400)), voxel_size=(20, 2, 2)
                ),
            )

        for identifier in [GraphKeys.PRESYN, GraphKeys.POSTSYN]:
            self.provides(
                identifier, GraphSpec(roi=Roi((1000, 1000, 1000), (400, 400, 400)))
            )

    def provide(self, request):
        """Assemble a batch with synthetic contents for ``request``."""
        batch = Batch()

        # have the pixels encode their position
        if ArrayKeys.RAW in request:
            # the z,y,x coordinates of the ROI
            roi = request[ArrayKeys.RAW].roi
            roi_voxel = roi // self.spec[ArrayKeys.RAW].voxel_size
            meshgrids = np.meshgrid(
                range(roi_voxel.begin[0], roi_voxel.end[0]),
                range(roi_voxel.begin[1], roi_voxel.end[1]),
                range(roi_voxel.begin[2], roi_voxel.end[2]),
                indexing="ij",
            )
            data = meshgrids[0] + meshgrids[1] + meshgrids[2]

            spec = self.spec[ArrayKeys.RAW].copy()
            spec.roi = roi
            batch.arrays[ArrayKeys.RAW] = Array(data, spec)

        if ArrayKeys.GT_LABELS in request:
            # labels: three constant regions (1, 2, 3)
            roi = request[ArrayKeys.GT_LABELS].roi
            roi_voxel_shape = (roi // self.spec[ArrayKeys.GT_LABELS].voxel_size).shape
            data = np.ones(roi_voxel_shape)
            data[roi_voxel_shape[0] // 2 :, roi_voxel_shape[1] // 2 :, :] = 2
            data[roi_voxel_shape[0] // 2 :, -(roi_voxel_shape[1] // 2) :, :] = 3
            spec = self.spec[ArrayKeys.GT_LABELS].copy()
            spec.roi = roi
            batch.arrays[ArrayKeys.GT_LABELS] = Array(data, spec)

        # generate one shared set of synapse locations for both graph keys
        if GraphKeys.PRESYN in request:
            data_presyn, data_postsyn = self.__get_pre_and_postsyn_locations(
                roi=request[GraphKeys.PRESYN].roi
            )
        elif GraphKeys.POSTSYN in request:
            data_presyn, data_postsyn = self.__get_pre_and_postsyn_locations(
                roi=request[GraphKeys.POSTSYN].roi
            )

        for graph_key, spec in request.graph_specs.items():
            if graph_key == GraphKeys.PRESYN:
                data = data_presyn
            if graph_key == GraphKeys.POSTSYN:
                data = data_postsyn
            batch.graphs[graph_key] = Graph(
                list(data.values()), [], GraphSpec(spec.roi)
            )

        return batch

    def __get_pre_and_postsyn_locations(self, roi):
        """Randomly place presynaptic locations in ``roi``, each with
        postsynaptic partners 60-120 units away, and return two dicts of
        location id -> :class:`Node` (presyn, postsyn)."""
        presyn_locs, postsyn_locs = {}, {}
        min_dist_between_presyn_locs = 250
        voxel_size_points = self.spec[ArrayKeys.RAW].voxel_size
        min_dist_pre_to_postsyn_loc, max_dist_pre_to_postsyn_loc = 60, 120
        num_presyn_locations = roi.size // (
            np.prod(50 * np.asarray(voxel_size_points))
        )  # 1 synapse per 50vx^3 cube
        num_postsyn_locations = np.random.randint(
            low=1, high=3
        )  # 1 to 3 postsyn partners

        loc_id = 0

        # NOTE(review): all_presyn_locs is never appended to, so the
        # min-distance rejection loop below is currently a no-op — confirm
        # whether enforcing it is intended before changing the sampling.
        all_presyn_locs = []
        for nr_presyn_loc in range(num_presyn_locations):
            loc_id = loc_id + 1
            presyn_loc_id = loc_id

            presyn_loc_too_close = True
            while presyn_loc_too_close:
                presyn_location = np.asarray(
                    [
                        np.random.randint(low=roi.begin[0], high=roi.end[0]),
                        np.random.randint(low=roi.begin[1], high=roi.end[1]),
                        np.random.randint(low=roi.begin[2], high=roi.end[2]),
                    ]
                )
                # ensure that partner locations of diff presyn locations are not overlapping
                presyn_loc_too_close = False
                for previous_loc in all_presyn_locs:
                    if np.linalg.norm(presyn_location - previous_loc) < (
                        min_dist_between_presyn_locs
                    ):
                        presyn_loc_too_close = True

            syn_id = nr_presyn_loc

            partner_ids = []
            for nr_partner_loc in range(num_postsyn_locations):
                loc_id = loc_id + 1
                partner_ids.append(loc_id)
                # rejection-sample a partner location until it falls in roi
                postsyn_loc_is_inside = False
                while not postsyn_loc_is_inside:
                    postsyn_location = presyn_location + np.random.choice(
                        (-1, 1), size=3, replace=True
                    ) * np.random.randint(
                        min_dist_pre_to_postsyn_loc, max_dist_pre_to_postsyn_loc, size=3
                    )
                    if roi.contains(Coordinate(postsyn_location)):
                        postsyn_loc_is_inside = True

                postsyn_locs[int(loc_id)] = deepcopy(
                    Node(
                        loc_id,
                        location=postsyn_location,
                        attrs={
                            "location_id": loc_id,
                            "synapse_id": syn_id,
                            "partner_ids": [presyn_loc_id],
                            "props": {},
                        },
                    )
                )

            presyn_locs[int(presyn_loc_id)] = deepcopy(
                Node(
                    presyn_loc_id,
                    location=presyn_location,
                    attrs={
                        "location_id": presyn_loc_id,
                        "synapse_id": syn_id,
                        "partner_ids": partner_ids,
                        "props": {},
                    },
                )
            )

        return presyn_locs, postsyn_locs
class TestAddVectorMap(ProviderTest):
    """Integration test for the AddVectorMap node."""
    def test_output_min_distance(self):
        """Check the vector map for both partner criteria.

        For every presynaptic point inside the requested ROI, each non-zero
        vector within ``radius_phys`` of the point must point at the closest
        partner ('min_distance') or at some valid partner ('all').
        """
        voxel_size = Coordinate((20, 2, 2))
        ArrayKey("GT_VECTORS_MAP_PRESYN")
        GraphKey("PRESYN")
        GraphKey("POSTSYN")
        arraytypes_to_source_target_pointstypes = {
            ArrayKeys.GT_VECTORS_MAP_PRESYN: (GraphKeys.PRESYN, GraphKeys.POSTSYN)
        }
        arraytypes_to_stayinside_arraytypes = {
            ArrayKeys.GT_VECTORS_MAP_PRESYN: ArrayKeys.GT_LABELS
        }
        # test for partner criterion 'min_distance'
        radius_phys = 30
        pipeline_min_distance = AddVectorMapTestSource() + AddVectorMap(
            src_and_trg_points=arraytypes_to_source_target_pointstypes,
            voxel_sizes={ArrayKeys.GT_VECTORS_MAP_PRESYN: voxel_size},
            radius_phys=radius_phys,
            partner_criterion="min_distance",
            stayinside_array_keys=arraytypes_to_stayinside_arraytypes,
            pad_for_partners=(0, 0, 0),
        )
        with build(pipeline_min_distance):
            request = BatchRequest()
            raw_roi = pipeline_min_distance.spec[ArrayKeys.RAW].roi
            gt_labels_roi = pipeline_min_distance.spec[ArrayKeys.GT_LABELS].roi
            presyn_roi = pipeline_min_distance.spec[GraphKeys.PRESYN].roi
            request.add(ArrayKeys.RAW, raw_roi.shape)
            request.add(ArrayKeys.GT_LABELS, gt_labels_roi.shape)
            request.add(GraphKeys.PRESYN, presyn_roi.shape)
            request.add(GraphKeys.POSTSYN, presyn_roi.shape)
            request.add(ArrayKeys.GT_VECTORS_MAP_PRESYN, presyn_roi.shape)
            # shift all requested ROIs onto the provider's (1000,1000,1000) offset
            for identifier, spec in request.items():
                spec.roi = spec.roi.shift(Coordinate(1000, 1000, 1000))
            batch = pipeline_min_distance.request_batch(request)
            presyn_locs = {n.id: n for n in batch.graphs[GraphKeys.PRESYN].nodes}
            postsyn_locs = {n.id: n for n in batch.graphs[GraphKeys.POSTSYN].nodes}
            vector_map_presyn = batch.arrays[ArrayKeys.GT_VECTORS_MAP_PRESYN].data
            offset_vector_map_presyn = request[ArrayKeys.GT_VECTORS_MAP_PRESYN].roi.offset
            self.assertTrue(len(presyn_locs) > 0)
            self.assertTrue(len(postsyn_locs) > 0)
            for loc_id, point in presyn_locs.items():
                if request[ArrayKeys.GT_VECTORS_MAP_PRESYN].roi.contains(
                    Coordinate(point.location)
                ):
                    self.assertTrue(
                        batch.arrays[ArrayKeys.GT_VECTORS_MAP_PRESYN].spec.roi.contains(
                            Coordinate(point.location)
                        )
                    )
                    # with 'min_distance', every vector must point at the
                    # closest postsynaptic partner of this presyn point
                    dist_to_loc = {}
                    for partner_id in point.attrs["partner_ids"]:
                        if partner_id in postsyn_locs.keys():
                            partner_location = postsyn_locs[partner_id].location
                            dist_to_loc[
                                np.linalg.norm(partner_location - point.location)
                            ] = partner_location
                    min_dist = np.min(list(dist_to_loc.keys()))
                    relevant_partner_loc = dist_to_loc[min_dist]
                    # voxel coordinates of the presyn point within the map
                    presyn_loc_shifted_vx = (
                        point.location - offset_vector_map_presyn
                    ) // voxel_size
                    radius_vx = [(radius_phys // vx_dim) for vx_dim in voxel_size]
                    region_to_check = np.clip(
                        [
                            (presyn_loc_shifted_vx - radius_vx),
                            (presyn_loc_shifted_vx + radius_vx),
                        ],
                        a_min=(0, 0, 0),
                        a_max=vector_map_presyn.shape[-3:],
                    )
                    for x, y, z in itertools.product(
                        range(int(region_to_check[0][0]), int(region_to_check[1][0])),
                        range(int(region_to_check[0][1]), int(region_to_check[1][1])),
                        range(int(region_to_check[0][2]), int(region_to_check[1][2])),
                    ):
                        if (
                            np.linalg.norm(
                                (np.array((x, y, z)) - np.asarray(point.location))
                            )
                            < radius_phys
                        ):
                            vector = [
                                vector_map_presyn[dim][x, y, z]
                                for dim in range(vector_map_presyn.shape[0])
                            ]
                            if not np.sum(vector) == 0:
                                # vector + its own physical position must land
                                # exactly on the closest partner location
                                trg_loc_of_vector_phys = (
                                    np.asarray(offset_vector_map_presyn)
                                    + (voxel_size * np.array([x, y, z]))
                                    + np.asarray(vector)
                                )
                                self.assertTrue(
                                    np.array_equal(
                                        trg_loc_of_vector_phys, relevant_partner_loc
                                    )
                                )
        # test for partner criterion 'all'
        pipeline_all = AddVectorMapTestSource() + AddVectorMap(
            src_and_trg_points=arraytypes_to_source_target_pointstypes,
            voxel_sizes={ArrayKeys.GT_VECTORS_MAP_PRESYN: voxel_size},
            radius_phys=radius_phys,
            partner_criterion="all",
            stayinside_array_keys=arraytypes_to_stayinside_arraytypes,
            pad_for_partners=(0, 0, 0),
        )
        with build(pipeline_all):
            batch = pipeline_all.request_batch(request)
            presyn_locs = {n.id: n for n in batch.graphs[GraphKeys.PRESYN].nodes}
            postsyn_locs = {n.id: n for n in batch.graphs[GraphKeys.POSTSYN].nodes}
            vector_map_presyn = batch.arrays[ArrayKeys.GT_VECTORS_MAP_PRESYN].data
            offset_vector_map_presyn = request[ArrayKeys.GT_VECTORS_MAP_PRESYN].roi.offset
            self.assertTrue(len(presyn_locs) > 0)
            self.assertTrue(len(postsyn_locs) > 0)
            for loc_id, point in presyn_locs.items():
                if request[ArrayKeys.GT_VECTORS_MAP_PRESYN].roi.contains(
                    Coordinate(point.location)
                ):
                    self.assertTrue(
                        batch.arrays[ArrayKeys.GT_VECTORS_MAP_PRESYN].spec.roi.contains(
                            Coordinate(point.location)
                        )
                    )
                    # with 'all', vectors may point at any partner; count how
                    # many vectors target each partner
                    partner_ids_to_locs_per_src, count_vectors_per_partner = {}, {}
                    for partner_id in point.attrs["partner_ids"]:
                        if partner_id in postsyn_locs.keys():
                            partner_ids_to_locs_per_src[partner_id] = postsyn_locs[
                                partner_id
                            ].location.tolist()
                            count_vectors_per_partner[partner_id] = 0
                    presyn_loc_shifted_vx = (
                        point.location - offset_vector_map_presyn
                    ) // voxel_size
                    radius_vx = [(radius_phys // vx_dim) for vx_dim in voxel_size]
                    region_to_check = np.clip(
                        [
                            (presyn_loc_shifted_vx - radius_vx),
                            (presyn_loc_shifted_vx + radius_vx),
                        ],
                        a_min=(0, 0, 0),
                        a_max=vector_map_presyn.shape[-3:],
                    )
                    for x, y, z in itertools.product(
                        range(int(region_to_check[0][0]), int(region_to_check[1][0])),
                        range(int(region_to_check[0][1]), int(region_to_check[1][1])),
                        range(int(region_to_check[0][2]), int(region_to_check[1][2])),
                    ):
                        if (
                            np.linalg.norm(
                                (np.array((x, y, z)) - np.asarray(point.location))
                            )
                            < radius_phys
                        ):
                            vector = [
                                vector_map_presyn[dim][x, y, z]
                                for dim in range(vector_map_presyn.shape[0])
                            ]
                            if not np.sum(vector) == 0:
                                trg_loc_of_vector_phys = (
                                    np.asarray(offset_vector_map_presyn)
                                    + (voxel_size * np.array([x, y, z]))
                                    + np.asarray(vector)
                                )
                                self.assertTrue(
                                    trg_loc_of_vector_phys.tolist()
                                    in partner_ids_to_locs_per_src.values()
                                )
                                for (
                                    partner_id,
                                    partner_loc,
                                ) in partner_ids_to_locs_per_src.items():
                                    if np.array_equal(
                                        np.asarray(trg_loc_of_vector_phys), partner_loc
                                    ):
                                        count_vectors_per_partner[partner_id] += 1
                    # vector counts per partner should be roughly balanced
                    self.assertTrue(
                        (
                            list(count_vectors_per_partner.values())
                            - np.min(list(count_vectors_per_partner.values()))
                            <= len(count_vectors_per_partner.keys())
                        ).all()
                    )
if __name__ == "__main__":
    # Run only this module's test case, with verbose per-test output.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestAddVectorMap)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
| 15,651 | 40.298153 | 92 | py |
gunpowder | gunpowder-master/tests/cases/unsqueeze.py | import copy
import numpy as np
import gunpowder as gp
from .provider_test import ProviderTest
class ExampleSourceUnsqueeze(gp.BatchProvider):
    """Test source serving random uint8 RAW data and constant uint64 LABELS."""
    def __init__(self, voxel_size):
        self.voxel_size = gp.Coordinate(voxel_size)
        self.roi = gp.Roi((0, 0, 0), (10, 10, 10)) * self.voxel_size
        self.raw = gp.ArrayKey("RAW")
        self.labels = gp.ArrayKey("LABELS")
        self.array_spec_raw = gp.ArraySpec(
            roi=self.roi,
            voxel_size=self.voxel_size,
            dtype="uint8",
            interpolatable=True,
        )
        self.array_spec_labels = gp.ArraySpec(
            roi=self.roi,
            voxel_size=self.voxel_size,
            dtype="uint64",
            interpolatable=False,
        )
    def setup(self):
        # Announce both arrays to downstream nodes.
        for key, spec in (
            (self.raw, self.array_spec_raw),
            (self.labels, self.array_spec_labels),
        ):
            self.provides(key, spec)
    def provide(self, request):
        batch = gp.Batch()
        # RAW: random bytes covering the requested ROI.
        spec_raw = copy.deepcopy(self.array_spec_raw)
        spec_raw.roi = request[self.raw].roi
        shape_raw = spec_raw.roi.shape / self.voxel_size
        batch[self.raw] = gp.Array(
            np.random.randint(0, 256, shape_raw, dtype=spec_raw.dtype), spec_raw
        )
        # LABELS: all ones over the requested ROI.
        spec_labels = copy.deepcopy(self.array_spec_labels)
        spec_labels.roi = request[self.labels].roi
        shape_labels = spec_labels.roi.shape / self.voxel_size
        batch[self.labels] = gp.Array(
            np.ones(shape_labels, dtype=spec_labels.dtype), spec_labels
        )
        return batch
class TestUnsqueeze(ProviderTest):
    """Tests for the Unsqueeze node (adding singleton dimensions)."""
    def test_unsqueeze(self):
        """Chained Unsqueeze nodes add a leading dim and a second dim at axis 1."""
        raw = gp.ArrayKey("RAW")
        labels = gp.ArrayKey("LABELS")
        voxel_size = gp.Coordinate((50, 5, 5))
        input_voxels = gp.Coordinate((10, 10, 10))
        input_size = input_voxels * voxel_size
        request = gp.BatchRequest()
        request.add(raw, input_size)
        request.add(labels, input_size)
        pipeline = (
            ExampleSourceUnsqueeze(voxel_size)
            + gp.Unsqueeze([raw, labels])
            + gp.Unsqueeze([raw], axis=1)
        )
        with gp.build(pipeline) as p:
            batch = p.request_batch(request)
        # raw: (1, 1, z, y, x); labels: (1, z, y, x)
        assert batch[raw].data.shape == (1,) + (1,) + input_voxels
        assert batch[labels].data.shape == (1,) + input_voxels
    def test_unsqueeze_not_possible(self):
        """Unsqueezing at axis=1 without an existing leading dim must raise."""
        raw = gp.ArrayKey("RAW")
        labels = gp.ArrayKey("LABELS")
        voxel_size = gp.Coordinate((50, 5, 5))
        input_voxels = gp.Coordinate((5, 5, 5))
        input_size = input_voxels * voxel_size
        request = gp.BatchRequest()
        request.add(raw, input_size)
        request.add(labels, input_size)
        pipeline = ExampleSourceUnsqueeze(voxel_size) + gp.Unsqueeze([raw], axis=1)
        with self.assertRaises(gp.PipelineRequestError):
            with gp.build(pipeline) as p:
                # the request itself must fail; the result is irrelevant
                # (was assigned to an unused local before)
                p.request_batch(request)
| 2,965 | 29.57732 | 88 | py |
gunpowder | gunpowder-master/tests/cases/squeeze.py | import copy
import numpy as np
import gunpowder as gp
from .provider_test import ProviderTest
class ExampleSourceSqueeze(gp.BatchProvider):
    """Test source for Squeeze: serves RAW with two leading singleton dims
    ((1, 1, z, y, x)) and LABELS with one ((1, z, y, x))."""
    def __init__(self, voxel_size):
        self.voxel_size = gp.Coordinate(voxel_size)
        self.roi = gp.Roi((0, 0, 0), (10, 10, 10)) * self.voxel_size
        self.raw = gp.ArrayKey("RAW")
        self.labels = gp.ArrayKey("LABELS")
        self.array_spec_raw = gp.ArraySpec(
            roi=self.roi, voxel_size=self.voxel_size, dtype="uint8", interpolatable=True
        )
        self.array_spec_labels = gp.ArraySpec(
            roi=self.roi,
            voxel_size=self.voxel_size,
            dtype="uint64",
            interpolatable=False,
        )
    def setup(self):
        self.provides(self.raw, self.array_spec_raw)
        self.provides(self.labels, self.array_spec_labels)
    def provide(self, request):
        outputs = gp.Batch()
        # RAW: random uint8 data for the requested ROI
        raw_spec = copy.deepcopy(self.array_spec_raw)
        raw_spec.roi = request[self.raw].roi
        raw_shape = request[self.raw].roi.shape / self.voxel_size
        outputs[self.raw] = gp.Array(
            np.random.randint(0, 256, raw_shape, dtype=raw_spec.dtype), raw_spec
        )
        # add two leading singleton dimensions -> (1, 1, z, y, x)
        outputs[self.raw].data = np.expand_dims(outputs[self.raw].data, axis=0)
        outputs[self.raw].data = np.expand_dims(outputs[self.raw].data, axis=0)
        # LABELS: constant ones for the requested ROI
        labels_spec = copy.deepcopy(self.array_spec_labels)
        labels_spec.roi = request[self.labels].roi
        labels_shape = request[self.labels].roi.shape / self.voxel_size
        labels = np.ones(labels_shape, dtype=labels_spec.dtype)
        outputs[self.labels] = gp.Array(labels, labels_spec)
        # add one leading singleton dimension -> (1, z, y, x)
        outputs[self.labels].data = np.expand_dims(outputs[self.labels].data, axis=0)
        return outputs
class TestSqueeze(ProviderTest):
    """Tests for the Squeeze node (removing singleton dimensions)."""
    def test_squeeze(self):
        """Chained Squeeze nodes remove the singleton dims added by the source."""
        raw = gp.ArrayKey("RAW")
        labels = gp.ArrayKey("LABELS")
        voxel_size = gp.Coordinate((50, 5, 5))
        input_voxels = gp.Coordinate((5, 5, 5))
        input_size = input_voxels * voxel_size
        request = gp.BatchRequest()
        request.add(raw, input_size)
        request.add(labels, input_size)
        pipeline = (
            ExampleSourceSqueeze(voxel_size)
            + gp.Squeeze([raw], axis=1)
            + gp.Squeeze([raw, labels])
        )
        with gp.build(pipeline) as p:
            batch = p.request_batch(request)
        # all leading singleton dimensions are gone
        assert batch[raw].data.shape == input_voxels
        assert batch[labels].data.shape == input_voxels
    def test_squeeze_not_possible(self):
        """Squeezing a non-singleton axis must raise a PipelineRequestError."""
        raw = gp.ArrayKey("RAW")
        labels = gp.ArrayKey("LABELS")
        voxel_size = gp.Coordinate((50, 5, 5))
        input_voxels = gp.Coordinate((5, 5, 5))
        input_size = input_voxels * voxel_size
        request = gp.BatchRequest()
        request.add(raw, input_size)
        request.add(labels, input_size)
        pipeline = ExampleSourceSqueeze(voxel_size) + gp.Squeeze([raw], axis=2)
        with self.assertRaises(gp.PipelineRequestError):
            with gp.build(pipeline) as p:
                # the request itself must fail; the result is irrelevant
                # (was assigned to an unused local before)
                p.request_batch(request)
| 3,211 | 29.884615 | 88 | py |
gunpowder | gunpowder-master/tests/cases/elastic_augment_points.py | import unittest
from gunpowder import (
BatchProvider,
Batch,
BatchRequest,
GraphSpec,
GraphKeys,
GraphKey,
Graph,
Node,
ArraySpec,
ArrayKeys,
ArrayKey,
Array,
Roi,
Coordinate,
ElasticAugment,
RasterizeGraph,
RasterizationSettings,
Snapshot,
build,
)
from .provider_test import ProviderTest
import numpy as np
import math
import time
import unittest
class PointTestSource3D(BatchProvider):
    """Provider with six fixed points along the y axis plus a labels array.

    The labels array has a striped background (100 on every other voxel row)
    and each point's id stamped into a small region around its location, so
    tests can verify that points move together with the voxels.
    """
    def setup(self):
        self.points = [
            Node(0, np.array([0, 0, 0])),
            Node(1, np.array([0, 10, 0])),
            Node(2, np.array([0, 20, 0])),
            Node(3, np.array([0, 30, 0])),
            Node(4, np.array([0, 40, 0])),
            Node(5, np.array([0, 50, 0])),
        ]
        self.provides(
            GraphKeys.TEST_POINTS,
            GraphSpec(roi=Roi((-100, -100, -100), (200, 200, 200))),
        )
        self.provides(
            ArrayKeys.TEST_LABELS,
            ArraySpec(
                roi=Roi((-100, -100, -100), (200, 200, 200)),
                voxel_size=Coordinate((4, 1, 1)),
                interpolatable=False,
            ),
        )
    def point_to_voxel(self, array_roi, location):
        """Return a 5-voxel-wide slice tuple centered on *location* (world units)."""
        # location is in world units, get it into voxels
        location = location / self.spec[ArrayKeys.TEST_LABELS].voxel_size
        # shift location relative to beginning of array roi
        location -= array_roi.begin / self.spec[ArrayKeys.TEST_LABELS].voxel_size
        return tuple(slice(int(l - 2), int(l + 3)) for l in location)
    def provide(self, request):
        batch = Batch()
        roi_points = request[GraphKeys.TEST_POINTS].roi
        roi_array = request[ArrayKeys.TEST_LABELS].roi
        roi_voxel = roi_array // self.spec[ArrayKeys.TEST_LABELS].voxel_size
        # striped background, then stamp each point's id around its location
        data = np.zeros(roi_voxel.shape, dtype=np.uint32)
        data[:, ::2] = 100
        for node in self.points:
            loc = self.point_to_voxel(roi_array, node.location)
            data[loc] = node.id
        spec = self.spec[ArrayKeys.TEST_LABELS].copy()
        spec.roi = roi_array
        batch.arrays[ArrayKeys.TEST_LABELS] = Array(data, spec=spec)
        # only hand out points inside the requested graph ROI
        points = []
        for node in self.points:
            if roi_points.contains(node.location):
                points.append(node)
        batch.graphs[GraphKeys.TEST_POINTS] = Graph(
            points, [], GraphSpec(roi=roi_points)
        )
        return batch
class DensePointTestSource3D(BatchProvider):
    """Provider with a dense 10x10x10 grid of points (spacing 4) plus labels.

    Like PointTestSource3D, but dense enough to exercise the fast points
    transform of ElasticAugment.
    """
    def setup(self):
        # 1000 points on a regular grid; the id encodes the grid position
        self.points = [
            Node(i, np.array([(i // 100) % 10 * 4, (i // 10) % 10 * 4, i % 10 * 4]))
            for i in range(1000)
        ]
        self.provides(
            GraphKeys.TEST_POINTS,
            GraphSpec(roi=Roi((-40, -40, -40), (120, 120, 120))),
        )
        self.provides(
            ArrayKeys.TEST_LABELS,
            ArraySpec(
                roi=Roi((-40, -40, -40), (120, 120, 120)),
                voxel_size=Coordinate((4, 1, 1)),
                interpolatable=False,
            ),
        )
    def point_to_voxel(self, array_roi, location):
        """Return a 5-voxel-wide slice tuple centered on *location* (world units)."""
        # location is in world units, get it into voxels
        location = location / self.spec[ArrayKeys.TEST_LABELS].voxel_size
        # shift location relative to beginning of array roi
        location -= array_roi.begin / self.spec[ArrayKeys.TEST_LABELS].voxel_size
        return tuple(slice(int(l - 2), int(l + 3)) for l in location)
    def provide(self, request):
        batch = Batch()
        roi_points = request[GraphKeys.TEST_POINTS].roi
        roi_array = request[ArrayKeys.TEST_LABELS].roi
        roi_voxel = roi_array // self.spec[ArrayKeys.TEST_LABELS].voxel_size
        # striped background, then stamp each point's id around its location
        data = np.zeros(roi_voxel.shape, dtype=np.uint32)
        data[:, ::2] = 100
        for node in self.points:
            loc = self.point_to_voxel(roi_array, node.location)
            data[loc] = node.id
        spec = self.spec[ArrayKeys.TEST_LABELS].copy()
        spec.roi = roi_array
        batch.arrays[ArrayKeys.TEST_LABELS] = Array(data, spec=spec)
        # only hand out points inside the requested graph ROI
        points = []
        for point in self.points:
            if roi_points.contains(point.location):
                points.append(point)
        batch[GraphKeys.TEST_POINTS] = Graph(points, [], GraphSpec(roi=roi_points))
        return batch
class TestElasticAugment(ProviderTest):
    """Tests for ElasticAugment applied to point annotations."""
    def test_3d_basics(self):
        """Points must move together with the voxels that carry their id."""
        test_labels = ArrayKey("TEST_LABELS")
        test_points = GraphKey("TEST_POINTS")
        test_raster = ArrayKey("TEST_RASTER")
        pipeline = (
            PointTestSource3D()
            + ElasticAugment(
                [10, 10, 10],
                [0.1, 0.1, 0.1],
                # [0, 0, 0], # no jitter
                [0, 2.0 * math.pi],
            )
            + RasterizeGraph(
                test_points,
                test_raster,
                settings=RasterizationSettings(radius=2, mode="peak"),
            )
            + Snapshot(
                {test_labels: "volumes/labels", test_raster: "volumes/raster"},
                dataset_dtypes={test_raster: np.float32},
                output_dir=self.path_to(),
                output_filename="elastic_augment_test{id}-{iteration}.hdf",
            )
        )
        for _ in range(5):
            with build(pipeline):
                request_roi = Roi((-20, -20, -20), (40, 40, 40))
                request = BatchRequest()
                request[test_labels] = ArraySpec(roi=request_roi)
                request[test_points] = GraphSpec(roi=request_roi)
                request[test_raster] = ArraySpec(roi=request_roi)
                batch = pipeline.request_batch(request)
                labels = batch[test_labels]
                points = batch[test_points]
                # the point at (0, 0, 0) should not have moved
                self.assertTrue(points.contains(0))
                labels_data_roi = (
                    labels.spec.roi - labels.spec.roi.begin
                ) / labels.spec.voxel_size
                # points should have moved together with the voxels
                for point in points.nodes:
                    loc = point.location - labels.spec.roi.begin
                    loc = loc / labels.spec.voxel_size
                    loc = Coordinate(int(round(x)) for x in loc)
                    if labels_data_roi.contains(loc):
                        self.assertEqual(labels.data[loc], point.id)
    def test_random_seed(self):
        """A fixed random seed must produce identical point positions each run."""
        test_labels = ArrayKey("TEST_LABELS")
        test_points = GraphKey("TEST_POINTS")
        test_raster = ArrayKey("TEST_RASTER")
        pipeline = (
            PointTestSource3D()
            + ElasticAugment(
                [10, 10, 10],
                [0.1, 0.1, 0.1],
                # [0, 0, 0], # no jitter
                [0, 2.0 * math.pi],
            )
            +  # rotate randomly
            # [math.pi/4, math.pi/4]) + # rotate by 45 deg
            # [0, 0]) + # no rotation
            RasterizeGraph(
                test_points,
                test_raster,
                settings=RasterizationSettings(radius=2, mode="peak"),
            )
            + Snapshot(
                {test_labels: "volumes/labels", test_raster: "volumes/raster"},
                dataset_dtypes={test_raster: np.float32},
                output_dir=self.path_to(),
                output_filename="elastic_augment_test{id}-{iteration}.hdf",
            )
        )
        batch_points = []
        for _ in range(5):
            with build(pipeline):
                request_roi = Roi((-20, -20, -20), (40, 40, 40))
                request = BatchRequest(random_seed=10)
                request[test_labels] = ArraySpec(roi=request_roi)
                request[test_points] = GraphSpec(roi=request_roi)
                request[test_raster] = ArraySpec(roi=request_roi)
                batch = pipeline.request_batch(request)
                labels = batch[test_labels]
                points = batch[test_points]
                # record (id, location) of every point for cross-run comparison
                batch_points.append(
                    tuple((node.id, tuple(node.location)) for node in points.nodes)
                )
                # the point at (0, 0, 0) should not have moved
                data = {node.id: node for node in points.nodes}
                self.assertTrue(0 in data)
                labels_data_roi = (
                    labels.spec.roi - labels.spec.roi.begin
                ) / labels.spec.voxel_size
                # points should have moved together with the voxels
                for node in points.nodes:
                    loc = node.location - labels.spec.roi.begin
                    loc = loc / labels.spec.voxel_size
                    loc = Coordinate(int(round(x)) for x in loc)
                    if labels_data_roi.contains(loc):
                        self.assertEqual(labels.data[loc], node.id)
        # all five seeded runs must have produced identical points
        for point_data in zip(*batch_points):
            self.assertEqual(len(set(point_data)), 1)
    def test_fast_transform(self):
        """Fast points transform must match the dense transform (within delta 1)
        for every point, lose no points, and be faster on average."""
        test_labels = ArrayKey("TEST_LABELS")
        test_points = GraphKey("TEST_POINTS")
        test_raster = ArrayKey("TEST_RASTER")
        fast_pipeline = (
            DensePointTestSource3D()
            + ElasticAugment(
                [10, 10, 10],
                [0.1, 0.1, 0.1],
                [0, 2.0 * math.pi],
                use_fast_points_transform=True,
            )
            + RasterizeGraph(
                test_points,
                test_raster,
                settings=RasterizationSettings(radius=2, mode="peak"),
            )
        )
        reference_pipeline = (
            DensePointTestSource3D()
            + ElasticAugment([10, 10, 10], [0.1, 0.1, 0.1], [0, 2.0 * math.pi])
            + RasterizeGraph(
                test_points,
                test_raster,
                settings=RasterizationSettings(radius=2, mode="peak"),
            )
        )
        timings = []
        for i in range(5):
            points_fast = {}
            points_reference = {}
            # seed chosen specifically to make this test fail
            seed = i + 15
            with build(fast_pipeline):
                request_roi = Roi((0, 0, 0), (40, 40, 40))
                request = BatchRequest(random_seed=seed)
                request[test_labels] = ArraySpec(roi=request_roi)
                request[test_points] = GraphSpec(roi=request_roi)
                request[test_raster] = ArraySpec(roi=request_roi)
                t1_fast = time.time()
                batch = fast_pipeline.request_batch(request)
                t2_fast = time.time()
                points_fast = {node.id: node for node in batch[test_points].nodes}
            with build(reference_pipeline):
                request_roi = Roi((0, 0, 0), (40, 40, 40))
                request = BatchRequest(random_seed=seed)
                request[test_labels] = ArraySpec(roi=request_roi)
                request[test_points] = GraphSpec(roi=request_roi)
                request[test_raster] = ArraySpec(roi=request_roi)
                t1_ref = time.time()
                batch = reference_pipeline.request_batch(request)
                t2_ref = time.time()
                points_reference = {node.id: node for node in batch[test_points].nodes}
            timings.append((t2_fast - t1_fast, t2_ref - t1_ref))
            diffs = []
            missing = 0
            for point_id, point in points_reference.items():
                if point_id not in points_fast:
                    missing += 1
                    continue
                diff = point.location - points_fast[point_id].location
                diffs.append(tuple(diff))
                self.assertAlmostEqual(
                    np.linalg.norm(diff),
                    0,
                    delta=1,
                    msg="fast transform returned location {} but expected {} for point {}".format(
                        point.location, points_fast[point_id].location, point_id
                    ),
                )
        # mean over the five runs: fast must beat reference, and recomputation
        # must have recovered every point
        t_fast, t_ref = [np.mean(x) for x in zip(*timings)]
        self.assertLess(t_fast, t_ref)
        self.assertEqual(missing, 0)
    def test_fast_transform_no_recompute(self):
        """Without recomputing missing points, some points are dropped but the
        fast transform is still faster than the dense reference."""
        test_labels = ArrayKey("TEST_LABELS")
        test_points = GraphKey("TEST_POINTS")
        test_raster = ArrayKey("TEST_RASTER")
        fast_pipeline = (
            DensePointTestSource3D()
            + ElasticAugment(
                [10, 10, 10],
                [0.1, 0.1, 0.1],
                [0, 2.0 * math.pi],
                use_fast_points_transform=True,
                recompute_missing_points=False,
            )
            + RasterizeGraph(
                test_points,
                test_raster,
                settings=RasterizationSettings(radius=2, mode="peak"),
            )
        )
        reference_pipeline = (
            DensePointTestSource3D()
            + ElasticAugment([10, 10, 10], [0.1, 0.1, 0.1], [0, 2.0 * math.pi])
            + RasterizeGraph(
                test_points,
                test_raster,
                settings=RasterizationSettings(radius=2, mode="peak"),
            )
        )
        timings = []
        for i in range(5):
            points_fast = {}
            points_reference = {}
            # seed chosen specifically to make this test fail
            seed = i + 15
            with build(fast_pipeline):
                request_roi = Roi((0, 0, 0), (40, 40, 40))
                request = BatchRequest(random_seed=seed)
                request[test_labels] = ArraySpec(roi=request_roi)
                request[test_points] = GraphSpec(roi=request_roi)
                request[test_raster] = ArraySpec(roi=request_roi)
                t1_fast = time.time()
                batch = fast_pipeline.request_batch(request)
                t2_fast = time.time()
                points_fast = {node.id: node for node in batch[test_points].nodes}
            with build(reference_pipeline):
                request_roi = Roi((0, 0, 0), (40, 40, 40))
                request = BatchRequest(random_seed=seed)
                request[test_labels] = ArraySpec(roi=request_roi)
                request[test_points] = GraphSpec(roi=request_roi)
                request[test_raster] = ArraySpec(roi=request_roi)
                t1_ref = time.time()
                batch = reference_pipeline.request_batch(request)
                t2_ref = time.time()
                points_reference = {node.id: node for node in batch[test_points].nodes}
            timings.append((t2_fast - t1_fast, t2_ref - t1_ref))
            diffs = []
            missing = 0
            for point_id, point in points_reference.items():
                if point_id not in points_fast:
                    missing += 1
                    continue
                diff = point.location - points_fast[point_id].location
                diffs.append(tuple(diff))
                self.assertAlmostEqual(
                    np.linalg.norm(diff),
                    0,
                    delta=1,
                    msg="fast transform returned location {} but expected {} for point {}".format(
                        point.location, points_fast[point_id].location, point_id
                    ),
                )
        # mean over the five runs: fast must beat reference, and without
        # recomputation at least one point must have been dropped
        t_fast, t_ref = [np.mean(x) for x in zip(*timings)]
        self.assertLess(t_fast, t_ref)
        self.assertGreater(missing, 0)
| 15,464 | 34.470183 | 98 | py |
gunpowder | gunpowder-master/tests/cases/intensity_augment.py | from .provider_test import ProviderTest
from gunpowder import IntensityAugment, ArrayKeys, build, Normalize
import numpy as np
class TestIntensityAugment(ProviderTest):
    def test_shift(self):
        """With scale fixed to 0 and shift fixed to 0.5, every voxel becomes 0.5."""
        pipeline = (
            self.test_source
            + Normalize(ArrayKeys.RAW)
            + IntensityAugment(
                ArrayKeys.RAW, scale_min=0, scale_max=0, shift_min=0.5, shift_max=0.5
            )
        )
        with build(pipeline):
            # repeat to cover the node's internal random draws
            for _ in range(100):
                batch = pipeline.request_batch(self.test_request)
                data = batch.arrays[ArrayKeys.RAW].data
                assert np.isclose(data.min(), 0.5)
                assert np.isclose(data.max(), 0.5)
| 709 | 28.583333 | 85 | py |
gunpowder | gunpowder-master/tests/cases/elastic_augment.py | from gunpowder import (
BatchProvider,
GraphSpec,
Roi,
Coordinate,
ArrayKeys,
ArraySpec,
Batch,
Array,
ArrayKey,
GraphKey,
BatchRequest,
RasterizationSettings,
RasterizeGraph,
Snapshot,
ElasticAugment,
build,
)
from gunpowder.graph import GraphKeys, Graph, Node
from .provider_test import ProviderTest
import numpy as np
import math
class GraphTestSource3D(BatchProvider):
    """Provider with six fixed graph nodes along the y axis plus a labels array.

    The labels array has a striped background (100 on every other voxel row)
    and each node's id stamped into a small region around its location, so
    tests can verify that nodes move together with the voxels.
    """
    def setup(self):
        self.nodes = [
            Node(id=0, location=np.array([0, 0, 0])),
            Node(id=1, location=np.array([0, 10, 0])),
            Node(id=2, location=np.array([0, 20, 0])),
            Node(id=3, location=np.array([0, 30, 0])),
            Node(id=4, location=np.array([0, 40, 0])),
            Node(id=5, location=np.array([0, 50, 0])),
        ]
        self.provides(
            GraphKeys.TEST_GRAPH,
            GraphSpec(roi=Roi((-100, -100, -100), (200, 200, 200))),
        )
        self.provides(
            ArrayKeys.TEST_LABELS,
            ArraySpec(
                roi=Roi((-100, -100, -100), (200, 200, 200)),
                voxel_size=Coordinate((4, 1, 1)),
                interpolatable=False,
            ),
        )
    def node_to_voxel(self, array_roi, location):
        """Return a 5-voxel-wide slice tuple centered on *location* (world units)."""
        # location is in world units, get it into voxels
        location = location / self.spec[ArrayKeys.TEST_LABELS].voxel_size
        # shift location relative to beginning of array roi
        location -= array_roi.begin / self.spec[ArrayKeys.TEST_LABELS].voxel_size
        return tuple(slice(int(l - 2), int(l + 3)) for l in location)
    def provide(self, request):
        batch = Batch()
        roi_graph = request[GraphKeys.TEST_GRAPH].roi
        roi_array = request[ArrayKeys.TEST_LABELS].roi
        roi_voxel = roi_array // self.spec[ArrayKeys.TEST_LABELS].voxel_size
        # striped background, then stamp each node's id around its location
        data = np.zeros(roi_voxel.shape, dtype=np.uint32)
        data[:, ::2] = 100
        for node in self.nodes:
            loc = self.node_to_voxel(roi_array, node.location)
            data[loc] = node.id
        spec = self.spec[ArrayKeys.TEST_LABELS].copy()
        spec.roi = roi_array
        batch.arrays[ArrayKeys.TEST_LABELS] = Array(data, spec=spec)
        # only hand out nodes inside the requested graph ROI
        nodes = []
        for node in self.nodes:
            if roi_graph.contains(node.location):
                nodes.append(node)
        batch.graphs[GraphKeys.TEST_GRAPH] = Graph(
            nodes=nodes, edges=[], spec=GraphSpec(roi=roi_graph)
        )
        return batch
class TestElasticAugment(ProviderTest):
    """Tests for ElasticAugment applied to graph data."""
    def test_3d_basics(self):
        """Graph nodes must move together with the voxels that carry their id."""
        test_labels = ArrayKey("TEST_LABELS")
        test_graph = GraphKey("TEST_GRAPH")
        test_raster = ArrayKey("TEST_RASTER")
        pipeline = (
            GraphTestSource3D()
            + ElasticAugment(
                [10, 10, 10],
                [0.1, 0.1, 0.1],
                # [0, 0, 0], # no jitter
                [0, 2.0 * math.pi],
            )
            +  # rotate randomly
            # [math.pi/4, math.pi/4]) + # rotate by 45 deg
            # [0, 0]) + # no rotation
            RasterizeGraph(
                test_graph,
                test_raster,
                settings=RasterizationSettings(radius=2, mode="peak"),
            )
            + Snapshot(
                {test_labels: "volumes/labels", test_raster: "volumes/raster"},
                dataset_dtypes={test_raster: np.float32},
                output_dir=self.path_to(),
                output_filename="elastic_augment_test{id}-{iteration}.hdf",
            )
        )
        for _ in range(5):
            with build(pipeline):
                request_roi = Roi((-20, -20, -20), (40, 40, 40))
                request = BatchRequest()
                request[test_labels] = ArraySpec(roi=request_roi)
                request[test_graph] = GraphSpec(roi=request_roi)
                request[test_raster] = ArraySpec(roi=request_roi)
                batch = pipeline.request_batch(request)
                labels = batch[test_labels]
                graph = batch[test_graph]
                # the node at (0, 0, 0) should not have moved
                # The node at (0,0,0) seems to have moved
                # self.assertIn(
                #     Node(id=0, location=np.array([0, 0, 0])), list(graph.nodes)
                # )
                self.assertIn(0, [v.id for v in graph.nodes])
                labels_data_roi = (
                    labels.spec.roi - labels.spec.roi.begin
                ) / labels.spec.voxel_size
                # graph should have moved together with the voxels
                for node in graph.nodes:
                    loc = node.location - labels.spec.roi.begin
                    loc = loc / labels.spec.voxel_size
                    loc = Coordinate(int(round(x)) for x in loc)
                    if labels_data_roi.contains(loc):
                        self.assertEqual(labels.data[loc], node.id)
| 4,944 | 31.966667 | 81 | py |
gunpowder | gunpowder-master/tests/cases/astype.py | from .provider_test import ProviderTest
from gunpowder import *
import numpy as np
class AsTypeTestSource(BatchProvider):
    """Test source whose voxels encode their own z+y+x position (in voxels)."""
    def setup(self):
        # RAW and GT_LABELS share the same ROI and isotropic voxel size (4,4,4).
        for key in (ArrayKeys.RAW, ArrayKeys.GT_LABELS):
            self.provides(
                key,
                ArraySpec(
                    roi=Roi((0, 0, 0), (1000, 1000, 1000)), voxel_size=(4, 4, 4)
                ),
            )
    def provide(self, request):
        batch = Batch()
        # every voxel encodes z + y + x of its position in voxel coordinates
        for key, requested in request.array_specs.items():
            roi = requested.roi
            voxel_roi = roi / 4
            axes = [
                range(voxel_roi.get_begin()[d], voxel_roi.get_end()[d])
                for d in range(3)
            ]
            grids = np.meshgrid(*axes, indexing="ij")
            data = grids[0] + grids[1] + grids[2]
            out_spec = self.spec[key].copy()
            out_spec.roi = roi
            batch.arrays[key] = Array(data, out_spec)
        return batch
class TestAsType(ProviderTest):
    """Tests for the AsType node (dtype casting into new array keys)."""
    def test_output(self):
        """Casting must preserve values; uncast arrays must pass through unchanged."""
        ArrayKey("RAW_TYPECAST")
        ArrayKey("GT_LABELS_TYPECAST")
        request = BatchRequest()
        request.add(ArrayKeys.RAW, (200, 200, 200))
        request.add(ArrayKeys.RAW_TYPECAST, (120, 120, 120))
        request.add(ArrayKeys.GT_LABELS, (200, 200, 200))
        request.add(ArrayKeys.GT_LABELS_TYPECAST, (200, 200, 200))
        pipeline = (
            AsTypeTestSource()
            + AsType(ArrayKeys.RAW, np.float16, ArrayKeys.RAW_TYPECAST)
            + AsType(ArrayKeys.GT_LABELS, np.int16, ArrayKeys.GT_LABELS_TYPECAST)
        )
        with build(pipeline):
            batch = pipeline.request_batch(request)
            for array_key, array in batch.arrays.items():
                # assert that pixels encode their position for supposedly unaltered
                # arrays
                if array_key in [ArrayKeys.RAW, ArrayKeys.GT_LABELS]:
                    # the z,y,x coordinates of the ROI
                    roi = array.spec.roi / 4
                    meshgrids = np.meshgrid(
                        range(roi.get_begin()[0], roi.get_end()[0]),
                        range(roi.get_begin()[1], roi.get_end()[1]),
                        range(roi.get_begin()[2], roi.get_end()[2]),
                        indexing="ij",
                    )
                    data = meshgrids[0] + meshgrids[1] + meshgrids[2]
                    self.assertTrue(np.array_equal(array.data, data), str(array_key))
                elif array_key == ArrayKeys.RAW_TYPECAST:
                    # 43 is the expected z+y+x sum at this voxel — depends on the
                    # offset of the centered (120,120,120) RAW_TYPECAST request
                    self.assertTrue(
                        array.data.dtype == np.float16,
                        f"RAW_TYPECAST dtype: {array.data.dtype} does not equal expected: np.float16",
                    )
                    self.assertTrue(
                        int(array.data[1, 11, 1]) == 43,
                        f"RAW_TYPECAST[1,11,1]: int({array.data[1,11,1]}) does not equal expected: 43",
                    )
                elif array_key == ArrayKeys.GT_LABELS_TYPECAST:
                    # 13 is the expected z+y+x sum for the (200,200,200) request
                    self.assertTrue(
                        array.data.dtype == np.int16,
                        f"GT_LABELS_TYPECAST dtype: {array.data.dtype} does not equal expected: np.int16",
                    )
                    self.assertTrue(
                        int(array.data[1, 11, 1]) == 13,
                        f"GT_LABELS_TYPECAST[1,11,1]: int({array.data[1,11,1]}) does not equal expected: 13",
                    )
                else:
                    self.assertTrue(False, "unexpected array type")
| 3,700 | 36.01 | 105 | py |
gunpowder | gunpowder-master/tests/cases/graph_keys.py | from __future__ import print_function
from gunpowder import GraphKey, GraphKeys
import unittest
class TestGraphKeys(unittest.TestCase):
    def test_register(self):
        """Creating a GraphKey registers it on GraphKeys; unknown names raise."""
        GraphKey("TEST_GRAPH")
        self.assertTrue(GraphKeys.TEST_GRAPH)
        with self.assertRaises(AttributeError):
            getattr(GraphKeys, "TEST_GRAPH_2")
| 323 | 26 | 77 | py |
gunpowder | gunpowder-master/tests/cases/resample.py | from .helper_sources import ArraySource
from gunpowder import (
ArrayKey,
ArraySpec,
Roi,
Coordinate,
Batch,
BatchRequest,
Array,
MergeProvider,
Resample,
build,
)
import numpy as np
def test_up_and_downsample():
    """Resample one source down (voxel size 4 -> 8) and another up (4 -> 2).

    The source voxels encode their own position (z + y + x of the voxel
    index), so the untouched arrays can be verified wholesale and a few
    resampled sample points can be checked against exactly known values.

    Fix: the assertion failure messages previously indexed the data with
    three indices (dropping the channel axis) and reported wrong expected
    values (0/1 instead of the asserted 3/4) for the upsampled labels; the
    messages now match the asserted expressions.
    """
    meshgrids = np.meshgrid(range(0, 250), range(0, 250), range(0, 250), indexing="ij")
    array = meshgrids[0] + meshgrids[1] + meshgrids[2]
    # three identical channels to exercise the channel dimension
    array = np.stack([array, array, array], axis=0)

    raw_key = ArrayKey("RAW")
    raw_resampled_key = ArrayKey("RAW_RESAMPLED")
    gt_key = ArrayKey("GT")
    gt_resampled_key = ArrayKey("GT_LABELS_RESAMPLED")

    raw_source = ArraySource(
        raw_key,
        Array(
            array, ArraySpec(Roi((0, 0, 0), (1000, 1000, 1000)), Coordinate(4, 4, 4))
        ),
    )
    gt_source = ArraySource(
        gt_key,
        Array(
            array, ArraySpec(Roi((0, 0, 0), (1000, 1000, 1000)), Coordinate(4, 4, 4))
        ),
    )

    request = BatchRequest()
    request.add(raw_key, (200, 200, 200))
    request.add(raw_resampled_key, (120, 120, 120))
    request.add(gt_key, (200, 200, 200))
    request.add(gt_resampled_key, (192, 192, 192))

    pipeline = (
        (raw_source, gt_source)
        + MergeProvider()
        + Resample(raw_key, Coordinate((8, 8, 8)), raw_resampled_key)
        + Resample(
            gt_key, Coordinate((2, 2, 2)), gt_resampled_key, interp_order=0
        )  # Test upsampling, without interpolation
    )

    with build(pipeline):
        batch = pipeline.request_batch(request)

        for array_key, array in batch.arrays.items():
            # assert that pixels encode their position for supposedly unaltered
            # arrays
            if array_key in [raw_key, gt_key]:
                # the z,y,x coordinates of the ROI
                roi = array.spec.roi / 4
                meshgrids = np.meshgrid(
                    range(roi.get_begin()[0], roi.get_end()[0]),
                    range(roi.get_begin()[1], roi.get_end()[1]),
                    range(roi.get_begin()[2], roi.get_end()[2]),
                    indexing="ij",
                )
                data = meshgrids[0] + meshgrids[1] + meshgrids[2]
                data = np.stack([data, data, data], axis=0)

                assert np.array_equal(array.data, data), str(array_key)

            elif array_key == raw_resampled_key:
                # Note: First assert averages over the voxels in the raw roi: (40:48, 40:48, 40:48), values of [30,31,31,32,31,32,32,33], the average of which is 31.5. Casting to an integer, in this case, rounds down, resulting in 31.
                assert (
                    array.data[0, 0, 0, 0] == 31
                ), f"RAW_RESAMPLED[0,0,0,0]: {array.data[0,0,0,0]} does not equal expected: 31"
                assert (
                    array.data[0, 1, 0, 0] == 33
                ), f"RAW_RESAMPLED[0,1,0,0]: {array.data[0,1,0,0]} does not equal expected: 33"

            elif array_key == gt_resampled_key:
                # Note: GT_LABELS_RESAMPLED is shifted a full pixel in from each side of original array to pad upsampling
                assert (
                    array.data[0, 0, 0, 0] == 3
                ), f"GT_LABELS_RESAMPLED[0,0,0,0]: {array.data[0,0,0,0]} does not equal expected: 3"
                assert (
                    array.data[0, 1, 0, 0] == 3
                ), f"GT_LABELS_RESAMPLED[0,1,0,0]: {array.data[0,1,0,0]} does not equal expected: 3"
                assert (
                    array.data[0, 2, 0, 0] == 4
                ), f"GT_LABELS_RESAMPLED[0,2,0,0]: {array.data[0,2,0,0]} does not equal expected: 4"
                assert (
                    array.data[0, 3, 0, 0] == 4
                ), f"GT_LABELS_RESAMPLED[0,3,0,0]: {array.data[0,3,0,0]} does not equal expected: 4"

            else:
                assert False, "unexpected array type"
| 3,703 | 35.313725 | 230 | py |
gunpowder | gunpowder-master/tests/cases/noise_augment.py | from .provider_test import ProviderTest
from gunpowder import IntensityAugment, ArrayKeys, build, Normalize, NoiseAugment
import numpy as np
class TestIntensityAugment(ProviderTest):
    def test_shift(self):
        """Noise added to a constant 0.5-shifted image must stay in [0, 1]."""
        pipeline = (
            self.test_source
            + Normalize(ArrayKeys.RAW)
            + IntensityAugment(
                ArrayKeys.RAW, scale_min=0, scale_max=0, shift_min=0.5, shift_max=0.5
            )
            + NoiseAugment(ArrayKeys.RAW, clip=True)
        )

        with build(pipeline):
            for _ in range(100):
                batch = pipeline.request_batch(self.test_request)
                raw = batch.arrays[ArrayKeys.RAW].data
                lo, hi = raw.min(), raw.max()
                # noise must spread values both below and above the 0.5 shift,
                # while clipping keeps everything inside [0, 1]
                assert 0 <= lo < 0.5
                assert 0.5 < hi <= 1
| 826 | 29.62963 | 85 | py |
gunpowder | gunpowder-master/tests/cases/zarr_read_write.py | from .helper_sources import ArraySource
from gunpowder import *
from gunpowder.ext import zarr, ZarrFile, NoSuchModule
import pytest
import numpy as np
@pytest.mark.skipif(isinstance(zarr, NoSuchModule), reason="zarr is not installed")
@pytest.mark.parametrize(
    "zarr_store_func",
    [
        "tmp_path / 'zarr_write_test.zarr'",
        "tmp_path / 'zarr_write_test.n5'",
        "tmp_path / 'zarr_write_test.hdf'",
        "zarr.DirectoryStore(f'{tmp_path}/array.zarr')",
        "zarr.storage.TempStore(dir=tmp_path)",
    ],
)
def test_read_write(tmp_path, zarr_store_func):
    """Round trip: write RAW chunk-wise with ZarrWrite + Scan, read it back.

    The store parameter is a *string of source code* (evaluated below)
    because each store needs the per-test ``tmp_path`` fixture at creation
    time.
    """
    zarr_store = eval(zarr_store_func)
    raw_key = ArrayKey("RAW")
    gt_key = ArrayKey("GT")
    roi_raw = Roi((20000, 2000, 2000), (2000, 200, 200))
    roi_gt = Roi((20100, 2010, 2010), (1800, 180, 180))
    voxel_size = Coordinate(20, 2, 2)
    # each voxel carries its own (z, y, x) index so positions can be checked
    # after the round trip
    raw_data = np.array(
        np.meshgrid(
            range((roi_raw / voxel_size).begin[0], (roi_raw / voxel_size).end[0]),
            range((roi_raw / voxel_size).begin[1], (roi_raw / voxel_size).end[1]),
            range((roi_raw / voxel_size).begin[2], (roi_raw / voxel_size).end[2]),
            indexing="ij",
        )
    )
    gt_data = np.array(
        np.meshgrid(
            range((roi_gt / voxel_size).begin[0], (roi_gt / voxel_size).end[0]),
            range((roi_gt / voxel_size).begin[1], (roi_gt / voxel_size).end[1]),
            range((roi_gt / voxel_size).begin[2], (roi_gt / voxel_size).end[2]),
            indexing="ij",
        )
    )
    raw_array = Array(raw_data, ArraySpec(roi_raw, voxel_size))
    gt_array = Array(gt_data, ArraySpec(roi_gt, voxel_size))
    source = (
        ArraySource(raw_key, raw_array),
        ArraySource(gt_key, gt_array),
    ) + MergeProvider()

    # chunk shapes deliberately do not evenly divide the full ROI
    chunk_request = BatchRequest()
    chunk_request.add(raw_key, (800, 80, 38))
    chunk_request.add(gt_key, (600, 60, 18))

    # only RAW is written; GT is requested but not persisted
    pipeline = (
        source
        + ZarrWrite({raw_key: "arrays/raw"}, store=zarr_store)
        + Scan(chunk_request)
    )

    with build(pipeline):
        raw_spec = pipeline.spec[raw_key]
        labels_spec = pipeline.spec[gt_key]

        full_request = BatchRequest({raw_key: raw_spec, gt_key: labels_spec})

        batch = pipeline.request_batch(full_request)

    # assert that stored HDF dataset equals batch array
    read_pipeline = ZarrSource(zarr_store, datasets={raw_key: "arrays/raw"})
    full_request = BatchRequest({raw_key: full_request[raw_key]})
    with build(read_pipeline):
        full_batch = read_pipeline.request_batch(full_request)

        # shape, offset and voxel size must survive the round trip
        assert (
            raw_data.shape[-3:]
            == full_batch[raw_key].spec.roi.shape // full_batch[raw_key].spec.voxel_size
        )
        assert roi_raw.offset == full_batch[raw_key].spec.roi.offset
        assert voxel_size == full_batch[raw_key].spec.voxel_size

        assert (raw_data == batch.arrays[raw_key].data).all()
def test_old_api(tmp_path):
    """Construct ZarrWrite/ZarrSource with deprecated argument conventions.

    Only checks that construction does not raise; no batches are requested.
    """
    raw_key = ArrayKey("RAW")
    ZarrWrite({raw_key: "arrays/raw"}, tmp_path, "data.zarr")
    ZarrWrite({raw_key: "arrays/raw"}, output_dir=tmp_path, output_filename="data.zarr")
    ZarrSource(filename=f"{tmp_path}/data.zarr", datasets={raw_key: "arrays/raw"})
    # arguments deliberately swapped — NOTE(review): presumably ZarrSource
    # detects and reorders swapped filename/datasets kwargs; confirm upstream
    ZarrSource(datasets=f"{tmp_path}/data.zarr", filename={raw_key: "arrays/raw"})
    ZarrSource(f"{tmp_path}/data.zarr", {raw_key: "arrays/raw"})
| 3,343 | 33.122449 | 88 | py |
gunpowder | gunpowder-master/tests/cases/placeholder_requests.py | from gunpowder import (
PipelineRequestError,
BatchProvider,
BatchRequest,
Batch,
Roi,
Coordinate,
GraphSpec,
GraphKey,
ArrayKeys,
ArrayKey,
ArraySpec,
Array,
ElasticAugment,
RandomLocation,
Snapshot,
build,
)
from gunpowder.graph import Graph, GraphKeys, Node
from .provider_test import ProviderTest
import pytest
import numpy as np
import math
import copy
class PointTestSource3D(BatchProvider):
    """Synthetic source with five nodes spaced along y and a striped label
    array in which each node is stamped as a cube carrying its id."""

    def setup(self):
        # nodes 20 world units apart along the y axis
        self.points = [
            Node(0, np.array([0, 10, 0])),
            Node(1, np.array([0, 30, 0])),
            Node(2, np.array([0, 50, 0])),
            Node(3, np.array([0, 70, 0])),
            Node(4, np.array([0, 90, 0])),
        ]
        self.provides(
            GraphKeys.TEST_POINTS,
            GraphSpec(roi=Roi((-100, -100, -100), (300, 300, 300))),
        )
        # anisotropic voxel size: 4 along z, 1 along y and x
        self.provides(
            ArrayKeys.TEST_LABELS,
            ArraySpec(
                roi=Roi((-100, -100, -100), (300, 300, 300)),
                voxel_size=Coordinate((4, 1, 1)),
                interpolatable=False,
            ),
        )

    def point_to_voxel(self, array_roi, location):
        """Return a tuple of slices selecting a 5-voxel cube around location."""
        # location is in world units, get it into voxels
        location = location / self.spec[ArrayKeys.TEST_LABELS].voxel_size
        # shift location relative to beginning of array roi
        location -= array_roi.begin / self.spec[ArrayKeys.TEST_LABELS].voxel_size
        return tuple(slice(int(l - 2), int(l + 3)) for l in location)

    def provide(self, request):
        batch = Batch()
        if GraphKeys.TEST_POINTS in request:
            roi_points = request[GraphKeys.TEST_POINTS].roi
            # deep-copy so downstream nodes cannot mutate the source's points
            contained_points = []
            for point in self.points:
                if roi_points.contains(point.location):
                    contained_points.append(copy.deepcopy(point))
            batch[GraphKeys.TEST_POINTS] = Graph(
                contained_points, [], GraphSpec(roi=roi_points)
            )
        if ArrayKeys.TEST_LABELS in request:
            roi_array = request[ArrayKeys.TEST_LABELS].roi
            roi_voxel = roi_array // self.spec[ArrayKeys.TEST_LABELS].voxel_size
            # stripe pattern: every other z-slab is 100, rest is 0
            data = np.zeros(roi_voxel.shape, dtype=np.uint32)
            data[:, ::2] = 100
            # stamp each point's id into the array as a small cube
            for point in self.points:
                loc = self.point_to_voxel(roi_array, point.location)
                data[loc] = point.id
            spec = self.spec[ArrayKeys.TEST_LABELS].copy()
            spec.roi = roi_array
            batch.arrays[ArrayKeys.TEST_LABELS] = Array(data, spec=spec)
        return batch
class TestPlaceholderRequest(ProviderTest):
    """Placeholder requests reserve keys (and thus seeding behavior) without
    demanding the data itself.

    Both tests build identical pipelines; the placeholder variant must yield
    the same augmented point locations as a full request with the same seed.
    """

    def test_without_placeholder(self):
        """A points-only request must fail: ElasticAugment gets no voxel size."""
        test_labels = ArrayKey("TEST_LABELS")
        test_points = GraphKey("TEST_POINTS")
        pipeline = (
            PointTestSource3D()
            + RandomLocation(ensure_nonempty=test_points)
            + ElasticAugment([10, 10, 10], [0.1, 0.1, 0.1], [0, 2.0 * math.pi])
            + Snapshot(
                {test_labels: "volumes/labels"},
                output_dir=self.path_to(),
                output_filename="elastic_augment_test{id}-{iteration}.hdf",
            )
        )
        with build(pipeline):
            for i in range(2):
                request_size = Coordinate((40, 40, 40))
                # request_a asks for points only, request_b additionally for
                # the label array; both use the same random seed
                request_a = BatchRequest(random_seed=i)
                request_a.add(test_points, request_size)
                request_b = BatchRequest(random_seed=i)
                request_b.add(test_points, request_size)
                request_b.add(test_labels, request_size)
                # No array to provide a voxel size to ElasticAugment
                with pytest.raises(PipelineRequestError):
                    pipeline.request_batch(request_a)
                batch_b = pipeline.request_batch(request_b)
                self.assertIn(test_labels, batch_b)

    def test_placeholder(self):
        """With a placeholder for the array, both requests sample identically."""
        test_labels = ArrayKey("TEST_LABELS")
        test_points = GraphKey("TEST_POINTS")
        pipeline = (
            PointTestSource3D()
            + RandomLocation(ensure_nonempty=test_points)
            + ElasticAugment([10, 10, 10], [0.1, 0.1, 0.1], [0, 2.0 * math.pi])
            + Snapshot(
                {test_labels: "volumes/labels"},
                output_dir=self.path_to(),
                output_filename="elastic_augment_test{id}-{iteration}.hdf",
            )
        )
        with build(pipeline):
            for i in range(2):
                request_size = Coordinate((40, 40, 40))
                # placeholder: test_labels is declared but its data not needed
                request_a = BatchRequest(random_seed=i)
                request_a.add(test_points, request_size)
                request_a.add(test_labels, request_size, placeholder=True)
                request_b = BatchRequest(random_seed=i)
                request_b.add(test_points, request_size)
                request_b.add(test_labels, request_size)
                batch_a = pipeline.request_batch(request_a)
                batch_b = pipeline.request_batch(request_b)
                points_a = batch_a[test_points].nodes
                points_b = batch_b[test_points].nodes
                # same seed + placeholder => identical augmented locations
                for a, b in zip(points_a, points_b):
                    assert all(np.isclose(a.location, b.location))
| 5,273 | 31.355828 | 81 | py |
gunpowder | gunpowder-master/tests/cases/iterate_locations.py | from .provider_test import ProviderTest
from gunpowder import (
ArrayKey,
ArrayKeys,
ArraySpec,
BatchRequest,
Node,
Edge,
GraphSpec,
GraphKey,
GraphKeys,
GraphSource,
IterateLocations,
build,
Roi,
Coordinate,
)
import numpy as np
import networkx as nx
class DummyDaisyGraphProvider:
    """Minimal stand-in for daisy.SharedGraphProvider.

    Exposes what gunpowder's GraphSource relies on: a ``directed`` flag, a
    ``position_attribute`` name, and ``__getitem__(roi)`` returning a
    networkx graph restricted to the ROI.
    """

    def __init__(self, nodes, edges, directed=False):
        self.nodes = nodes
        self.edges = edges
        self.directed = directed
        self.position_attribute = "location"

    def __getitem__(self, roi):
        graph_cls = nx.DiGraph if self.directed else nx.Graph
        graph = graph_cls()
        # keep only the nodes whose location lies inside the requested ROI
        for node in self.nodes:
            if roi.contains(node.location):
                graph.add_node(node.id, location=node.location)
        # keep edges whose source node survived the crop
        for edge in self.edges:
            if edge.u in graph.nodes:
                graph.add_edge(edge.u, edge.v)
        return graph
class TestIterateLocation(ProviderTest):
    """IterateLocations should visit every node exactly once, one per batch."""

    @property
    def edges(self):
        # a 5-cycle over the node ids
        return [Edge(0, 1), Edge(1, 2), Edge(2, 3), Edge(3, 4), Edge(4, 0)]

    @property
    def nodes(self):
        # nodes on the main diagonal of a 5x5x5 cube
        return [
            Node(0, location=np.array([0, 0, 0], dtype=self.spec.dtype)),
            Node(1, location=np.array([1, 1, 1], dtype=self.spec.dtype)),
            Node(2, location=np.array([2, 2, 2], dtype=self.spec.dtype)),
            Node(3, location=np.array([3, 3, 3], dtype=self.spec.dtype)),
            Node(4, location=np.array([4, 4, 4], dtype=self.spec.dtype)),
        ]

    @property
    def spec(self):
        return GraphSpec(
            roi=Roi(Coordinate([0, 0, 0]), Coordinate([5, 5, 5])), directed=True
        )

    def test_output(self):
        """Request len(nodes) batches and check each node is seen once."""
        GraphKey("TEST_GRAPH")
        ArrayKey("NODE_ID")
        dummy_provider = DummyDaisyGraphProvider(self.nodes, self.edges, directed=True)
        graph_source = GraphSource(dummy_provider, GraphKeys.TEST_GRAPH, self.spec)
        iterate_locations = IterateLocations(
            GraphKeys.TEST_GRAPH, node_id=ArrayKeys.NODE_ID
        )
        pipeline = graph_source + iterate_locations
        # a 1-voxel ROI: each batch centers on exactly one node
        request = BatchRequest(
            {
                GraphKeys.TEST_GRAPH: GraphSpec(roi=Roi((0, 0, 0), (1, 1, 1))),
                ArrayKeys.NODE_ID: ArraySpec(nonspatial=True),
            }
        )
        node_ids = []
        seen_vertices = []
        expected_vertices = self.nodes
        with build(pipeline):
            for _ in range(len(self.nodes)):
                batch = pipeline.request_batch(request)
                node_ids.extend(batch[ArrayKeys.NODE_ID].data)
                graph = batch[GraphKeys.TEST_GRAPH]
                self.assertEqual(graph.num_vertices(), 1)
                node = next(graph.nodes)
                seen_vertices.append(node)
        # every node id must appear exactly once over the iteration
        self.assertCountEqual(
            [v.id for v in expected_vertices],
            node_ids,
        )
        for vertex in seen_vertices:
            # locations are shifted to lie in roi (so, (0, 0, 0))
            assert all(np.isclose(np.array([0.0, 0.0, 0.0]), vertex.location))
| 3,300 | 30.740385 | 87 | py |
gunpowder | gunpowder-master/tests/cases/crop.py | from .provider_test import ProviderTest
from gunpowder import (
BatchProvider,
ArrayKeys,
ArraySpec,
Roi,
GraphKey,
GraphKeys,
GraphSpec,
Crop,
build,
)
import logging
logger = logging.getLogger(__name__)
class ExampleSourceCrop(BatchProvider):
    """Provider that only declares specs; ``provide`` is never exercised."""

    def setup(self):
        roi = Roi((200, 20, 20), (1800, 180, 180))
        self.provides(ArrayKeys.RAW, ArraySpec(roi=roi, voxel_size=(20, 2, 2)))
        self.provides(GraphKeys.PRESYN, GraphSpec(roi=roi))

    def provide(self, request):
        # the Crop tests only inspect specs; no batch is ever built
        pass
class TestCrop(ProviderTest):
    def test_output(self):
        """Crop with explicit ROIs and with fractional shrinking."""
        GraphKey("PRESYN")
        raw_roi = Roi((400, 40, 40), (1000, 100, 100))
        presyn_roi = Roi((800, 80, 80), (800, 80, 80))

        # explicit target ROIs for an array and a graph
        pipeline = (
            ExampleSourceCrop()
            + Crop(ArrayKeys.RAW, raw_roi)
            + Crop(GraphKeys.PRESYN, presyn_roi)
        )
        with build(pipeline):
            self.assertEqual(pipeline.spec[ArrayKeys.RAW].roi, raw_roi)
            self.assertEqual(pipeline.spec[GraphKeys.PRESYN].roi, presyn_roi)

        # fractional crop: remove a quarter from each end of the first axis
        pipeline = ExampleSourceCrop() + Crop(
            ArrayKeys.RAW,
            fraction_negative=(0.25, 0, 0),
            fraction_positive=(0.25, 0, 0),
        )
        expected_roi_raw = Roi((650, 20, 20), (900, 180, 180))
        with build(pipeline):
            logger.info(pipeline.spec[ArrayKeys.RAW].roi)
            logger.info(expected_roi_raw)
            self.assertEqual(pipeline.spec[ArrayKeys.RAW].roi, expected_roi_raw)
| 1,648 | 26.032787 | 87 | py |
gunpowder | gunpowder-master/tests/cases/node_dependencies.py | from .provider_test import ProviderTest
from gunpowder import (
BatchProvider,
BatchFilter,
BatchRequest,
Batch,
ArrayKeys,
ArraySpec,
ArrayKey,
Array,
Roi,
build,
)
import numpy as np
class NodeDependenciesTestSource(BatchProvider):
    """Source providing arrays A and B whose voxels encode their position
    (z + y + x of the voxel index), voxel size (4, 4, 4).

    Fix: the alignment assertion message contained an unformatted ``%s``
    placeholder; it now interpolates the offending ROI.
    """

    def setup(self):
        self.provides(
            ArrayKeys.A,
            ArraySpec(roi=Roi((0, 0, 0), (1000, 1000, 1000)), voxel_size=(4, 4, 4)),
        )
        self.provides(
            ArrayKeys.B,
            ArraySpec(roi=Roi((0, 0, 0), (1000, 1000, 1000)), voxel_size=(4, 4, 4)),
        )

    def provide(self, request):
        batch = Batch()
        # have the pixels encode their position
        for array_key, spec in request.array_specs.items():
            roi = spec.roi
            for d in range(3):
                assert roi.begin[d] % 4 == 0, f"roi {roi} does not align with voxels"
            data_roi = roi / 4
            # the z,y,x coordinates of the ROI
            meshgrids = np.meshgrid(
                range(data_roi.begin[0], data_roi.end[0]),
                range(data_roi.begin[1], data_roi.end[1]),
                range(data_roi.begin[2], data_roi.end[2]),
                indexing="ij",
            )
            data = meshgrids[0] + meshgrids[1] + meshgrids[2]
            spec = self.spec[array_key].copy()
            spec.roi = roi
            batch.arrays[array_key] = Array(data, spec)
        return batch
class NodeDependenciesTestNode(BatchFilter):
    """Creates C from B, requiring 20 units of context on every side."""

    def __init__(self):
        self.context = (20, 20, 20)

    def setup(self):
        # C shares B's spec
        self.provides(ArrayKeys.C, self.spec[ArrayKeys.B])

    def prepare(self, request):
        assert ArrayKeys.C in request
        # ask upstream for B grown by the context on both sides
        deps = BatchRequest()
        grown_roi = request[ArrayKeys.C].roi.grow(self.context, self.context)
        deps[ArrayKeys.B] = ArraySpec(grown_roi)
        return deps

    def process(self, batch, request):
        # make sure a ROI is what we requested
        expected_b_roi = request[ArrayKeys.C].roi.grow(self.context, self.context)
        assert batch[ArrayKeys.B].spec.roi == expected_b_roi

        # C is B cropped back to the requested ROI
        outputs = Batch()
        outputs[ArrayKeys.C] = batch[ArrayKeys.B].crop(request[ArrayKeys.C].roi)
        return outputs
class TestNodeDependencies(ProviderTest):
    def test_dependecies(self):
        """C is computed from B with 20 units of context; check ROI bookkeeping.

        Requests C alone, then C with an equal-sized, larger, and smaller B,
        verifying that A never leaks into the batch and that B and C agree
        where their ROIs overlap.
        """
        ArrayKey("A")
        ArrayKey("B")
        ArrayKey("C")

        pipeline = NodeDependenciesTestSource()
        pipeline += NodeDependenciesTestNode()

        c_roi = Roi((100, 100, 100), (100, 100, 100))

        # simple test, ask only for C
        request = BatchRequest()
        request[ArrayKeys.C] = ArraySpec(roi=c_roi)

        with build(pipeline):
            batch = pipeline.request_batch(request)

        # only C should be delivered; the intermediate B is dropped
        assert ArrayKeys.A not in batch
        assert ArrayKeys.B not in batch
        assert batch[ArrayKeys.C].spec.roi == c_roi

        # ask for C and B of same size as needed by node
        b_roi = c_roi.grow((20, 20, 20), (20, 20, 20))

        request = BatchRequest()
        request[ArrayKeys.C] = ArraySpec(roi=c_roi)
        request[ArrayKeys.B] = ArraySpec(roi=b_roi)

        with build(pipeline):
            batch = pipeline.request_batch(request)

        c = batch[ArrayKeys.C]
        b = batch[ArrayKeys.B]
        assert b.spec.roi == b_roi
        assert c.spec.roi == c_roi
        assert np.equal(b.crop(c.spec.roi).data, c.data).all()

        # ask for C and B of larger size
        b_roi = c_roi.grow((40, 40, 40), (40, 40, 40))

        request = BatchRequest()
        request[ArrayKeys.B] = ArraySpec(roi=b_roi)
        request[ArrayKeys.C] = ArraySpec(roi=c_roi)

        with build(pipeline):
            batch = pipeline.request_batch(request)

        b = batch[ArrayKeys.B]
        c = batch[ArrayKeys.C]
        assert ArrayKeys.A not in batch
        assert b.spec.roi == b_roi
        assert c.spec.roi == c_roi
        assert np.equal(b.crop(c.spec.roi).data, c.data).all()

        # ask for C and B of smaller size
        b_roi = c_roi.grow((-40, -40, -40), (-40, -40, -40))

        request = BatchRequest()
        request[ArrayKeys.B] = ArraySpec(roi=b_roi)
        request[ArrayKeys.C] = ArraySpec(roi=c_roi)

        with build(pipeline):
            batch = pipeline.request_batch(request)

        b = batch[ArrayKeys.B]
        c = batch[ArrayKeys.C]
        assert ArrayKeys.A not in batch
        assert b.spec.roi == b_roi
        assert c.spec.roi == c_roi
        # here B is contained in C, so crop C to compare
        assert np.equal(c.crop(b.spec.roi).data, b.data).all()
| 4,593 | 27.184049 | 84 | py |
gunpowder | gunpowder-master/tests/cases/scan.py | from gunpowder import (
BatchProvider,
BatchRequest,
Batch,
ArrayKeys,
ArrayKey,
ArraySpec,
Array,
GraphKey,
GraphKeys,
GraphSpec,
Graph,
Node,
Roi,
Coordinate,
Scan,
build,
)
import numpy as np
import itertools
def coordinate_to_id(i, j, k):
    """Map a world coordinate on the test node grid to a unique node id.

    Nodes sit every (100, 10, 10) world units starting at (20000, 2000,
    2000); the grid index is flattened with strides (1, 20, 400).
    """
    zi = (i - 20000) // 100
    yi = (j - 2000) // 10
    xi = (k - 2000) // 10
    return zi + 20 * yi + 400 * xi
class ScanTestSource(BatchProvider):
    """Source whose voxels encode their own position (z + y + x of the voxel
    index) and whose graph has one node at every (100, 10, 10) grid point."""

    def __init__(self, raw_key, gt_labels_key, gt_graph_key):
        self.raw_key = raw_key
        self.gt_labels_key = gt_labels_key
        self.gt_graph_key = gt_graph_key

    def setup(self):
        self.provides(
            self.raw_key,
            ArraySpec(
                roi=Roi((20000, 2000, 2000), (2000, 200, 200)), voxel_size=(20, 2, 2)
            ),
        )
        self.provides(
            self.gt_labels_key,
            ArraySpec(
                roi=Roi((20100, 2010, 2010), (1800, 180, 180)), voxel_size=(20, 2, 2)
            ),
        )
        # unbounded ROI: the graph extends infinitely in all directions
        self.provides(
            self.gt_graph_key,
            GraphSpec(
                roi=Roi((None, None, None), (None, None, None)),
            ),
        )

    def provide(self, request):
        batch = Batch()
        # have the pixels encode their position
        for array_key, spec in request.array_specs.items():
            roi = spec.roi
            roi_voxel = roi // self.spec[array_key].voxel_size
            # the z,y,x coordinates of the ROI
            meshgrids = np.meshgrid(
                range(roi_voxel.begin[0], roi_voxel.end[0]),
                range(roi_voxel.begin[1], roi_voxel.end[1]),
                range(roi_voxel.begin[2], roi_voxel.end[2]),
                indexing="ij",
            )
            data = meshgrids[0] + meshgrids[1] + meshgrids[2]
            spec = self.spec[array_key].copy()
            spec.roi = roi
            batch.arrays[array_key] = Array(data, spec)
        for graph_key, spec in request.graph_specs.items():
            # node at x, y, z if x%100==0, y%10==0, z%10==0
            nodes = []
            # snap the ROI begin down to the node grid
            start = spec.roi.begin - (spec.roi.begin % Coordinate(100, 10, 10))
            for i, j, k in itertools.product(
                *[range(a, b, s) for a, b, s in zip(start, spec.roi.end, [100, 10, 10])]
            ):
                location = np.array([i, j, k])
                if spec.roi.contains(location):
                    nodes.append(Node(id=coordinate_to_id(i, j, k), location=location))
            batch.graphs[graph_key] = Graph(nodes, [], spec)
        return batch
def test_output():
    """Scan over the full source in chunks and verify the assembled batch.

    Every voxel must still encode its position and every grid node must be
    present at its original location; also checks that Scan handles an empty
    request without error.
    """
    raw_key = ArrayKey("RAW")
    gt_labels_key = ArrayKey("GT_LABELS")
    gt_graph_key = GraphKey("GT_GRAPH")

    # chunk shapes deliberately do not evenly divide the full ROIs
    chunk_request = BatchRequest()
    chunk_request.add(raw_key, (400, 30, 34))
    chunk_request.add(gt_labels_key, (200, 10, 14))
    chunk_request.add(gt_graph_key, (400, 30, 34))

    pipeline = ScanTestSource(raw_key, gt_labels_key, gt_graph_key) + Scan(
        chunk_request, num_workers=10
    )

    with build(pipeline):
        raw_spec = pipeline.spec[ArrayKeys.RAW]
        labels_spec = pipeline.spec[ArrayKeys.GT_LABELS]
        graph_spec = pipeline.spec[GraphKeys.GT_GRAPH]

        full_request = BatchRequest(
            {
                ArrayKeys.RAW: raw_spec,
                ArrayKeys.GT_LABELS: labels_spec,
                GraphKeys.GT_GRAPH: graph_spec,
            }
        )

        batch = pipeline.request_batch(full_request)
        voxel_size = pipeline.spec[ArrayKeys.RAW].voxel_size

    # assert that pixels encode their position
    for array_key, array in batch.arrays.items():
        # the z,y,x coordinates of the ROI
        roi = array.spec.roi
        meshgrids = np.meshgrid(
            range(roi.begin[0] // voxel_size[0], roi.end[0] // voxel_size[0]),
            range(roi.begin[1] // voxel_size[1], roi.end[1] // voxel_size[1]),
            range(roi.begin[2] // voxel_size[2], roi.end[2] // voxel_size[2]),
            indexing="ij",
        )
        data = meshgrids[0] + meshgrids[1] + meshgrids[2]

        assert (array.data == data).all()

    # every grid node must be present at its original location
    for graph_key, graph in batch.graphs.items():
        roi = graph.spec.roi
        for i, j, k in itertools.product(
            range(20000, 22000, 100), range(2000, 2200, 10), range(2000, 2200, 10)
        ):
            assert all(
                np.isclose(
                    graph.node(coordinate_to_id(i, j, k)).location, np.array([i, j, k])
                )
            )

    assert batch.arrays[ArrayKeys.RAW].spec.roi.offset == (20000, 2000, 2000)

    # test scanning with empty request
    pipeline = ScanTestSource(raw_key, gt_labels_key, gt_graph_key) + Scan(
        chunk_request, num_workers=1
    )
    with build(pipeline):
        batch = pipeline.request_batch(BatchRequest())
| 4,999 | 30.446541 | 88 | py |
gunpowder | gunpowder-master/tests/cases/update_with.py | import numpy as np
from gunpowder import (
BatchProvider,
BatchFilter,
Array,
ArraySpec,
ArrayKey,
Graph,
GraphSpec,
GraphKey,
Batch,
BatchRequest,
Roi,
PipelineRequestError,
build,
)
import pytest
class ArrayTestSource(BatchProvider):
    """Provides a zero-filled array under ``key``, filling unspecified spec
    fields (voxel size, dtype, flags) with defaults."""

    def __init__(self, key, spec):
        # defaults for anything the caller's spec leaves unset
        full_spec = ArraySpec(
            voxel_size=(1,) * spec.roi.dims,
            interpolatable=False,
            nonspatial=False,
            dtype=np.uint8,
        )
        full_spec.update_with(spec)
        self.key = key
        shape = full_spec.roi.shape / full_spec.voxel_size
        self.array = Array(
            np.zeros(shape, dtype=full_spec.dtype),
            spec=full_spec,
        )

    def setup(self):
        self.provides(self.key, self.array.spec)

    def provide(self, request):
        batch = Batch()
        batch[self.key] = self.array.crop(request[self.key].roi)
        return batch
class RequiresSpec(BatchFilter):
    """Filter that requests its upstream key with an exact, fixed spec."""

    def __init__(self, key, required_spec):
        self.key = key
        self.required_spec = required_spec

    def setup(self):
        self.updates(self.key, self.spec[self.key].copy())

    def prepare(self, request):
        # force the upstream request to carry the required spec verbatim
        deps = BatchRequest()
        deps[self.key] = self.required_spec
        return deps

    def process(self, batch, request):
        # pass-through: only the request side matters for these tests
        return batch
@pytest.mark.parametrize("request_dtype", [np.uint8, np.int64, np.float32])
def test_require_dtype(request_dtype):
    """A downstream dtype requirement must match the provided dtype exactly."""
    provided_dtypes = [
        np.uint8,
        np.uint16,
        np.uint32,
        np.int32,
        np.int64,
        np.float32,
        np.float64,
    ]
    array = ArrayKey("ARRAY")
    roi = Roi((0, 0), (10, 10))
    for provided in provided_dtypes:
        pipeline = ArrayTestSource(
            array, ArraySpec(roi=roi, dtype=provided)
        ) + RequiresSpec(array, ArraySpec(roi=roi, dtype=request_dtype))
        with build(pipeline):
            batch_request = BatchRequest()
            batch_request[array] = ArraySpec(roi)
            if provided == request_dtype:
                pipeline.request_batch(batch_request)
            else:
                # a mismatching dtype must be rejected by the pipeline
                with pytest.raises(PipelineRequestError):
                    pipeline.request_batch(batch_request)
@pytest.mark.parametrize("request_voxel_size", [(1, 1), (2, 2)])
def test_require_voxel_size(request_voxel_size):
    """A downstream voxel-size requirement must match the provided voxel size."""
    provided_sizes = [
        (1, 1),
        (4, 4),
        (6, 6),
    ]
    array = ArrayKey("ARRAY")
    roi = Roi((0, 0), (12, 12))
    for provided in provided_sizes:
        pipeline = ArrayTestSource(
            array, ArraySpec(roi=roi, voxel_size=provided)
        ) + RequiresSpec(array, ArraySpec(roi=roi, voxel_size=request_voxel_size))
        with build(pipeline):
            batch_request = BatchRequest()
            batch_request[array] = ArraySpec(roi)
            if provided == request_voxel_size:
                pipeline.request_batch(batch_request)
            else:
                # a mismatching voxel size must be rejected by the pipeline
                with pytest.raises(PipelineRequestError):
                    pipeline.request_batch(batch_request)
class GraphTestSource(BatchProvider):
    """Provides an empty graph under ``key``; directedness defaults to True
    unless the given spec says otherwise."""

    def __init__(self, key, spec):
        full_spec = GraphSpec(directed=True)
        full_spec.update_with(spec)
        self.key = key
        self.graph = Graph([], [], spec=full_spec)

    def setup(self):
        self.provides(self.key, self.graph.spec)

    def provide(self, request):
        batch = Batch()
        batch[self.key] = self.graph.crop(request[self.key].roi)
        return batch
@pytest.mark.parametrize("requested_directed", [True, False])
def test_require_directed(requested_directed):
    """A downstream directedness requirement must match the provided graph."""
    graph = GraphKey("GRAPH")
    roi = Roi((0, 0), (10, 10))
    for provided_directed in (True, False):
        pipeline = GraphTestSource(
            graph, GraphSpec(roi=roi, directed=provided_directed)
        ) + RequiresSpec(graph, GraphSpec(roi=roi, directed=requested_directed))
        with build(pipeline):
            batch_request = BatchRequest()
            batch_request[graph] = GraphSpec(roi)
            if provided_directed == requested_directed:
                pipeline.request_batch(batch_request)
            else:
                # a mismatching directedness must be rejected by the pipeline
                with pytest.raises(PipelineRequestError):
                    pipeline.request_batch(batch_request)
| 4,517 | 27.2375 | 88 | py |
gunpowder | gunpowder-master/tests/cases/rasterize_points.py | from .provider_test import ProviderTest
from gunpowder import (
BatchProvider,
BatchRequest,
Batch,
Roi,
Coordinate,
GraphSpec,
Array,
ArrayKey,
ArrayKeys,
ArraySpec,
RasterizeGraph,
RasterizationSettings,
build,
)
from gunpowder.graph import GraphKeys, GraphKey, Graph, Node, Edge
import numpy as np
import math
from random import randint
class GraphTestSource3D(BatchProvider):
    """Source with nodes at the corners and center of a cube, plus a GT
    array whose lower half is labeled differently, for rasterization tests."""

    def __init__(self):
        self.voxel_size = Coordinate((40, 4, 4))
        self.nodes = [
            # corners
            Node(id=1, location=np.array((-200, -200, -200))),
            Node(id=2, location=np.array((-200, -200, 199))),
            Node(id=3, location=np.array((-200, 199, -200))),
            Node(id=4, location=np.array((-200, 199, 199))),
            Node(id=5, location=np.array((199, -200, -200))),
            Node(id=6, location=np.array((199, -200, 199))),
            Node(id=7, location=np.array((199, 199, -200))),
            Node(id=8, location=np.array((199, 199, 199))),
            # center
            Node(id=9, location=np.array((0, 0, 0))),
            Node(id=10, location=np.array((-1, -1, -1))),
        ]
        # note: graph ROI is smaller than the array ROI
        self.graph_spec = GraphSpec(roi=Roi((-100, -100, -100), (300, 300, 300)))
        self.array_spec = ArraySpec(
            roi=Roi((-200, -200, -200), (400, 400, 400)), voxel_size=self.voxel_size
        )
        self.graph = Graph(self.nodes, [], self.graph_spec)

    def setup(self):
        self.provides(
            GraphKeys.TEST_GRAPH,
            self.graph_spec,
        )
        self.provides(
            ArrayKeys.GT_LABELS,
            self.array_spec,
        )

    def provide(self, request):
        batch = Batch()
        graph_roi = request[GraphKeys.TEST_GRAPH].roi
        batch.graphs[GraphKeys.TEST_GRAPH] = self.graph.crop(graph_roi).trim(graph_roi)
        roi_array = request[ArrayKeys.GT_LABELS].roi
        image = np.ones(roi_array.shape / self.voxel_size, dtype=np.uint64)
        # label half of GT_LABELS differently
        depth = image.shape[0]
        image[0 : depth // 2] = 2
        spec = self.spec[ArrayKeys.GT_LABELS].copy()
        spec.roi = roi_array
        batch.arrays[ArrayKeys.GT_LABELS] = Array(image, spec=spec)
        return batch
class GraphTestSourceWithEdge(BatchProvider):
    """Provides a two-node graph joined by a single edge along the z axis."""

    def __init__(self):
        self.voxel_size = Coordinate((1, 1, 1))
        self.nodes = [
            # corners
            Node(id=1, location=np.array((0, 4, 4))),
            Node(id=2, location=np.array((9, 4, 4))),
        ]
        self.edges = [Edge(1, 2)]
        self.graph_spec = GraphSpec(roi=Roi((0, 0, 0), (10, 10, 10)))
        self.graph = Graph(self.nodes, self.edges, self.graph_spec)

    def setup(self):
        self.provides(GraphKeys.TEST_GRAPH_WITH_EDGE, self.graph_spec)

    def provide(self, request):
        batch = Batch()
        roi = request[GraphKeys.TEST_GRAPH_WITH_EDGE].roi
        # crop to the ROI, then trim dangling geometry to its bounds
        batch.graphs[GraphKeys.TEST_GRAPH_WITH_EDGE] = self.graph.crop(roi).trim(roi)
        return batch
class TestRasterizePoints(ProviderTest):
    """Rasterization of point graphs into arrays with various settings."""

    def test_3d(self):
        """Rasterize corner/center nodes with default, inverted, inner-radius
        and anisotropic-radius settings, checking specific voxels."""
        GraphKey("TEST_GRAPH")
        ArrayKey("RASTERIZED")
        pipeline = GraphTestSource3D() + RasterizeGraph(
            GraphKeys.TEST_GRAPH,
            ArrayKeys.RASTERIZED,
            ArraySpec(voxel_size=(40, 4, 4)),
        )
        with build(pipeline):
            request = BatchRequest()
            roi = Roi((0, 0, 0), (200, 200, 200))
            request[GraphKeys.TEST_GRAPH] = GraphSpec(roi=roi)
            request[ArrayKeys.GT_LABELS] = ArraySpec(roi=roi)
            request[ArrayKeys.RASTERIZED] = ArraySpec(roi=roi)
            batch = pipeline.request_batch(request)
            rasterized = batch.arrays[ArrayKeys.RASTERIZED].data
            # default settings: foreground 1 at node voxels, background 0
            self.assertEqual(rasterized[0, 0, 0], 1)
            self.assertEqual(rasterized[2, 20, 20], 0)
            self.assertEqual(rasterized[4, 49, 49], 1)
        # same with different foreground/background labels
        pipeline = GraphTestSource3D() + RasterizeGraph(
            GraphKeys.TEST_GRAPH,
            ArrayKeys.RASTERIZED,
            ArraySpec(voxel_size=(40, 4, 4)),
            RasterizationSettings(radius=1, fg_value=0, bg_value=1),
        )
        with build(pipeline):
            request = BatchRequest()
            roi = Roi((0, 0, 0), (200, 200, 200))
            request[GraphKeys.TEST_GRAPH] = GraphSpec(roi=roi)
            request[ArrayKeys.GT_LABELS] = ArraySpec(roi=roi)
            request[ArrayKeys.RASTERIZED] = ArraySpec(roi=roi)
            batch = pipeline.request_batch(request)
            rasterized = batch.arrays[ArrayKeys.RASTERIZED].data
            # inverted labels: nodes 0, background 1
            self.assertEqual(rasterized[0, 0, 0], 0)
            self.assertEqual(rasterized[2, 20, 20], 1)
            self.assertEqual(rasterized[4, 49, 49], 0)
        # same with different radius and inner radius
        pipeline = GraphTestSource3D() + RasterizeGraph(
            GraphKeys.TEST_GRAPH,
            ArrayKeys.RASTERIZED,
            ArraySpec(voxel_size=(40, 4, 4)),
            RasterizationSettings(
                radius=40, inner_radius_fraction=0.25, fg_value=1, bg_value=0
            ),
        )
        with build(pipeline):
            request = BatchRequest()
            roi = Roi((0, 0, 0), (200, 200, 200))
            request[GraphKeys.TEST_GRAPH] = GraphSpec(roi=roi)
            request[ArrayKeys.GT_LABELS] = ArraySpec(roi=roi)
            request[ArrayKeys.RASTERIZED] = ArraySpec(roi=roi)
            batch = pipeline.request_batch(request)
            rasterized = batch.arrays[ArrayKeys.RASTERIZED].data
            # in the middle of the ball, there should be 0 (since inner radius is set)
            self.assertEqual(rasterized[0, 0, 0], 0)
            # check larger radius: rasterized point (0, 0, 0) should extend in
            # x,y by 10; z, by 1
            self.assertEqual(rasterized[0, 10, 0], 1)
            self.assertEqual(rasterized[0, 0, 10], 1)
            self.assertEqual(rasterized[1, 0, 0], 1)
            self.assertEqual(rasterized[2, 20, 20], 0)
            self.assertEqual(rasterized[4, 49, 49], 0)
        # same with anisotropic radius
        pipeline = GraphTestSource3D() + RasterizeGraph(
            GraphKeys.TEST_GRAPH,
            ArrayKeys.RASTERIZED,
            ArraySpec(voxel_size=(40, 4, 4)),
            RasterizationSettings(radius=(40, 40, 20), fg_value=1, bg_value=0),
        )
        with build(pipeline):
            request = BatchRequest()
            roi = Roi((0, 0, 0), (120, 80, 80))
            request[GraphKeys.TEST_GRAPH] = GraphSpec(roi=roi)
            request[ArrayKeys.GT_LABELS] = ArraySpec(roi=roi)
            request[ArrayKeys.RASTERIZED] = ArraySpec(roi=roi)
            batch = pipeline.request_batch(request)
            rasterized = batch.arrays[ArrayKeys.RASTERIZED].data
            # check larger radius: rasterized point (0, 0, 0) should extend in
            # x,y by 10; z, by 1
            self.assertEqual(rasterized[0, 10, 0], 1)
            self.assertEqual(rasterized[0, 11, 0], 0)
            self.assertEqual(rasterized[0, 0, 5], 1)
            self.assertEqual(rasterized[0, 0, 6], 0)
            self.assertEqual(rasterized[1, 0, 0], 1)
            self.assertEqual(rasterized[2, 0, 0], 0)
        # same with anisotropic radius and inner radius
        pipeline = GraphTestSource3D() + RasterizeGraph(
            GraphKeys.TEST_GRAPH,
            ArrayKeys.RASTERIZED,
            ArraySpec(voxel_size=(40, 4, 4)),
            RasterizationSettings(
                radius=(40, 40, 20), inner_radius_fraction=0.75, fg_value=1, bg_value=0
            ),
        )
        with build(pipeline):
            request = BatchRequest()
            roi = Roi((0, 0, 0), (120, 80, 80))
            request[GraphKeys.TEST_GRAPH] = GraphSpec(roi=roi)
            request[ArrayKeys.GT_LABELS] = ArraySpec(roi=roi)
            request[ArrayKeys.RASTERIZED] = ArraySpec(roi=roi)
            batch = pipeline.request_batch(request)
            rasterized = batch.arrays[ArrayKeys.RASTERIZED].data
            # in the middle of the ball, there should be 0 (since inner radius is set)
            self.assertEqual(rasterized[0, 0, 0], 0)
            # check larger radius: rasterized point (0, 0, 0) should extend in
            # x,y by 10; z, by 1
            self.assertEqual(rasterized[0, 10, 0], 1)
            self.assertEqual(rasterized[0, 11, 0], 0)
            self.assertEqual(rasterized[0, 0, 5], 1)
            self.assertEqual(rasterized[0, 0, 6], 0)
            self.assertEqual(rasterized[1, 0, 0], 1)
            self.assertEqual(rasterized[2, 0, 0], 0)

    def test_with_edge(self):
        """An edge between two nodes must rasterize as a line of voxels."""
        graph_with_edge = GraphKey("TEST_GRAPH_WITH_EDGE")
        array_with_edge = ArrayKey("RASTERIZED_EDGE")
        pipeline = GraphTestSourceWithEdge() + RasterizeGraph(
            GraphKeys.TEST_GRAPH_WITH_EDGE,
            ArrayKeys.RASTERIZED_EDGE,
            ArraySpec(voxel_size=(1, 1, 1)),
            settings=RasterizationSettings(0.5),
        )
        with build(pipeline):
            request = BatchRequest()
            roi = Roi((0, 0, 0), (10, 10, 10))
            request[GraphKeys.TEST_GRAPH_WITH_EDGE] = GraphSpec(roi=roi)
            request[ArrayKeys.RASTERIZED_EDGE] = ArraySpec(roi=roi)
            batch = pipeline.request_batch(request)
            rasterized = batch.arrays[ArrayKeys.RASTERIZED_EDGE].data
            # ten voxels along z connect the two nodes
            assert (
                rasterized.sum() == 10
            ), f"rasterized has ones at: {np.where(rasterized==1)}"
| 9,698 | 32.794425 | 87 | py |
gunpowder | gunpowder-master/tests/cases/dvid_source.py | from .provider_test import ProviderTest
from unittest import skipIf
from gunpowder import *
from gunpowder.ext import dvision, NoSuchModule
import numpy as np
import socket
import logging
logger = logging.getLogger(__name__)
DVID_SERVER = "slowpoke1"
def is_dvid_unavailable(server):
    """Return ``True`` if the DVID test server cannot be used.

    That is the case when the ``dvision`` client library is not installed
    (``dvision`` is then a ``NoSuchModule`` placeholder) or when *server*
    cannot be resolved via DNS.
    """
    if isinstance(dvision, NoSuchModule):
        return True
    try:
        socket.gethostbyname(server)
    except OSError:
        # socket.gaierror (raised on resolution failure) is a subclass of
        # OSError; this replaces the former catch-all `except Exception`
        return True
    return False
class TestDvidSource(ProviderTest):
    # Integration test against a live DVID server; skipped entirely when the
    # server or the dvision client library is unavailable.

    @skipIf(is_dvid_unavailable(DVID_SERVER), "DVID server not available")
    def test_output_3d(self):
        """Read raw, segmentation and mask volumes from DVID, write them to a
        snapshot file, and check the specs of the returned arrays."""
        # create array keys
        raw = ArrayKey("RAW")
        seg = ArrayKey("SEG")
        mask = ArrayKey("MASK")

        pipeline = DvidSource(
            DVID_SERVER,
            32768,
            "2ad1d8f0f172425c9f87b60fd97331e6",
            datasets={raw: "grayscale", seg: "groundtruth"},
            masks={mask: "seven_column"},
        ) + Snapshot(
            {
                raw: "/volumes/raw",
                seg: "/volumes/labels/neuron_ids",
                mask: "/volumes/labels/mask",
            },
            output_dir=self.path_to(),
            output_filename="dvid_source_test{id}-{iteration}.hdf",
        )

        with build(pipeline):
            # all three keys are requested over the same ROI
            batch = pipeline.request_batch(
                BatchRequest(
                    {
                        raw: ArraySpec(roi=Roi((33000, 15000, 20000), (32000, 8, 80))),
                        seg: ArraySpec(roi=Roi((33000, 15000, 20000), (32000, 8, 80))),
                        mask: ArraySpec(roi=Roi((33000, 15000, 20000), (32000, 8, 80))),
                    }
                )
            )

            # raw data is interpolatable, labels and masks are not
            self.assertTrue(batch.arrays[raw].spec.interpolatable)
            self.assertFalse(batch.arrays[seg].spec.interpolatable)
            self.assertFalse(batch.arrays[mask].spec.interpolatable)
            # all three datasets share voxel size (8, 8, 8)
            self.assertEqual(batch.arrays[raw].spec.voxel_size, (8, 8, 8))
            self.assertEqual(batch.arrays[seg].spec.voxel_size, (8, 8, 8))
            self.assertEqual(batch.arrays[mask].spec.voxel_size, (8, 8, 8))
| 2,158 | 31.223881 | 88 | py |
gunpowder | gunpowder-master/tests/cases/merge_provider.py | import unittest
from gunpowder import (
GraphSpec,
GraphKey,
Roi,
Coordinate,
Batch,
BatchProvider,
ArrayKeys,
ArrayKey,
ArraySpec,
Array,
BatchRequest,
MergeProvider,
RandomLocation,
build,
)
from gunpowder.graph import GraphKeys, Graph
from gunpowder.pipeline import PipelineSetupError
import numpy as np
class GraphTestSource(BatchProvider):
    """Test source that serves an empty PRESYN graph over a fixed ROI."""

    def __init__(self, voxel_size):
        self.voxel_size = voxel_size

    def setup(self):
        roi = Roi((0, 0, 0), (100, 100, 100))
        self.provides(GraphKeys.PRESYN, GraphSpec(roi=roi))

    def provide(self, request):
        requested_roi = request[GraphKeys.PRESYN].roi
        batch = Batch()
        # no nodes, no edges -- only the requested ROI is reflected back
        batch.graphs[GraphKeys.PRESYN] = Graph([], [], GraphSpec(roi=requested_roi))
        return batch
class ArrayTestSoure(BatchProvider):
    """Test source that serves a zero-filled GT_LABELS array over a fixed
    ROI, at the voxel size given at construction time."""

    def __init__(self, voxel_size):
        self.voxel_size = voxel_size

    def setup(self):
        roi = Roi((0, 0, 0), (100, 100, 100))
        self.provides(
            ArrayKeys.GT_LABELS,
            ArraySpec(roi=roi, voxel_size=self.voxel_size),
        )

    def provide(self, request):
        requested_roi = request[ArrayKeys.GT_LABELS].roi
        spec = self.spec[ArrayKeys.GT_LABELS].copy()
        spec.roi = requested_roi
        # array shape is the ROI measured in voxels
        zeros = np.zeros(requested_roi.shape / spec.voxel_size)
        batch = Batch()
        batch.arrays[ArrayKeys.GT_LABELS] = Array(zeros, spec)
        return batch
class TestMergeProvider(unittest.TestCase):
    def test_merge_basics(self):
        """MergeProvider combines a graph source and an array source into one
        upstream provider: requesting both keys, or only one of them, must
        work, while two providers offering the same key must fail at build
        time with a PipelineSetupError."""
        voxel_size = (1, 1, 1)
        GraphKey("PRESYN")
        ArrayKey("GT_LABELS")
        graphsource = GraphTestSource(voxel_size)
        arraysource = ArrayTestSoure(voxel_size)
        pipeline = (graphsource, arraysource) + MergeProvider() + RandomLocation()
        window_request = Coordinate((50, 50, 50))
        with build(pipeline):
            # Check basic merging.
            request = BatchRequest()
            request.add((GraphKeys.PRESYN), window_request)
            request.add((ArrayKeys.GT_LABELS), window_request)
            batch_res = pipeline.request_batch(request)
            self.assertTrue(ArrayKeys.GT_LABELS in batch_res.arrays)
            self.assertTrue(GraphKeys.PRESYN in batch_res.graphs)
            # Check that request of only one source also works.
            request = BatchRequest()
            request.add((GraphKeys.PRESYN), window_request)
            batch_res = pipeline.request_batch(request)
            self.assertFalse(ArrayKeys.GT_LABELS in batch_res.arrays)
            self.assertTrue(GraphKeys.PRESYN in batch_res.graphs)
        # Check that it fails, when having two sources that provide the same type.
        arraysource2 = ArrayTestSoure(voxel_size)
        pipeline_fail = (arraysource, arraysource2) + MergeProvider() + RandomLocation()
        with self.assertRaises(PipelineSetupError):
            with build(pipeline_fail):
                pass
| 2,949 | 31.777778 | 88 | py |
gunpowder | gunpowder-master/tests/cases/add_affinities.py | from .provider_test import ProviderTest
from gunpowder import *
from itertools import product
from unittest import skipIf
import itertools
import numpy as np
import logging
class ExampleSource(BatchProvider):
    """Test source providing random binary GT_LABELS and GT_MASK arrays with
    identical (anisotropic) specs."""

    def setup(self):
        # both keys share the same ROI, voxel size and interpolatability
        for key in (ArrayKeys.GT_LABELS, ArrayKeys.GT_MASK):
            self.provides(
                key,
                ArraySpec(
                    roi=Roi((-40, -40, -40), (160, 160, 160)),
                    voxel_size=(20, 4, 8),
                    interpolatable=False,
                ),
            )

    def provide(self, request):
        batch = Batch()
        for key in (ArrayKeys.GT_LABELS, ArrayKeys.GT_MASK):
            requested_roi = request[key].roi
            spec = self.spec[key].copy()
            spec.roi = requested_roi
            # random 0/1 content, shaped in voxels
            shape = (requested_roi / spec.voxel_size).get_shape()
            batch.arrays[key] = Array(np.random.randint(0, 2, shape), spec)
        return batch
def test_output():
    """Check AddAffinities against a brute-force voxel-wise recomputation.

    For each voxel and each neighborhood offset, the affinity must be 1 iff
    both endpoints carry the same non-zero label, and the affinity mask must
    be 0 if either endpoint is masked out. A second request without the
    labels/mask keys checks that they are fetched upstream but not included
    in the returned batch.
    """
    labels_key = ArrayKey("GT_LABELS")
    mask_key = ArrayKey("GT_MASK")
    affs_key = ArrayKey("GT_AFFINITIES")
    affs_mask_key = ArrayKey("GT_AFFINITIES_MASK")
    # mixed positive/negative offsets, including a diagonal one
    neighborhood = [
        Coordinate((-2, 0, 0)),
        Coordinate((0, -1, 0)),
        Coordinate((0, 0, 1)),
        Coordinate((1, 1, 1)),
    ]
    pipeline = ExampleSource() + AddAffinities(
        neighborhood,
        labels=labels_key,
        labels_mask=mask_key,
        affinities=affs_key,
        affinities_mask=affs_mask_key,
    )
    with build(pipeline):
        for i in range(10):
            request = BatchRequest()
            request.add(labels_key, (100, 16, 64))
            request.add(mask_key, (100, 16, 64))
            request.add(affs_key, (100, 16, 64))
            request.add(affs_mask_key, (100, 16, 64))
            batch = pipeline.request_batch(request)
            assert labels_key in batch.arrays
            assert mask_key in batch.arrays
            assert affs_key in batch.arrays
            assert affs_mask_key in batch.arrays
            labels = batch.arrays[labels_key]
            labels_mask = batch.arrays[mask_key]
            affs = batch.arrays[affs_key]
            affs_mask = batch.arrays[affs_mask_key]
            # affinities have one extra leading channel dim per offset
            assert (len(neighborhood),) + labels.data.shape == affs.data.shape
            voxel_roi = Roi((0, 0, 0), labels.data.shape)
            # brute-force recomputation over every voxel and offset
            for z, y, x in product(*[range(d) for d in labels.data.shape]):
                p = Coordinate((z, y, x))
                for n in range(len(neighborhood)):
                    pn = p + neighborhood[n]
                    if not voxel_roi.contains(pn):
                        # neighbor falls outside the array, nothing to check
                        continue
                    a = labels.data[p]
                    b = labels.data[pn]
                    masked = labels_mask.data[p] == 0 or labels_mask.data[pn] == 0
                    if a == b and a != 0 and b != 0:
                        assert (
                            affs.data[(n,) + p] == 1.0
                        ), "%s -> %s, %s -> %s, but is not 1" % (p, pn, a, b)
                    else:
                        assert (
                            affs.data[(n,) + p] == 0.0
                        ), "%s -> %s, %s -> %s, but is not 0" % (p, pn, a, b)
                    if masked:
                        assert (
                            affs_mask.data[(n,) + p] == 0.0
                        ), "%s or %s are masked, but mask is not 0" % (p, pn)
        # requesting only affinities: labels/mask are used upstream but must
        # not appear in the returned batch
        request = BatchRequest()
        request.add(affs_key, (100, 16, 64))
        request.add(affs_mask_key, (100, 16, 64))
        batch = pipeline.request_batch(request)
        assert labels_key not in batch.arrays
        assert mask_key not in batch.arrays
        assert affs_key in batch.arrays
        assert affs_mask_key in batch.arrays
| 4,197 | 31.796875 | 87 | py |
gunpowder | gunpowder-master/tests/cases/random_location_points.py | from .provider_test import ProviderTest
from gunpowder import (
BatchProvider,
BatchRequest,
Batch,
Node,
Graph,
GraphSpec,
GraphKey,
GraphKeys,
RandomLocation,
build,
Roi,
Coordinate,
)
import numpy as np
import pytest
import unittest
class ExampleSourceRandomLocation(BatchProvider):
    """Test source with three nodes: one near the origin and two close
    together around (500, 500, 500)."""

    def __init__(self):
        nodes = [
            Node(1, np.array([1, 1, 1])),
            Node(2, np.array([500, 500, 500])),
            Node(3, np.array([550, 550, 550])),
        ]
        spec = GraphSpec(roi=Roi((-500, -500, -500), (1500, 1500, 1500)))
        self.graph = Graph(nodes, [], spec)

    def setup(self):
        self.provides(GraphKeys.TEST_POINTS, self.graph.spec)

    def provide(self, request):
        requested_roi = request[GraphKeys.TEST_POINTS].roi
        batch = Batch()
        # crop to the requested ROI and drop nodes outside of it
        batch[GraphKeys.TEST_POINTS] = self.graph.crop(requested_roi).trim(
            requested_roi
        )
        return batch
class TestRandomLocationPoints(ProviderTest):
    """Tests for RandomLocation with ensure_nonempty on a point source.

    Each test draws many random batches and histograms which point ids were
    returned, expecting roughly uniform counts across points.
    """

    @pytest.mark.xfail
    def test_output(self):
        """
        Fails due to probabilities being calculated in advance, rather than after creating
        each roi. The new approach does not account for all possible roi's containing
        each point, some of which may not contain its nearest neighbors.
        """

        GraphKey("TEST_POINTS")

        pipeline = ExampleSourceRandomLocation() + RandomLocation(
            ensure_nonempty=GraphKeys.TEST_POINTS, point_balance_radius=100
        )

        # count the number of times we get each point
        histogram = {}

        with build(pipeline):
            for i in range(5000):
                batch = pipeline.request_batch(
                    BatchRequest(
                        {
                            GraphKeys.TEST_POINTS: GraphSpec(
                                roi=Roi((0, 0, 0), (100, 100, 100))
                            )
                        }
                    )
                )

                points = {node.id: node for node in batch[GraphKeys.TEST_POINTS].nodes}

                self.assertTrue(len(points) > 0)
                # the ROI is small enough that point 1 can never co-occur
                # with points 2 or 3
                self.assertTrue((1 in points) != (2 in points or 3 in points), points)

                for node in batch[GraphKeys.TEST_POINTS].nodes:
                    if node.id not in histogram:
                        histogram[node.id] = 1
                    else:
                        histogram[node.id] += 1

        total = sum(histogram.values())
        for k, v in histogram.items():
            histogram[k] = float(v) / total

        # we should get roughly the same count for each point
        for i in histogram.keys():
            for j in histogram.keys():
                self.assertAlmostEqual(histogram[i], histogram[j], 1)

    def test_equal_probability(self):
        GraphKey("TEST_POINTS")

        pipeline = ExampleSourceRandomLocation() + RandomLocation(
            ensure_nonempty=GraphKeys.TEST_POINTS
        )

        # count the number of times we get each point
        histogram = {}

        with build(pipeline):
            for i in range(5000):
                batch = pipeline.request_batch(
                    BatchRequest(
                        {
                            GraphKeys.TEST_POINTS: GraphSpec(
                                roi=Roi((0, 0, 0), (10, 10, 10))
                            )
                        }
                    )
                )

                points = {node.id: node for node in batch[GraphKeys.TEST_POINTS].nodes}

                self.assertTrue(len(points) > 0)
                self.assertTrue((1 in points) != (2 in points or 3 in points), points)

                for point in batch[GraphKeys.TEST_POINTS].nodes:
                    if point.id not in histogram:
                        histogram[point.id] = 1
                    else:
                        histogram[point.id] += 1

        total = sum(histogram.values())
        for k, v in histogram.items():
            histogram[k] = float(v) / total

        # we should get roughly the same count for each point
        for i in histogram.keys():
            for j in histogram.keys():
                self.assertAlmostEqual(histogram[i], histogram[j], 1)

    @unittest.expectedFailure
    def test_ensure_centered(self):
        """
        Expected failure due to emergent behavior of two desired rules:
        1) Points on the upper bound of Roi are not considered contained
        2) When considering a point as a center of a random location,
            scale by the number of points within some delta distance

        if two points are equally likely to be chosen, and centering
        a roi on either of them means the other is on the bounding box
        of the roi, then it can be the case that if the roi is centered
        one of them, the roi contains only that one, but if the roi is
        centered on the second, then both are considered contained,
        breaking the equal likelihood of picking each point.
        """

        GraphKey("TEST_POINTS")

        pipeline = ExampleSourceRandomLocation() + RandomLocation(
            ensure_nonempty=GraphKeys.TEST_POINTS, ensure_centered=True
        )

        # count the number of times we get each point
        histogram = {}

        with build(pipeline):
            for i in range(5000):
                batch = pipeline.request_batch(
                    BatchRequest(
                        {
                            GraphKeys.TEST_POINTS: GraphSpec(
                                roi=Roi((0, 0, 0), (100, 100, 100))
                            )
                        }
                    )
                )

                points = batch[GraphKeys.TEST_POINTS].data
                roi = batch[GraphKeys.TEST_POINTS].spec.roi

                locations = tuple(
                    [Coordinate(point.location) for point in points.values()]
                )
                # with ensure_centered, one point must sit at the ROI center
                self.assertTrue(
                    Coordinate([50, 50, 50]) in locations,
                    f"locations: {tuple([point.location for point in points.values()])}",
                )

                self.assertTrue(len(points) > 0)
                self.assertTrue((1 in points) != (2 in points or 3 in points), points)

                for point_id in batch[GraphKeys.TEST_POINTS].data.keys():
                    if point_id not in histogram:
                        histogram[point_id] = 1
                    else:
                        # BUGFIX: previously incremented histogram[node.id],
                        # where `node` is undefined in this scope
                        histogram[point_id] += 1

        total = sum(histogram.values())
        for k, v in histogram.items():
            histogram[k] = float(v) / total

        # we should get roughly the same count for each point
        for i in histogram.keys():
            for j in histogram.keys():
                self.assertAlmostEqual(histogram[i], histogram[j], 1, histogram)
| 6,872 | 32.857143 | 90 | py |
gunpowder | gunpowder-master/tests/cases/add_boundary_distance_gradients.py | from .provider_test import ProviderTest
from gunpowder import *
from gunpowder.contrib import AddBoundaryDistanceGradients
import numpy as np
class ExampleSource(BatchProvider):
    """Test source whose GT_LABELS volume is split into eight octants, each
    carrying a distinct label value (1, 3, 5, ..., 15)."""

    def setup(self):
        self.provides(
            ArrayKeys.GT_LABELS,
            ArraySpec(
                roi=Roi((-40, -40, -40), (160, 160, 160)),
                voxel_size=(20, 4, 8),
                interpolatable=False,
            ),
        )

    def provide(self, request):
        requested_roi = request[ArrayKeys.GT_LABELS].roi
        spec = self.spec[ArrayKeys.GT_LABELS].copy()
        spec.roi = requested_roi

        # a unique label per octant: start with ones, then add 2/4/8 in the
        # upper half of each axis
        shape = (requested_roi / spec.voxel_size).shape
        labels = np.ones(shape)
        labels[shape[0] // 2 :, :, :] += 2
        labels[:, shape[1] // 2 :, :] += 4
        labels[:, :, shape[2] // 2 :] += 8

        batch = Batch()
        batch.arrays[ArrayKeys.GT_LABELS] = Array(labels, spec)
        return batch
class TestAddBoundaryDistanceGradients(ProviderTest):
    def test_output(self):
        """Boundary distances and gradients must be symmetric across the
        octant boundaries of the label volume produced by ExampleSource."""
        ArrayKey("GT_BOUNDARY_DISTANCES")
        ArrayKey("GT_BOUNDARY_GRADIENTS")

        pipeline = ExampleSource() + AddBoundaryDistanceGradients(
            label_array_key=ArrayKeys.GT_LABELS,
            distance_array_key=ArrayKeys.GT_BOUNDARY_DISTANCES,
            gradient_array_key=ArrayKeys.GT_BOUNDARY_GRADIENTS,
        )

        with build(pipeline):
            request = BatchRequest()
            request.add(ArrayKeys.GT_LABELS, (120, 16, 64))
            request.add(ArrayKeys.GT_BOUNDARY_DISTANCES, (120, 16, 64))
            request.add(ArrayKeys.GT_BOUNDARY_GRADIENTS, (120, 16, 64))

            batch = pipeline.request_batch(request)

            labels = batch.arrays[ArrayKeys.GT_LABELS].data
            distances = batch.arrays[ArrayKeys.GT_BOUNDARY_DISTANCES].data
            gradients = batch.arrays[ArrayKeys.GT_BOUNDARY_GRADIENTS].data
            shape = distances.shape

            # slices of two octants that differ only in the first axis
            l_001 = labels[: shape[0] // 2, : shape[1] // 2, shape[2] // 2 :]
            l_101 = labels[shape[0] // 2 :, : shape[1] // 2, shape[2] // 2 :]
            d_001 = distances[: shape[0] // 2, : shape[1] // 2, shape[2] // 2 :]
            d_101 = distances[shape[0] // 2 :, : shape[1] // 2, shape[2] // 2 :]
            g_001 = gradients[:, : shape[0] // 2, : shape[1] // 2, shape[2] // 2 :]
            g_101 = gradients[:, shape[0] // 2 :, : shape[1] // 2, shape[2] // 2 :]

            # gradients in mirrored octants must agree
            self.assertTrue((g_001 == g_101).all())

            # the gradient field must be symmetric about the center of the
            # first axis
            top = gradients[:, 0 : shape[0] // 2, :]
            bot = gradients[:, shape[0] : shape[0] // 2 - 1 : -1, :]

            self.assertTrue((top == bot).all())
| 2,932 | 33.505882 | 83 | py |
gunpowder | gunpowder-master/tests/cases/batch.py | import logging
import numpy as np
from gunpowder import (
Array,
ArrayKey,
ArraySpec,
Batch,
Coordinate,
Roi,
)
logger = logging.getLogger(__name__)
def test_get_total_roi_nonspatial_array():
    """Nonspatial arrays must be ignored when computing a batch's total ROI."""
    raw_key = ArrayKey("RAW")
    nonspatial_key = ArrayKey("NONSPATIAL")

    roi = Roi((100, 200), (20, 20))

    batch = Batch()
    batch[raw_key] = Array(
        data=np.zeros((20, 10)),
        spec=ArraySpec(roi=roi, voxel_size=Coordinate((1, 2))),
    )
    batch[nonspatial_key] = Array(
        data=np.zeros((2, 3)),
        spec=ArraySpec(nonspatial=True),
    )

    # only the spatial array contributes to the total ROI
    assert batch.get_total_roi() == roi
| 666 | 20.516129 | 74 | py |
gunpowder | gunpowder-master/tests/cases/deform_augment.py | from gunpowder import (
BatchProvider,
GraphSpec,
Roi,
Coordinate,
ArraySpec,
Batch,
Array,
ArrayKey,
GraphKey,
BatchRequest,
DeformAugment,
build,
)
from gunpowder.graph import Graph, Node
from scipy.ndimage import center_of_mass
import pytest
import numpy as np
class GraphTestSource3D(BatchProvider):
    """Test source providing six nodes along the y-axis plus two label
    arrays (with different voxel sizes) in which each node is drawn as a
    small block of its node id."""

    def __init__(self, graph_key: GraphKey, array_key: ArrayKey, array_key2: ArrayKey):
        self.graph_key = graph_key
        self.array_key = array_key
        self.array_key2 = array_key2

    def setup(self):
        # nodes spaced 10 apart along y, offset by 0.5
        self.nodes = [
            Node(id=1, location=np.array([0, 0.5, 0])),
            Node(id=2, location=np.array([0, 10.5, 0])),
            Node(id=3, location=np.array([0, 20.5, 0])),
            Node(id=4, location=np.array([0, 30.5, 0])),
            Node(id=5, location=np.array([0, 40.5, 0])),
            Node(id=6, location=np.array([0, 50.5, 0])),
        ]

        self.provides(
            self.graph_key,
            GraphSpec(roi=Roi((-100, -100, -100), (200, 200, 200))),
        )
        # two arrays over the same ROI but with different anisotropic
        # voxel sizes
        self.provides(
            self.array_key,
            ArraySpec(
                roi=Roi((-100, -100, -100), (200, 200, 200)),
                voxel_size=Coordinate((4, 1, 1)),
                interpolatable=False,
            ),
        )
        self.provides(
            self.array_key2,
            ArraySpec(
                roi=Roi((-100, -100, -100), (200, 200, 200)),
                voxel_size=Coordinate((1, 2, 1)),
                interpolatable=False,
            ),
        )

    def node_to_voxel(self, array_roi, voxel_size, location):
        """Return a slice tuple (3 voxels wide per dimension) centered on
        *location*, in voxel coordinates relative to *array_roi*."""
        # location is in world units, get it into voxels
        location = location / voxel_size

        # shift location relative to beginning of array roi
        location -= array_roi.begin / voxel_size

        return tuple(slice(int(l - 1), int(l + 2)) for l in location)

    def provide(self, request):
        batch = Batch()

        roi_graph = request[self.graph_key].roi
        roi_voxel = request[self.array_key].roi // self.spec[self.array_key].voxel_size

        # draw each node into the first array as a block of its id
        data = np.zeros(
            (request[self.array_key].roi // self.spec[self.array_key].voxel_size).shape,
            dtype=np.uint32,
        )
        for node in self.nodes:
            loc = self.node_to_voxel(
                request[self.array_key].roi,
                self.spec[self.array_key].voxel_size,
                node.location,
            )
            data[loc] = node.id

        # same for the second array, at its own voxel size
        data2 = np.zeros(
            (
                request[self.array_key2].roi // self.spec[self.array_key2].voxel_size
            ).shape,
            dtype=np.uint32,
        )
        for node in self.nodes:
            loc = self.node_to_voxel(
                request[self.array_key2].roi,
                self.spec[self.array_key2].voxel_size,
                node.location,
            )
            data2[loc] = node.id

        spec = self.spec[self.array_key].copy()
        spec.roi = request[self.array_key].roi
        batch.arrays[self.array_key] = Array(data, spec=spec)

        spec2 = self.spec[self.array_key2].copy()
        spec2.roi = request[self.array_key2].roi
        batch.arrays[self.array_key2] = Array(data2, spec=spec2)

        # only nodes inside the requested graph ROI are returned
        nodes = []
        for node in self.nodes:
            if roi_graph.contains(node.location):
                nodes.append(node)
        batch.graphs[self.graph_key] = Graph(
            nodes=nodes, edges=[], spec=GraphSpec(roi=roi_graph)
        )

        return batch
@pytest.mark.parametrize("rotate", [True, False])
@pytest.mark.parametrize("spatial_dims", [2, 3])
@pytest.mark.parametrize("fast_points", [True, False])
def test_3d_basics(rotate, spatial_dims, fast_points):
    """DeformAugment must move rasterized node ids together with the graph:
    after augmentation, each node's location must lie close (within two
    voxel-size diagonals) to the center of mass of its id in both arrays."""
    test_labels = ArrayKey("TEST_LABELS")
    test_labels2 = ArrayKey("TEST_LABELS2")
    test_graph = GraphKey("TEST_GRAPH")

    pipeline = GraphTestSource3D(test_graph, test_labels, test_labels2) + DeformAugment(
        [10] * spatial_dims,
        [1] * spatial_dims,
        graph_raster_voxel_size=[1] * spatial_dims,
        rotate=rotate,
        spatial_dims=spatial_dims,
        use_fast_points_transform=fast_points,
    )

    # repeat to cover several random deformations
    for _ in range(5):
        with build(pipeline):
            request_roi = Roi((-20, -20, -20), (40, 40, 40))
            request = BatchRequest()
            request[test_labels] = ArraySpec(roi=request_roi)
            # second array is requested at half the ROI
            request[test_labels2] = ArraySpec(roi=request_roi / 2)
            request[test_graph] = GraphSpec(roi=request_roi)

            batch = pipeline.request_batch(request)
            labels = batch[test_labels]
            labels2 = batch[test_labels2]
            graph = batch[test_graph]

            assert Node(id=1, location=np.array([0, 0, 0])) in list(graph.nodes)

            # graph should have moved together with the voxels
            for node in graph.nodes:
                loc = node.location
                if labels.spec.roi.contains(loc):
                    loc = (loc - labels.spec.roi.begin) / labels.spec.voxel_size
                    loc = np.array(loc)
                    com = center_of_mass(labels.data == node.id)
                    assert (
                        np.linalg.norm(com - loc)
                        < np.linalg.norm(labels.spec.voxel_size) * 2
                    ), (com, loc)

                loc2 = node.location
                if labels2.spec.roi.contains(loc2):
                    loc2 = (loc2 - labels2.spec.roi.begin) / labels2.spec.voxel_size
                    loc2 = np.array(loc2)
                    com2 = center_of_mass(labels2.data == node.id)
                    assert (
                        np.linalg.norm(com2 - loc2)
                        < np.linalg.norm(labels2.spec.voxel_size) * 2
                    ), (com2, loc2)
| 5,826 | 31.920904 | 88 | py |
gunpowder | gunpowder-master/tests/cases/snapshot.py | from gunpowder import (
GraphKey,
GraphKeys,
GraphSpec,
Graph,
ArrayKey,
ArrayKeys,
ArraySpec,
Array,
RasterizeGraph,
Snapshot,
BatchProvider,
BatchRequest,
Batch,
Coordinate,
Roi,
build,
)
import numpy as np
import unittest
import tempfile
import shutil
from pathlib import Path
import h5py
class ExampleSource(BatchProvider):
    """Source serving empty graphs and zero-filled arrays.

    It also asserts that the TEST_GRAPH key is only part of the request on
    every ``every``-th batch (merged in by Snapshot's additional_request).
    """

    def __init__(self, keys, specs, every=2):
        self.keys = keys
        self.specs = specs
        self.n = 0
        self.every = every

    def setup(self):
        for key, spec in zip(self.keys, self.specs):
            self.provides(key, spec)

    def provide(self, request):
        # the graph key must appear exactly on every `every`-th request
        expect_graph = self.n % self.every == 0
        assert (GraphKeys.TEST_GRAPH in request) == expect_graph
        self.n += 1

        batch = Batch()
        for key, spec in request.items():
            if isinstance(key, GraphKey):
                batch[key] = Graph([], [], spec)
            if isinstance(key, ArrayKey):
                spec.voxel_size = self.spec[key].voxel_size
                batch[key] = Array(np.zeros(spec.roi.shape, dtype=spec.dtype), spec)
        return batch
class TestSnapshot(unittest.TestCase):
    """Check that Snapshot writes a file only on every 2nd request and merges
    its additional graph request into those iterations."""

    def setUp(self):
        # fresh scratch directory for each test
        self.test_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.test_dir)

    def test_3d(self):
        test_graph = GraphKey("TEST_GRAPH")
        graph_spec = GraphSpec(roi=Roi((0, 0, 0), (5, 5, 5)))
        test_array = ArrayKey("TEST_ARRAY")
        array_spec = ArraySpec(
            roi=Roi((0, 0, 0), (5, 5, 5)), voxel_size=Coordinate((1, 1, 1))
        )
        test_array2 = ArrayKey("TEST_ARRAY2")
        array2_spec = ArraySpec(
            roi=Roi((0, 0, 0), (5, 5, 5)), voxel_size=Coordinate((1, 1, 1))
        )

        # the graph is only requested via Snapshot's additional_request
        snapshot_request = BatchRequest()
        snapshot_request.add(test_graph, Coordinate((5, 5, 5)))

        pipeline = ExampleSource(
            [test_graph, test_array, test_array2], [graph_spec, array_spec, array2_spec]
        ) + Snapshot(
            {
                test_graph: "graphs/graph",
                test_array: "volumes/array",
                test_array2: "volumes/array2",
            },
            output_dir=str(self.test_dir),
            every=2,
            additional_request=snapshot_request,
            output_filename="snapshot.hdf",
        )

        snapshot_file_path = Path(self.test_dir, "snapshot.hdf")

        with build(pipeline):
            request = BatchRequest()
            roi = Roi((0, 0, 0), (5, 5, 5))

            request[test_array] = ArraySpec(roi=roi)
            request[test_array2] = ArraySpec(roi=roi)

            # first request (iteration 0): a snapshot must be written
            pipeline.request_batch(request)

            assert snapshot_file_path.exists()
            # BUGFIX: open via context manager so the handle is closed
            # before the file is unlinked (it was previously leaked);
            # read-only mode suffices since we only inspect datasets
            with h5py.File(snapshot_file_path, "r") as f:
                assert f["volumes/array"] is not None
                assert f["graphs/graph-ids"] is not None
            snapshot_file_path.unlink()

            # second request (iteration 1): no snapshot must be written
            pipeline.request_batch(request)
            assert not snapshot_file_path.exists()
| 3,115 | 26.821429 | 88 | py |
gunpowder | gunpowder-master/tests/cases/simple_augment.py | from gunpowder import (
Batch,
BatchProvider,
BatchRequest,
Array,
ArrayKey,
ArraySpec,
Graph,
GraphKey,
GraphSpec,
Node,
Coordinate,
Roi,
SimpleAugment,
MergeProvider,
build,
)
import numpy as np
from .helper_sources import GraphSource, ArraySource
def test_mirror():
    """SimpleAugment with mirror probability 0 must leave the node and its
    array voxel untouched; with probability 1 on both axes it must mirror
    (450, 550) to (550, 750) within the requested ROI, moving the array
    content along with it."""
    voxel_size = Coordinate((20, 20))
    graph_key = GraphKey("GRAPH")
    array_key = ArrayKey("ARRAY")
    # a single node, and an array with a single 1 at the matching voxel
    graph = Graph(
        [Node(id=1, location=np.array([450, 550]))],
        [],
        GraphSpec(roi=Roi((100, 200), (800, 600))),
    )
    data = np.zeros([40, 30])
    data[17, 17] = 1
    array = Array(
        data, ArraySpec(roi=Roi((100, 200), (800, 600)), voxel_size=voxel_size)
    )

    default_pipeline = (
        (GraphSource(graph_key, graph), ArraySource(array_key, array))
        + MergeProvider()
        + SimpleAugment(mirror_only=[0, 1], transpose_only=[], mirror_probs=[0, 0])
    )

    mirror_pipeline = (
        (GraphSource(graph_key, graph), ArraySource(array_key, array))
        + MergeProvider()
        + SimpleAugment(mirror_only=[0, 1], transpose_only=[], mirror_probs=[1, 1])
    )

    request = BatchRequest()
    request[graph_key] = GraphSpec(roi=Roi((400, 500), (200, 300)))
    request[array_key] = ArraySpec(roi=Roi((400, 500), (200, 300)))
    with build(default_pipeline):
        # no mirroring: node stays where it is
        expected_location = [450, 550]
        batch = default_pipeline.request_batch(request)

        assert len(list(batch[graph_key].nodes)) == 1
        node = list(batch[graph_key].nodes)[0]
        assert all(np.isclose(node.location, expected_location))
        node_voxel_index = Coordinate(
            (node.location - batch[array_key].spec.roi.offset) / voxel_size
        )
        assert batch[array_key].data[node_voxel_index] == 1

    with build(mirror_pipeline):
        # mirrored across both axes of the requested ROI
        expected_location = [550, 750]
        batch = mirror_pipeline.request_batch(request)

        assert len(list(batch[graph_key].nodes)) == 1
        node = list(batch[graph_key].nodes)[0]
        assert all(np.isclose(node.location, expected_location))
        node_voxel_index = Coordinate(
            (node.location - batch[array_key].spec.roi.offset) / voxel_size
        )
        assert (
            batch[array_key].data[node_voxel_index] == 1
        ), f"Node at {np.where(batch[array_key].data == 1)} not {node_voxel_index}"
def test_transpose():
    """SimpleAugment with transpose probability 0 must leave the node and
    its array voxel untouched; with probability 1 on both axes it must
    transpose (450, 550) to (410, 590) within the requested ROI, moving the
    array content along with it."""
    voxel_size = Coordinate((20, 20))
    graph_key = GraphKey("GRAPH")
    array_key = ArrayKey("ARRAY")
    # a single node, and an array with a single 1 at the matching voxel
    graph = Graph(
        [Node(id=1, location=np.array([450, 550]))],
        [],
        GraphSpec(roi=Roi((100, 200), (800, 600))),
    )
    data = np.zeros([40, 30])
    data[17, 17] = 1
    array = Array(
        data, ArraySpec(roi=Roi((100, 200), (800, 600)), voxel_size=voxel_size)
    )

    default_pipeline = (
        (GraphSource(graph_key, graph), ArraySource(array_key, array))
        + MergeProvider()
        + SimpleAugment(mirror_only=[], transpose_only=[0, 1], transpose_probs=[0, 0])
    )

    transpose_pipeline = (
        (GraphSource(graph_key, graph), ArraySource(array_key, array))
        + MergeProvider()
        + SimpleAugment(mirror_only=[], transpose_only=[0, 1], transpose_probs=[1, 1])
    )

    request = BatchRequest()
    request[graph_key] = GraphSpec(roi=Roi((400, 500), (200, 300)))
    request[array_key] = ArraySpec(roi=Roi((400, 500), (200, 300)))
    with build(default_pipeline):
        # no transpose: node stays where it is
        expected_location = [450, 550]
        batch = default_pipeline.request_batch(request)

        assert len(list(batch[graph_key].nodes)) == 1
        node = list(batch[graph_key].nodes)[0]
        assert all(np.isclose(node.location, expected_location))
        node_voxel_index = Coordinate(
            (node.location - batch[array_key].spec.roi.offset) / voxel_size
        )
        assert (
            batch[array_key].data[node_voxel_index] == 1
        ), f"Node at {np.where(batch[array_key].data == 1)} not {node_voxel_index}"

    with build(transpose_pipeline):
        # axes swapped around the ROI center
        expected_location = [410, 590]
        batch = transpose_pipeline.request_batch(request)

        assert len(list(batch[graph_key].nodes)) == 1
        node = list(batch[graph_key].nodes)[0]
        assert all(np.isclose(node.location, expected_location))
        node_voxel_index = Coordinate(
            (node.location - batch[array_key].spec.roi.offset) / voxel_size
        )
        assert (
            batch[array_key].data[node_voxel_index] == 1
        ), f"Node at {np.where(batch[array_key].data == 1)} not {node_voxel_index}"
def test_mirror_and_transpose():
    """Combining mirror and transpose: with the identity permutation and no
    mirroring, everything stays in place; with a forced axis swap plus a
    mirror of axis 1, the node (450, 550) maps to (590, 590)."""
    voxel_size = Coordinate((20, 20))
    graph_key = GraphKey("GRAPH")
    array_key = ArrayKey("ARRAY")
    # a single node, and an array with a single 1 at the matching voxel
    graph = Graph(
        [Node(id=1, location=np.array([450, 550]))],
        [],
        GraphSpec(roi=Roi((100, 200), (800, 600))),
    )
    data = np.zeros([40, 30])
    data[17, 17] = 1
    array = Array(
        data, ArraySpec(roi=Roi((100, 200), (800, 600)), voxel_size=voxel_size)
    )

    default_pipeline = (
        (GraphSource(graph_key, graph), ArraySource(array_key, array))
        + MergeProvider()
        + SimpleAugment(
            mirror_only=[0, 1],
            transpose_only=[0, 1],
            mirror_probs=[0, 0],
            # identity permutation with probability 1
            transpose_probs={(0, 1): 1},
        )
    )

    augmented_pipeline = (
        (GraphSource(graph_key, graph), ArraySource(array_key, array))
        + MergeProvider()
        + SimpleAugment(
            mirror_only=[0, 1],
            transpose_only=[0, 1],
            mirror_probs=[0, 1],
            # forced axis swap with probability 1
            transpose_probs={(1, 0): 1},
        )
    )

    request = BatchRequest()
    request[graph_key] = GraphSpec(roi=Roi((400, 500), (200, 300)))
    request[array_key] = ArraySpec(roi=Roi((400, 500), (200, 300)))
    with build(default_pipeline):
        # identity transpose, no mirror: node stays where it is
        expected_location = [450, 550]
        batch = default_pipeline.request_batch(request)

        assert len(list(batch[graph_key].nodes)) == 1
        node = list(batch[graph_key].nodes)[0]
        assert all(np.isclose(node.location, expected_location))
        node_voxel_index = Coordinate(
            (node.location - batch[array_key].spec.roi.offset) / voxel_size
        )
        assert batch[array_key].data[node_voxel_index] == 1

    with build(augmented_pipeline):
        expected_location = [590, 590]
        batch = augmented_pipeline.request_batch(request)

        assert len(list(batch[graph_key].nodes)) == 1
        node = list(batch[graph_key].nodes)[0]
        assert all(np.isclose(node.location, expected_location))
        node_voxel_index = Coordinate(
            (np.array(expected_location) - batch[array_key].spec.roi.offset)
            / voxel_size
        )
        assert (
            batch[array_key].data[node_voxel_index] == 1
        ), f"Node at {np.where(batch[array_key].data == 1)} not {node_voxel_index}"
def test_mismatched_voxel_multiples():
    """
    Ensure we don't shift by half a voxel when transposing 2 axes.

    If voxel_size = [2, 2], and we transpose array of shape [4, 6]:

        center = total_roi.center -> [2, 3]

        # Get distance from center, then transpose
        dist_to_center = center - roi.offset -> [2, 3]
        dist_to_center = transpose(dist_to_center) -> [3, 2]

        # Using the transposed distance to center, get the offset.
        new_offset = center - dist_to_center -> [-1, 1]

        shape = transpose(shape) -> [6, 4]

        original = ((0, 0), (4, 6))
        transposed = ((-1, 1), (6, 4))

    This result is what we would expect from tranposing, but no longer fits the voxel grid.
    dist_to_center should be limited to multiples of the lcm_voxel_size.

    instead we should get:
        original = ((0, 0), (4, 6))
        transposed = ((0, 0), (6, 4))
    """

    test_array = ArrayKey("TEST_ARRAY")
    # single 1 at voxel (2, 1) of a 3x3 array with voxel size (2, 2)
    data = np.zeros([3, 3])
    data[
        2, 1
    ] = 1  # voxel has Roi((4, 2) (2, 2)). Contained in Roi((0, 0), (6, 4)). at 2, 1
    source = ArraySource(
        test_array,
        Array(
            data,
            ArraySpec(roi=Roi((0, 0), (6, 6)), voxel_size=(2, 2)),
        ),
    )
    pipeline = source + SimpleAugment(
        mirror_only=[], transpose_only=[0, 1], transpose_probs={(1, 0): 1}
    )

    with build(pipeline):
        request = BatchRequest()
        # non-square request triggers the offset computation described above
        request[test_array] = ArraySpec(roi=Roi((0, 0), (4, 6)))

        batch = pipeline.request_batch(request)
        data = batch[test_array].data

        # the 1 must land on the transposed voxel, on-grid
        assert data[1, 2] == 1, f"{data}"
| 8,524 | 31.788462 | 91 | py |
gunpowder | gunpowder-master/tests/cases/profiling.py | from .provider_test import ProviderTest
from gunpowder import *
import time
class DelayNode(BatchFilter):
    """Filter that sleeps for fixed durations in prepare and process, giving
    the profiling instrumentation something measurable."""

    def __init__(self, time_prepare, time_process):
        self.time_prepare = time_prepare
        self.time_process = time_process

    def prepare(self, request):
        time.sleep(self.time_prepare)
        # pass the request through unchanged as this node's dependencies
        return request

    def process(self, batch, request):
        time.sleep(self.time_process)
class TestProfiling(ProviderTest):
    def test_profiling(self):
        """Profiling stats must reflect the sleep times of the DelayNodes,
        both per-node and as the total upstream span."""
        pipeline = (
            self.test_source
            + DelayNode(0.1, 0.2)
            + PrintProfilingStats(every=2)
            + DelayNode(0.2, 0.3)
        )

        with build(pipeline):
            for i in range(5):
                batch = pipeline.request_batch(self.test_request)

        profiling_stats = batch.profiling_stats

        # note: both DelayNodes report under the same class name, so the
        # summary aggregates over both of them
        summary = profiling_stats.get_timing_summary("DelayNode", "prepare")

        # is the timing for each pass correct?
        self.assertGreaterEqual(summary.min(), 0.1)
        self.assertLessEqual(summary.min(), 0.2 + 0.1)  # bit of tolerance

        summary = profiling_stats.get_timing_summary("DelayNode", "process")

        self.assertGreaterEqual(summary.min(), 0.2)
        self.assertLessEqual(summary.min(), 0.3 + 0.1)  # bit of tolerance

        # is the upstream time correct?
        self.assertGreaterEqual(
            profiling_stats.span_time(), 0.1 + 0.2 + 0.2 + 0.3
        )  # total time spend upstream
        self.assertLessEqual(
            profiling_stats.span_time(), 0.1 + 0.2 + 0.2 + 0.3 + 0.1
        )  # plus bit of tolerance
| 1,619 | 29 | 76 | py |
gunpowder | gunpowder-master/tests/cases/graph.py | from .provider_test import ProviderTest
from gunpowder import (
BatchProvider,
BatchRequest,
BatchFilter,
Batch,
Node,
Edge,
Graph,
GraphSpec,
GraphKey,
GraphKeys,
build,
Roi,
Coordinate,
)
import numpy as np
class ExampleGraphSource(BatchProvider):
    """Test source serving a fixed graph of three chained nodes."""

    def __init__(self):
        self.dtype = float
        nodes = [
            Node(id=1, location=np.array([1, 1, 1], dtype=self.dtype)),
            Node(id=2, location=np.array([500, 500, 500], dtype=self.dtype)),
            Node(id=3, location=np.array([550, 550, 550], dtype=self.dtype)),
        ]
        edges = [Edge(1, 2), Edge(2, 3)]
        self.__spec = GraphSpec(
            roi=Roi(Coordinate([-500, -500, -500]), Coordinate([1500, 1500, 1500]))
        )
        self.graph = Graph(nodes, edges, self.__spec)

    def setup(self):
        self.provides(GraphKeys.TEST_GRAPH, self.__spec)

    def provide(self, request):
        requested_roi = request[GraphKeys.TEST_GRAPH].roi
        batch = Batch()
        # crop to the requested ROI
        batch[GraphKeys.TEST_GRAPH] = self.graph.crop(requested_roi)
        return batch
class GrowFilter(BatchFilter):
    """Grows every requested ROI by 50 units per side upstream, then crops
    the delivered data back to the originally requested ROIs."""

    def prepare(self, request):
        margin = Coordinate([50, 50, 50])
        for key, spec in request.items():
            spec.roi = spec.roi.grow(margin, margin)
            request[key] = spec
        return request

    def process(self, batch, request):
        # restore exactly the ROIs the downstream node asked for
        for key, spec in request.items():
            batch[key] = batch[key].crop(spec.roi).trim(spec.roi)
        return batch
class TestGraphs(ProviderTest):
    """Tests cropping, trimming and neighbor queries on Graph objects."""

    @property
    def edges(self):
        # a 5-cycle over the nodes below
        return [Edge(0, 1), Edge(1, 2), Edge(2, 3), Edge(3, 4), Edge(4, 0)]

    @property
    def nodes(self):
        return [
            Node(0, location=np.array([0, 0, 0], dtype=self.spec.dtype)),
            Node(1, location=np.array([1, 1, 1], dtype=self.spec.dtype)),
            Node(2, location=np.array([2, 2, 2], dtype=self.spec.dtype)),
            Node(3, location=np.array([3, 3, 3], dtype=self.spec.dtype)),
            Node(4, location=np.array([4, 4, 4], dtype=self.spec.dtype)),
        ]

    @property
    def spec(self):
        # NOTE: this property returns a *new* GraphSpec on every access
        return GraphSpec(
            roi=Roi(Coordinate([0, 0, 0]), Coordinate([5, 5, 5])), directed=True
        )

    def test_output(self):
        """Cropping through GrowFilter introduces temporary boundary nodes
        where edges cross the requested ROI."""
        GraphKey("TEST_GRAPH")

        pipeline = ExampleGraphSource() + GrowFilter()

        with build(pipeline):
            batch = pipeline.request_batch(
                BatchRequest(
                    {GraphKeys.TEST_GRAPH: GraphSpec(roi=Roi((0, 0, 0), (50, 50, 50)))}
                )
            )

            graph = batch[GraphKeys.TEST_GRAPH]
            # node 2 lies outside the ROI; the edge 1-2 is cut at the boundary,
            # producing a temporary node at (50, 50, 50)
            expected_vertices = (
                Node(id=1, location=np.array([1.0, 1.0, 1.0], dtype=float)),
                Node(
                    id=2,
                    location=np.array([50.0, 50.0, 50.0], dtype=float),
                    temporary=True,
                ),
            )
            seen_vertices = tuple(graph.nodes)
            self.assertCountEqual(
                [v.original_id for v in expected_vertices],
                [v.original_id for v in seen_vertices],
            )
            for expected, actual in zip(
                sorted(expected_vertices, key=lambda v: tuple(v.location)),
                sorted(seen_vertices, key=lambda v: tuple(v.location)),
            ):
                assert all(np.isclose(expected.location, actual.location))

            batch = pipeline.request_batch(
                BatchRequest(
                    {
                        GraphKeys.TEST_GRAPH: GraphSpec(
                            roi=Roi((25, 25, 25), (500, 500, 500))
                        )
                    }
                )
            )

            graph = batch[GraphKeys.TEST_GRAPH]
            # here nodes 1 and 3 fall outside the ROI, so both cut edges yield
            # temporary boundary nodes
            expected_vertices = (
                Node(
                    id=1,
                    location=np.array([25.0, 25.0, 25.0], dtype=float),
                    temporary=True,
                ),
                Node(id=2, location=np.array([500.0, 500.0, 500.0], dtype=float)),
                Node(
                    id=3,
                    location=np.array([525.0, 525.0, 525.0], dtype=float),
                    temporary=True,
                ),
            )
            seen_vertices = tuple(graph.nodes)
            self.assertCountEqual(
                [v.original_id for v in expected_vertices],
                [v.original_id for v in seen_vertices],
            )
            for expected, actual in zip(
                sorted(expected_vertices, key=lambda v: tuple(v.location)),
                sorted(seen_vertices, key=lambda v: tuple(v.location)),
            ):
                assert all(np.isclose(expected.location, actual.location))

    def test_neighbors(self):
        """Neighbor queries agree between directed and undirected graphs."""
        # directed
        d_spec = self.spec
        # undirected (self.spec returns a fresh object, so mutating ud_spec
        # does not affect d_spec)
        ud_spec = self.spec
        ud_spec.directed = False

        directed = Graph(self.nodes, self.edges, d_spec)
        undirected = Graph(self.nodes, self.edges, ud_spec)

        self.assertCountEqual(
            directed.neighbors(self.nodes[0]), undirected.neighbors(self.nodes[0])
        )

    def test_crop(self):
        """crop() returns an independent sub-graph with its own spec."""
        g = Graph(self.nodes, self.edges, self.spec)

        sub_g = g.crop(Roi(Coordinate([1, 1, 1]), Coordinate([3, 3, 3])))
        # the original graph's spec must be untouched
        self.assertEqual(g.spec.roi, self.spec.roi)
        self.assertEqual(
            sub_g.spec.roi, Roi(Coordinate([1, 1, 1]), Coordinate([3, 3, 3]))
        )

        # mutating the sub-graph's spec must not leak into the original
        sub_g.spec.directed = False
        self.assertTrue(g.spec.directed)
        self.assertFalse(sub_g.spec.directed)
def test_nodes():
    """Node locations obtained from Graph.nodes can be mutated in place and
    the mutation is visible on subsequent iteration."""
    initial_locations = {
        1: np.array([1, 1, 1], dtype=np.float32),
        2: np.array([500, 500, 500], dtype=np.float32),
        3: np.array([550, 550, 550], dtype=np.float32),
    }
    replacement_locations = {
        1: np.array([0, 0, 0], dtype=np.float32),
        2: np.array([50, 50, 50], dtype=np.float32),
        3: np.array([55, 55, 55], dtype=np.float32),
    }

    nodes = [
        Node(id=id, location=location) for id, location in initial_locations.items()
    ]
    edges = [Edge(1, 2), Edge(2, 3)]
    spec = GraphSpec(
        roi=Roi(Coordinate([-500, -500, -500]), Coordinate([1500, 1500, 1500]))
    )
    graph = Graph(nodes, edges, spec)

    # overwrite every node's location through the graph's node view
    for node in graph.nodes:
        node.location = replacement_locations[node.id]

    # the new locations must be visible when iterating again
    for node in graph.nodes:
        assert all(np.isclose(node.location, replacement_locations[node.id]))
| 6,554 | 30.97561 | 87 | py |
gunpowder | gunpowder-master/tests/cases/balance_labels.py | from .provider_test import ProviderTest
from gunpowder import *
import numpy as np
class ExampleSource(BatchProvider):
    """Provides random binary affinities, a mask, and an ignore mask, all with
    voxel size (20, 2, 2) on the same ROI."""

    def setup(self):
        for identifier in [
            ArrayKeys.GT_AFFINITIES,
            ArrayKeys.GT_AFFINITIES_MASK,
            ArrayKeys.GT_IGNORE,
        ]:
            self.provides(
                identifier,
                ArraySpec(roi=Roi((0, 0, 0), (2000, 200, 200)), voxel_size=(20, 2, 2)),
            )

    def provide(self, request):
        batch = Batch()

        # all three arrays share the ROI requested for GT_AFFINITIES
        roi = request[ArrayKeys.GT_AFFINITIES].roi
        shape_vx = roi.shape // self.spec[ArrayKeys.GT_AFFINITIES].voxel_size

        spec = self.spec[ArrayKeys.GT_AFFINITIES].copy()
        spec.roi = roi

        # random 0/1 data with a leading channel dimension of 3 (affinity dims)
        batch.arrays[ArrayKeys.GT_AFFINITIES] = Array(
            np.random.randint(0, 2, (3,) + shape_vx), spec
        )
        batch.arrays[ArrayKeys.GT_AFFINITIES_MASK] = Array(
            np.random.randint(0, 2, (3,) + shape_vx), spec
        )
        batch.arrays[ArrayKeys.GT_IGNORE] = Array(
            np.random.randint(0, 2, (3,) + shape_vx), spec
        )

        return batch
class TestBalanceLabels(ProviderTest):
    """Checks the class-balancing weights computed by BalanceLabels, with and
    without a per-channel slab."""

    def test_output(self):
        pipeline = ExampleSource() + BalanceLabels(
            labels=ArrayKeys.GT_AFFINITIES,
            scales=ArrayKeys.LOSS_SCALE,
            mask=[ArrayKeys.GT_AFFINITIES_MASK, ArrayKeys.GT_IGNORE],
        )

        with build(pipeline):
            # check correct scaling on 10 random samples
            for i in range(10):
                request = BatchRequest()
                request.add(ArrayKeys.GT_AFFINITIES, (400, 30, 34))
                request.add(ArrayKeys.GT_AFFINITIES_MASK, (400, 30, 34))
                request.add(ArrayKeys.GT_IGNORE, (400, 30, 34))
                request.add(ArrayKeys.LOSS_SCALE, (400, 30, 34))

                batch = pipeline.request_batch(request)

                self.assertTrue(ArrayKeys.LOSS_SCALE in batch.arrays)

                affs = batch.arrays[ArrayKeys.GT_AFFINITIES].data
                scale = batch.arrays[ArrayKeys.LOSS_SCALE].data
                mask = batch.arrays[ArrayKeys.GT_AFFINITIES_MASK].data
                ignore = batch.arrays[ArrayKeys.GT_IGNORE].data

                # combine mask and ignore
                mask *= ignore

                # masked-out voxels get scale 0, masked-in voxels a positive scale
                self.assertTrue((scale[mask == 1] > 0).all())
                self.assertTrue((scale[mask == 0] == 0).all())

                num_masked_out = affs.size - mask.sum()
                num_masked_in = affs.size - num_masked_out
                num_pos = (affs * mask).sum()
                num_neg = affs.size - num_masked_out - num_pos

                # expected weights: inverse class frequency, clipped to [0.05, 0.95]
                frac_pos = float(num_pos) / num_masked_in if num_masked_in > 0 else 0
                frac_pos = min(0.95, max(0.05, frac_pos))
                frac_neg = 1.0 - frac_pos

                w_pos = 1.0 / (2.0 * frac_pos)
                w_neg = 1.0 / (2.0 * frac_neg)

                self.assertAlmostEqual((scale * mask * affs).sum(), w_pos * num_pos, 3)
                self.assertAlmostEqual(
                    (scale * mask * (1 - affs)).sum(), w_neg * num_neg, 3
                )

            # check if LOSS_SCALE is omitted if not requested
            del request[ArrayKeys.LOSS_SCALE]

            batch = pipeline.request_batch(request)
            self.assertTrue(ArrayKeys.LOSS_SCALE not in batch.arrays)

        # same using a slab for balancing
        pipeline = ExampleSource() + BalanceLabels(
            labels=ArrayKeys.GT_AFFINITIES,
            scales=ArrayKeys.LOSS_SCALE,
            mask=[ArrayKeys.GT_AFFINITIES_MASK, ArrayKeys.GT_IGNORE],
            slab=(1, -1, -1, -1),
        )  # every channel individually

        with build(pipeline):
            # check correct scaling on 10 random samples
            for i in range(10):
                request = BatchRequest()
                request.add(ArrayKeys.GT_AFFINITIES, (400, 30, 34))
                request.add(ArrayKeys.GT_AFFINITIES_MASK, (400, 30, 34))
                request.add(ArrayKeys.GT_IGNORE, (400, 30, 34))
                request.add(ArrayKeys.LOSS_SCALE, (400, 30, 34))

                batch = pipeline.request_batch(request)

                self.assertTrue(ArrayKeys.LOSS_SCALE in batch.arrays)

                # with slab (1, -1, -1, -1), balancing happens per channel
                for c in range(3):
                    affs = batch.arrays[ArrayKeys.GT_AFFINITIES].data[c]
                    scale = batch.arrays[ArrayKeys.LOSS_SCALE].data[c]
                    mask = batch.arrays[ArrayKeys.GT_AFFINITIES_MASK].data[c]
                    ignore = batch.arrays[ArrayKeys.GT_IGNORE].data[c]

                    # combine mask and ignore
                    mask *= ignore

                    self.assertTrue((scale[mask == 1] > 0).all())
                    self.assertTrue((scale[mask == 0] == 0).all())

                    num_masked_out = affs.size - mask.sum()
                    num_masked_in = affs.size - num_masked_out
                    num_pos = (affs * mask).sum()
                    num_neg = affs.size - num_masked_out - num_pos

                    frac_pos = (
                        float(num_pos) / num_masked_in if num_masked_in > 0 else 0
                    )
                    frac_pos = min(0.95, max(0.05, frac_pos))
                    frac_neg = 1.0 - frac_pos

                    w_pos = 1.0 / (2.0 * frac_pos)
                    w_neg = 1.0 / (2.0 * frac_neg)

                    self.assertAlmostEqual(
                        (scale * mask * affs).sum(), w_pos * num_pos, 3
                    )
                    self.assertAlmostEqual(
                        (scale * mask * (1 - affs)).sum(), w_neg * num_neg, 3
                    )
| 5,703 | 37.281879 | 87 | py |
gunpowder | gunpowder-master/tests/cases/jax_train.py | from .provider_test import ProviderTest
from gunpowder import (
BatchProvider,
BatchRequest,
ArraySpec,
Roi,
Coordinate,
ArrayKeys,
ArrayKey,
Array,
Batch,
Scan,
PreCache,
build,
)
from gunpowder.ext import jax, haiku, optax, NoSuchModule
from gunpowder.jax import Train, Predict, GenericJaxModel
from unittest import skipIf, expectedFailure
import numpy as np
import logging
# use CPU for JAX tests and avoid GPU compatibility issues; skip the
# configuration entirely when jax could not be imported (the tests below are
# then skipped via @skipIf)
if not isinstance(jax, NoSuchModule):
    jax.config.update("jax_platform_name", "cpu")
class ExampleJaxTrain2DSource(BatchProvider):
    """Provides a deterministic 17x17 float32 array with value i + j at (i, j)."""

    def __init__(self):
        pass

    def setup(self):
        spec = ArraySpec(
            roi=Roi((0, 0), (17, 17)),
            dtype=np.float32,
            interpolatable=True,
            voxel_size=(1, 1),
        )
        self.provides(ArrayKeys.A, spec)

    def provide(self, request):
        batch = Batch()

        spec = self.spec[ArrayKeys.A]

        # column vector 0..16 plus its transpose: x[i, j] = i + j
        x = np.array(list(range(17)), dtype=np.float32).reshape([17, 1])
        x = x + x.T

        batch.arrays[ArrayKeys.A] = Array(x, spec).crop(request[ArrayKeys.A].roi)

        return batch
class ExampleJaxTrainSource(BatchProvider):
    """Provides fixed 2x2 arrays A and B ([[0,1],[2,3]]) and a nonspatial
    scalar C ([1]) for the jax Train/Predict tests."""

    def setup(self):
        spec = ArraySpec(
            roi=Roi((0, 0), (2, 2)),
            dtype=np.float32,
            interpolatable=True,
            voxel_size=(1, 1),
        )
        self.provides(ArrayKeys.A, spec)
        self.provides(ArrayKeys.B, spec)

        spec = ArraySpec(nonspatial=True)
        self.provides(ArrayKeys.C, spec)

    def provide(self, request):
        batch = Batch()

        spec = self.spec[ArrayKeys.A]
        spec.roi = request[ArrayKeys.A].roi

        batch.arrays[ArrayKeys.A] = Array(
            np.array([[0, 1], [2, 3]], dtype=np.float32), spec
        )

        spec = self.spec[ArrayKeys.B]
        spec.roi = request[ArrayKeys.B].roi

        batch.arrays[ArrayKeys.B] = Array(
            np.array([[0, 1], [2, 3]], dtype=np.float32), spec
        )

        spec = self.spec[ArrayKeys.C]

        batch.arrays[ArrayKeys.C] = Array(np.array([1], dtype=np.float32), spec)

        return batch
@skipIf(isinstance(jax, NoSuchModule), "Jax is not installed")
class TestJaxTrain(ProviderTest):
    """Trains a one-weight linear haiku model through the jax Train node and
    checks that the loss decreases, including after resuming from checkpoint."""

    def test_output(self):
        logging.getLogger("gunpowder.jax.nodes.train").setLevel(logging.INFO)

        checkpoint_basename = self.path_to("model")

        ArrayKey("A")
        ArrayKey("B")
        ArrayKey("C")
        ArrayKey("C_PREDICTED")
        ArrayKey("C_GRADIENT")

        class ExampleModel(GenericJaxModel):
            """Linear model c = w . (a * b), trained with SGD + momentum."""

            def __init__(self, is_training):
                super().__init__(is_training)

                def _linear(x):
                    # single linear layer without bias
                    return haiku.Linear(1, False)(x)

                self.linear = haiku.without_apply_rng(haiku.transform(_linear))
                self.opt = optax.sgd(learning_rate=1e-7, momentum=0.999)

            def initialize(self, rng_key, inputs):
                a = inputs["a"].reshape(-1)
                b = inputs["b"].reshape(-1)
                weight = self.linear.init(rng_key, a * b)
                opt_state = self.opt.init(weight)
                # params are a (weight, optimizer state) tuple
                return (weight, opt_state)

            def forward(self, params, inputs):
                a = inputs["a"].reshape(-1)
                b = inputs["b"].reshape(-1)
                return {"c": self.linear.apply(params[0], a * b)}

            def _loss_fn(self, weight, a, b, c):
                c_pred = self.linear.apply(weight, a * b)
                loss = optax.l2_loss(predictions=c_pred, targets=c) * 2
                loss_mean = loss.mean()
                # aux outputs are passed through jax.grad(has_aux=True)
                return loss_mean, (c_pred, loss, loss_mean)

            def _apply_optimizer(self, params, grads):
                updates, new_opt_state = self.opt.update(grads, params[1])
                new_weight = optax.apply_updates(params[0], updates)
                return new_weight, new_opt_state

            def train_step(self, params, inputs, pmapped=False):
                a = inputs["a"].reshape(-1)
                b = inputs["b"].reshape(-1)
                c = inputs["c"].reshape(-1)
                # gradient w.r.t. the weight (first argument of _loss_fn after self)
                grads, (c_pred, loss, loss_mean) = jax.grad(
                    self._loss_fn, has_aux=True
                )(params[0], a, b, c)
                new_weight, new_opt_state = self._apply_optimizer(params, grads)
                new_params = (new_weight, new_opt_state)
                outputs = {
                    "c_pred": c_pred,
                    "grad": loss,
                }
                return new_params, outputs, loss_mean

        # NOTE(review): is_training=False although this model is being trained
        # through the Train node — confirm GenericJaxModel's intended semantics
        model = ExampleModel(is_training=False)

        source = ExampleJaxTrainSource()
        train = Train(
            model=model,
            inputs={"a": ArrayKeys.A, "b": ArrayKeys.B, "c": ArrayKeys.C},
            outputs={"c_pred": ArrayKeys.C_PREDICTED, "grad": ArrayKeys.C_GRADIENT},
            array_specs={
                ArrayKeys.C_PREDICTED: ArraySpec(nonspatial=True),
                ArrayKeys.C_GRADIENT: ArraySpec(nonspatial=True),
            },
            checkpoint_basename=checkpoint_basename,
            save_every=100,
            spawn_subprocess=True,
            n_devices=1,
        )
        pipeline = source + train

        request = BatchRequest(
            {
                ArrayKeys.A: ArraySpec(roi=Roi((0, 0), (2, 2))),
                ArrayKeys.B: ArraySpec(roi=Roi((0, 0), (2, 2))),
                ArrayKeys.C: ArraySpec(nonspatial=True),
                ArrayKeys.C_PREDICTED: ArraySpec(nonspatial=True),
                ArrayKeys.C_GRADIENT: ArraySpec(nonspatial=True),
            }
        )

        # train for a couple of iterations
        with build(pipeline):
            batch = pipeline.request_batch(request)

            for i in range(200 - 1):
                loss1 = batch.loss
                batch = pipeline.request_batch(request)
                loss2 = batch.loss
                # loss must decrease monotonically on this toy problem
                self.assertLess(loss2, loss1)

        # resume training
        with build(pipeline):
            for i in range(100):
                loss1 = batch.loss
                batch = pipeline.request_batch(request)
                loss2 = batch.loss
                self.assertLess(loss2, loss1)
@skipIf(isinstance(jax, NoSuchModule), "Jax is not installed")
class TestJaxPredict(ProviderTest):
    """Runs a fixed-weight linear model through the jax Predict node and checks
    the two outputs against hand-computed values."""

    def test_output(self):
        logging.getLogger("gunpowder.jax.nodes.predict").setLevel(logging.INFO)

        a = ArrayKey("A")
        b = ArrayKey("B")
        c = ArrayKey("C")
        c_pred = ArrayKey("C_PREDICTED")
        d_pred = ArrayKey("D_PREDICTED")

        class ExampleModel(GenericJaxModel):
            """Linear model with all weights pinned to 1; returns c and d = 2c."""

            def __init__(self, is_training):
                super().__init__(is_training)

                def _linear(x):
                    return haiku.Linear(1, False)(x)

                self.linear = haiku.without_apply_rng(haiku.transform(_linear))

            def initialize(self, rng_key, inputs):
                a = inputs["a"].reshape(-1)
                b = inputs["b"].reshape(-1)
                weight = self.linear.init(rng_key, a * b)
                # overwrite the random init with all-ones so the output is
                # deterministic: c = sum(a * b)
                weight["linear"]["w"] = (
                    weight["linear"]["w"].at[:].set(np.array([[1], [1], [1], [1]]))
                )
                return weight

            def forward(self, params, inputs):
                a = inputs["a"].reshape(-1)
                b = inputs["b"].reshape(-1)
                c_pred = self.linear.apply(params, a * b)
                d_pred = c_pred * 2
                return {"c": c_pred, "d": d_pred}

        model = ExampleModel(is_training=False)

        source = ExampleJaxTrainSource()
        predict = Predict(
            model=model,
            inputs={"a": a, "b": b},
            outputs={"c": c_pred, "d": d_pred},
            array_specs={
                c: ArraySpec(nonspatial=True),
                c_pred: ArraySpec(nonspatial=True),
                d_pred: ArraySpec(nonspatial=True),
            },
            spawn_subprocess=True,
        )
        pipeline = source + predict

        request = BatchRequest(
            {
                a: ArraySpec(roi=Roi((0, 0), (2, 2))),
                b: ArraySpec(roi=Roi((0, 0), (2, 2))),
                c: ArraySpec(nonspatial=True),
                c_pred: ArraySpec(nonspatial=True),
                d_pred: ArraySpec(nonspatial=True),
            }
        )

        # train for a couple of iterations
        with build(pipeline):
            batch1 = pipeline.request_batch(request)
            batch2 = pipeline.request_batch(request)

            # prediction is deterministic across requests
            assert np.isclose(batch1[c_pred].data, batch2[c_pred].data)
            # a = b = [0, 1, 2, 3], so c = sum(a*b) = 0 + 1 + 4 + 9
            assert np.isclose(batch1[c_pred].data, 1 + 4 + 9)
            assert np.isclose(batch2[d_pred].data, 2 * (1 + 4 + 9))
| 8,810 | 31.274725 | 84 | py |
gunpowder | gunpowder-master/tests/cases/prepare_malis.py | from gunpowder import *
from gunpowder.contrib import PrepareMalis
import numpy as np
from .provider_test import ProviderTest
class ExampleSourcePrepareMalis(BatchProvider):
    """Provides synthetic labels (two halves, values 1 and 2) and an ignore
    mask (zero beyond 1/6 along axis 1) for the PrepareMalis tests."""

    def setup(self):
        self.provides(
            ArrayKeys.GT_LABELS,
            ArraySpec(
                roi=Roi((0, 0, 0), (90, 90, 90)),
                voxel_size=(1, 1, 1),
                interpolatable=False,
            ),
        )
        self.provides(
            ArrayKeys.GT_IGNORE,
            ArraySpec(
                roi=Roi((0, 0, 0), (90, 90, 90)),
                voxel_size=(1, 1, 1),
                interpolatable=False,
            ),
        )

    def provide(self, request):
        batch = Batch()

        if ArrayKeys.GT_LABELS in request:
            gt_labels_roi = request[ArrayKeys.GT_LABELS].roi
            gt_labels_shape = gt_labels_roi.shape

            # label 1 in the first half along axis 0, label 2 in the second
            data_labels = np.ones(gt_labels_shape)
            data_labels[gt_labels_shape[0] // 2 :, :, :] = 2
            spec = self.spec[ArrayKeys.GT_LABELS].copy()
            spec.roi = gt_labels_roi

            batch.arrays[ArrayKeys.GT_LABELS] = Array(data_labels, spec)

        if ArrayKeys.GT_IGNORE in request:
            gt_ignore_roi = request[ArrayKeys.GT_IGNORE].roi
            gt_ignore_shape = gt_ignore_roi.shape

            # ignore (value 0) everything past 1/6 of the extent along axis 1
            data_gt_ignore = np.ones(gt_ignore_shape)
            data_gt_ignore[:, gt_ignore_shape[1] // 6 :, :] = 0
            spec = self.spec[ArrayKeys.GT_IGNORE].copy()
            spec.roi = gt_ignore_roi

            batch.arrays[ArrayKeys.GT_IGNORE] = Array(data_gt_ignore, spec)

        return batch
class TestPrepareMalis(ProviderTest):
    """Checks the two-pass component labels produced by PrepareMalis, with and
    without an ignore mask."""

    def test_output(self):
        ArrayKey("MALIS_COMP_LABEL")

        pipeline_with_ignore = ExampleSourcePrepareMalis() + PrepareMalis(
            ArrayKeys.GT_LABELS,
            ArrayKeys.MALIS_COMP_LABEL,
            ignore_array_key=ArrayKeys.GT_IGNORE,
        )
        pipeline_without_ignore = ExampleSourcePrepareMalis() + PrepareMalis(
            ArrayKeys.GT_LABELS,
            ArrayKeys.MALIS_COMP_LABEL,
        )

        # test that MALIS_COMP_LABEL not in batch if not in request
        with build(pipeline_with_ignore):
            request = BatchRequest()
            request.add(ArrayKeys.GT_LABELS, (90, 90, 90))
            request.add(ArrayKeys.GT_IGNORE, (90, 90, 90))

            batch = pipeline_with_ignore.request_batch(request)

            # test if array added to batch
            self.assertTrue(ArrayKeys.MALIS_COMP_LABEL not in batch.arrays)

        # test usage with gt_ignore
        with build(pipeline_with_ignore):
            request = BatchRequest()
            request.add(ArrayKeys.GT_LABELS, (90, 90, 90))
            request.add(ArrayKeys.GT_IGNORE, (90, 90, 90))
            request.add(ArrayKeys.MALIS_COMP_LABEL, (90, 90, 90))

            batch = pipeline_with_ignore.request_batch(request)

            # test if array added to batch
            self.assertTrue(ArrayKeys.MALIS_COMP_LABEL in batch.arrays)

            # test if gt_ignore considered for gt_neg_pass ([0, ...]) and not for gt_pos_pass ([1, ...])
            ignored_locations = np.where(batch.arrays[ArrayKeys.GT_IGNORE].data == 0)

            # gt_neg_pass: ignored voxels are set to the special label 3
            self.assertTrue(
                (
                    batch.arrays[ArrayKeys.MALIS_COMP_LABEL].data[0, ...][
                        ignored_locations
                    ]
                    == 3
                ).all()
            )
            self.assertFalse(
                (
                    np.array_equal(
                        batch.arrays[ArrayKeys.MALIS_COMP_LABEL].data[0, ...],
                        batch.arrays[ArrayKeys.GT_LABELS].data,
                    )
                )
            )
            # gt_pos_pass: identical to the original labels
            self.assertFalse(
                (
                    batch.arrays[ArrayKeys.MALIS_COMP_LABEL].data[1, ...][
                        ignored_locations
                    ]
                    == 3
                ).all()
            )
            self.assertTrue(
                (
                    np.array_equal(
                        batch.arrays[ArrayKeys.MALIS_COMP_LABEL].data[1, ...],
                        batch.arrays[ArrayKeys.GT_LABELS].data,
                    )
                )
            )

            # Test ignore without requesting ignore array
            request = BatchRequest()
            request.add(ArrayKeys.GT_LABELS, (90, 90, 90))
            request.add(ArrayKeys.MALIS_COMP_LABEL, (90, 90, 90))

            batch = pipeline_with_ignore.request_batch(request)

            # test if array added to batch
            self.assertTrue(ArrayKeys.MALIS_COMP_LABEL in batch.arrays)

            # gt_neg_pass: ignore mask is still applied internally, even though
            # GT_IGNORE itself was not requested
            self.assertTrue(
                (
                    batch.arrays[ArrayKeys.MALIS_COMP_LABEL].data[0, ...][
                        ignored_locations
                    ]
                    == 3
                ).all()
            )
            self.assertFalse(
                (
                    np.array_equal(
                        batch.arrays[ArrayKeys.MALIS_COMP_LABEL].data[0, ...],
                        batch.arrays[ArrayKeys.GT_LABELS].data,
                    )
                )
            )
            # gt_pos_pass
            self.assertFalse(
                (
                    batch.arrays[ArrayKeys.MALIS_COMP_LABEL].data[1, ...][
                        ignored_locations
                    ]
                    == 3
                ).all()
            )
            self.assertTrue(
                (
                    np.array_equal(
                        batch.arrays[ArrayKeys.MALIS_COMP_LABEL].data[1, ...],
                        batch.arrays[ArrayKeys.GT_LABELS].data,
                    )
                )
            )

        # test usage without gt_ignore
        with build(pipeline_without_ignore):
            request = BatchRequest()
            request.add(ArrayKeys.GT_LABELS, (90, 90, 90))
            request.add(ArrayKeys.MALIS_COMP_LABEL, (90, 90, 90))

            batch = pipeline_without_ignore.request_batch(request)

            # test if array added to batch
            self.assertTrue(ArrayKeys.MALIS_COMP_LABEL in batch.arrays)

            # without an ignore mask, both passes ([0, ...] and [1, ...]) equal
            # the original labels
            # gt_neg_pass
            self.assertTrue(
                (
                    np.array_equal(
                        batch.arrays[ArrayKeys.MALIS_COMP_LABEL].data[0, ...],
                        batch.arrays[ArrayKeys.GT_LABELS].data,
                    )
                )
            )
            # gt_pos_pass
            self.assertTrue(
                (
                    np.array_equal(
                        batch.arrays[ArrayKeys.MALIS_COMP_LABEL].data[1, ...],
                        batch.arrays[ArrayKeys.GT_LABELS].data,
                    )
                )
            )
| 6,991 | 33.44335 | 104 | py |
gunpowder | gunpowder-master/tests/cases/graph_source.py | from .provider_test import ProviderTest
from gunpowder import (
BatchRequest,
Node,
Edge,
GraphSpec,
GraphKey,
GraphKeys,
GraphSource,
build,
Roi,
Coordinate,
)
import numpy as np
import networkx as nx
class DummyDaisyGraphProvider:
    """Dummy graph provider mimicking daisy.SharedGraphProvider.

    Must have directed attribute, __getitem__(roi) that returns networkx
    graph, and position_attribute.
    """

    def __init__(self, nodes, edges, directed=False):
        self.nodes = nodes
        self.edges = edges
        self.directed = directed
        # attribute name under which node positions are stored
        self.position_attribute = "location"

    def __getitem__(self, roi):
        # return a networkx graph restricted to nodes inside roi
        if self.directed:
            graph = nx.DiGraph()
        else:
            graph = nx.Graph()
        for node in self.nodes:
            if roi.contains(node.location):
                graph.add_node(node.id, location=node.location)
        for edge in self.edges:
            # NOTE(review): only edge.u is checked for membership; if edge.v
            # lies outside roi, networkx's add_edge silently creates node v
            # without a 'location' attribute — confirm this is the intended
            # behavior for edges crossing the ROI boundary.
            if edge.u in graph.nodes:
                graph.add_edge(edge.u, edge.v)
        return graph
class TestGraphSource(ProviderTest):
    """Checks that GraphSource correctly serves graphs from a daisy-like
    provider, for full and partial ROIs."""

    @property
    def edges(self):
        # a 5-cycle over the nodes below
        return [Edge(0, 1), Edge(1, 2), Edge(2, 3), Edge(3, 4), Edge(4, 0)]

    @property
    def nodes(self):
        return [
            Node(0, location=np.array([0, 0, 0], dtype=self.spec.dtype)),
            Node(1, location=np.array([1, 1, 1], dtype=self.spec.dtype)),
            Node(2, location=np.array([2, 2, 2], dtype=self.spec.dtype)),
            Node(3, location=np.array([3, 3, 3], dtype=self.spec.dtype)),
            Node(4, location=np.array([4, 4, 4], dtype=self.spec.dtype)),
        ]

    @property
    def spec(self):
        return GraphSpec(
            roi=Roi(Coordinate([0, 0, 0]), Coordinate([5, 5, 5])), directed=True
        )

    def test_output(self):
        GraphKey("TEST_GRAPH")

        dummy_provider = DummyDaisyGraphProvider(self.nodes, self.edges, directed=True)
        graph_source = GraphSource(dummy_provider, GraphKeys.TEST_GRAPH, self.spec)
        pipeline = graph_source
        with build(pipeline):
            # requesting the full ROI must return all five nodes
            batch = pipeline.request_batch(
                BatchRequest(
                    {GraphKeys.TEST_GRAPH: GraphSpec(roi=Roi((0, 0, 0), (5, 5, 5)))}
                )
            )

            graph = batch[GraphKeys.TEST_GRAPH]
            expected_vertices = self.nodes
            seen_vertices = tuple(graph.nodes)
            self.assertCountEqual(
                [v.id for v in expected_vertices],
                [v.id for v in seen_vertices],
            )
            for expected, actual in zip(
                sorted(expected_vertices, key=lambda v: tuple(v.location)),
                sorted(seen_vertices, key=lambda v: tuple(v.location)),
            ):
                assert all(np.isclose(expected.location, actual.location))

            # a partial ROI must only return the nodes it contains
            batch = pipeline.request_batch(
                BatchRequest(
                    {GraphKeys.TEST_GRAPH: GraphSpec(roi=Roi((2, 2, 2), (3, 3, 3)))}
                )
            )

            graph = batch[GraphKeys.TEST_GRAPH]
            expected_vertices = (
                Node(2, location=np.array([2, 2, 2], dtype=self.spec.dtype)),
                Node(3, location=np.array([3, 3, 3], dtype=self.spec.dtype)),
                Node(4, location=np.array([4, 4, 4], dtype=self.spec.dtype)),
            )
            seen_vertices = tuple(graph.nodes)
            # NOTE(review): leftover debug output — consider removing
            print(seen_vertices)
            self.assertCountEqual(
                [v.id for v in expected_vertices],
                [v.id for v in seen_vertices],
            )
            for expected, actual in zip(
                sorted(expected_vertices, key=lambda v: tuple(v.location)),
                sorted(seen_vertices, key=lambda v: tuple(v.location)),
            ):
                assert all(np.isclose(expected.location, actual.location))
| 3,874 | 32.119658 | 87 | py |
gunpowder | gunpowder-master/tests/cases/random_location_graph.py | from .provider_test import ProviderTest
from gunpowder import (
BatchProvider,
Graph,
Node,
GraphSpec,
GraphKey,
GraphKeys,
Roi,
Batch,
BatchRequest,
RandomLocation,
build,
BatchFilter,
)
import numpy as np
import logging
logger = logging.getLogger(__name__)
class BatchTester(BatchFilter):
    """Asserts that the graphs of every batch passing through (except the
    first one seen) have ROIs matching, or contained in, a given ROI."""

    def __init__(self, roi_to_match, exact=True):
        self.roi_to_match = roi_to_match
        # exact=True: ROI must equal roi_to_match; otherwise containment suffices
        self.exact = exact
        self.visited = False

    def prepare(self, request):
        for key, _ in request.items():
            logger.debug(f"PREPARE TESTBATCH ======== {key} ROI: {self.spec[key].roi}")

    def process(self, batch, request):
        if not self.visited:
            # assertions are skipped for the very first batch, as in the
            # original implementation
            self.visited = True
            return
        for key, graph in batch.graphs.items():
            logger.debug(
                f"PROCESS TESTBATCH ======== {key}: {graph.spec.roi} {graph}"
            )
            if self.exact:
                assert (
                    graph.spec.roi == self.roi_to_match
                ), "graph roi does not match possible roi"
            else:
                assert self.roi_to_match.contains(
                    batch[GraphKeys.TEST_GRAPH].spec.roi
                ), "batch is not contained in possible roi"
class SourceGraphLocation(BatchProvider):
    """Provides a graph with a single node at (500, 500, 500) inside a
    (1000, 1000, 1000) ROI."""

    def __init__(self):
        self.graph = Graph(
            [Node(id=1, location=np.array([500, 500, 500]))],
            [],
            GraphSpec(roi=Roi((0, 0, 0), (1000, 1000, 1000))),
        )

    def setup(self):
        self.provides(GraphKeys.TEST_GRAPH, self.graph.spec)

    def provide(self, request):
        batch = Batch()
        roi = request[GraphKeys.TEST_GRAPH].roi

        # crop to the requested ROI and trim the spec to match
        batch[GraphKeys.TEST_GRAPH] = self.graph.crop(roi).trim(roi)
        return batch
class TestRandomLocationGraph(ProviderTest):
    """Checks RandomLocation(ensure_nonempty=...) against a single-node graph
    source for various request ROI shapes."""

    def test_dim_size_1(self):
        """Requests with extent 1 along one dimension still hit the node."""
        GraphKey("TEST_GRAPH")
        upstream_roi = Roi((500, 401, 401), (1, 200, 200))
        pipeline = (
            SourceGraphLocation()
            + BatchTester(upstream_roi, exact=False)
            + RandomLocation(ensure_nonempty=GraphKeys.TEST_GRAPH)
        )

        # count the number of times we get each node
        with build(pipeline):
            for i in range(500):
                batch = pipeline.request_batch(
                    BatchRequest(
                        {
                            GraphKeys.TEST_GRAPH: GraphSpec(
                                roi=Roi((0, 0, 0), (1, 100, 100))
                            )
                        }
                    )
                )

                assert len(list(batch[GraphKeys.TEST_GRAPH].nodes)) == 1

    def test_req_full_roi(self):
        """Requesting the entire upstream ROI still returns the node."""
        GraphKey("TEST_GRAPH")
        possible_roi = Roi((0, 0, 0), (1000, 1000, 1000))

        pipeline = (
            SourceGraphLocation()
            + BatchTester(possible_roi, exact=False)
            + RandomLocation(ensure_nonempty=GraphKeys.TEST_GRAPH)
        )
        with build(pipeline):
            batch = pipeline.request_batch(
                BatchRequest(
                    {
                        GraphKeys.TEST_GRAPH: GraphSpec(
                            roi=Roi((0, 0, 0), (1000, 1000, 1000))
                        )
                    }
                )
            )

            assert len(list(batch[GraphKeys.TEST_GRAPH].nodes)) == 1

    def test_roi_one_point(self):
        """A single-voxel request must land exactly on the node's location."""
        GraphKey("TEST_GRAPH")
        upstream_roi = Roi((500, 500, 500), (1, 1, 1))

        pipeline = (
            SourceGraphLocation()
            + BatchTester(upstream_roi, exact=True)
            + RandomLocation(ensure_nonempty=GraphKeys.TEST_GRAPH)
        )

        with build(pipeline):
            for i in range(500):
                batch = pipeline.request_batch(
                    BatchRequest(
                        {GraphKeys.TEST_GRAPH: GraphSpec(roi=Roi((0, 0, 0), (1, 1, 1)))}
                    )
                )

                assert len(list(batch[GraphKeys.TEST_GRAPH].nodes)) == 1

    def test_iso_roi(self):
        """Isotropic requests always contain the single node."""
        GraphKey("TEST_GRAPH")
        upstream_roi = Roi((401, 401, 401), (200, 200, 200))

        pipeline = (
            SourceGraphLocation()
            + BatchTester(upstream_roi, exact=False)
            + RandomLocation(ensure_nonempty=GraphKeys.TEST_GRAPH)
        )

        with build(pipeline):
            for i in range(500):
                batch = pipeline.request_batch(
                    BatchRequest(
                        {
                            GraphKeys.TEST_GRAPH: GraphSpec(
                                roi=Roi((0, 0, 0), (100, 100, 100))
                            )
                        }
                    )
                )

                assert len(list(batch[GraphKeys.TEST_GRAPH].nodes)) == 1
| 4,869 | 29.248447 | 88 | py |
gunpowder | gunpowder-master/tests/cases/random_provider.py | from gunpowder import (
RandomProvider,
Roi,
ArrayKey,
ArraySpec,
Array,
Roi,
BatchRequest,
build,
)
import numpy as np
from .helper_sources import ArraySource
def test_output():
    """RandomProvider picks one of two sources per request and records which
    one in the random_provider_key array."""
    a = ArrayKey("A")
    # both sources provide the same key; source_a serves zeros, source_b ones
    source_a = ArraySource(
        a,
        Array(
            np.zeros((5, 15, 25), dtype=np.uint8),
            spec=ArraySpec(
                roi=Roi((0, 0, 0), (50, 60, 50)), voxel_size=(10, 4, 2), dtype=np.uint8
            ),
        ),
    )
    source_b = ArraySource(
        a,
        Array(
            np.ones((5, 15, 25), dtype=np.uint8),
            spec=ArraySpec(
                roi=Roi((0, 0, 0), (50, 60, 50)), voxel_size=(10, 4, 2), dtype=np.uint8
            ),
        ),
    )

    random_provider = ArrayKey("RANDOM_PROVIDER")

    pipeline = (source_a, source_b) + RandomProvider(
        random_provider_key=random_provider
    )

    with build(pipeline):
        possibilities = set([0, 1])
        seen = set()
        # NOTE(review): with 10 draws there is a ~2*(1/2)**10 chance of never
        # seeing one of the providers, making the final assertion flaky
        for i in range(10):
            batch = pipeline.request_batch(
                BatchRequest(
                    {
                        a: ArraySpec(roi=Roi((0, 0, 0), (20, 20, 20))),
                        random_provider: ArraySpec(nonspatial=True),
                    }
                )
            )
            value = batch.arrays[a].data[0, 0, 0]
            assert value in possibilities
            # the recorded provider index must match the data actually served
            assert batch.arrays[random_provider].data.item() == value
            seen.add(value)
            if len(possibilities - seen) == 0:
                break

        assert seen == possibilities
gunpowder | gunpowder-master/tests/cases/random_location.py | from gunpowder import (
RandomLocation,
BatchProvider,
Roi,
Coordinate,
ArrayKeys,
ArrayKey,
ArraySpec,
Array,
Roi,
Coordinate,
Batch,
BatchRequest,
BatchProvider,
RandomLocation,
MergeProvider,
build,
)
import numpy as np
from gunpowder.pipeline import PipelineRequestError
import pytest
class ExampleSourceRandomLocation(BatchProvider):
    """Provides a 60^3-voxel volume with data[i, j, k] = (i-10)+(j-10)+(k-10)
    on an anisotropic ROI starting at negative coordinates."""

    def __init__(self, array):
        self.array = array
        self.roi = Roi((-200, -20, -20), (1000, 100, 100))
        self.data_shape = (60, 60, 60)
        self.voxel_size = Coordinate(20, 2, 2)
        # per-axis ramp -10..49; summing the three transposes gives the
        # value (i-10)+(j-10)+(k-10) at voxel (i, j, k)
        x = np.linspace(-10, 49, 60).reshape((-1, 1, 1))
        self.data = x + x.transpose([1, 2, 0]) + x.transpose([2, 0, 1])

    def setup(self):
        self.provides(self.array, ArraySpec(roi=self.roi, voxel_size=self.voxel_size))

    def provide(self, request):
        batch = Batch()

        spec = request[self.array].copy()
        spec.voxel_size = self.voxel_size

        # the ROI begins at -10 voxels per axis, so offset indices by +10
        start = (request[self.array].roi.begin / self.voxel_size) + 10
        end = (request[self.array].roi.end / self.voxel_size) + 10
        data_slices = tuple(map(slice, start, end))

        data = self.data[data_slices]

        batch.arrays[self.array] = Array(data=data, spec=spec)

        return batch
class CustomRandomLocation(RandomLocation):
    """RandomLocation that only accepts shifts for which the ROI of
    ``self.array`` contains the origin (0, 0, 0)."""

    def __init__(self, array, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.array = array

    # only accept random locations that contain (0, 0, 0)
    def accepts(self, request):
        return request.array_specs[self.array].roi.contains((0, 0, 0))
def test_output():
    """Checks random shifts always contain the origin and that a stored shift
    can be replayed as an explicit request without RandomLocation."""
    a = ArrayKey("A")
    b = ArrayKey("B")
    random_shift_key = ArrayKey("RANDOM_SHIFT")

    source_a = ExampleSourceRandomLocation(a)
    source_b = ExampleSourceRandomLocation(b)

    pipeline = (
        (source_a, source_b)
        + MergeProvider()
        + CustomRandomLocation(a, random_store_key=random_shift_key)
    )
    pipeline_no_random = (source_a, source_b) + MergeProvider()

    with build(pipeline), build(pipeline_no_random):
        sums = set()
        for i in range(10):
            batch = pipeline.request_batch(
                BatchRequest(
                    {
                        a: ArraySpec(roi=Roi((0, 0, 0), (20, 20, 20))),
                        b: ArraySpec(roi=Roi((0, 0, 0), (20, 20, 20))),
                        random_shift_key: ArraySpec(nonspatial=True),
                    }
                )
            )
            # CustomRandomLocation only accepts shifts containing the origin,
            # where the source data is 0
            assert 0 in batch.arrays[a].data
            assert 0 in batch.arrays[b].data

            # check that we can repeat this request without the random location
            # NOTE(review): this request asks pipeline_no_random for
            # random_shift_key, which no node in that pipeline provides —
            # verify this request succeeds
            batch_no_random = pipeline_no_random.request_batch(
                BatchRequest(
                    {
                        a: ArraySpec(
                            roi=Roi(batch[random_shift_key].data, (20, 20, 20))
                        ),
                        b: ArraySpec(
                            roi=Roi(batch[random_shift_key].data, (20, 20, 20))
                        ),
                        random_shift_key: ArraySpec(nonspatial=True),
                    }
                )
            )

            assert batch_no_random.arrays[a].data.sum() == batch.arrays[a].data.sum()
            sums.add(batch[a].data.sum())

        # Request a ROI with the same shape as the entire ROI
        full_roi_a = Roi((0, 0, 0), source_a.roi.shape)
        full_roi_b = Roi((0, 0, 0), source_b.roi.shape)
        batch = pipeline.request_batch(
            BatchRequest(
                {a: ArraySpec(roi=full_roi_a), b: ArraySpec(roi=full_roi_b)}
            )
        )
        # shifts must actually vary across requests
        assert len(sums) > 1
def test_output_no_random_store():
    """Checks that every random shift accepted by CustomRandomLocation
    contains the origin, without storing the shift.

    Renamed from ``test_output``: the original definition redefined
    ``test_output`` in this module and thereby shadowed the preceding, more
    thorough test, so pytest never collected it (pyflakes F811).
    """
    a = ArrayKey("A")
    b = ArrayKey("B")

    source_a = ExampleSourceRandomLocation(a)
    source_b = ExampleSourceRandomLocation(b)
    pipeline = (source_a, source_b) + MergeProvider() + CustomRandomLocation(a)

    with build(pipeline):
        for i in range(10):
            batch = pipeline.request_batch(
                BatchRequest(
                    {
                        a: ArraySpec(roi=Roi((0, 0, 0), (20, 20, 20))),
                        b: ArraySpec(roi=Roi((0, 0, 0), (20, 20, 20))),
                    }
                )
            )
            # accepted shifts contain the origin, where the source data is 0
            assert 0 in batch.arrays[a].data
            assert 0 in batch.arrays[b].data

        # Request a ROI with the same shape as the entire ROI; this must also
        # succeed (the only admissible shift covers the full upstream ROI)
        full_roi_a = Roi((0, 0, 0), source_a.roi.shape)
        full_roi_b = Roi((0, 0, 0), source_b.roi.shape)
        batch = pipeline.request_batch(
            BatchRequest(
                {a: ArraySpec(roi=full_roi_a), b: ArraySpec(roi=full_roi_b)}
            )
        )
def test_random_seed():
    """Passing random_seed to BatchRequest makes sampling deterministic,
    while omitting it keeps sampling random."""
    raw = ArrayKey("RAW")
    pipeline = ExampleSourceRandomLocation(raw) + CustomRandomLocation(raw)

    with build(pipeline):
        seeded_sums = []
        unseeded_sums = []
        for i in range(10):
            batch_seeded = pipeline.request_batch(
                BatchRequest(
                    {raw: ArraySpec(roi=Roi((0, 0, 0), (20, 20, 20)))},
                    random_seed=10,
                )
            )
            seeded_sums.append(batch_seeded[raw].data.sum())
            batch_unseeded = pipeline.request_batch(
                BatchRequest({raw: ArraySpec(roi=Roi((0, 0, 0), (20, 20, 20)))})
            )
            unseeded_sums.append(batch_unseeded[raw].data.sum())

        # same seed -> identical samples; no seed -> varying samples
        assert len(set(seeded_sums)) == 1
        assert len(set(unseeded_sums)) > 1
def test_impossible():
    """Two ROIs that cannot be shifted into both sources at once must raise."""
    key_a = ArrayKey("A")
    key_b = ArrayKey("B")
    null_key = ArrayKey("NULL")
    pipeline = (
        (ExampleSourceRandomLocation(key_a), ExampleSourceRandomLocation(key_b))
        + MergeProvider()
        + CustomRandomLocation(null_key)
    )
    # the two ROIs are too far apart to both fit inside the sources
    impossible_request = BatchRequest(
        {
            key_a: ArraySpec(roi=Roi((0, 0, 0), (200, 20, 20))),
            key_b: ArraySpec(roi=Roi((1000, 100, 100), (220, 22, 22))),
        }
    )
    with build(pipeline):
        with pytest.raises(PipelineRequestError):
            pipeline.request_batch(impossible_request)
| 6,278 | 30.552764 | 86 | py |
gunpowder | gunpowder-master/tests/cases/helper_sources.py | from gunpowder import BatchProvider, GraphKey, Graph, ArrayKey, Array, Batch
import copy
class ArraySource(BatchProvider):
    """Trivial batch provider serving crops of a single in-memory array."""

    def __init__(self, key: ArrayKey, array: Array):
        self.key = key
        self.array = array

    def setup(self):
        # announce the stored array under its key with an independent spec
        self.provides(self.key, self.array.spec.copy())

    def provide(self, request):
        requested_roi = request[self.key].roi
        cropped = self.array.crop(requested_roi)
        batch = Batch()
        # deep-copy so downstream nodes cannot mutate the source data
        batch[self.key] = copy.deepcopy(cropped)
        return batch
class GraphSource(BatchProvider):
    """Trivial batch provider serving crops of a single in-memory graph."""

    def __init__(self, key: GraphKey, graph: Graph):
        self.key = key
        self.graph = graph

    def setup(self):
        self.provides(self.key, self.graph.spec)

    def provide(self, request):
        roi = request[self.key].roi
        # crop then trim so nodes/edges outside the requested ROI are dropped
        trimmed = self.graph.crop(roi).trim(roi)
        batch = Batch()
        # deep-copy so downstream nodes cannot mutate the source graph
        batch[self.key] = copy.deepcopy(trimmed)
        return batch
| 895 | 25.352941 | 81 | py |
gunpowder | gunpowder-master/tests/cases/torch_train.py | from .provider_test import ProviderTest
from gunpowder import (
BatchProvider,
BatchRequest,
ArraySpec,
Roi,
Coordinate,
ArrayKeys,
ArrayKey,
Array,
Batch,
Scan,
PreCache,
build,
)
from gunpowder.ext import torch, NoSuchModule
from gunpowder.torch import Train, Predict
from unittest import skipIf, expectedFailure
import numpy as np
import logging
class ExampleTorchTrain2DSource(BatchProvider):
    """Provides a fixed 17x17 float32 array under key A (2D, voxel size 1)."""
    def __init__(self):
        pass
    def setup(self):
        spec = ArraySpec(
            roi=Roi((0, 0), (17, 17)),
            dtype=np.float32,
            interpolatable=True,
            voxel_size=(1, 1),
        )
        self.provides(ArrayKeys.A, spec)
    def provide(self, request):
        batch = Batch()
        spec = self.spec[ArrayKeys.A]
        # x[i, j] = i + j, then cropped to the requested ROI
        x = np.array(list(range(17)), dtype=np.float32).reshape([17, 1])
        x = x + x.T
        batch.arrays[ArrayKeys.A] = Array(x, spec).crop(request[ArrayKeys.A].roi)
        return batch
class ExampleTorchTrainSource(BatchProvider):
    """Provides fixed 2x2 float32 arrays A and B and a nonspatial scalar C."""
    def setup(self):
        spec = ArraySpec(
            roi=Roi((0, 0), (2, 2)),
            dtype=np.float32,
            interpolatable=True,
            voxel_size=(1, 1),
        )
        self.provides(ArrayKeys.A, spec)
        self.provides(ArrayKeys.B, spec)
        spec = ArraySpec(nonspatial=True)
        self.provides(ArrayKeys.C, spec)
    def provide(self, request):
        batch = Batch()
        # A and B both carry the constant values [[0, 1], [2, 3]]
        spec = self.spec[ArrayKeys.A]
        spec.roi = request[ArrayKeys.A].roi
        batch.arrays[ArrayKeys.A] = Array(
            np.array([[0, 1], [2, 3]], dtype=np.float32), spec
        )
        spec = self.spec[ArrayKeys.B]
        spec.roi = request[ArrayKeys.B].roi
        batch.arrays[ArrayKeys.B] = Array(
            np.array([[0, 1], [2, 3]], dtype=np.float32), spec
        )
        # C is the nonspatial training target, constant 1
        spec = self.spec[ArrayKeys.C]
        batch.arrays[ArrayKeys.C] = Array(np.array([1], dtype=np.float32), spec)
        return batch
@skipIf(isinstance(torch, NoSuchModule), "torch is not installed")
class TestTorchTrain(ProviderTest):
    """End-to-end test of the torch Train node: loss must decrease across
    iterations, and training must resume from the saved checkpoint."""
    def test_output(self):
        logging.getLogger("gunpowder.torch.nodes.train").setLevel(logging.INFO)
        checkpoint_basename = self.path_to("model")
        ArrayKey("A")
        ArrayKey("B")
        ArrayKey("C")
        ArrayKey("C_PREDICTED")
        ArrayKey("C_GRADIENT")
        class ExampleModel(torch.nn.Module):
            # single linear layer over the 4 values of a * b (no bias)
            def __init__(self):
                super(ExampleModel, self).__init__()
                self.linear = torch.nn.Linear(4, 1, False)
            def forward(self, a, b):
                a = a.reshape(-1)
                b = b.reshape(-1)
                return self.linear(a * b)
        model = ExampleModel()
        loss = torch.nn.MSELoss()
        optimizer = torch.optim.SGD(model.parameters(), lr=1e-7, momentum=0.999)
        source = ExampleTorchTrainSource()
        train = Train(
            model=model,
            optimizer=optimizer,
            loss=loss,
            inputs={"a": ArrayKeys.A, "b": ArrayKeys.B},
            loss_inputs={0: ArrayKeys.C_PREDICTED, 1: ArrayKeys.C},
            outputs={0: ArrayKeys.C_PREDICTED},
            gradients={0: ArrayKeys.C_GRADIENT},
            array_specs={
                ArrayKeys.C_PREDICTED: ArraySpec(nonspatial=True),
                ArrayKeys.C_GRADIENT: ArraySpec(nonspatial=True),
            },
            checkpoint_basename=checkpoint_basename,
            save_every=100,
            spawn_subprocess=True,
        )
        pipeline = source + train
        request = BatchRequest(
            {
                ArrayKeys.A: ArraySpec(roi=Roi((0, 0), (2, 2))),
                ArrayKeys.B: ArraySpec(roi=Roi((0, 0), (2, 2))),
                ArrayKeys.C: ArraySpec(nonspatial=True),
                ArrayKeys.C_PREDICTED: ArraySpec(nonspatial=True),
                ArrayKeys.C_GRADIENT: ArraySpec(nonspatial=True),
            }
        )
        # train for a couple of iterations; momentum makes the loss decrease
        # monotonically here, which the assertions rely on
        with build(pipeline):
            batch = pipeline.request_batch(request)
            for i in range(200 - 1):
                loss1 = batch.loss
                batch = pipeline.request_batch(request)
                loss2 = batch.loss
                self.assertLess(loss2, loss1)
        # resume training from the checkpoint saved by the first build
        with build(pipeline):
            for i in range(100):
                loss1 = batch.loss
                batch = pipeline.request_batch(request)
                loss2 = batch.loss
                self.assertLess(loss2, loss1)
@skipIf(isinstance(torch, NoSuchModule), "torch is not installed")
class TestTorchPredict(ProviderTest):
    """Predict must expose both a named submodule output and the return value,
    and repeated requests must be deterministic."""
    def test_output(self):
        logging.getLogger("gunpowder.torch.nodes.predict").setLevel(logging.INFO)
        a = ArrayKey("A")
        b = ArrayKey("B")
        c = ArrayKey("C")
        c_pred = ArrayKey("C_PREDICTED")
        d_pred = ArrayKey("D_PREDICTED")
        class ExampleModel(torch.nn.Module):
            # linear layer with all-ones weights, so the output is sum(a * b)
            def __init__(self):
                super(ExampleModel, self).__init__()
                self.linear = torch.nn.Linear(4, 1, False)
                self.linear.weight.data = torch.Tensor([1, 1, 1, 1])
            def forward(self, a, b):
                a = a.reshape(-1)
                b = b.reshape(-1)
                c_pred = self.linear(a * b)
                d_pred = c_pred * 2
                return d_pred
        model = ExampleModel()
        source = ExampleTorchTrainSource()
        predict = Predict(
            model=model,
            inputs={"a": a, "b": b},
            # "linear" captures the named submodule's output, 0 the return value
            outputs={"linear": c_pred, 0: d_pred},
            array_specs={
                c: ArraySpec(nonspatial=True),
                c_pred: ArraySpec(nonspatial=True),
                d_pred: ArraySpec(nonspatial=True),
            },
            spawn_subprocess=True,
        )
        pipeline = source + predict
        request = BatchRequest(
            {
                a: ArraySpec(roi=Roi((0, 0), (2, 2))),
                b: ArraySpec(roi=Roi((0, 0), (2, 2))),
                c: ArraySpec(nonspatial=True),
                c_pred: ArraySpec(nonspatial=True),
                d_pred: ArraySpec(nonspatial=True),
            }
        )
        # predict twice; results must be deterministic and match the closed form
        # sum(a * b) = 0*0 + 1*1 + 2*2 + 3*3 = 1 + 4 + 9
        with build(pipeline):
            batch1 = pipeline.request_batch(request)
            batch2 = pipeline.request_batch(request)
            assert np.isclose(batch1[c_pred].data, batch2[c_pred].data)
            assert np.isclose(batch1[c_pred].data, 1 + 4 + 9)
            assert np.isclose(batch2[d_pred].data, 2 * (1 + 4 + 9))
if not isinstance(torch, NoSuchModule):
    class ExampleModel(torch.nn.Module):
        """Minimal 2D conv model (1->1 channel, 3x3 kernel) used by the
        multiprocessing tests below; only defined when torch is importable."""
        def __init__(self):
            super(ExampleModel, self).__init__()
            self.linear = torch.nn.Conv2d(1, 1, 3)
        def forward(self, a):
            # add batch and channel dims for Conv2d, then strip them again
            a = a.unsqueeze(0).unsqueeze(0)
            pred = self.linear(a)
            a = a.squeeze(0).squeeze(0)
            pred = pred.squeeze(0).squeeze(0)
            return pred
@skipIf(isinstance(torch, NoSuchModule), "torch is not installed")
class TestTorchPredictMultiprocessing(ProviderTest):
    """Predict must work when driven by multi-worker nodes (Scan, PreCache)."""
    def test_scan(self):
        logging.getLogger("gunpowder.torch.nodes.predict").setLevel(logging.INFO)
        a = ArrayKey("A")
        pred = ArrayKey("PRED")
        model = ExampleModel()
        # 7x7 input yields a 5x5 valid conv output, offset by (1, 1)
        reference_request = BatchRequest()
        reference_request[a] = ArraySpec(roi=Roi((0, 0), (7, 7)))
        reference_request[pred] = ArraySpec(roi=Roi((1, 1), (5, 5)))
        source = ExampleTorchTrain2DSource()
        predict = Predict(
            model=model,
            inputs={"a": a},
            outputs={0: pred},
            array_specs={pred: ArraySpec()},
        )
        pipeline = source + predict + Scan(reference_request, num_workers=2)
        request = BatchRequest(
            {
                a: ArraySpec(roi=Roi((0, 0), (17, 17))),
                pred: ArraySpec(roi=Roi((0, 0), (15, 15))),
            }
        )
        # scan the full ROI in chunks with two workers
        with build(pipeline):
            batch = pipeline.request_batch(request)
            assert pred in batch
    def test_precache(self):
        logging.getLogger("gunpowder.torch.nodes.predict").setLevel(logging.INFO)
        a = ArrayKey("A")
        pred = ArrayKey("PRED")
        model = ExampleModel()
        reference_request = BatchRequest()
        reference_request[a] = ArraySpec(roi=Roi((0, 0), (7, 7)))
        reference_request[pred] = ArraySpec(roi=Roi((1, 1), (5, 5)))
        source = ExampleTorchTrain2DSource()
        predict = Predict(
            model=model,
            inputs={"a": a},
            outputs={0: pred},
            array_specs={pred: ArraySpec()},
        )
        pipeline = source + predict + PreCache(cache_size=3, num_workers=2)
        request = BatchRequest(
            {
                a: ArraySpec(roi=Roi((0, 0), (17, 17))),
                pred: ArraySpec(roi=Roi((0, 0), (15, 15))),
            }
        )
        # request through the pre-cache workers
        with build(pipeline):
            batch = pipeline.request_batch(request)
            assert pred in batch
| 9,228 | 29.259016 | 81 | py |
gunpowder | gunpowder-master/tests/cases/specified_location.py | # from .provider_test import ProviderTest, ExampleSource
from gunpowder import (
BatchProvider,
ArrayKeys,
ArraySpec,
Roi,
Batch,
Coordinate,
SpecifiedLocation,
build,
BatchRequest,
Array,
ArrayKey,
)
import numpy as np
import unittest
class ExampleSourceSpecifiedLocation(BatchProvider):
    """Source whose voxel values equal their flattened index, so the first
    value of a crop identifies the crop's position within the source."""
    def __init__(self, roi, voxel_size):
        self.voxel_size = Coordinate(voxel_size)
        self.roi = roi
        size = self.roi.shape / self.voxel_size
        # data.flat[i] == i: every voxel stores its own flattened index
        self.data = np.arange(np.prod(size)).reshape(size)
    def setup(self):
        self.provides(
            ArrayKeys.RAW, ArraySpec(roi=self.roi, voxel_size=self.voxel_size)
        )
    def provide(self, request):
        batch = Batch()
        spec = request[ArrayKeys.RAW].copy()
        spec.voxel_size = self.voxel_size
        # convert the requested world ROI into voxel coordinates
        size = spec.roi.shape / spec.voxel_size
        offset = spec.roi.offset / spec.voxel_size
        slce = tuple(slice(o, o + s) for o, s in zip(offset, size))
        batch.arrays[ArrayKeys.RAW] = Array(data=self.data[slce], spec=spec)
        return batch
class TestSpecifiedLocation(unittest.TestCase):
    """SpecifiedLocation must center requests on the given world coordinates,
    skipping locations whose ROI would not fit inside the source."""
    def setUp(self):
        ArrayKey("RAW")
    def test_simple(self):
        locations = [[0, 0, 0], [100, 100, 100], [91, 20, 20], [42, 24, 57]]
        pipeline = ExampleSourceSpecifiedLocation(
            roi=Roi((0, 0, 0), (100, 100, 100)), voxel_size=(1, 1, 1)
        ) + SpecifiedLocation(
            locations, choose_randomly=False, extra_data=None, jitter=None
        )
        with build(pipeline):
            batch = pipeline.request_batch(
                BatchRequest(
                    {ArrayKeys.RAW: ArraySpec(roi=Roi((0, 0, 0), (20, 20, 20)))}
                )
            )
            # first three locations are skipped (their 20^3 ROI would not fit)
            # fourth should start at [32, 14, 47] of self.data, whose flattened
            # index is 32 * 100 * 100 + 14 * 100 + 47 = 321447
            self.assertEqual(batch.arrays[ArrayKeys.RAW].data[0, 0, 0], 321447)
    def test_voxel_size(self):
        locations = [[0, 0, 0], [91, 20, 20], [42, 24, 57]]
        pipeline = ExampleSourceSpecifiedLocation(
            roi=Roi((0, 0, 0), (100, 100, 100)), voxel_size=(5, 2, 2)
        ) + SpecifiedLocation(
            locations, choose_randomly=False, extra_data=None, jitter=None
        )
        with build(pipeline):
            batch = pipeline.request_batch(
                BatchRequest(
                    {ArrayKeys.RAW: ArraySpec(roi=Roi((0, 0, 0), (20, 20, 20)))}
                )
            )
            # first location is skipped
            # second should start at [80/5, 10/2, 10/2] = [16, 5, 5]
            self.assertEqual(batch.arrays[ArrayKeys.RAW].data[0, 0, 0], 40255)
            batch = pipeline.request_batch(
                BatchRequest(
                    {ArrayKeys.RAW: ArraySpec(roi=Roi((0, 0, 0), (20, 20, 20)))}
                )
            )
            # third should start at [30/5, 14/2, 48/2] = [6, 7, 23]
            self.assertEqual(batch.arrays[ArrayKeys.RAW].data[0, 0, 0], 15374)
    def test_jitter_and_random(self):
        locations = [[0, 0, 0], [91, 20, 20], [42, 24, 57]]
        pipeline = ExampleSourceSpecifiedLocation(
            roi=Roi((0, 0, 0), (100, 100, 100)), voxel_size=(5, 2, 2)
        ) + SpecifiedLocation(
            locations, choose_randomly=True, extra_data=None, jitter=(5, 5, 5)
        )
        with build(pipeline):
            batch = pipeline.request_batch(
                BatchRequest(
                    {ArrayKeys.RAW: ArraySpec(roi=Roi((0, 0, 0), (20, 20, 20)))}
                )
            )
            # randomized jitter makes the exact value unpredictable;
            # no errors and a nonzero first voxel means passing
            self.assertTrue(batch.arrays[ArrayKeys.RAW].data[0, 0, 0] > 0)
| 3,726 | 32.276786 | 80 | py |
gunpowder | gunpowder-master/tests/cases/__init__.py | 0 | 0 | 0 | py | |
gunpowder | gunpowder-master/tests/cases/tensorflow_train.py | from .provider_test import ProviderTest
from gunpowder import (
ArraySpec,
ArrayKeys,
ArrayKey,
Array,
Roi,
BatchProvider,
Batch,
BatchRequest,
build,
)
from gunpowder.ext import tensorflow, NoSuchModule
from gunpowder.tensorflow import Train, Predict, LocalServer
import multiprocessing
import numpy as np
from unittest import skipIf
class ExampleTensorflowTrainSource(BatchProvider):
    """Provides fixed 2x2 float32 arrays under keys A and B."""
    def setup(self):
        spec = ArraySpec(
            roi=Roi((0, 0), (2, 2)),
            dtype=np.float32,
            interpolatable=True,
            voxel_size=(1, 1),
        )
        self.provides(ArrayKeys.A, spec)
        self.provides(ArrayKeys.B, spec)
    def provide(self, request):
        batch = Batch()
        # both arrays carry the constant values [[0, 1], [2, 3]]
        spec = self.spec[ArrayKeys.A]
        spec.roi = request[ArrayKeys.A].roi
        batch.arrays[ArrayKeys.A] = Array(
            np.array([[0, 1], [2, 3]], dtype=np.float32), spec
        )
        spec = self.spec[ArrayKeys.B]
        spec.roi = request[ArrayKeys.B].roi
        batch.arrays[ArrayKeys.B] = Array(
            np.array([[0, 1], [2, 3]], dtype=np.float32), spec
        )
        return batch
@skipIf(isinstance(tensorflow, NoSuchModule), "tensorflow is not installed")
class TestTensorflowTrain(ProviderTest):
    """End-to-end test of the tensorflow Train node: loss must decrease over
    iterations and training must resume from the saved checkpoint."""
    def create_meta_graph(self, meta_base):
        """Build a minimal TF1 graph in a subprocess and export its meta graph.
        :param meta_base: Base name (no extension) for meta graph path
        :return: list of tensor/op names ``[a, b, c, optimizer, loss]``
        """
        def mknet():
            import tensorflow as tf
            # create a tf graph: c = a * b * v with a trainable scalar v
            a = tf.placeholder(tf.float32, shape=(2, 2))
            b = tf.placeholder(tf.float32, shape=(2, 2))
            v = tf.Variable(1, dtype=tf.float32)
            c = a * b * v
            # dummy "loss"
            loss = tf.norm(c)
            # dummy optimizer
            opt = tf.train.AdamOptimizer()
            optimizer = opt.minimize(loss)
            tf.train.export_meta_graph(filename=meta_base + ".meta")
            with open(meta_base + ".names", "w") as f:
                for x in [a, b, c, optimizer, loss]:
                    f.write(x.name + "\n")
        # build the graph in a separate process, keeping this process free of
        # tensorflow session state
        mknet_proc = multiprocessing.Process(target=mknet)
        mknet_proc.start()
        mknet_proc.join()
        with open(meta_base + ".names") as f:
            names = [line.strip("\n") for line in f]
        return names
    def test_output(self):
        meta_base = self.path_to("tf_graph")
        ArrayKey("A")
        ArrayKey("B")
        ArrayKey("C")
        ArrayKey("GRADIENT_A")
        # create model meta graph file and get input/output names
        (a, b, c, optimizer, loss) = self.create_meta_graph(meta_base)
        source = ExampleTensorflowTrainSource()
        train = Train(
            meta_base,
            optimizer=optimizer,
            loss=loss,
            inputs={a: ArrayKeys.A, b: ArrayKeys.B},
            outputs={c: ArrayKeys.C},
            gradients={a: ArrayKeys.GRADIENT_A},
            save_every=100,
        )
        pipeline = source + train
        request = BatchRequest(
            {
                ArrayKeys.A: ArraySpec(roi=Roi((0, 0), (2, 2))),
                ArrayKeys.B: ArraySpec(roi=Roi((0, 0), (2, 2))),
                ArrayKeys.C: ArraySpec(roi=Roi((0, 0), (2, 2))),
                ArrayKeys.GRADIENT_A: ArraySpec(roi=Roi((0, 0), (2, 2))),
            }
        )
        # train for a couple of iterations; the first loss is the norm of
        # the element-wise product of the two constant inputs
        with build(pipeline):
            batch = pipeline.request_batch(request)
            self.assertAlmostEqual(batch.loss, 9.8994951)
            gradient_a = batch.arrays[ArrayKeys.GRADIENT_A].data
            self.assertTrue(gradient_a[0, 0] < gradient_a[0, 1])
            self.assertTrue(gradient_a[0, 1] < gradient_a[1, 0])
            self.assertTrue(gradient_a[1, 0] < gradient_a[1, 1])
            for i in range(200 - 1):
                loss1 = batch.loss
                batch = pipeline.request_batch(request)
                loss2 = batch.loss
                self.assertLess(loss2, loss1)
        # resume training from the checkpoint saved by the first build
        with build(pipeline):
            for i in range(100):
                loss1 = batch.loss
                batch = pipeline.request_batch(request)
                loss2 = batch.loss
                self.assertLess(loss2, loss1)
        # predict (currently disabled)
        # source = ExampleTensorflowTrainSource()
        # predict = Predict(
        #     meta_base + '_checkpoint_300',
        #     inputs={a: ArrayKeys.A, b: ArrayKeys.B},
        #     outputs={c: ArrayKeys.C},
        #     max_shared_memory=1024*1024)
        # pipeline = source + predict
        # request = BatchRequest({
        #     ArrayKeys.A: ArraySpec(roi=Roi((0, 0), (2, 2))),
        #     ArrayKeys.B: ArraySpec(roi=Roi((0, 0), (2, 2))),
        #     ArrayKeys.C: ArraySpec(roi=Roi((0, 0), (2, 2))),
        # })
        # with build(pipeline):
        #     prev_c = None
        #     for i in range(100):
        #         batch = pipeline.request_batch(request)
        #         c = batch.arrays[ArrayKeys.C].data
        #         if prev_c is not None:
        #             self.assertTrue(np.equal(c, prev_c))
        #         prev_c = c
| 5,108 | 28.362069 | 76 | py |
gunpowder | gunpowder-master/tests/cases/expected_failures.py | import gunpowder as gp
from gunpowder.nodes.batch_provider import BatchRequestError
from .helper_sources import ArraySource
from funlib.geometry import Roi, Coordinate
import numpy as np
import pytest
@pytest.mark.xfail()
def test_request_too_large():
    """Requesting far more than the source can provide should raise a
    BatchRequestError on every attempt; the whole test is marked xfail."""
    raw_key = gp.ArrayKey("RAW")
    source_array = gp.Array(
        np.ones((10, 10)),
        gp.ArraySpec(voxel_size=Coordinate(100, 100), interpolatable=True),
    )
    pipeline = (
        ArraySource(raw_key, source_array)
        + gp.Normalize(raw_key)
        + gp.RandomLocation()
        + gp.IntensityAugment(
            raw_key, scale_min=0, scale_max=0, shift_min=0.5, shift_max=0.5
        )
    )
    # ask for a ROI far larger than the 10x10-voxel source extent
    request = gp.BatchRequest()
    request.add(raw_key, (10000, 10000))
    with gp.build(pipeline):
        for _ in range(100):
            with pytest.raises(BatchRequestError):
                batch = pipeline.request_batch(request)
                batch.arrays[raw_key].data
| 970 | 24.552632 | 80 | py |
gunpowder | gunpowder-master/tests/cases/intensity_scale_shift.py | from .helper_sources import ArraySource
from gunpowder import (
IntensityScaleShift,
ArrayKey,
build,
Normalize,
Array,
ArraySpec,
Roi,
Coordinate,
BatchRequest,
)
import numpy as np
def test_shift():
    """IntensityScaleShift must compute value * scale + shift."""
    raw_key = ArrayKey("RAW")
    constant_two = Array(
        np.full((10, 10), 2, dtype=np.float32),
        ArraySpec(Roi((0, 0), (10, 10)), Coordinate(1, 1)),
    )
    pipeline = ArraySource(raw_key, constant_two) + IntensityScaleShift(
        raw_key, 0.5, 10
    )
    request = BatchRequest()
    request.add(raw_key, Coordinate(10, 10))
    with build(pipeline):
        data = pipeline.request_batch(request).arrays[raw_key].data
        # every voxel was 2, so every output voxel must be 2 * 0.5 + 10 == 11
        assert np.isclose(data.min(), 11)
        assert np.isclose(data.max(), 11)
| 776 | 20.583333 | 61 | py |
gunpowder | gunpowder-master/tests/cases/hdf5_write.py | from .provider_test import ProviderTest
from gunpowder import *
import numpy as np
from gunpowder.ext import h5py
class Hdf5WriteTestSource(BatchProvider):
    """Source whose voxels encode their own (z, y, x) voxel coordinates, so
    written datasets can be checked for correct placement."""
    def setup(self):
        self.provides(
            ArrayKeys.RAW,
            ArraySpec(
                roi=Roi((20000, 2000, 2000), (2000, 200, 200)), voxel_size=(20, 2, 2)
            ),
        )
        self.provides(
            ArrayKeys.GT_LABELS,
            ArraySpec(
                roi=Roi((20100, 2010, 2010), (1800, 180, 180)), voxel_size=(20, 2, 2)
            ),
        )
    def provide(self, request):
        batch = Batch()
        # have the pixels encode their position
        for array_key, spec in request.array_specs.items():
            roi = spec.roi
            roi_voxel = roi // self.spec[array_key].voxel_size
            # the z,y,x coordinates of the ROI; data has shape (3, z, y, x)
            meshgrids = np.meshgrid(
                range(roi_voxel.begin[0], roi_voxel.end[0]),
                range(roi_voxel.begin[1], roi_voxel.end[1]),
                range(roi_voxel.begin[2], roi_voxel.end[2]),
                indexing="ij",
            )
            data = np.array(meshgrids)
            spec = self.spec[array_key].copy()
            spec.roi = roi
            batch.arrays[array_key] = Array(data, spec)
        return batch
class TestHdf5Write(ProviderTest):
    """Hdf5Write must assemble scanned chunks into one HDF5 dataset with
    correct offset and resolution attributes."""
    def test_output(self):
        path = self.path_to("hdf5_write_test.hdf")
        source = Hdf5WriteTestSource()
        chunk_request = BatchRequest()
        chunk_request.add(ArrayKeys.RAW, (400, 30, 34))
        chunk_request.add(ArrayKeys.GT_LABELS, (200, 10, 14))
        # only RAW is written out; GT_LABELS passes through unwritten
        pipeline = (
            source
            + Hdf5Write({ArrayKeys.RAW: "arrays/raw"}, output_filename=path)
            + Scan(chunk_request)
        )
        with build(pipeline):
            raw_spec = pipeline.spec[ArrayKeys.RAW]
            labels_spec = pipeline.spec[ArrayKeys.GT_LABELS]
            full_request = BatchRequest(
                {ArrayKeys.RAW: raw_spec, ArrayKeys.GT_LABELS: labels_spec}
            )
            batch = pipeline.request_batch(full_request)
        # assert that stored HDF dataset equals batch array
        with h5py.File(path, "r") as f:
            ds = f["arrays/raw"]
            batch_raw = batch.arrays[ArrayKeys.RAW]
            stored_raw = np.array(ds)
            self.assertEqual(
                stored_raw.shape[-3:],
                batch_raw.spec.roi.shape // batch_raw.spec.voxel_size,
            )
            self.assertEqual(tuple(ds.attrs["offset"]), batch_raw.spec.roi.offset)
            self.assertEqual(tuple(ds.attrs["resolution"]), batch_raw.spec.voxel_size)
            self.assertTrue((stored_raw == batch.arrays[ArrayKeys.RAW].data).all())
| 2,939 | 31.307692 | 86 | py |
gunpowder | gunpowder-master/tests/cases/provider_test.py | from gunpowder import *
import copy
import errno
import os
import shutil
import unittest
from datetime import datetime
from tempfile import mkdtemp
from warnings import warn

import numpy as np
class ExampleSource(BatchProvider):
    """Provides a zero-filled uint8 RAW array over a fixed 100^3 ROI."""
    def setup(self):
        raw_spec = ArraySpec(
            roi=Roi((0, 0, 0), (100, 100, 100)),
            voxel_size=Coordinate((1, 1, 1)),
            dtype=np.uint8,
            interpolatable=True,
        )
        self.provides(ArrayKeys.RAW, raw_spec)
    def provide(self, request):
        requested_roi = request[ArrayKeys.RAW].roi
        # shape of the requested region in voxels
        shape_voxels = requested_roi.shape / self.spec[ArrayKeys.RAW].voxel_size
        out_spec = copy.deepcopy(self.spec[ArrayKeys.RAW])
        out_spec.roi = requested_roi
        batch = Batch()
        batch.arrays[ArrayKeys.RAW] = Array(
            np.zeros(shape_voxels, dtype=np.uint8), out_spec
        )
        return batch
class TestWithTempFiles(unittest.TestCase):
    """
    Usage:
    If your test case dumps out any files, use ``self.path_to("path", "to", "my.file")`` to get the path to a directory
    in your temporary directory. This will be namespaced by the test class, timestamp and test method, e.g.
    >>> self.path_to("path", "to", "my.file")
    /tmp/gunpowder_MyTestCase_2018-03-08T18:32:18.967927_r4nd0m/my_test_method/path/to/my.file
    Each test method's data will be deleted after the test case is run (regardless of pass, fail or error).
    To disable test method data deletion, set ``self._cleanup = False`` anywhere in the test.
    The test case directory will be deleted after every test method is run, unless there is data left in it.
    Any files written directly to the class output directory (rather than the test output subdirectory) should therefore
    be explicitly removed before tearDownClass is called.
    To disable data deletion for the whole class (the test case directory and all tests), set ``_cleanup = False`` in the
    class definition. N.B. doing this in a method (``type(self)._cleanup = False``) will have unexpected results
    depending on the order of test execution.
    Subclasses implementing their own setUp, setUpClass, tearDown and tearDownClass should explicitly call the
    ``super`` method in the method definition.
    """
    # class-level temporary output directory; set in setUpClass
    _output_root = ""
    # set to False to keep data on disk after the tests ran
    _cleanup = True
    def path_to(self, *args):
        """Return a path namespaced by the current test method."""
        return type(self).path_to_cls(self._testMethodName, *args)
    @classmethod
    def path_to_cls(cls, *args):
        """Return a path inside the class-level temporary output directory."""
        return os.path.join(cls._output_root, *args)
    @classmethod
    def setUpClass(cls):
        # one fresh temporary directory per test class, timestamped for clarity
        timestamp = datetime.now().isoformat()
        cls._output_root = mkdtemp(
            prefix="gunpowder_{}_{}_".format(cls.__name__, timestamp)
        )
    def setUp(self):
        os.mkdir(self.path_to())
    def tearDown(self):
        path = self.path_to()
        try:
            if self._cleanup:
                shutil.rmtree(path)
            else:
                warn("Directory {} was not deleted".format(path))
        except OSError as e:
            # A missing directory is fine (the test may have removed it
            # already); anything else is a real error. Compare errno values
            # instead of matching "[Errno 2]" in the message, which is fragile
            # and could false-match on a path containing that substring.
            if e.errno != errno.ENOENT:
                raise
    @classmethod
    def tearDownClass(cls):
        try:
            if cls._cleanup:
                os.rmdir(cls.path_to_cls())
            else:
                warn("Directory {} was not deleted".format(cls.path_to_cls()))
        except OSError as e:
            # ENOTEMPTY: data was left behind deliberately -> warn, don't fail
            if e.errno == errno.ENOTEMPTY:
                warn(
                    "Directory {} could not be deleted as it still had data in it".format(
                        cls.path_to_cls()
                    )
                )
            elif e.errno == errno.ENOENT:
                pass
            else:
                raise
class ProviderTest(TestWithTempFiles):
    """Base class for provider tests: registers common array/graph keys and
    sets up a default source and request."""
    def setUp(self):
        super(ProviderTest, self).setUp()
        # create some common array keys to be used by concrete tests
        ArrayKey("RAW")
        ArrayKey("GT_LABELS")
        ArrayKey("GT_AFFINITIES")
        ArrayKey("GT_AFFINITIES_MASK")
        ArrayKey("GT_MASK")
        ArrayKey("GT_IGNORE")
        ArrayKey("LOSS_SCALE")
        GraphKey("GT_GRAPH")
        # default pipeline source and a small RAW request for convenience
        self.test_source = ExampleSource()
        self.test_request = BatchRequest()
        self.test_request[ArrayKeys.RAW] = ArraySpec(
            roi=Roi((20, 20, 20), (10, 10, 10))
        )
| 4,330 | 31.810606 | 121 | py |
gunpowder | gunpowder-master/tests/cases/downsample.py | from .helper_sources import ArraySource
from gunpowder import *
import numpy as np
def test_output():
    """DownSample must subsample arrays by the given factor while leaving the
    originals untouched; voxel values encode position, making this checkable."""
    raw = ArrayKey("RAW")
    raw_downsampled = ArrayKey("RAW_DOWNSAMPLED")
    gt = ArrayKey("GT")
    gt_downsampled = ArrayKey("GT_LABELS_DOWNSAMPLED")
    request = BatchRequest()
    request.add(raw, (200, 200, 200))
    request.add(raw_downsampled, (120, 120, 120))
    request.add(gt, (200, 200, 200))
    request.add(gt_downsampled, (200, 200, 200))
    # data[z, y, x] = z + y + x (voxel coordinates)
    meshgrids = np.meshgrid(range(0, 250), range(0, 250), range(0, 250), indexing="ij")
    data = meshgrids[0] + meshgrids[1] + meshgrids[2]
    # raw has an extra channel dimension (two identical channels)
    raw_source = ArraySource(
        raw,
        Array(
            np.stack([data, data]),
            ArraySpec(roi=Roi((0, 0, 0), (1000, 1000, 1000)), voxel_size=(4, 4, 4)),
        ),
    )
    gt_source = ArraySource(
        gt,
        Array(
            data,
            ArraySpec(roi=Roi((0, 0, 0), (1000, 1000, 1000)), voxel_size=(4, 4, 4)),
        ),
    )
    pipeline = (
        (raw_source, gt_source)
        + MergeProvider()
        + DownSample(raw, 2, raw_downsampled)
        + DownSample(gt, (2, 2, 2), gt_downsampled)
    )
    with build(pipeline):
        batch = pipeline.request_batch(request)
        for array_key, array in batch.arrays.items():
            # assert that pixels encode their position for supposedly unaltered
            # arrays
            if array_key in [raw, gt]:
                # the z,y,x coordinates of the ROI
                roi = array.spec.roi / 4
                meshgrids = np.meshgrid(
                    range(roi.begin[0], roi.end[0]),
                    range(roi.begin[1], roi.end[1]),
                    range(roi.begin[2], roi.end[2]),
                    indexing="ij",
                )
                data = meshgrids[0] + meshgrids[1] + meshgrids[2]
                if array_key == raw:
                    assert np.array_equal(array.data[0], data), str(array_key)
                else:
                    assert np.array_equal(array.data, data), str(array_key)
            elif array_key == raw_downsampled:
                assert array.data[0, 0, 0, 0] == 30
                assert array.data[1, 1, 0, 0] == 32
            elif array_key == gt_downsampled:
                assert array.data[0, 0, 0] == 0
                assert array.data[1, 0, 0] == 2
            else:
                assert False, "unexpected array type"
| 2,324 | 29.592105 | 87 | py |
gunpowder | gunpowder-master/tests/cases/hdf5_source.py | from unittest import skipIf
from .provider_test import ProviderTest
from gunpowder import *
import numpy as np
from gunpowder.ext import h5py, zarr, ZarrFile, NoSuchModule
class Hdf5LikeSourceTestMixin(object):
    """This class is to be used as a mixin for ProviderTest classes testing HDF5, N5 and Zarr
    batch providers.
    Subclasses must define ``extension`` and ``SourceUnderTest`` class variables, and an
    ``_open_writable_file(self, path)`` method. See TestHdf5Source for examples.
    """
    # file extension of the container format under test, e.g. "hdf"
    extension = None
    # the Hdf5-like source class to instantiate, e.g. Hdf5Source
    SourceUnderTest = None
    def _open_writable_file(self, path):
        raise NotImplementedError("_open_writable_file should be overridden")
    def _create_dataset(self, data_file, key, data, chunks=None, **kwargs):
        """Create dataset ``key`` filled with ``data``; extra keyword
        arguments become dataset attributes (e.g. ``resolution``)."""
        chunks = chunks or data.shape
        d = data_file.create_dataset(
            key, shape=data.shape, dtype=data.dtype, chunks=chunks
        )
        d[:] = data
        for key, value in kwargs.items():
            d.attrs[key] = value
    def test_output_2d(self):
        path = self.path_to("test_{0}_source.{0}".format(self.extension))
        # create a test file with float (interpolatable) and int (label) data
        with self._open_writable_file(path) as f:
            self._create_dataset(f, "raw", np.zeros((100, 100), dtype=np.float32))
            self._create_dataset(
                f, "raw_low", np.zeros((10, 10), dtype=np.float32), resolution=(10, 10)
            )
            self._create_dataset(f, "seg", np.ones((100, 100), dtype=np.uint64))
        # read arrays
        raw = ArrayKey("RAW")
        raw_low = ArrayKey("RAW_LOW")
        seg = ArrayKey("SEG")
        source = self.SourceUnderTest(
            path, {raw: "raw", raw_low: "raw_low", seg: "seg"}
        )
        with build(source):
            batch = source.request_batch(
                BatchRequest(
                    {
                        raw: ArraySpec(roi=Roi((0, 0), (100, 100))),
                        raw_low: ArraySpec(roi=Roi((0, 0), (100, 100))),
                        seg: ArraySpec(roi=Roi((0, 0), (100, 100))),
                    }
                )
            )
            # float arrays are interpolatable, integer label arrays are not
            self.assertTrue(batch.arrays[raw].spec.interpolatable)
            self.assertTrue(batch.arrays[raw_low].spec.interpolatable)
            self.assertFalse(batch.arrays[seg].spec.interpolatable)
    def test_output_3d(self):
        path = self.path_to("test_{0}_source.{0}".format(self.extension))
        # create a test file
        with self._open_writable_file(path) as f:
            self._create_dataset(f, "raw", np.zeros((100, 100, 100), dtype=np.float32))
            self._create_dataset(
                f,
                "raw_low",
                np.zeros((10, 10, 10), dtype=np.float32),
                resolution=(10, 10, 10),
            )
            self._create_dataset(f, "seg", np.ones((100, 100, 100), dtype=np.uint64))
        # read arrays
        raw = ArrayKey("RAW")
        raw_low = ArrayKey("RAW_LOW")
        seg = ArrayKey("SEG")
        source = self.SourceUnderTest(
            path, {raw: "raw", raw_low: "raw_low", seg: "seg"}
        )
        with build(source):
            batch = source.request_batch(
                BatchRequest(
                    {
                        raw: ArraySpec(roi=Roi((0, 0, 0), (100, 100, 100))),
                        raw_low: ArraySpec(roi=Roi((0, 0, 0), (100, 100, 100))),
                        seg: ArraySpec(roi=Roi((0, 0, 0), (100, 100, 100))),
                    }
                )
            )
            self.assertTrue(batch.arrays[raw].spec.interpolatable)
            self.assertTrue(batch.arrays[raw_low].spec.interpolatable)
            self.assertFalse(batch.arrays[seg].spec.interpolatable)
class TestHdf5Source(ProviderTest, Hdf5LikeSourceTestMixin):
    """Runs the Hdf5LikeSourceTestMixin tests against Hdf5Source."""
    extension = "hdf"
    SourceUnderTest = Hdf5Source
    def _open_writable_file(self, path):
        return h5py.File(path, "w")
@skipIf(isinstance(zarr, NoSuchModule), "zarr is not installed")
class TestZarrSource(ProviderTest, Hdf5LikeSourceTestMixin):
    """Runs the Hdf5LikeSourceTestMixin tests against ZarrSource."""
    extension = "zarr"
    SourceUnderTest = ZarrSource
    def _open_writable_file(self, path):
        return ZarrFile(path, mode="w")
| 4,152 | 34.194915 | 93 | py |
gunpowder | gunpowder-master/tests/cases/upsample.py | from .helper_sources import ArraySource
from gunpowder import *
import numpy as np
def test_output():
    """UpSample must repeat each voxel by the given factor while leaving the
    originals untouched; voxel values encode position, making this checkable."""
    raw = ArrayKey("RAW")
    raw_upsampled = ArrayKey("RAW_UPSAMPLED")
    gt = ArrayKey("GT")
    gt_upsampled = ArrayKey("GT_LABELS_UPSAMPLED")
    request = BatchRequest()
    request.add(raw, (200, 200, 200))
    request.add(raw_upsampled, (124, 124, 124))
    request.add(gt, (200, 200, 200))
    request.add(gt_upsampled, (200, 200, 200))
    # data[z, y, x] = z + y + x in world coordinates (voxel size 4)
    meshgrids = np.meshgrid(
        range(0, 1000, 4), range(0, 1000, 4), range(0, 1000, 4), indexing="ij"
    )
    data = meshgrids[0] + meshgrids[1] + meshgrids[2]
    # raw has an extra channel dimension (two identical channels)
    raw_source = ArraySource(
        raw,
        Array(
            np.stack([data, data]),
            ArraySpec(roi=Roi((0, 0, 0), (1000, 1000, 1000)), voxel_size=(4, 4, 4)),
        ),
    )
    gt_source = ArraySource(
        gt,
        Array(
            data,
            ArraySpec(roi=Roi((0, 0, 0), (1000, 1000, 1000)), voxel_size=(4, 4, 4)),
        ),
    )
    pipeline = (
        (raw_source, gt_source)
        + MergeProvider()
        + UpSample(raw, 2, raw_upsampled)
        + UpSample(gt, 2, gt_upsampled)
    )
    with build(pipeline):
        batch = pipeline.request_batch(request)
        for array_key, array in batch.arrays.items():
            # assert that pixels encode their position for supposedly unaltered
            # arrays
            if array_key in [raw, gt]:
                # the z,y,x coordinates of the ROI
                meshgrids = np.meshgrid(
                    range(array.spec.roi.begin[0], array.spec.roi.end[0], 4),
                    range(array.spec.roi.begin[1], array.spec.roi.end[1], 4),
                    range(array.spec.roi.begin[2], array.spec.roi.end[2], 4),
                    indexing="ij",
                )
                data = meshgrids[0] + meshgrids[1] + meshgrids[2]
                if array_key == raw:
                    assert np.array_equal(array.data[0], data) and np.array_equal(
                        array.data[1], data
                    ), f"{array.data, data}"
                else:
                    assert np.array_equal(array.data, data), str(array_key)
            elif array_key == raw_upsampled:
                # each source voxel value is repeated twice along each axis
                assert array.data[0, 0, 0, 0] == 108
                assert array.data[1, 1, 0, 0] == 112
                assert array.data[0, 2, 0, 0] == 112
                assert array.data[0, 3, 0, 0] == 116
            elif array_key == gt_upsampled:
                assert array.data[0, 0, 0] == 0
                assert array.data[1, 0, 0] == 0
                assert array.data[2, 0, 0] == 4
                assert array.data[3, 0, 0] == 4
            else:
                assert False, "unexpected array type"
| 2,628 | 31.060976 | 84 | py |
gunpowder | gunpowder-master/tests/cases/normalize.py | from .provider_test import ProviderTest
from gunpowder import *
class TestNormalize(ProviderTest):
    """Normalize must map raw intensities into the range [0, 1]."""
    def test_output(self):
        pipeline = self.test_source + Normalize(ArrayKeys.RAW)
        with build(pipeline):
            normalized = pipeline.request_batch(self.test_request).arrays[
                ArrayKeys.RAW
            ]
            self.assertGreaterEqual(normalized.data.min(), 0)
            self.assertLessEqual(normalized.data.max(), 1)
| 429 | 27.666667 | 62 | py |
gunpowder | gunpowder-master/tests/cases/pad.py | from .provider_test import ProviderTest
from gunpowder import (
BatchProvider,
BatchRequest,
Batch,
ArrayKeys,
ArraySpec,
Roi,
Coordinate,
GraphKey,
GraphKeys,
GraphSpec,
Array,
ArrayKey,
Pad,
build,
)
import numpy as np
class ExampleSourcePad(BatchProvider):
    """Provides TEST_LABELS (stripes of value 100 along axis 1) and declares
    TEST_GRAPH."""
    def setup(self):
        self.provides(
            ArrayKeys.TEST_LABELS,
            ArraySpec(roi=Roi((200, 20, 20), (1800, 180, 180)), voxel_size=(20, 2, 2)),
        )
        self.provides(
            GraphKeys.TEST_GRAPH, GraphSpec(roi=Roi((200, 20, 20), (1800, 180, 180)))
        )
    def provide(self, request):
        # NOTE(review): only TEST_LABELS is served here; a request for
        # TEST_GRAPH data would not be answered by this source
        batch = Batch()
        roi_array = request[ArrayKeys.TEST_LABELS].roi
        roi_voxel = roi_array // self.spec[ArrayKeys.TEST_LABELS].voxel_size
        data = np.zeros(roi_voxel.shape, dtype=np.uint32)
        # every second voxel slice along axis 1 gets label 100
        data[:, ::2] = 100
        spec = self.spec[ArrayKeys.TEST_LABELS].copy()
        spec.roi = roi_array
        batch.arrays[ArrayKeys.TEST_LABELS] = Array(data, spec=spec)
        return batch
class TestPad(ProviderTest):
    """Pad must grow the provided ROIs by the given amounts and fill new
    voxels with the given value."""
    def test_output(self):
        graph = GraphKey("TEST_GRAPH")
        labels = ArrayKey("TEST_LABELS")
        pipeline = (
            ExampleSourcePad()
            + Pad(labels, Coordinate((20, 20, 20)), value=1)
            + Pad(graph, Coordinate((10, 10, 10)))
        )
        with build(pipeline):
            # the provided ROIs must have grown by the padding amounts
            self.assertTrue(
                pipeline.spec[labels].roi == Roi((180, 0, 0), (1840, 220, 220))
            )
            self.assertTrue(
                pipeline.spec[graph].roi == Roi((190, 10, 10), (1820, 200, 200))
            )
            batch = pipeline.request_batch(
                BatchRequest({labels: ArraySpec(Roi((180, 0, 0), (20, 20, 20)))})
            )
            # the request lies in the padded region: padding value 1 over a
            # 1x10x10-voxel slab sums to 100
            self.assertEqual(np.sum(batch.arrays[labels].data), 1 * 10 * 10)
| 1,877 | 25.083333 | 87 | py |
gunpowder | gunpowder-master/tests/cases/precache.py | from .helper_sources import ArraySource
from gunpowder import *
import pytest
import numpy as np
import time
class Delay(BatchFilter):
def __init__(self, delay: float = 1):
self.delay = delay
def prepare(self, request):
time.sleep(self.delay)
return request
def process(self, batch, request):
pass
@pytest.mark.xfail(reason="Speedup is often dependent on hardware")
def test_speedup():
delay = 0.2
n_requests = 16
a_workers = 2
b_workers = 8
perfect_speedup = a_workers / b_workers * delay * n_requests
raw_key = ArrayKey("RAW")
raw_array = Array(
np.zeros([100, 100, 100], dtype=np.uint8),
ArraySpec(
roi=Roi((0, 0, 0), (100, 100, 100)),
voxel_size=Coordinate((1, 1, 1)),
dtype=np.uint8,
interpolatable=True,
),
)
test_source = ArraySource(raw_key, raw_array)
pipeline_a = test_source + Delay() + PreCache(num_workers=a_workers)
pipeline_b = test_source + Delay() + PreCache(num_workers=b_workers)
test_request = BatchRequest()
test_request[raw_key] = ArraySpec(roi=Roi((20, 20, 20), (10, 10, 10)))
with build(pipeline_a):
start = time.time()
for _ in range(n_requests):
batch = pipeline_a.request_batch(test_request)
assert batch.arrays[raw_key].spec.roi == test_request[raw_key].roi
# should be done in a bit more than 4 seconds, certainly less than 8
t_a_1 = time.time() - start
# change request
test_request[raw_key].roi = test_request[raw_key].roi.shift(Coordinate(1, 1, 1))
start = time.time()
for _ in range(n_requests):
batch = pipeline_a.request_batch(test_request)
assert batch.arrays[raw_key].spec.roi == test_request[raw_key].roi
# should be done in a bit more than 4 seconds
t_a_2 = time.time() - start
with build(pipeline_b):
start = time.time()
for _ in range(n_requests):
batch = pipeline_b.request_batch(test_request)
assert batch.arrays[raw_key].spec.roi == test_request[raw_key].roi
# should be done in a bit more than 4 seconds, certainly less than 8
t_b_1 = time.time() - start
# change request
test_request[raw_key].roi = test_request[raw_key].roi.shift(Coordinate(1, 1, 1))
start = time.time()
for _ in range(n_requests):
batch = pipeline_b.request_batch(test_request)
assert batch.arrays[raw_key].spec.roi == test_request[raw_key].roi
# should be done in a bit more than 4 seconds
t_b_2 = time.time() - start
assert t_a_1 - t_b_1 > perfect_speedup / 2, (t_a_1 - t_b_1, perfect_speedup)
assert t_a_2 - t_b_2 > perfect_speedup / 2, (t_a_2 - t_b_2, perfect_speedup)
| 2,853 | 29.688172 | 88 | py |
gunpowder | gunpowder-master/tests/cases/shift_augment.py | import unittest
import numpy as np
import random
import h5py
import logging
import sys
import os
from gunpowder import (
ArrayKey,
ArraySpec,
GraphKey,
GraphSpec,
Graph,
Node,
RandomLocation,
Coordinate,
Roi,
BatchRequest,
Hdf5Source,
ShiftAugment,
CsvPointsSource,
MergeProvider,
build,
)
from gunpowder.pipeline import PipelineRequestError
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class TestShiftAugment2D(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.fake_points_file = "shift_test.csv"
cls.fake_data_file = "shift_test.hdf5"
random.seed(1234)
np.random.seed(1234)
cls.fake_data = np.array([[i + j for i in range(100)] for j in range(100)])
with h5py.File(cls.fake_data_file, "w") as f:
f.create_dataset("testdata", shape=cls.fake_data.shape, data=cls.fake_data)
cls.fake_points = np.random.randint(0, 100, size=(2, 2))
with open(cls.fake_points_file, "w") as f:
for point in cls.fake_points:
f.write(str(point[0]) + "\t" + str(point[1]) + "\n")
def setUp(self):
random.seed(12345)
np.random.seed(12345)
@classmethod
def tearDownClass(cls):
os.remove(cls.fake_data_file)
os.remove(cls.fake_points_file)
##################
# full pipeline #
##################
def test_prepare1(self):
key = ArrayKey("TEST_ARRAY")
spec = ArraySpec(voxel_size=Coordinate((1, 1)), interpolatable=True)
hdf5_source = Hdf5Source(
self.fake_data_file, {key: "testdata"}, array_specs={key: spec}
)
request = BatchRequest()
shape = Coordinate((3, 3))
request.add(key, shape, voxel_size=Coordinate((1, 1)))
shift_node = ShiftAugment(sigma=1, shift_axis=0)
with build((hdf5_source + shift_node)):
shift_node.prepare(request)
self.assertTrue(shift_node.ndim == 2)
self.assertTrue(shift_node.shift_sigmas == tuple([0.0, 1.0]))
def test_prepare2(self):
key = ArrayKey("TEST_ARRAY")
spec = ArraySpec(voxel_size=Coordinate((1, 1)), interpolatable=True)
hdf5_source = Hdf5Source(
self.fake_data_file, {key: "testdata"}, array_specs={key: spec}
)
request = BatchRequest()
shape = Coordinate((3, 3))
request.add(key, shape)
shift_node = ShiftAugment(sigma=1, shift_axis=0)
with build((hdf5_source + shift_node)):
shift_node.prepare(request)
self.assertTrue(shift_node.ndim == 2)
self.assertTrue(shift_node.shift_sigmas == tuple([0.0, 1.0]))
def test_pipeline1(self):
key = ArrayKey("TEST_ARRAY")
spec = ArraySpec(voxel_size=Coordinate((2, 1)), interpolatable=True)
hdf5_source = Hdf5Source(
self.fake_data_file, {key: "testdata"}, array_specs={key: spec}
)
request = BatchRequest()
shape = Coordinate((3, 3))
request.add(key, shape, voxel_size=Coordinate((3, 1)))
shift_node = ShiftAugment(prob_slip=0.2, prob_shift=0.2, sigma=1, shift_axis=0)
with build((hdf5_source + shift_node)) as b:
with self.assertRaises(PipelineRequestError):
b.request_batch(request)
def test_pipeline2(self):
key = ArrayKey("TEST_ARRAY")
spec = ArraySpec(voxel_size=Coordinate((3, 1)), interpolatable=True)
hdf5_source = Hdf5Source(
self.fake_data_file, {key: "testdata"}, array_specs={key: spec}
)
request = BatchRequest()
shape = Coordinate((3, 3))
request[key] = ArraySpec(roi=Roi((9, 9), shape), voxel_size=Coordinate((3, 1)))
shift_node = ShiftAugment(prob_slip=0.2, prob_shift=0.2, sigma=1, shift_axis=0)
with build((hdf5_source + shift_node)) as b:
b.request_batch(request)
def test_pipeline3(self):
array_key = ArrayKey("TEST_ARRAY")
points_key = GraphKey("TEST_POINTS")
voxel_size = Coordinate((1, 1))
spec = ArraySpec(voxel_size=voxel_size, interpolatable=True)
hdf5_source = Hdf5Source(
self.fake_data_file, {array_key: "testdata"}, array_specs={array_key: spec}
)
csv_source = CsvPointsSource(
self.fake_points_file,
points_key,
GraphSpec(roi=Roi(shape=Coordinate((100, 100)), offset=(0, 0))),
)
request = BatchRequest()
shape = Coordinate((60, 60))
request.add(array_key, shape, voxel_size=Coordinate((1, 1)))
request.add(points_key, shape)
shift_node = ShiftAugment(prob_slip=0.2, prob_shift=0.2, sigma=4, shift_axis=0)
pipeline = (
(hdf5_source, csv_source)
+ MergeProvider()
+ RandomLocation(ensure_nonempty=points_key)
+ shift_node
)
with build(pipeline) as b:
request = b.request_batch(request)
# print(request[points_key])
target_vals = [self.fake_data[point[0]][point[1]] for point in self.fake_points]
result_data = request[array_key].data
result_points = list(request[points_key].nodes)
result_vals = [
result_data[int(point.location[0])][int(point.location[1])]
for point in result_points
]
for result_val in result_vals:
self.assertTrue(
result_val in target_vals,
msg="result value {} at points {} not in target values {} at points {}".format(
result_val,
list(result_points),
target_vals,
self.fake_points,
),
)
##################
# shift_and_crop #
##################
def test_shift_and_crop_static(self):
shift_node = ShiftAugment(sigma=1, shift_axis=0)
shift_node.ndim = 2
upstream_arr = np.arange(16).reshape(4, 4)
sub_shift_array = np.zeros(8, dtype=int).reshape(4, 2)
roi_shape = (4, 4)
voxel_size = Coordinate((1, 1))
downstream_arr = np.arange(16).reshape(4, 4)
result = shift_node.shift_and_crop(
upstream_arr, roi_shape, sub_shift_array, voxel_size
)
self.assertTrue(np.array_equal(result, downstream_arr))
def test_shift_and_crop1(self):
shift_node = ShiftAugment(sigma=1, shift_axis=0)
shift_node.ndim = 2
upstream_arr = np.arange(16).reshape(4, 4)
sub_shift_array = np.zeros(8, dtype=int).reshape(4, 2)
sub_shift_array[:, 1] = np.array([0, -1, 1, 0], dtype=int)
roi_shape = (4, 2)
voxel_size = Coordinate((1, 1))
downstream_arr = np.array([[1, 2], [6, 7], [8, 9], [13, 14]], dtype=int)
result = shift_node.shift_and_crop(
upstream_arr, roi_shape, sub_shift_array, voxel_size
)
self.assertTrue(np.array_equal(result, downstream_arr))
def test_shift_and_crop2(self):
shift_node = ShiftAugment(sigma=1, shift_axis=0)
shift_node.ndim = 2
upstream_arr = np.arange(16).reshape(4, 4)
sub_shift_array = np.zeros(8, dtype=int).reshape(4, 2)
sub_shift_array[:, 1] = np.array([0, -1, -2, 0], dtype=int)
roi_shape = (4, 2)
voxel_size = Coordinate((1, 1))
downstream_arr = np.array([[0, 1], [5, 6], [10, 11], [12, 13]], dtype=int)
result = shift_node.shift_and_crop(
upstream_arr, roi_shape, sub_shift_array, voxel_size
)
self.assertTrue(np.array_equal(result, downstream_arr))
def test_shift_and_crop3(self):
shift_node = ShiftAugment(sigma=1, shift_axis=1)
shift_node.ndim = 2
upstream_arr = np.arange(16).reshape(4, 4)
sub_shift_array = np.zeros(8, dtype=int).reshape(4, 2)
sub_shift_array[:, 0] = np.array([0, 1, 0, 2], dtype=int)
roi_shape = (2, 4)
voxel_size = Coordinate((1, 1))
downstream_arr = np.array([[8, 5, 10, 3], [12, 9, 14, 7]], dtype=int)
result = shift_node.shift_and_crop(
upstream_arr, roi_shape, sub_shift_array, voxel_size
)
# print(result)
self.assertTrue(np.array_equal(result, downstream_arr))
def test_shift_and_crop4(self):
shift_node = ShiftAugment(sigma=1, shift_axis=1)
shift_node.ndim = 2
upstream_arr = np.arange(16).reshape(4, 4)
sub_shift_array = np.zeros(8, dtype=int).reshape(4, 2)
sub_shift_array[:, 0] = np.array([0, 2, 0, 4], dtype=int)
roi_shape = (4, 4)
voxel_size = Coordinate((2, 1))
downstream_arr = np.array([[8, 5, 10, 3], [12, 9, 14, 7]], dtype=int)
result = shift_node.shift_and_crop(
upstream_arr, roi_shape, sub_shift_array, voxel_size
)
# print(result)
self.assertTrue(np.array_equal(result, downstream_arr))
result = shift_node.shift_and_crop(
upstream_arr, roi_shape, sub_shift_array, voxel_size
)
# print(result)
self.assertTrue(np.array_equal(result, downstream_arr))
##################
# shift_points #
##################
@staticmethod
def points_equal(vertices1, vertices2):
vs1 = sorted(list(vertices1), key=lambda v: tuple(v.location))
vs2 = sorted(list(vertices2), key=lambda v: tuple(v.location))
for v1, v2 in zip(vs1, vs2):
if not v1.id == v2.id:
print(f"{vs1}, {vs2}")
return False
if not all(np.isclose(v1.location, v2.location)):
print(f"{vs1}, {vs2}")
return False
return True
def test_points_equal(self):
points1 = [Node(id=1, location=np.array([0, 1]))]
points2 = [Node(id=1, location=np.array([0, 1]))]
self.assertTrue(self.points_equal(points1, points2))
points1 = [Node(id=2, location=np.array([1, 2]))]
points2 = [Node(id=2, location=np.array([2, 1]))]
self.assertFalse(self.points_equal(points1, points2))
def test_shift_points1(self):
data = [Node(id=1, location=np.array([0, 1]))]
spec = GraphSpec(Roi(offset=(0, 0), shape=(5, 5)))
points = Graph(data, [], spec)
request_roi = Roi(offset=(0, 1), shape=(5, 3))
shift_array = np.array([[0, -1], [0, -1], [0, 0], [0, 0], [0, 1]], dtype=int)
lcm_voxel_size = Coordinate((1, 1))
shifted_points = Graph([], [], GraphSpec(request_roi))
result = ShiftAugment.shift_points(
points,
request_roi,
shift_array,
shift_axis=0,
lcm_voxel_size=lcm_voxel_size,
)
# print(result)
self.assertTrue(self.points_equal(result.nodes, shifted_points.nodes))
self.assertTrue(result.spec == GraphSpec(request_roi))
def test_shift_points2(self):
data = [Node(id=1, location=np.array([0, 1]))]
spec = GraphSpec(Roi(offset=(0, 0), shape=(5, 5)))
points = Graph(data, [], spec)
request_roi = Roi(offset=(0, 1), shape=(5, 3))
shift_array = np.array([[0, 0], [0, -1], [0, 0], [0, 0], [0, 1]], dtype=int)
lcm_voxel_size = Coordinate((1, 1))
result = ShiftAugment.shift_points(
points,
request_roi,
shift_array,
shift_axis=0,
lcm_voxel_size=lcm_voxel_size,
)
# print("test 2", result.data, data)
self.assertTrue(self.points_equal(result.nodes, data))
self.assertTrue(result.spec == GraphSpec(request_roi))
def test_shift_points3(self):
data = [Node(id=1, location=np.array([0, 1]))]
spec = GraphSpec(Roi(offset=(0, 0), shape=(5, 5)))
points = Graph(data, [], spec)
request_roi = Roi(offset=(0, 1), shape=(5, 3))
shift_array = np.array([[0, 1], [0, -1], [0, 0], [0, 0], [0, 1]], dtype=int)
lcm_voxel_size = Coordinate((1, 1))
shifted_points = Graph(
[Node(id=1, location=np.array([0, 2]))], [], GraphSpec(request_roi)
)
result = ShiftAugment.shift_points(
points,
request_roi,
shift_array,
shift_axis=0,
lcm_voxel_size=lcm_voxel_size,
)
# print("test 3", result.data, shifted_points.data)
self.assertTrue(self.points_equal(result.nodes, shifted_points.nodes))
self.assertTrue(result.spec == GraphSpec(request_roi))
def test_shift_points4(self):
data = [
Node(id=0, location=np.array([1, 0])),
Node(id=1, location=np.array([1, 1])),
Node(id=2, location=np.array([1, 2])),
Node(id=3, location=np.array([1, 3])),
Node(id=4, location=np.array([1, 4])),
]
spec = GraphSpec(Roi(offset=(0, 0), shape=(5, 5)))
points = Graph(data, [], spec)
request_roi = Roi(offset=(1, 0), shape=(3, 5))
shift_array = np.array([[1, 0], [-1, 0], [0, 0], [-1, 0], [1, 0]], dtype=int)
lcm_voxel_size = Coordinate((1, 1))
shifted_data = [
Node(id=0, location=np.array([2, 0])),
Node(id=2, location=np.array([1, 2])),
Node(id=4, location=np.array([2, 4])),
]
result = ShiftAugment.shift_points(
points,
request_roi,
shift_array,
shift_axis=1,
lcm_voxel_size=lcm_voxel_size,
)
# print("test 4", result.data, shifted_data)
self.assertTrue(self.points_equal(result.nodes, shifted_data))
self.assertTrue(result.spec == GraphSpec(request_roi))
def test_shift_points5(self):
data = [
Node(id=0, location=np.array([3, 0])),
Node(id=1, location=np.array([3, 2])),
Node(id=2, location=np.array([3, 4])),
Node(id=3, location=np.array([3, 6])),
Node(id=4, location=np.array([3, 8])),
]
spec = GraphSpec(Roi(offset=(0, 0), shape=(15, 10)))
points = Graph(data, [], spec)
request_roi = Roi(offset=(3, 0), shape=(9, 10))
shift_array = np.array([[3, 0], [-3, 0], [0, 0], [-3, 0], [3, 0]], dtype=int)
lcm_voxel_size = Coordinate((3, 2))
shifted_data = [
Node(id=0, location=np.array([6, 0])),
Node(id=2, location=np.array([3, 4])),
Node(id=4, location=np.array([6, 8])),
]
result = ShiftAugment.shift_points(
points,
request_roi,
shift_array,
shift_axis=1,
lcm_voxel_size=lcm_voxel_size,
)
# print("test 4", result.data, shifted_data)
self.assertTrue(self.points_equal(result.nodes, shifted_data))
self.assertTrue(result.spec == GraphSpec(request_roi))
#######################
# get_sub_shift_array #
#######################
def test_get_sub_shift_array1(self):
total_roi = Roi(offset=(0, 0), shape=(6, 6))
item_roi = Roi(offset=(1, 2), shape=(3, 3))
shift_array = np.arange(12).reshape(6, 2).astype(int)
shift_axis = 1
lcm_voxel_size = Coordinate((1, 1))
sub_shift_array = np.array([[4, 5], [6, 7], [8, 9]], dtype=int)
result = ShiftAugment.get_sub_shift_array(
total_roi, item_roi, shift_array, shift_axis, lcm_voxel_size
)
# print(result)
self.assertTrue(np.array_equal(result, sub_shift_array))
def test_get_sub_shift_array2(self):
total_roi = Roi(offset=(0, 0), shape=(6, 6))
item_roi = Roi(offset=(1, 2), shape=(3, 3))
shift_array = np.arange(12).reshape(6, 2).astype(int)
shift_axis = 0
lcm_voxel_size = Coordinate((1, 1))
sub_shift_array = np.array([[2, 3], [4, 5], [6, 7]], dtype=int)
result = ShiftAugment.get_sub_shift_array(
total_roi, item_roi, shift_array, shift_axis, lcm_voxel_size
)
self.assertTrue(np.array_equal(result, sub_shift_array))
def test_get_sub_shift_array3(self):
total_roi = Roi(offset=(0, 0), shape=(18, 12))
item_roi = Roi(offset=(3, 4), shape=(9, 6))
shift_array = np.arange(12).reshape(6, 2).astype(int)
shift_axis = 0
lcm_voxel_size = Coordinate((3, 2))
sub_shift_array = np.array([[2, 3], [4, 5], [6, 7]], dtype=int)
result = ShiftAugment.get_sub_shift_array(
total_roi, item_roi, shift_array, shift_axis, lcm_voxel_size
)
# print(result)
self.assertTrue(np.array_equal(result, sub_shift_array))
################################
# construct_global_shift_array #
################################
def test_construct_global_shift_array_static(self):
shift_axis_len = 5
shift_sigmas = (0.0, 1.0)
prob_slip = 0
prob_shift = 0
lcm_voxel_size = Coordinate((1, 1))
shift_array = np.zeros(shape=(shift_axis_len, len(shift_sigmas)), dtype=int)
result = ShiftAugment.construct_global_shift_array(
shift_axis_len, shift_sigmas, prob_shift, prob_slip, lcm_voxel_size
)
self.assertTrue(np.array_equal(result, shift_array))
def test_construct_global_shift_array1(self):
shift_axis_len = 5
shift_sigmas = (0.0, 1.0)
prob_slip = 1
prob_shift = 0
lcm_voxel_size = Coordinate((1, 1))
shift_array = np.array([[0, 0], [0, -1], [0, 1], [0, 0], [0, 1]], dtype=int)
result = ShiftAugment.construct_global_shift_array(
shift_axis_len, shift_sigmas, prob_slip, prob_shift, lcm_voxel_size
)
# print(result)
self.assertTrue(len(result) == shift_axis_len)
for position_shift in result:
self.assertTrue(position_shift[0] == 0)
self.assertTrue(np.array_equal(shift_array, result))
def test_construct_global_shift_array2(self):
shift_axis_len = 5
shift_sigmas = (0.0, 1.0)
prob_slip = 0
prob_shift = 1
lcm_voxel_size = Coordinate((1, 1))
shift_array = np.array([[0, 0], [0, -1], [0, 0], [0, 0], [0, 1]], dtype=int)
result = ShiftAugment.construct_global_shift_array(
shift_axis_len, shift_sigmas, prob_slip, prob_shift, lcm_voxel_size
)
self.assertTrue(len(result) == shift_axis_len)
for position_shift in result:
self.assertTrue(position_shift[0] == 0)
self.assertTrue(np.array_equal(shift_array, result))
def test_construct_global_shift_array3(self):
shift_axis_len = 5
shift_sigmas = (0.0, 4.0)
prob_slip = 0
prob_shift = 1
lcm_voxel_size = Coordinate((1, 3))
shift_array = np.array([[0, 3], [0, 0], [0, 6], [0, 6], [0, 12]], dtype=int)
result = ShiftAugment.construct_global_shift_array(
shift_axis_len, shift_sigmas, prob_slip, prob_shift, lcm_voxel_size
)
# print(result)
self.assertTrue(len(result) == shift_axis_len)
for position_shift in result:
self.assertTrue(position_shift[0] == 0)
self.assertTrue(np.array_equal(shift_array, result))
########################
# compute_upstream_roi #
########################
def test_compute_upstream_roi_static(self):
request_roi = Roi(offset=(0, 0), shape=(5, 10))
sub_shift_array = np.array([[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]], dtype=int)
upstream_roi = Roi(offset=(0, 0), shape=(5, 10))
result = ShiftAugment.compute_upstream_roi(request_roi, sub_shift_array)
self.assertTrue(upstream_roi == result)
def test_compute_upstream_roi1(self):
request_roi = Roi(offset=(0, 0), shape=(5, 10))
sub_shift_array = np.array([[0, 0], [0, -1], [0, 0], [0, 0], [0, 1]], dtype=int)
upstream_roi = Roi(offset=(0, -1), shape=(5, 12))
result = ShiftAugment.compute_upstream_roi(request_roi, sub_shift_array)
self.assertTrue(upstream_roi == result)
def test_compute_upstream_roi2(self):
request_roi = Roi(offset=(0, 0), shape=(5, 10))
sub_shift_array = np.array(
[[2, 0], [-1, 0], [5, 0], [-2, 0], [0, 0]], dtype=int
)
upstream_roi = Roi(offset=(-5, 0), shape=(12, 10))
result = ShiftAugment.compute_upstream_roi(request_roi, sub_shift_array)
self.assertTrue(upstream_roi == result)
if __name__ == "__main__":
unittest.main()
| 20,702 | 35.642478 | 95 | py |
gunpowder | gunpowder-master/docker/test_environment.py | import gunpowder
if __name__ == "__main__":
print("Successfully set up gunpowder")
| 88 | 16.8 | 42 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.