repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
wsireg | wsireg-master/wsireg/utils/im_utils.py | import multiprocessing
import warnings
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import List, Optional, Tuple, Union
import cv2
import dask.array as da
import numpy as np
import SimpleITK as sitk
import zarr
from czifile import CziFile
from tifffile import (
OmeXml,
TiffFile,
TiffWriter,
create_output,
imread,
xml2dict,
)
from wsireg.parameter_maps.preprocessing import BoundingBox, ImagePreproParams
from wsireg.utils.tform_utils import sitk_transform_image
TIFFFILE_EXTS = [".scn", ".tif", ".tiff", ".ndpi", ".svs"]
ARRAYLIKE_CLASSES = (np.ndarray, da.core.Array, zarr.Array)
# Mapping from SimpleITK pixel ID enum values to numpy dtypes.
# IDs 0-11 are scalar types, 12-21 vector (multi-component) types,
# 22-25 label types.
SITK_TO_NP_DTYPE = {
    0: np.int8,
    1: np.uint8,
    2: np.int16,
    3: np.uint16,
    4: np.int32,
    5: np.uint32,
    6: np.int64,
    7: np.uint64,
    8: np.float32,
    9: np.float64,
    10: np.complex64,
    # 11 is sitkComplexFloat64 -> double-precision complex
    # (previously mapped to np.complex64)
    11: np.complex128,
    12: np.int8,
    13: np.uint8,
    14: np.int16,
    # vector unsigned types were previously mapped to their signed
    # counterparts (15/17/19)
    15: np.uint16,
    16: np.int32,
    17: np.uint32,
    18: np.int64,
    19: np.uint64,
    20: np.float32,
    21: np.float64,
    22: np.uint8,
    23: np.uint16,
    24: np.uint32,
    25: np.uint64,
}
# Color-name -> 6-digit RRGGBB hex, used for OME channel color metadata.
COLNAME_TO_HEX = {
    "red": "FF0000",
    "green": "00FF00",
    "blue": "0000FF",
    "magenta": "FF00FF",
    "yellow": "FFFF00",
    # was "00FFFFF" (7 hex digits) -- invalid RRGGBB value
    "cyan": "00FFFF",
    "white": "FFFFFF",
}
def zarr_get_base_pyr_layer(zarr_store):
    """
    Find the base pyramid layer of a zarr store

    Parameters
    ----------
    zarr_store
        zarr store (zarr.hierarchy.Group or zarr.core.Array)

    Returns
    -------
    zarr_im: zarr.core.Array
        zarr array of base layer

    Raises
    ------
    TypeError
        if zarr_store is neither a zarr Group nor a zarr Array
        (previously this fell through to an UnboundLocalError)
    """
    if isinstance(zarr_store, zarr.hierarchy.Group):
        # multiscale groups keep the full-resolution layer under key "0"
        zarr_im = zarr_store[str(0)]
    elif isinstance(zarr_store, zarr.core.Array):
        zarr_im = zarr_store
    else:
        raise TypeError(
            f"expected zarr Group or Array, got {type(zarr_store)}"
        )
    return zarr_im
def ensure_dask_array(image):
    """Return ``image`` as a dask array, wrapping zarr or other array-likes."""
    if isinstance(image, da.core.Array):
        out = image
    elif isinstance(image, zarr.Array):
        out = da.from_zarr(image)
    else:
        # covers np.ndarray and any other array-like object
        out = da.from_array(image)
    return out
def preprocess_dask_array(
    array: da.Array, preprocessing: Optional[ImagePreproParams] = None
):
    """Materialize a dask array into a sitk.Image for registration.

    Interleaved RGB data is collapsed to greyscale when ``preprocessing``
    is provided; multi-channel (CYX) data may be channel-subset via
    ``preprocessing.ch_indices``.
    """
    if guess_rgb(array.shape):
        # interleaved RGB/RGBA data
        if preprocessing:
            gs = grayscale(array, is_interleaved=True)
            return sitk.GetImageFromArray(np.asarray(gs))
        return sitk.GetImageFromArray(np.asarray(array), isVector=True)
    if len(array.shape) == 2:
        # single greyscale plane
        return sitk.GetImageFromArray(np.asarray(array))
    # multi-channel planar data: optional channel selection, then squeeze
    if preprocessing:
        if preprocessing.ch_indices and len(array.shape) > 2:
            array = array[list(preprocessing.ch_indices), :, :]
    return sitk.GetImageFromArray(np.squeeze(np.asarray(array)))
def read_preprocess_array(array, preprocessing, force_rgb=None):
    """Read np.array, zarr.Array, or dask.array image into memory
    with preprocessing for registration."""
    is_interleaved = guess_rgb(array.shape)
    # force_rgb overrides the shape-based detection when provided
    is_rgb = force_rgb if force_rgb else is_interleaved
    if is_rgb:
        if preprocessing:
            gs = grayscale(array, is_interleaved=is_interleaved)
            return sitk.GetImageFromArray(np.asarray(gs))
        data = np.asarray(array)
        if not is_interleaved:
            # move the channel axis last so sitk sees interleaved samples
            data = np.rollaxis(data, 0, 3)
        return sitk.GetImageFromArray(data, isVector=True)
    if len(array.shape) == 2:
        return sitk.GetImageFromArray(np.asarray(array))
    # multi-channel planar data: optional channel selection, then squeeze
    if preprocessing:
        if preprocessing.ch_indices and len(array.shape) > 2:
            array = array[list(preprocessing.ch_indices), :, :]
    return sitk.GetImageFromArray(np.squeeze(np.asarray(array)))
def tifffile_zarr_backend(
    image_filepath, largest_series, preprocessing, force_rgb=None
):
    """
    Read image with tifffile and use zarr to read data into memory

    Parameters
    ----------
    image_filepath: str
        path to the image file
    largest_series: int
        index of the largest series in the image
    preprocessing:
        whether to do some read-time pre-processing
        - greyscale conversion (at the tile level)
        - read individual or range of channels (at the tile level)

    Returns
    -------
    image: sitk.Image
        image ready for other registration pre-processing
    """
    print("using zarr backend")
    store = zarr.open(
        imread(image_filepath, aszarr=True, series=largest_series)
    )
    base_layer = zarr_get_base_pyr_layer(store)
    return read_preprocess_array(
        base_layer, preprocessing=preprocessing, force_rgb=force_rgb
    )
def tifffile_dask_backend(
    image_filepath, largest_series, preprocessing, force_rgb=None
):
    """
    Read image with tifffile and use dask to read data into memory

    Parameters
    ----------
    image_filepath: str
        path to the image file
    largest_series: int
        index of the largest series in the image
    preprocessing:
        whether to do some read-time pre-processing
        - greyscale conversion (at the tile level)
        - read individual or range of channels (at the tile level)

    Returns
    -------
    image: sitk.Image
        image ready for other registration pre-processing
    """
    print("using dask backend")
    store = zarr.open(
        imread(image_filepath, aszarr=True, series=largest_series)
    )
    base_layer = da.squeeze(da.from_zarr(zarr_get_base_pyr_layer(store)))
    return read_preprocess_array(
        base_layer, preprocessing=preprocessing, force_rgb=force_rgb
    )
def sitk_backend(image_filepath, preprocessing):
    """
    Read image with SimpleITK..this will always read the full image into memory

    Parameters
    ----------
    image_filepath: str
        path to the image file
    preprocessing:
        whether to do some read-time pre-processing
        - greyscale conversion (at the tile level)
        - read individual or range of channels (at the tile level)

    Returns
    -------
    image: sitk.Image
        image ready for other registration pre-processing
    """
    print("using sitk backend")
    image = sitk.ReadImage(image_filepath)
    if image.GetNumberOfComponentsPerPixel() >= 3:
        # RGB(A): collapse to greyscale when preprocessing is requested
        if preprocessing is not None:
            image = sitk_vect_to_gs(image)
    elif image.GetDepth() == 0:
        # single 2D plane, nothing to do
        return image
    else:
        if preprocessing is not None:
            # NOTE(review): `preprocessing` is accessed as a dict here
            # (.get) while other readers use ImagePreproParams attributes
            # -- confirm which type callers pass
            if (
                preprocessing.get("ch_indices") is not None
                and image.GetDepth() > 0
            ):
                # removed stray debug print("here")
                chs = np.asarray(preprocessing.get('ch_indices'))
                image = image[:, :, chs]
    return image
def guess_rgb(shape):
    """
    Guess if the passed shape comes from rgb data.
    If last dim is 3 or 4 assume the data is rgb, including rgba.

    Parameters
    ----------
    shape : list of int
        Shape of the data that should be checked.

    Returns
    -------
    bool
        If data is rgb or not.
    """
    ndim = len(shape)
    last_dim = shape[-1]
    # >2 dims with a small trailing dim (< 5) is treated as
    # channel-interleaved rgb/rgba data
    return ndim > 2 and last_dim < 5
def grayscale(rgb_image, is_interleaved=False):
    """
    convert RGB image data to greyscale

    Parameters
    ----------
    rgb_image: np.ndarray
        image data
    is_interleaved: bool
        whether channels sit on the last axis (interleaved) or the first

    Returns
    -------
    image:np.ndarray
        returns 8-bit greyscale image for 24-bit RGB image
    """
    # ITU-R 709 luma weights; each weighted channel is truncated to uint8
    # before summation (matches original behavior exactly)
    weights = (0.2125, 0.7154, 0.0721)
    if is_interleaved is True:
        planes = [rgb_image[..., c] for c in range(3)]
    else:
        planes = [rgb_image[c, ...] for c in range(3)]
    result = (planes[0] * weights[0]).astype(np.uint8)
    result = result + (planes[1] * weights[1]).astype(np.uint8)
    result = result + (planes[2] * weights[2]).astype(np.uint8)
    return result
def czi_tile_grayscale(rgb_image):
    """
    convert RGB image data to greyscale

    Parameters
    ----------
    rgb_image: np.ndarray
        image data

    Returns
    -------
    image:np.ndarray
        returns 8-bit greyscale image for 24-bit RGB image, with a
        trailing singleton channel axis
    """
    # ITU-R 709 luma weights, truncated per channel before the sum
    r = (rgb_image[..., 0] * 0.2125).astype(np.uint8)
    g = (rgb_image[..., 1] * 0.7154).astype(np.uint8)
    b = (rgb_image[..., 2] * 0.0721).astype(np.uint8)
    return np.expand_dims(r + g + b, axis=-1)
class CziRegImageReader(CziFile):
    """
    Sub-class of CziFile with added functionality to only read certain channels
    """

    def sub_asarray(
        self,
        resize=True,
        order=0,
        out=None,
        max_workers=None,
        channel_idx=None,
        as_uint8=False,
    ):
        """Return image data from file(s) as numpy array.

        Parameters
        ----------
        resize : bool
            If True (default), resize sub/supersampled subblock data.
        order : int
            The order of spline interpolation used to resize sub/supersampled
            subblock data. Default is 0 (nearest neighbor).
        out : numpy.ndarray, str, or file-like object; optional
            Buffer where image data will be saved.
            If numpy.ndarray, a writable array of compatible dtype and shape.
            If str or open file, the file name or file object used to
            create a memory-map to an array stored in a binary file on disk.
        max_workers : int
            Maximum number of threads to read and decode subblock data.
            By default up to half the CPU cores are used.
        channel_idx : int or list of int
            The indices of the channels to extract
        as_uint8 : bool
            byte-scale image data to np.uint8 data type

        Returns
        -------
        out:np.ndarray
            image read with selected parameters as np.ndarray
        """
        out_shape = list(self.shape)
        start = list(self.start)
        # position of the channel axis in the czi axes string
        ch_dim_idx = self.axes.index('C')
        if channel_idx is not None:
            if isinstance(channel_idx, int):
                channel_idx = [channel_idx]
            if out_shape[ch_dim_idx] == 1:
                # single-channel file: no sub-selection needed
                channel_idx = None
            else:
                out_shape[ch_dim_idx] = len(channel_idx)
                # map original channel index -> position in the output array
                min_ch_seq = {}
                for idx, i in enumerate(channel_idx):
                    min_ch_seq.update({i: idx})
        if as_uint8 is True:
            out_dtype = np.uint8
        else:
            out_dtype = self.dtype
        if out is None:
            out = create_output(None, tuple(out_shape), out_dtype)
        if max_workers is None:
            max_workers = multiprocessing.cpu_count() - 1

        def func(
            directory_entry, resize=resize, order=order, start=start, out=out
        ):
            """Read, decode, and copy subblock data."""
            subblock = directory_entry.data_segment()
            dvstart = list(directory_entry.start)
            czi_c_idx = [
                de.dimension for de in subblock.dimension_entries
            ].index('C')
            subblock_ch_idx = subblock.dimension_entries[czi_c_idx].start
            if channel_idx is not None:
                if subblock_ch_idx in channel_idx:
                    # NOTE(review): the next line is a bare attribute access
                    # with no effect -- likely leftover debug code
                    subblock.dimension_entries[czi_c_idx].start
                    tile = subblock.data(resize=resize, order=order)
                    # remap the tile into the compacted output channel slot
                    dvstart[ch_dim_idx] = min_ch_seq.get(subblock_ch_idx)
                else:
                    # subblock belongs to an unselected channel: skip it
                    return
            else:
                tile = subblock.data(resize=resize, order=order)
            if as_uint8 is True:
                # byte-scale (assumes 16-bit input -- TODO confirm)
                tile = (tile / 256).astype("uint8")
            # destination slice in the output for this subblock
            index = tuple(
                slice(i - j, i - j + k)
                for i, j, k in zip(tuple(dvstart), tuple(start), tile.shape)
            )
            try:
                out[index] = tile
            except ValueError as e:
                # tolerate mismatched subblocks rather than aborting the read
                warnings.warn(str(e))

        if max_workers > 1:
            # lock the shared file handle while threads read concurrently
            self._fh.lock = True
            with ThreadPoolExecutor(max_workers) as executor:
                executor.map(func, self.filtered_subblock_directory)
            self._fh.lock = None
        else:
            for directory_entry in self.filtered_subblock_directory:
                func(directory_entry)
        if hasattr(out, "flush"):
            out.flush()
        return out

    def sub_asarray_rgb(
        self,
        resize=True,
        order=0,
        out=None,
        max_workers=None,
        channel_idx=None,
        as_uint8=False,
        greyscale=False,
    ):
        """Return image data from file(s) as numpy array.

        Parameters
        ----------
        resize : bool
            If True (default), resize sub/supersampled subblock data.
        order : int
            The order of spline interpolation used to resize sub/supersampled
            subblock data. Default is 0 (nearest neighbor).
        out : numpy.ndarray, str, or file-like object; optional
            Buffer where image data will be saved.
            If numpy.ndarray, a writable array of compatible dtype and shape.
            If str or open file, the file name or file object used to
            create a memory-map to an array stored in a binary file on disk.
        max_workers : int
            Maximum number of threads to read and decode subblock data.
            By default up to half the CPU cores are used.
        channel_idx : int or list of int
            The indices of the channels to extract
        as_uint8 : bool
            byte-scale image data to np.uint8 data type
        greyscale : bool
            collapse interleaved RGB samples to a single greyscale plane

        Returns
        -------
        out:np.ndarray
            image read with selected parameters as np.ndarray
        """
        out_shape = list(self.shape)
        start = list(self.start)
        # '0' is the samples (RGB component) axis in czifile's axes string
        ch_dim_idx = self.axes.index('0')
        if channel_idx is not None:
            if isinstance(channel_idx, int):
                channel_idx = [channel_idx]
            out_shape[ch_dim_idx] = len(channel_idx)
        if greyscale is True:
            # greyscale output collapses the samples axis to 1
            out_shape[ch_dim_idx] = 1
        if as_uint8 is True:
            out_dtype = np.uint8
        else:
            out_dtype = self.dtype
        if out is None:
            out = create_output(None, tuple(out_shape), out_dtype)
        if max_workers is None:
            max_workers = multiprocessing.cpu_count() - 1

        def func(
            directory_entry, resize=resize, order=order, start=start, out=out
        ):
            """Read, decode, and copy subblock data."""
            subblock = directory_entry.data_segment()
            dvstart = list(directory_entry.start)
            tile = subblock.data(resize=resize, order=order)
            if greyscale is True:
                tile = czi_tile_grayscale(tile)
            if channel_idx is not None:
                # select RGB samples on the last axis of the 6D czi tile
                tile = tile[:, :, :, :, :, channel_idx]
            index = tuple(
                slice(i - j, i - j + k)
                for i, j, k in zip(tuple(dvstart), tuple(start), tile.shape)
            )
            try:
                out[index] = tile
            except ValueError as e:
                # tolerate mismatched subblocks rather than aborting the read
                warnings.warn(str(e))

        if max_workers > 1:
            # lock the shared file handle while threads read concurrently
            self._fh.lock = True
            with ThreadPoolExecutor(max_workers) as executor:
                executor.map(func, self.filtered_subblock_directory)
            self._fh.lock = None
        else:
            for directory_entry in self.filtered_subblock_directory:
                func(directory_entry)
        if hasattr(out, "flush"):
            out.flush()
        return out
def tf_get_largest_series(image_filepath: Union[str, Path]) -> int:
    """
    Determine largest series for .scn files by examining metadata
    For other multi-series files, find the one with the most pixels

    Parameters
    ----------
    image_filepath: str
        path to the image file

    Returns
    -------
    largest_series:int
        index of the largest series in the image data
    """
    tf_im = TiffFile(image_filepath)
    if Path(image_filepath).suffix.lower() == ".scn":
        # Leica .scn: pick the image scanned at the highest objective
        # magnification rather than comparing raw pixel counts
        scn_meta = xml2dict(tf_im.scn_metadata)
        image_meta = scn_meta.get("scn").get("collection").get("image")
        objectives = [
            im.get("scanSettings")
            .get("objectiveSettings")
            .get("objective")
            for im in image_meta
        ]
        largest_series = np.argmax(objectives)
    else:
        pixel_counts = [
            np.prod(np.asarray(series.shape), dtype=np.int64)
            for series in tf_im.series
        ]
        largest_series = np.argmax(pixel_counts)
    return int(largest_series)
def get_sitk_image_info(image_filepath):
    """
    Get image info for files only ready by SimpleITK

    Parameters
    ----------
    image_filepath:str
        filepath to image

    Returns
    -------
    im_dims: np.ndarray
        image dimensions in np.ndarray
    im_dtype: np.dtype
        data type of the image
    """
    reader = sitk.ImageFileReader()
    reader.SetFileName(image_filepath)
    reader.LoadPrivateTagsOn()
    reader.ReadImageInformation()
    pixel_id = reader.GetPixelID()
    im_dtype = np.dtype(SITK_TO_NP_DTYPE.get(pixel_id))
    im_dims = np.asarray(reader.GetSize())
    # sitk reports XY(Z); swap the first two entries to YX
    im_dims[[0, 1]] = im_dims[[1, 0]]
    if "vector" in sitk.GetPixelIDValueAsString(pixel_id):
        # RGB(A)-like vector pixels: append samples axis (assumed 3)
        im_dims = np.append(im_dims, 3)
    elif len(im_dims) == 3:
        # planar multi-channel: move channel axis first -> CYX
        im_dims = im_dims[[2, 0, 1]]
    else:
        # single plane: prepend a singleton channel axis
        im_dims = np.concatenate([[1], im_dims], axis=0)
    return im_dims, im_dtype
def tifffile_to_dask(
    im_fp: Union[str, Path], largest_series: int, level: Optional[int] = None
) -> Union[da.Array, List[da.Array]]:
    """Open a tifffile series as dask array(s) backed by a zarr store.

    Parameters
    ----------
    im_fp:
        path to the image file
    largest_series:
        index of the series to read
    level:
        pyramid level to read; None (default) opens all levels.
        (The previous default was the ``int`` type object itself --
        a bug that would be passed verbatim to ``imread``.)

    Returns
    -------
    a dask array for a single level, or a list of dask arrays (one per
    pyramid level) when the zarr store is a multiscale group
    """
    imdata = zarr.open(
        imread(im_fp, aszarr=True, series=largest_series, level=level)
    )
    if isinstance(imdata, zarr.hierarchy.Group):
        imdata = [da.from_zarr(imdata[z]) for z in imdata.array_keys()]
    else:
        imdata = da.from_zarr(imdata)
    return imdata
def get_tifffile_info(
    image_filepath: Union[str, Path]
) -> Tuple[Tuple[int, int, int], np.dtype, int]:
    """Return (CYX dims, dtype, largest series index) for a tiff file."""
    largest_series = tf_get_largest_series(image_filepath)
    base_layer = zarr_get_base_pyr_layer(
        zarr.open(imread(image_filepath, aszarr=True, series=largest_series))
    )
    im_dims = np.squeeze(base_layer.shape)
    if len(im_dims) == 2:
        # prepend a singleton channel axis for 2D planes
        im_dims = np.concatenate([[1], im_dims])
    return im_dims, base_layer.dtype, largest_series
def tf_zarr_read_single_ch(
    image_filepath, channel_idx, is_rgb, is_rgb_interleaved=True
):
    """
    Reads a single channel using zarr or dask in combination with tifffile

    Parameters
    ----------
    image_filepath:str
        file path to image
    channel_idx:int
        index of the channel to be read
    is_rgb:bool
        whether image is rgb
    is_rgb_interleaved:bool
        whether rgb channels are on the last (interleaved) axis

    Returns
    -------
    im:np.ndarray
        image as a np.ndarray
    """
    largest_series = tf_get_largest_series(image_filepath)
    zarr_im = zarr_get_base_pyr_layer(
        zarr.open(imread(image_filepath, aszarr=True, series=largest_series))
    )
    try:
        im = da.squeeze(da.from_zarr(zarr_im))
        if is_rgb and is_rgb_interleaved is True:
            im = im[:, :, channel_idx].compute()
        elif len(im.shape) > 2:
            im = im[channel_idx, :, :].compute()
        else:
            im = im.compute()
    except ValueError:
        # dask could not wrap the store; index the zarr array directly.
        # zarr slicing already returns np.ndarray, so no .compute() --
        # the previous code called .compute() here and would crash
        im = zarr_im
        if is_rgb is True and is_rgb_interleaved is True:
            im = im[:, :, channel_idx]
        elif len(im.shape) > 2:
            im = im[channel_idx, :, :]
        else:
            # materialize the whole zarr array into memory
            im = im[:]
    return im
def czi_read_single_ch(image_filepath, channel_idx):
    """Read a single channel of a .czi image as a numpy array."""
    reader = CziRegImageReader(image_filepath)
    return reader.sub_asarray(channel_idx=channel_idx)
def calc_pyramid_levels(xy_final_shape, tile_size):
    """
    Calculate number of pyramids for a given image dimension and tile size
    Stops when further downsampling would be smaller than tile_size.

    Parameters
    ----------
    xy_final_shape:np.ndarray
        final shape in xy order
    tile_size: int
        size of the tiles in the pyramidal layers

    Returns
    -------
    res_shapes:list
        list of tuples of the shapes of the downsampled images
    """
    shape_yx = xy_final_shape[::-1]
    levels = [tuple(shape_yx)]
    # halve while both dimensions are still strictly above tile_size
    while all(shape_yx > tile_size):
        shape_yx = shape_yx // 2
        levels.append(tuple(shape_yx))
    # the last appended level is at or below tile_size -> drop it
    return levels[:-1]
def add_ome_axes_single_plane(image_np):
    """
    Reshapes np.ndarray image to match OME-zarr standard

    Parameters
    ----------
    image_np:np.ndarray
        image to which additional axes are added to meet OME-zarr standard

    Returns
    -------
    image_np:np.ndarray
        reshaped image array with three leading singleton axes
    """
    # prepend three singleton axes ahead of the plane's own shape
    return image_np[np.newaxis, np.newaxis, np.newaxis, ...]
def generate_channels(channel_names, channel_colors, im_dtype):
    """
    Generate OME-zarr channels metadata

    Parameters
    ----------
    channel_names:list
    channel_colors:list
    im_dtype:np.dtype

    Returns
    -------
    channel_info:list
        list of dicts containing OME-zarr channel info
    """
    # one entry per (name, color) pair; display window spans the full
    # integer dtype range
    return [
        {
            "label": name,
            "color": color,
            "active": True,
            "window": {"start": 0, "end": int(np.iinfo(im_dtype).max)},
        }
        for name, color in zip(channel_names, channel_colors)
    ]
def format_channel_names(channel_names, n_ch):
    """
    Format channel names and ensure number of channel names matches number of channels or default
    to C1, C2, C3, etc.

    Parameters
    ----------
    channel_names:list
        list of str that are channel names
    n_ch: int
        number of channels detected in image

    Returns
    -------
    channel_names:
        list of str that are formatted
    """
    names_valid = channel_names is not None and len(channel_names) == n_ch
    if names_valid:
        return channel_names
    # fall back to generic names C0..C{n_ch-1}
    return ["C{}".format(idx) for idx in range(n_ch)]
def get_pyramid_info(y_size, x_size, n_ch, tile_size):
    """
    Get pyramidal info for OME-zarr output

    Parameters
    ----------
    y_size: int
        y dimension of base layer
    x_size:int
        x dimension of base layer
    n_ch:int
        number of channels in the image
    tile_size:int
        tile size of the image

    Returns
    -------
    pyr_levels
        pyramidal levels
    pyr_shapes:
        OME-zarr pyramid shapes for all levels
    """
    # NOTE: calc_pyramid_levels reverses its input, so the level tuples
    # come back in the reversed order of the (y, x) arguments given here
    size_arr = np.asarray([y_size, x_size], dtype=np.int32)
    pyr_levels = calc_pyramid_levels(size_arr, tile_size)
    pyr_shapes = [
        (1, n_ch, 1, int(level[0]), int(level[1])) for level in pyr_levels
    ]
    return pyr_levels, pyr_shapes
def prepare_ome_zarr_group(
    zarr_store_dir,
    y_size,
    x_size,
    n_ch,
    im_dtype,
    tile_size=512,
    channel_names=None,
    channel_colors=None,
):
    """
    Prepare OME-zarr store with all meta data and channel info and initialize store

    Parameters
    ----------
    zarr_store_dir:str
        filepath to zarr store
    y_size: int
        y dimension of base layer
    x_size:int
        x dimension of base layer
    n_ch:int
        number of channels in the image
    im_dtype:np.dtype
        data type of the image
    tile_size:int
        tile size of the image
    channel_names:list
        list of str channel names
    channel_colors:
        list of hex or str channel colors

    Returns
    -------
    grp: zarr.store
        initialized store
    n_pyr_levels: int
        number of sub-resolutions
    pyr_levels: list
        shapes of sub-resolutions
    """
    store = zarr.DirectoryStore(zarr_store_dir)
    grp = zarr.group(store, overwrite=True)
    # e.g. "u1" for uint8 -- zarr dtype string from numpy kind + itemsize
    zarr_dtype = "{}{}".format(im_dtype.kind, im_dtype.itemsize)
    # NOTE: arguments are passed as (x_size, y_size) on purpose:
    # get_pyramid_info reverses its input via calc_pyramid_levels, so this
    # swap makes the returned level tuples come out in (y, x) order,
    # matching the (t, c, z, y, x) dataset shapes below
    pyr_levels, pyr_shapes = get_pyramid_info(x_size, y_size, n_ch, tile_size)
    paths = []
    for path, pyr_shape in enumerate(pyr_levels):
        # one dataset per pyramid level, chunked one tile per channel/plane
        grp.create_dataset(
            str(path),
            shape=pyr_shapes[path],
            dtype=zarr_dtype,
            chunks=(1, 1, 1, tile_size, tile_size),
        )
        paths.append({"path": str(path)})
    multiscales = [
        {
            "version": "0.1",
            "datasets": paths,
        }
    ]
    grp.attrs["multiscales"] = multiscales
    n_pyr_levels = len(paths)
    channel_names = format_channel_names(channel_names, n_ch)
    # repeat the named palette enough times to cover all channels
    n_colors = n_ch // len(COLNAME_TO_HEX) + 1
    color_palette = [*COLNAME_TO_HEX] * n_colors
    if channel_colors is None:
        channel_colors = [color_palette[idx] for idx in range(n_ch)]
    elif n_ch != len(channel_colors) and n_ch != 1:
        channel_colors = [color_palette[idx] for idx in range(n_ch)]
    elif n_ch != len(channel_colors) and n_ch == 1:
        channel_colors = ["FFFFFF"]
    else:
        # counts match: translate color names to hex values
        # NOTE(review): the fallback palettes above store color *names*
        # (dict keys) while this branch emits hex strings -- the omero
        # metadata ends up mixed; confirm intended
        channel_colors = [COLNAME_TO_HEX[ch] for ch in channel_colors]
    channel_info = generate_channels(channel_names, channel_colors, im_dtype)
    # omero rendering metadata consumed by OME-zarr viewers
    image_data = {
        'id': 1,
        'channels': channel_info,
        'rdefs': {
            'model': 'color',
        },
    }
    grp.attrs["omero"] = image_data
    return grp, n_pyr_levels, pyr_levels
def get_final_tform_info(final_transform):
    """
    Extract size and spacing information from wsireg's final transformation elastix data

    Parameters
    ----------
    final_transform:itk.Transform
        itk.Transform with added attributes containing transform data

    Returns
    -------
    y_size: int
    x_size: int
    y_spacing: float
    x_spacing: float
    """
    # output_size / output_spacing are stored in (x, y) order
    size_xy = final_transform.output_size
    spacing_xy = final_transform.output_spacing
    return (
        int(size_xy[1]),
        int(size_xy[0]),
        float(spacing_xy[1]),
        float(spacing_xy[0]),
    )
def image_to_zarr_store(zgrp, image, channel_idx, n_pyr_levels, pyr_levels):
    """
    Write image into zarr store with sub resolutions

    Parameters
    ----------
    grp: zarr.store
        initialized store
    image:sitk.Image
        image
    channel_idx:int
        which channel the image represents
    n_pyr_levels: int
        number of pyramid levels to write
    pyr_levels: list
        (y, x) shapes of each pyramid level
    """
    for pyr_idx in range(n_pyr_levels):
        if pyr_idx == 0:
            # base level: convert once from sitk to numpy; subsequent
            # iterations downsample this array in place
            image = sitk.GetArrayFromImage(image)
        else:
            # cv2.resize takes (width, height); pyr_levels are (y, x)
            resize_shape = (
                pyr_levels[pyr_idx][1],
                pyr_levels[pyr_idx][0],
            )
            # NOTE(review): cv2.resize's third positional argument is
            # `dst`, not `interpolation` -- this likely should be
            # `interpolation=cv2.INTER_LINEAR`; confirm intended behavior
            image = cv2.resize(image, resize_shape, cv2.INTER_LINEAR)
        # write plane as (1, 1, 1, y, x) into the (t, c, z, y, x) dataset
        zgrp[str(pyr_idx)][
            :, channel_idx : channel_idx + 1, :, :, :
        ] = add_ome_axes_single_plane(image)
def prepare_ome_xml_str(
    y_size, x_size, n_ch, im_dtype, is_rgb, **ome_metadata
):
    """Build an OME-XML metadata string as utf-8 bytes for tiff writing.

    RGB images are stored as one interleaved plane (samples last);
    multi-channel images are stored one plane per channel.
    """
    if is_rgb:
        # single interleaved plane: samples on the last axis
        im_shape = (y_size, x_size, n_ch)
        stored_shape = (1, 1, 1, y_size, x_size, n_ch)
    else:
        # planar storage: one page per channel
        im_shape = (n_ch, y_size, x_size)
        stored_shape = (n_ch, 1, 1, y_size, x_size, 1)
    omexml = OmeXml()
    omexml.addimage(
        dtype=im_dtype,
        shape=im_shape,
        # specify how the image is stored in the TIFF file
        storedshape=stored_shape,
        **ome_metadata,
    )
    return omexml.tostring().encode("utf8")
def get_final_yx_from_tform(tform_reg_im, final_transform):
    """Return (y_size, x_size, y_spacing, x_spacing) of the output image.

    Uses the final transform's output geometry when available; otherwise
    falls back to the image's own dimensions with unknown spacing.
    """
    if final_transform is not None:
        return get_final_tform_info(final_transform)
    # no transform: take YX from the image dims
    # (RGB dims are (y, x, s); planar dims are (c, y, x))
    if tform_reg_im.is_rgb:
        y_size, x_size = tform_reg_im.im_dims[0], tform_reg_im.im_dims[1]
    else:
        y_size, x_size = tform_reg_im.im_dims[1], tform_reg_im.im_dims[2]
    return y_size, x_size, None, None
def transform_to_ome_zarr(tform_reg_im, output_dir, tile_size=512):
    """Transform a registered image channel-by-channel into an OME-zarr store.

    Parameters
    ----------
    tform_reg_im:
        registered image container (reader, dims, channel metadata, transforms)
    output_dir:
        directory in which "<image_name>.ome.zarr" is created
    tile_size: int
        zarr chunk edge length

    Returns
    -------
    str
        path of the written .ome.zarr store
    """
    # NOTE(review): get_final_yx_from_tform takes (tform_reg_im,
    # final_transform) and returns 4 values; this 1-arg call unpacked into
    # 2 names raises TypeError as written -- this function appears stale
    y_size, x_size = get_final_yx_from_tform(tform_reg_im)
    n_ch = (
        tform_reg_im.im_dims[2]
        if tform_reg_im.is_rgb
        else tform_reg_im.im_dims[0]
    )
    pyr_levels, pyr_shapes = get_pyramid_info(y_size, x_size, n_ch, tile_size)
    n_pyr_levels = len(pyr_levels)
    output_file_name = str(Path(output_dir) / tform_reg_im.image_name)
    if tform_reg_im.reader in ["tifffile", "czi", "sitk"]:
        if tform_reg_im.reader == "sitk":
            # sitk cannot read single channels lazily; read whole image once
            full_image = sitk.ReadImage(tform_reg_im.image_filepath)
        for channel_idx in range(n_ch):
            if tform_reg_im.reader == "tifffile":
                tform_reg_im.image = tf_zarr_read_single_ch(
                    tform_reg_im.image_filepath,
                    channel_idx,
                    tform_reg_im.is_rgb,
                )
                tform_reg_im.image = np.squeeze(tform_reg_im.image)
            elif tform_reg_im.reader == "czi":
                tform_reg_im.image = czi_read_single_ch(
                    tform_reg_im.image_filepath, channel_idx
                )
                tform_reg_im.image = np.squeeze(tform_reg_im.image)
            elif tform_reg_im.reader == "sitk":
                if tform_reg_im.is_rgb:
                    # NOTE(review): VectorIndexSelectionCast is called
                    # without the channel index argument here (compare
                    # transform_to_ome_tiff) -- confirm
                    tform_reg_im.image = sitk.VectorIndexSelectionCast(
                        full_image
                    )
                elif len(full_image.GetSize()) > 2:
                    tform_reg_im.image = full_image[:, :, channel_idx]
                else:
                    tform_reg_im.image = full_image
            if tform_reg_im.composite_transform is not None:
                # NOTE(review): transform_plane's signature is (image,
                # final_transform, composite_transform); this 1-arg call
                # would raise TypeError -- confirm
                tform_reg_im = transform_plane(tform_reg_im)
            else:
                tform_reg_im.image = sitk.GetImageFromArray(tform_reg_im.image)
            if channel_idx == 0:
                # initialize the zarr store on the first channel; later
                # channels reuse zgrp / pyramid info from this call
                channel_names = format_channel_names(
                    tform_reg_im.channel_names, n_ch
                )
                print(f"saving to {output_file_name}.ome.zarr")
                (zgrp, n_pyr_levels, pyr_levels,) = prepare_ome_zarr_group(
                    f"{output_file_name}.ome.zarr",
                    y_size,
                    x_size,
                    n_ch,
                    tform_reg_im.im_dtype,
                    tile_size=tile_size,
                    channel_names=channel_names,
                    channel_colors=tform_reg_im.channel_colors,
                )
            image_to_zarr_store(
                zgrp, tform_reg_im.image, channel_idx, n_pyr_levels, pyr_levels
            )
    return f"{output_file_name}.ome.zarr"
def transform_plane(image, final_transform, composite_transform):
    """Apply the composite and final transforms to a single image plane."""
    transformed = sitk_transform_image(
        image,
        final_transform,
        composite_transform,
    )
    return transformed
def transform_to_ome_tiff(
    tform_reg_im,
    image_name,
    output_dir,
    final_transform,
    composite_transform,
    tile_size=512,
    write_pyramid=True,
):
    """Transform an image channel-by-channel and write a pyramidal OME-TIFF.

    Parameters
    ----------
    tform_reg_im:
        registered image container (reader, dims, resolution, channel info)
    image_name: str
        name used for the output file and OME metadata
    output_dir: str
        directory in which "<image_name>.ome.tiff" is written
    final_transform:
        transform carrying the output geometry (may be None)
    composite_transform:
        composite transform applied to each plane (may be None)
    tile_size: int
        tiff tile size; halved until it fits within the image
    write_pyramid: bool
        whether to write sub-resolutions as SubIFDs

    Returns
    -------
    str
        path of the written .ome.tiff
    """
    y_size, x_size, y_spacing, x_spacing = get_final_yx_from_tform(
        tform_reg_im, final_transform
    )
    # protect against too large tile size
    while y_size / tile_size <= 1 or x_size / tile_size <= 1:
        tile_size = tile_size // 2
    # RGB dims are (y, x, s); planar dims are (c, y, x)
    n_ch = (
        tform_reg_im.im_dims[2]
        if tform_reg_im.is_rgb
        else tform_reg_im.im_dims[0]
    )
    pyr_levels, pyr_shapes = get_pyramid_info(y_size, x_size, n_ch, tile_size)
    n_pyr_levels = len(pyr_levels)
    output_file_name = str(Path(output_dir) / image_name)
    channel_names = format_channel_names(tform_reg_im.channel_names, n_ch)
    # spacing comes from the transform when present, else from the image
    if final_transform is not None:
        PhysicalSizeY = y_spacing
        PhysicalSizeX = x_spacing
    else:
        PhysicalSizeY = tform_reg_im.image_res
        PhysicalSizeX = tform_reg_im.image_res
    omexml = prepare_ome_xml_str(
        y_size,
        x_size,
        n_ch,
        tform_reg_im.im_dtype,
        tform_reg_im.is_rgb,
        PhysicalSizeX=PhysicalSizeX,
        PhysicalSizeY=PhysicalSizeY,
        PhysicalSizeXUnit="µm",
        PhysicalSizeYUnit="µm",
        Name=image_name,
        Channel=None if tform_reg_im.is_rgb else {"Name": channel_names},
    )
    # number of SubIFDs (sub-resolutions) attached to each full-res page
    subifds = n_pyr_levels - 1 if write_pyramid is True else None
    rgb_im_data = []
    if tform_reg_im.reader == "sitk":
        # sitk cannot read single channels lazily; read whole image once
        full_image = sitk.ReadImage(tform_reg_im.image_filepath)
    print(f"saving to {output_file_name}.ome.tiff")
    with TiffWriter(f"{output_file_name}.ome.tiff", bigtiff=True) as tif:
        for channel_idx in range(n_ch):
            print(f"transforming : {channel_idx}")
            if tform_reg_im.reader != "sitk":
                image = tform_reg_im.read_single_channel(channel_idx)
                image = np.squeeze(image)
                image = sitk.GetImageFromArray(image)
                image.SetSpacing(
                    (tform_reg_im.image_res, tform_reg_im.image_res)
                )
            else:
                if tform_reg_im.is_rgb:
                    # pull one RGB component out of the vector image
                    image = sitk.VectorIndexSelectionCast(
                        full_image, channel_idx
                    )
                elif len(full_image.GetSize()) > 2:
                    image = full_image[:, :, channel_idx]
                else:
                    image = full_image
            if composite_transform is not None:
                image = transform_plane(
                    image, final_transform, composite_transform
                )
            print(f"transformed : {channel_idx}")
            if tform_reg_im.is_rgb:
                # RGB channels are accumulated and written once, composed,
                # after the loop
                rgb_im_data.append(image)
            else:
                print("saving")
                if isinstance(image, sitk.Image):
                    image = sitk.GetArrayFromImage(image)
                options = dict(
                    tile=(tile_size, tile_size),
                    compression="jpeg" if tform_reg_im.is_rgb else "deflate",
                    photometric="rgb" if tform_reg_im.is_rgb else "minisblack",
                    metadata=None,
                )
                # write OME-XML to the ImageDescription tag of the first page
                description = omexml if channel_idx == 0 else None
                # write channel data
                print(f" writing channel {channel_idx} - shape: {image.shape}")
                tif.write(
                    image,
                    subifds=subifds,
                    description=description,
                    **options,
                )
                if write_pyramid:
                    for pyr_idx in range(1, n_pyr_levels):
                        resize_shape = (
                            pyr_levels[pyr_idx][0],
                            pyr_levels[pyr_idx][1],
                        )
                        # NOTE(review): cv2.resize's third positional
                        # argument is `dst`, not `interpolation` -- should
                        # probably be interpolation=cv2.INTER_LINEAR
                        image = cv2.resize(
                            image,
                            resize_shape,
                            cv2.INTER_LINEAR,
                        )
                        print(
                            f"pyr {pyr_idx} : channel {channel_idx} shape: {image.shape}"
                        )
                        # subfiletype=1 marks a reduced-resolution image
                        tif.write(image, **options, subfiletype=1)
        if tform_reg_im.is_rgb:
            # compose accumulated channels into one interleaved RGB plane
            rgb_im_data = sitk.Compose(rgb_im_data)
            rgb_im_data = sitk.GetArrayFromImage(rgb_im_data)
            options = dict(
                tile=(tile_size, tile_size),
                compression="jpeg" if tform_reg_im.is_rgb else None,
                photometric="rgb" if tform_reg_im.is_rgb else "minisblack",
                metadata=None,
            )
            # write OME-XML to the ImageDescription tag of the first page
            description = omexml
            # write channel data
            tif.write(
                rgb_im_data,
                subifds=subifds,
                description=description,
                **options,
            )
            print(f"RGB shape: {rgb_im_data.shape}")
            if write_pyramid:
                for pyr_idx in range(1, n_pyr_levels):
                    resize_shape = (
                        pyr_levels[pyr_idx][0],
                        pyr_levels[pyr_idx][1],
                    )
                    rgb_im_data = cv2.resize(
                        rgb_im_data, resize_shape, cv2.INTER_LINEAR
                    )
                    print(f"pyr {pyr_idx} : RGB , shape: {rgb_im_data.shape}")
                    tif.write(rgb_im_data, **options, subfiletype=1)
    return f"{output_file_name}.ome.tiff"
def transform_to_ome_tiff_merge(
    tform_reg_im,
    image_name,
    output_dir,
    final_transform,
    composite_transform,
    tile_size=512,
    write_pyramid=True,
):
    """Transform and merge several registered images into one OME-TIFF.

    Parameters
    ----------
    tform_reg_im:
        merge container; ``.images`` holds the individual registered
        images and ``.n_ch`` the total channel count
    image_name: str
        name used for the output file and OME metadata
    output_dir: str
        directory in which "<image_name>.ome.tiff" is written
    final_transform: list
        per-image final transforms (entries may be None)
    composite_transform: list
        per-image composite transforms (entries may be None)
    tile_size: int
        tiff tile size; halved until it fits within the image
    write_pyramid: bool
        whether to write sub-resolutions as SubIFDs

    Returns
    -------
    str
        path of the written .ome.tiff
    """
    # output geometry is taken from the first image's final transform
    y_size, x_size, y_spacing, x_spacing = get_final_yx_from_tform(
        tform_reg_im.images[0], final_transform[0]
    )
    # protect against too large tile size
    while y_size / tile_size <= 1 or x_size / tile_size <= 1:
        tile_size = tile_size // 2
    n_ch = tform_reg_im.n_ch
    pyr_levels, pyr_shapes = get_pyramid_info(y_size, x_size, n_ch, tile_size)
    n_pyr_levels = len(pyr_levels)
    output_file_name = str(Path(output_dir) / image_name)
    channel_names = format_channel_names(tform_reg_im.channel_names, n_ch)
    if final_transform is not None:
        PhysicalSizeY = y_spacing
        PhysicalSizeX = x_spacing
    else:
        PhysicalSizeY = tform_reg_im.image_res
        PhysicalSizeX = tform_reg_im.image_res
    # dtype / rgb-ness of the merged file follow the first image
    omexml = prepare_ome_xml_str(
        y_size,
        x_size,
        n_ch,
        tform_reg_im.images[0].im_dtype,
        tform_reg_im.images[0].is_rgb,
        PhysicalSizeX=PhysicalSizeX,
        PhysicalSizeY=PhysicalSizeY,
        PhysicalSizeXUnit="µm",
        PhysicalSizeYUnit="µm",
        Name=image_name,
        Channel={"Name": channel_names},
    )
    # number of SubIFDs (sub-resolutions) attached to each full-res page
    subifds = n_pyr_levels - 1 if write_pyramid is True else None
    print(f"saving to {output_file_name}.ome.tiff")
    with TiffWriter(f"{output_file_name}.ome.tiff", bigtiff=True) as tif:
        for m_idx, merge_image in enumerate(tform_reg_im.images):
            merge_n_ch = merge_image.n_ch
            for channel_idx in range(merge_n_ch):
                image = merge_image.read_single_channel(channel_idx)
                image = np.squeeze(image)
                image = sitk.GetImageFromArray(image)
                image.SetSpacing(
                    (merge_image.image_res, merge_image.image_res)
                )
                # apply this image's own transforms before writing
                if composite_transform[m_idx] is not None:
                    image = transform_plane(
                        image,
                        final_transform[m_idx],
                        composite_transform[m_idx],
                    )
                print("saving")
                if isinstance(image, sitk.Image):
                    image = sitk.GetArrayFromImage(image)
                options = dict(
                    tile=(tile_size, tile_size),
                    compression="jpeg" if merge_image.is_rgb else "deflate",
                    photometric="rgb" if merge_image.is_rgb else "minisblack",
                    metadata=None,
                )
                # write OME-XML to the ImageDescription tag of the first page
                # NOTE(review): channel_idx resets for every merge image, so
                # the OME-XML is attached at channel 0 of EACH merged image
                # rather than only the very first page -- confirm intended
                description = omexml if channel_idx == 0 else None
                # write channel data
                print(f" writing channel {channel_idx} - shape: {image.shape}")
                tif.write(
                    image,
                    subifds=subifds,
                    description=description,
                    **options,
                )
                if write_pyramid:
                    for pyr_idx in range(1, n_pyr_levels):
                        resize_shape = (
                            pyr_levels[pyr_idx][0],
                            pyr_levels[pyr_idx][1],
                        )
                        # NOTE(review): cv2.resize's third positional
                        # argument is `dst`, not `interpolation` -- should
                        # probably be interpolation=cv2.INTER_LINEAR
                        image = cv2.resize(
                            image,
                            resize_shape,
                            cv2.INTER_LINEAR,
                        )
                        print(
                            f"pyr {pyr_idx} : channel {channel_idx} shape: {image.shape}"
                        )
                        # subfiletype=1 marks a reduced-resolution image
                        tif.write(image, **options, subfiletype=1)
    return f"{output_file_name}.ome.tiff"
def compute_mask_to_bbox(mask, mask_padding=100):
    """Compute a padded bounding box around all foreground in a mask.

    Parameters
    ----------
    mask: sitk.Image
        mask image; all values >= 1 are treated as foreground
    mask_padding: int
        pixels of padding added on each side, clamped to the image bounds

    Returns
    -------
    BoundingBox
        (x_min, y_min, width, height) covering every connected component

    Notes
    -----
    Raises ValueError (from np.concatenate on an empty list) when the
    mask contains no foreground labels.
    """
    # work in pixel units regardless of the mask's physical spacing
    mask.SetSpacing((1, 1))
    mask_size = mask.GetSize()
    # binarize (keep values in [1, 255]) then label connected components
    mask = sitk.Threshold(mask, 1, 255)
    mask = sitk.ConnectedComponent(mask)
    labstats = sitk.LabelShapeStatisticsImageFilter()
    labstats.SetBackgroundValue(0)
    # skip expensive shape statistics; only bounding boxes are needed
    labstats.ComputePerimeterOff()
    labstats.ComputeFeretDiameterOff()
    labstats.ComputeOrientedBoundingBoxOff()
    labstats.Execute(mask)
    # collect each label's corner points, then take the global extremes
    bb_points = []
    for label in labstats.GetLabels():
        x1, y1, xw, yh = labstats.GetBoundingBox(label)
        x2, y2 = x1 + xw, y1 + yh
        lab_points = np.asarray([[x1, y1], [x2, y2]])
        bb_points.append(lab_points)
    bb_points = np.concatenate(bb_points)
    x_min = np.min(bb_points[:, 0])
    y_min = np.min(bb_points[:, 1])
    x_max = np.max(bb_points[:, 0])
    y_max = np.max(bb_points[:, 1])
    # expand by mask_padding on each side, clamped to the image extent
    if (x_min - mask_padding) < 0:
        x_min = 0
    else:
        x_min -= mask_padding
    if (y_min - mask_padding) < 0:
        y_min = 0
    else:
        y_min -= mask_padding
    if (x_max + mask_padding) > mask_size[0]:
        x_max = mask_size[0]
    else:
        x_max += mask_padding
    if (y_max + mask_padding) > mask_size[1]:
        y_max = mask_size[1]
    else:
        y_max += mask_padding
    x_width = x_max - x_min
    y_height = y_max - y_min
    return BoundingBox(x_min, y_min, x_width, y_height)
def sitk_vect_to_gs(image):
    """
    Convert an RGB(A) SimpleITK vector image to single-channel greyscale
    using cv2.

    Parameters
    ----------
    image
        3- or 4-sample-per-pixel SimpleITK vector image

    Returns
    -------
    scalar (non-vector) greyscale SimpleITK image
    """
    pixels = sitk.GetArrayFromImage(image)
    n_samples = pixels.shape[2]
    if n_samples == 3:
        pixels = cv2.cvtColor(pixels, cv2.COLOR_BGR2GRAY)
    elif n_samples == 4:
        pixels = cv2.cvtColor(pixels, cv2.COLOR_BGRA2GRAY)
    # other sample counts are passed through without conversion
    return sitk.GetImageFromArray(pixels, isVector=False)
def sitk_max_int_proj(image: sitk.Image) -> sitk.Image:
    """
    Collapse a multi-channel SimpleITK image (X, Y, C) to 2D by taking the
    per-pixel maximum across the channel axis.

    Parameters
    ----------
    image
        multichannel SimpleITK image

    Returns
    -------
    2D SimpleITK image; single-channel inputs are returned unchanged
    """
    # single-channel (2D) images cannot be projected — pass through
    if len(image.GetSize()) != 3:
        print(
            'cannot perform maximum intensity project on single channel image'
        )
        return image
    # project along axis 2 (channels), then drop the singleton axis
    return sitk.MaximumProjection(image, 2)[:, :, 0]
def sitk_inv_int(image):
    """
    Invert the intensities of an image.

    Useful for registering brightfield (light-background) images against
    fluorescence (dark-background) images.

    Parameters
    ----------
    image
        SimpleITK image

    Returns
    -------
    intensity-inverted SimpleITK image
    """
    return sitk.InvertIntensity(image)
def contrast_enhance(image, alpha=7, beta=1):
    """
    Enhance image contrast with a linear rescale (out = alpha * in + beta).

    The scale/offset were previously hard-coded; they are now parameters
    with defaults matching the old behavior, so existing callers are
    unaffected. Spacing metadata is preserved across the numpy round trip.

    Parameters
    ----------
    image
        SimpleITK image
    alpha : float
        multiplicative gain applied to pixel intensities (default 7)
    beta : float
        additive offset applied after scaling (default 1)

    Returns
    -------
    contrast-enhanced SimpleITK image (uint8 output of cv2.convertScaleAbs)
    """
    spacing = image.GetSpacing()
    enhanced = cv2.convertScaleAbs(
        sitk.GetArrayFromImage(image), alpha=alpha, beta=beta
    )
    enhanced = sitk.GetImageFromArray(enhanced)
    # restore the physical pixel spacing lost in the array round trip
    enhanced.SetSpacing(spacing)
    return enhanced
def std_prepro():
    """
    Build the default dictionary of preprocessing settings that yield a
    single 2D image suitable for registration.

    Returns
    -------
    dict
        default preprocessing parameters and intensity-processing callables
    """
    return {
        'image_type': 'FL',
        'ch_indices': None,
        'as_uint8': True,
        'downsample': 1,
        'max_int_proj': sitk_max_int_proj,
        'inv_int': sitk_inv_int,
    }
def tifffile_to_arraylike(image_filepath):
    """
    Open the largest series of a TIFF file as a zarr array-like.

    Parameters
    ----------
    image_filepath
        path to the TIFF file

    Returns
    -------
    (image, image_filepath) tuple where ``image`` is the base pyramid layer
    """
    series_idx = tf_get_largest_series(image_filepath)
    store = imread(image_filepath, aszarr=True, series=series_idx)
    image = zarr.open(store)
    # multiscale stores open as a group; level 0 is the base resolution
    if isinstance(image, zarr.Group):
        image = image[0]
    return image, image_filepath
def ome_tifffile_to_arraylike(image_filepath):
    """Read the largest series of an OME-TIFF as a dask array.

    Interleaved-sample images (SamplesPerPixel >= 3) that are not RGB are
    transposed so that samples become the last axis.

    Parameters
    ----------
    image_filepath
        path to an OME-TIFF file

    Returns
    -------
    (image, image_filepath) tuple where ``image`` is a dask array
    """
    # parse the embedded OME-XML so channel metadata can be inspected
    ome_metadata = xml2dict(TiffFile(image_filepath).ome_metadata)
    im_dims, im_dtype = get_tifffile_info(image_filepath)
    largest_series_idx = tf_get_largest_series(image_filepath)
    series_metadata = ome_metadata.get("OME").get("Image")
    # multi-series OME-XML stores Image entries as a list; pick the largest
    if isinstance(series_metadata, list):
        series_metadata = series_metadata[largest_series_idx]
    # Channel may be a list (one entry per channel) or a single mapping
    if isinstance(series_metadata.get("Pixels").get("Channel"), list):
        samples_per_pixel = (
            series_metadata.get("Pixels")
            .get("Channel")[0]
            .get("SamplesPerPixel")
        )
    else:
        samples_per_pixel = (
            series_metadata.get("Pixels").get("Channel").get("SamplesPerPixel")
        )
    is_rgb = guess_rgb(im_dims)
    image = zarr.open(
        imread(image_filepath, aszarr=True, series=largest_series_idx)
    )
    # multiscale stores open as a group; level 0 is the base resolution
    if isinstance(image, zarr.Group):
        image = image[0]
    image = da.from_zarr(image)
    if samples_per_pixel:
        # move interleaved samples to the last axis; assumes a 3-axis
        # (S, Y, X) layout at this point — TODO confirm
        if is_rgb is False and samples_per_pixel >= 3:
            image = image.transpose(1, 2, 0)
    return image, image_filepath
| 45,655 | 28.173163 | 97 | py |
wsireg | wsireg-master/wsireg/utils/reg_utils.py | import json
from pathlib import Path
from typing import Dict, List, Union
import itk
import numpy as np
import SimpleITK as sitk
from wsireg.parameter_maps.reg_model import RegModel
from wsireg.utils.itk_im_conversions import itk_image_to_sitk_image
# Mapping from numpy dtypes to SimpleITK scalar pixel-ID enum values
# (inverse of the scalar entries of SITK_TO_NP_DTYPE).
NP_TO_SITK_DTYPE = {
    np.dtype(np.int8): 0,
    np.dtype(np.uint8): 1,
    np.dtype(np.int16): 2,
    np.dtype(np.uint16): 3,
    np.dtype(np.int32): 4,
    np.dtype(np.uint32): 5,
    np.dtype(np.int64): 6,
    np.dtype(np.uint64): 7,
    np.dtype(np.float32): 8,
    np.dtype(np.float64): 9,
    np.dtype(np.complex64): 10,
    # was a duplicate np.complex64 key, which silently overwrote the entry
    # above; pixel ID 11 is the complex-double type, i.e. np.complex128
    np.dtype(np.complex128): 11,
}
def sitk_pmap_to_dict(pmap):
    """
    Convert a SimpleElastix ParameterMap to a plain python dictionary.

    Parameters
    ----------
    pmap
        SimpleElastix ParameterMap (or any mapping)

    Returns
    -------
    dict
        python dict with the same content; nested "image"/"invert"
        sub-maps are shallow-copied into plain dicts
    """
    converted = {}
    for key, value in pmap.items():
        if key in ("image", "invert"):
            # copy the nested parameter map into a fresh plain dict
            converted[key] = dict(value.items())
        else:
            converted[key] = value
    return converted
def pmap_dict_to_sitk(pmap_dict):
    """
    Pass a parameter-map dict through unchanged.

    Kept for API compatibility: a conversion to a SimpleElastix
    ParameterMap used to happen here (see version history), but the plain
    dict is now used directly.

    Parameters
    ----------
    pmap_dict
        parameter map stored as a python dict

    Returns
    -------
    the input dict, unchanged
    """
    return pmap_dict
def pmap_dict_to_json(pmap_dict, output_file):
    """
    Serialize an elastix parameter-map dict to a json file.

    Parameters
    ----------
    pmap_dict : dict
        parameter map stored in a python dict
    output_file : str
        filepath where the json is written
    """
    serialized = json.dumps(pmap_dict, indent=4)
    with open(output_file, "w") as fh:
        fh.write(serialized)
def json_to_pmap_dict(json_file):
    """
    Load an elastix parameter-map dict previously saved as json.

    Parameters
    ----------
    json_file : str
        filepath to the json file containing the parameter map

    Returns
    -------
    dict
        the deserialized parameter map
    """
    with open(json_file, "r") as fh:
        return json.load(fh)
def _prepare_reg_models(
    reg_params: List[Union[RegModel, Dict[str, List[str]]]]
) -> List[Dict[str, List[str]]]:
    """
    Normalize a heterogeneous list of registration models to plain dicts.

    RegModel members are replaced by their parameter-map value, strings are
    looked up in RegModel by name, and dicts pass through unchanged; items
    of any other type are silently dropped.
    """
    normalized: List[Dict[str, List[str]]] = []
    for candidate in reg_params:
        if isinstance(candidate, RegModel):
            normalized.append(candidate.value)
        elif isinstance(candidate, str):
            normalized.append(RegModel[candidate].value)
        elif isinstance(candidate, dict):
            normalized.append(candidate)
    return normalized
def parameter_to_itk_pobj(reg_param_map):
    """
    Transfer parameter data stored in dict to ITKElastix ParameterObject
    Parameters
    ----------
    reg_param_map: dict
        elastix registration parameters
    Returns
    -------
    itk_param_map:itk.ParameterObject
        ITKElastix object for registration parameters
    """
    parameter_object = itk.ParameterObject.New()
    # start from the built-in "rigid" defaults, then overwrite with the
    # user-supplied entries; defaults for keys NOT present in reg_param_map
    # remain in the returned map
    itk_param_map = parameter_object.GetDefaultParameterMap("rigid")
    for k, v in reg_param_map.items():
        itk_param_map[k] = v
    return itk_param_map
def register_2d_images_itkelx(
    source_image,
    target_image,
    reg_params: List[Dict[str, List[str]]],
    reg_output_fp: Union[str, Path],
    histogram_match=False,
    return_image=False,
):
    """
    Register 2D images with multiple models and return a list of elastix
    transformation maps.
    Parameters
    ----------
    source_image : SimpleITK.Image
        RegImage of image to be aligned
    target_image : SimpleITK.Image
        RegImage that is being aligned to (grammar is hard)
    reg_params : list of dict
        registration parameter maps stored in a dict, can be file paths to SimpleElastix parameterMaps stored
        as text or one of the default parameter maps (see parameter_load() function)
    reg_output_fp : str
        where to store registration outputs (iteration data and transformation files)
    histogram_match : bool
        whether to attempt histogram matching to improve registration
    return_image : bool
        whether to also resample and return the registered moving image
    Returns
    -------
    tform_list: list
        list of ITKElastix transformation parameter maps
    image: itk.Image
        resulting registered moving image (only when return_image is True)
    """
    if histogram_match is True:
        matcher = sitk.HistogramMatchingImageFilter()
        matcher.SetNumberOfHistogramLevels(64)
        matcher.SetNumberOfMatchPoints(7)
        matcher.ThresholdAtMeanIntensityOn()
        # NOTE(review): the matched result is stored on source_image.image,
        # but registration below reads source_image.reg_image — looks like
        # the matched image may never be used; confirm intended attribute
        source_image.image = matcher.Execute(
            source_image.reg_image, target_image.reg_image
        )
    # remember the sitk pixel type so the registered image can be cast back
    pixel_id = source_image.reg_image.GetPixelID()
    # convert both reg images to itk in place for ITKElastix
    source_image.reg_image_sitk_to_itk()
    target_image.reg_image_sitk_to_itk()
    selx = itk.ElastixRegistrationMethod.New(
        source_image.reg_image, target_image.reg_image
    )
    # Set additional options
    selx.SetLogToConsole(True)
    selx.SetOutputDirectory(str(reg_output_fp))
    # optional masks restrict sampling to the masked regions
    if source_image.mask is not None:
        selx.SetMovingMask(source_image.mask)
    if target_image.mask is not None:
        selx.SetFixedMask(target_image.mask)
    selx.SetMovingImage(source_image.reg_image)
    selx.SetFixedImage(target_image.reg_image)
    parameter_object_registration = itk.ParameterObject.New()
    for idx, pmap in enumerate(reg_params):
        if idx == 0:
            pmap["WriteResultImage"] = ["true"] if return_image else ["false"]
            # with a fixed mask, automatic initialization is disabled on the
            # first model; subsequent models always start from the previous
            if target_image.mask is not None:
                pmap["AutomaticTransformInitialization"] = ["false"]
            else:
                pmap["AutomaticTransformInitialization"] = ['true']
            parameter_object_registration.AddParameterMap(pmap)
        else:
            pmap["WriteResultImage"] = ["true"] if return_image else ["false"]
            pmap["AutomaticTransformInitialization"] = ['false']
            parameter_object_registration.AddParameterMap(pmap)
    selx.SetParameterObject(parameter_object_registration)
    # Update filter object (required) — this runs the registration itself
    selx.UpdateLargestPossibleRegion()
    # Results of Registration
    result_transform_parameters = selx.GetTransformParameterObject()
    # copy each resulting itk parameter map into a plain python dict
    tform_list = []
    for idx in range(result_transform_parameters.GetNumberOfParameterMaps()):
        tform = {}
        for k, v in result_transform_parameters.GetParameterMap(idx).items():
            tform[k] = v
        tform_list.append(tform)
    if return_image is False:
        return tform_list
    else:
        # convert the registered image back to sitk with its original dtype
        image = selx.GetOutput()
        image = itk_image_to_sitk_image(image)
        image = sitk.Cast(image, pixel_id)
        return tform_list, image
| 6,736 | 27.42616 | 109 | py |
wsireg | wsireg-master/wsireg/utils/__init__.py | 0 | 0 | 0 | py | |
wsireg | wsireg-master/wsireg/parameter_maps/preprocessing.py | from enum import Enum
from typing import Callable, Dict, List, NamedTuple, Optional, Tuple, Union
from pydantic import BaseModel, validator
class ImageType(str, Enum):
    """Photometric interpretation of a registration image.

    * ``DARK`` ("FL"): dark background, e.g. fluorescence
    * ``LIGHT`` ("BF"): light background, e.g. brightfield
    """

    DARK = "FL"
    LIGHT = "BF"
class CoordinateFlip(str, Enum):
    """Coordinate flip options

    * "h" : horizontal flip
    * "v" : vertical flip
    """

    HORIZONTAL = "h"
    VERTIAL = "v"
    # correctly-spelled alias for the typo'd VERTIAL member above; enum
    # members with a duplicate value are aliases of the first definition,
    # so existing CoordinateFlip.VERTIAL references keep working
    VERTICAL = "v"
class BoundingBox(NamedTuple):
    """Rectangular region expressed as x, y, width, height (pixels)."""

    X: int
    Y: int
    WIDTH: int
    HEIGHT: int
def _transform_to_bbox(mask_bbox: Union[Tuple[int, int, int, int], List[int]]):
    """Coerce an (x, y, width, height) sequence into a BoundingBox."""
    return BoundingBox(*mask_bbox)
def _index_to_list(ch_indices: Union[int, List[int]]):
if isinstance(ch_indices, int):
ch_indices = [ch_indices]
return ch_indices
def _transform_custom_proc(
custom_procs: Union[List[Callable], Tuple[Callable, ...]]
):
return {
f"custom processing {str(idx+1).zfill(2)}": proc
for idx, proc in enumerate(custom_procs)
}
class ImagePreproParams(BaseModel):
    """Preprocessing parameter model
    Attributes
    ----------
    image_type: ImageType
        Whether image is dark or light background. Light background images are intensity inverted
        by default
    max_int_proj: bool
        Perform max intensity projection number of channels > 1.
    contrast_enhance: bool
        Enhance contrast of image
    ch_indices: list of int or int
        Channel indicies to use for registartion, 0-index, so ch_indices = 0, pulls the first channel
    as_uint8: bool
        Whether to byte scale registration image data for memory saving
    invert_intensity: bool
        invert the intensity of an image
    rot_cc: int, float
        Rotate image counter-clockwise by degrees, can be positive or negative (cw rot)
    flip: CoordinateFlip, default: None
        flip coordinates, "v" = vertical flip, "h" = horizontal flip
    crop_to_mask_bbox: bool
        Convert a binary mask to a bounding box and crop to this area
    mask_bbox: tuple or list of 4 ints
        supply a pre-computed list of bbox info of form x,y,width,height
    downsampling: int
        Downsampling by integer factor, i.e., downsampling = 3, downsamples image 3x
    use_mask: bool
        Whether to use mask in elastix registration. At times it is better to use the mask to find a cropping area
        then use the mask during the registration process as errors are frequent
    custom_processing: callable
        Custom intensity preprocessing functions in a dict like {"my_custom_process: custom_func} that will be applied
        to the image. Must take in an sitk.Image and return an sitk.Image
    """

    # intensity preprocessing
    image_type: ImageType = ImageType.DARK
    max_int_proj: bool = True
    ch_indices: Optional[List[int]] = None
    as_uint8: bool = True
    contrast_enhance: bool = False
    invert_intensity: bool = False
    custom_processing: Optional[Dict[str, Callable]] = None
    # spatial preprocessing
    rot_cc: Union[int, float] = 0
    flip: Optional[CoordinateFlip] = None
    crop_to_mask_bbox: bool = False
    mask_bbox: Optional[BoundingBox] = None
    downsampling: int = 1
    use_mask: bool = True

    # pre-validator: coerce a raw (x, y, w, h) sequence into a BoundingBox
    # NOTE(review): an explicitly-passed None would reach
    # _transform_to_bbox and fail on unpacking — confirm callers never
    # pass mask_bbox=None explicitly
    @validator('mask_bbox', pre=True)
    def _make_bbox(cls, v):
        return _transform_to_bbox(v)

    # pre-validator: promote a bare int channel index to a one-element list
    @validator('ch_indices', pre=True)
    def _make_ch_list(cls, v):
        return _index_to_list(v)

    # pre-validator: label a list/tuple of callables with ordered
    # "custom processing NN" keys; dicts pass through unchanged
    @validator('custom_processing', pre=True)
    def _check_custom_prepro(cls, v):
        if isinstance(v, (list, tuple)):
            return _transform_custom_proc(v)
        else:
            return v

    # make sure serialization to dict is json like
    # for saving YAML file
    def dict(self, **kwargs):
        output = super().dict(**kwargs)
        for k, v in output.items():
            if isinstance(v, Enum):
                output[k] = v.value
        return output

    class Config:
        # NOTE(review): pydantic v1's option is `use_enum_values`;
        # `use_enum_names` appears to be unrecognized (and thus ignored) —
        # the custom dict() above performs the enum -> value conversion
        # instead. Confirm before renaming, as `use_enum_values` would
        # change attribute types on instances.
        use_enum_names = True
| 4,110 | 29.007299 | 118 | py |
wsireg | wsireg-master/wsireg/parameter_maps/reg_params.py | DEFAULT_REG_PARAM_MAPS = {
'rigid': {
"AutomaticScalesEstimation": ['true'],
"AutomaticTransformInitialization": ['true'],
"BSplineInterpolationOrder": ['1'],
"CompressResultImage": ['true'],
"DefaultPixelValue": ['0'],
"ErodeMask": ['false'],
"FinalBSplineInterpolationOrder": ['1'],
"FixedImageDimension": ['2'],
"FixedImagePyramid": ['FixedRecursiveImagePyramid'],
"FixedInternalImagePixelType": ['float'],
"HowToCombineTransforms": ['Compose'],
"ImageSampler": ["Random"],
"Interpolator": ['LinearInterpolator'],
"MaximumNumberOfIterations": ['200'],
"MaximumNumberOfSamplingAttempts": [
'10',
],
"MaximumStepLength": [
'100.0',
'75.0',
'66.0',
'50.0',
'25.0',
'15.0',
'10.0',
'10.0',
'5.0',
'1.0',
],
"Metric": ['AdvancedMattesMutualInformation'],
"MovingImageDimension": ['2'],
"MovingImagePyramid": ['MovingRecursiveImagePyramid'],
"MovingInternalImagePixelType": ['float'],
"NewSamplesEveryIteration": ['true'],
"NumberOfHistogramBins": ['16'],
"NumberOfResolutions": ['10'],
"NumberOfSpatialSamples": ['10000'],
"Optimizer": ['AdaptiveStochasticGradientDescent'],
"Registration": ['MultiResolutionRegistration'],
"RequiredRatioOfValidSamples": ['0.05'],
"ResampleInterpolator": ['FinalNearestNeighborInterpolator'],
"Resampler": ['DefaultResampler'],
"ResultImageFormat": ['mha'],
"ResultImagePixelType": ['short'],
"Transform": ['EulerTransform'],
"UseDirectionCosines": ['true'],
"WriteResultImage": ['false'],
"WriteTransformParametersEachResolution": ['true'],
},
'affine': {
"AutomaticScalesEstimation": ['true'],
"AutomaticTransformInitialization": ['true'],
"BSplineInterpolationOrder": ['1'],
"CompressResultImage": ['true'],
"DefaultPixelValue": ['0'],
"ErodeMask": ['false'],
"FinalBSplineInterpolationOrder": ['1'],
"FixedImageDimension": ['2'],
"FixedImagePyramid": ['FixedRecursiveImagePyramid'],
"FixedInternalImagePixelType": ['float'],
"HowToCombineTransforms": ['Compose'],
"ImageSampler": ['Random'],
"Interpolator": ['LinearInterpolator'],
"MaximumNumberOfIterations": ['200'],
"MaximumNumberOfSamplingAttempts": [
'10',
],
"MaximumStepLength": [
'100.0',
'75.0',
'66.0',
'50.0',
'25.0',
'15.0',
'10.0',
'10.0',
'5.0',
'1.0',
],
"Metric": ['AdvancedMattesMutualInformation'],
"MovingImageDimension": ['2'],
"MovingImagePyramid": ['MovingRecursiveImagePyramid'],
"MovingInternalImagePixelType": ['float'],
"NewSamplesEveryIteration": ['true'],
"NumberOfHistogramBins": ['32'],
"NumberOfResolutions": ['10'],
"NumberOfSpatialSamples": ['10000'],
"Optimizer": ['AdaptiveStochasticGradientDescent'],
"Registration": ['MultiResolutionRegistration'],
"RequiredRatioOfValidSamples": ['0.05'],
"ResampleInterpolator": ['FinalNearestNeighborInterpolator'],
"Resampler": ['DefaultResampler'],
"ResultImageFormat": ['mha'],
"ResultImagePixelType": ['short'],
"Transform": ['AffineTransform'],
"UseDirectionCosines": ['true'],
"WriteResultImage": ['false'],
"WriteTransformParametersEachResolution": ['true'],
},
'similarity': {
"AutomaticScalesEstimation": ['true'],
"AutomaticTransformInitialization": ['true'],
"BSplineInterpolationOrder": ['1'],
"CompressResultImage": ['true'],
"DefaultPixelValue": ['0'],
"ErodeMask": ['false'],
"FinalBSplineInterpolationOrder": ['1'],
"FixedImageDimension": ['2'],
"FixedImagePyramid": ['FixedRecursiveImagePyramid'],
"FixedInternalImagePixelType": ['float'],
"HowToCombineTransforms": ['Compose'],
"ImageSampler": ['Random'],
"Interpolator": ['LinearInterpolator'],
"MaximumNumberOfIterations": ['200'],
"MaximumNumberOfSamplingAttempts": [
'10',
],
"MaximumStepLength": [
'100.0',
'75.0',
'66.0',
'50.0',
'25.0',
'15.0',
'10.0',
'10.0',
'5.0',
'1.0',
],
"Metric": ['AdvancedMattesMutualInformation'],
"MovingImageDimension": ['2'],
"MovingImagePyramid": ['MovingRecursiveImagePyramid'],
"MovingInternalImagePixelType": ['float'],
"NewSamplesEveryIteration": ['true'],
"NumberOfHistogramBins": ['32'],
"NumberOfResolutions": ['10'],
"NumberOfSpatialSamples": ['10000'],
"Optimizer": ['AdaptiveStochasticGradientDescent'],
"Registration": ['MultiResolutionRegistration'],
"RequiredRatioOfValidSamples": ['0.05'],
"ResampleInterpolator": ['FinalNearestNeighborInterpolator'],
"Resampler": ['DefaultResampler'],
"ResultImageFormat": ['mha'],
"ResultImagePixelType": ['short'],
"Transform": ['SimilarityTransform'],
"UseDirectionCosines": ['true'],
"WriteResultImage": ['false'],
"WriteTransformParametersEachResolution": ['true'],
},
'nl': {
"AutomaticScalesEstimation": ['true'],
"AutomaticTransformInitialization": ['false'],
"BSplineInterpolationOrder": ['1'],
"CompressResultImage": ['true'],
"DefaultPixelValue": ['0'],
"ErodeMask": ['false'],
"FinalBSplineInterpolationOrder": ['1'],
"FinalGridSpacingInPhysicalUnits": ['100'],
"FixedImageDimension": ['2'],
"FixedImagePyramid": ['FixedRecursiveImagePyramid'],
"FixedInternalImagePixelType": ['float'],
"GridSpacingSchedule": [
'512',
'512',
'392',
'392',
'256',
'256',
'128',
'128',
'64',
'64',
'32',
'32',
'16',
'16',
'4',
'4',
'2',
'2',
'1',
'1',
],
"HowToCombineTransforms": ['Compose'],
"ImageSampler": ['Random'],
"Interpolator": ['LinearInterpolator'],
"MaximumNumberOfIterations": ['200'],
"MaximumNumberOfSamplingAttempts": [
'10',
],
"MaximumStepLength": [
'100',
'90',
'70',
'50',
'40',
'30',
'20',
'10',
'1',
'1',
],
"Metric": ['AdvancedMattesMutualInformation'],
"MovingImageDimension": ['2'],
"MovingImagePyramid": ['MovingRecursiveImagePyramid'],
"MovingInternalImagePixelType": ['float'],
"NewSamplesEveryIteration": ['true'],
"NumberOfHistogramBins": ['32'],
"NumberOfResolutions": ['10'],
"NumberOfSpatialSamples": ['50000'],
"Optimizer": ['AdaptiveStochasticGradientDescent'],
"Registration": ['MultiResolutionRegistration'],
"RequiredRatioOfValidSamples": ['0.05'],
"ResampleInterpolator": ['FinalNearestNeighborInterpolator'],
"Resampler": ['DefaultResampler'],
"ResultImageFormat": ['mha'],
"ResultImagePixelType": ['short'],
"Transform": ['BSplineTransform'],
"UseDirectionCosines": ['true'],
"WriteResultImage": ['false'],
"WriteTransformParametersEachResolution": ['true'],
},
'fi_correction': {
"AutomaticScalesEstimation": ['true'],
"AutomaticTransformInitialization": ['false'],
"BSplineInterpolationOrder": ['1'],
"CompressResultImage": ['true'],
"DefaultPixelValue": ['0'],
"ErodeMask": ['false'],
"FinalBSplineInterpolationOrder": ['1'],
"FixedImageDimension": ['2'],
"FixedImagePyramid": ['FixedRecursiveImagePyramid'],
"FixedInternalImagePixelType": ['float'],
"HowToCombineTransforms": ['Compose'],
"ImagePyramidSchedule": ['8', '8', '4', '4', '2', '2', '1', '1'],
"ImageSampler": ['Random'],
"Interpolator": ['LinearInterpolator'],
"MaximumNumberOfIterations": ['75'],
"MaximumNumberOfSamplingAttempts": [
'10',
],
"MaximumStepLength": ['100', '50', '20', '10'],
"Metric": ['AdvancedMattesMutualInformation'],
"MovingImageDimension": ['2'],
"MovingImagePyramid": ['MovingRecursiveImagePyramid'],
"MovingInternalImagePixelType": ['float'],
"NewSamplesEveryIteration": ['true'],
"NumberOfHistogramBins": ['16'],
"NumberOfResolutions": ['4'],
"NumberOfSpatialSamples": ['10000'],
"Optimizer": ['AdaptiveStochasticGradientDescent'],
"Registration": ['MultiResolutionRegistration'],
"RequiredRatioOfValidSamples": ['0.05'],
"ResampleInterpolator": ['FinalNearestNeighborInterpolator'],
"Resampler": ['DefaultResampler'],
"ResultImageFormat": ['mha'],
"ResultImagePixelType": ['short'],
"Transform": ['EulerTransform'],
"UseDirectionCosines": ['true'],
"WriteResultImage": ['true'],
"WriteTransformParametersEachResolution": ['false'],
},
'nl-reduced': {
"AutomaticScalesEstimation": ['true'],
"AutomaticTransformInitialization": ['false'],
"BSplineInterpolationOrder": ['1'],
"CompressResultImage": ['true'],
"DefaultPixelValue": ['0'],
"ErodeMask": ['false'],
"FinalBSplineInterpolationOrder": ['1'],
"FinalGridSpacingInPhysicalUnits": ['100'],
"FixedImageDimension": ['2'],
"FixedImagePyramid": ['FixedRecursiveImagePyramid'],
"FixedInternalImagePixelType": ['float'],
"GridSpacingSchedule": [
'392',
'392',
'256',
'256',
'128',
'128',
'64',
'64',
'32',
'32',
],
"HowToCombineTransforms": ['Compose'],
"ImageSampler": ['Random'],
"Interpolator": ['LinearInterpolator'],
"MaximumNumberOfIterations": ['150'],
"MaximumNumberOfSamplingAttempts": [
'10',
],
"MaximumStepLength": [
'50',
'40',
'30',
'20',
],
"Metric": ['AdvancedMattesMutualInformation'],
"MovingImageDimension": ['2'],
"MovingImagePyramid": ['MovingRecursiveImagePyramid'],
"MovingInternalImagePixelType": ['float'],
"NewSamplesEveryIteration": ['true'],
"NumberOfHistogramBins": ['32'],
"NumberOfResolutions": ['5'],
"NumberOfSpatialSamples": ['50000'],
"Optimizer": ['AdaptiveStochasticGradientDescent'],
"Registration": ['MultiResolutionRegistration'],
"RequiredRatioOfValidSamples": ['0.05'],
"ResampleInterpolator": ['FinalNearestNeighborInterpolator'],
"Resampler": ['DefaultResampler'],
"ResultImageFormat": ['mha'],
"ResultImagePixelType": ['short'],
"Transform": ['BSplineTransform'],
"UseDirectionCosines": ['true'],
"WriteResultImage": ['false'],
"WriteTransformParametersEachResolution": ['true'],
},
'nl-mid': {
"AutomaticScalesEstimation": ['true'],
"AutomaticTransformInitialization": ['false'],
"BSplineInterpolationOrder": ['1'],
"CompressResultImage": ['true'],
"DefaultPixelValue": ['0'],
"ErodeMask": ['false'],
"FinalBSplineInterpolationOrder": ['1'],
"FinalGridSpacingInPhysicalUnits": ['150'],
"FixedImageDimension": ['2'],
"FixedImagePyramid": ['FixedRecursiveImagePyramid'],
"FixedInternalImagePixelType": ['float'],
"GridSpacingSchedule": [
'512',
'512',
'128',
'128',
'64',
'64',
'32',
'32',
'2',
'2',
],
"HowToCombineTransforms": ['Compose'],
"ImageSampler": ['Random'],
"Interpolator": ['LinearInterpolator'],
"MaximumNumberOfIterations": ['200'],
"MaximumNumberOfSamplingAttempts": [
'10',
],
"MaximumStepLength": [
'100',
'70',
'50',
'30',
'10',
],
"Metric": ['AdvancedMattesMutualInformation'],
"MovingImageDimension": ['2'],
"MovingImagePyramid": ['MovingRecursiveImagePyramid'],
"MovingInternalImagePixelType": ['float'],
"NewSamplesEveryIteration": ['true'],
"NumberOfHistogramBins": ['32'],
"NumberOfResolutions": ['5'],
"NumberOfSpatialSamples": ['50000'],
"Optimizer": ['AdaptiveStochasticGradientDescent'],
"Registration": ['MultiResolutionRegistration'],
"RequiredRatioOfValidSamples": ['0.05'],
"ResampleInterpolator": ['FinalNearestNeighborInterpolator'],
"Resampler": ['DefaultResampler'],
"ResultImageFormat": ['mha'],
"ResultImagePixelType": ['short'],
"Transform": ['BSplineTransform'],
"UseDirectionCosines": ['true'],
"WriteResultImage": ['false'],
"WriteTransformParametersEachResolution": ['true'],
},
'rigid-expanded': {
"AutomaticScalesEstimation": ['true'],
"AutomaticTransformInitialization": ['true'],
"BSplineInterpolationOrder": ['1'],
"CompressResultImage": ['true'],
"DefaultPixelValue": ['0'],
"ErodeMask": ['false'],
"FinalBSplineInterpolationOrder": ['1'],
"FixedImageDimension": ['2'],
"FixedImagePyramid": ['FixedRecursiveImagePyramid'],
"FixedInternalImagePixelType": ['float'],
"HowToCombineTransforms": ['Compose'],
"ImageSampler": ["Random"],
"Interpolator": ['LinearInterpolator'],
"MaximumNumberOfIterations": ['500'],
"MaximumNumberOfSamplingAttempts": [
'10',
],
"MaximumStepLength": [
'100.0',
'75.0',
'66.0',
'50.0',
'25.0',
'15.0',
'10.0',
'10.0',
'5.0',
'1.0',
],
"Metric": ['AdvancedMattesMutualInformation'],
"MovingImageDimension": ['2'],
"MovingImagePyramid": ['MovingRecursiveImagePyramid'],
"MovingInternalImagePixelType": ['float'],
"NewSamplesEveryIteration": ['true'],
"NumberOfHistogramBins": ['16'],
"NumberOfResolutions": ['10'],
"NumberOfSpatialSamples": ['30000'],
"Optimizer": ['AdaptiveStochasticGradientDescent'],
"Registration": ['MultiResolutionRegistration'],
"RequiredRatioOfValidSamples": ['0.05'],
"ResampleInterpolator": ['FinalNearestNeighborInterpolator'],
"Resampler": ['DefaultResampler'],
"ResultImageFormat": ['mha'],
"ResultImagePixelType": ['short'],
"Transform": ['EulerTransform'],
"UseDirectionCosines": ['true'],
"WriteResultImage": ['false'],
"WriteTransformParametersEachResolution": ['true'],
},
'nl3': {
"AutomaticScalesEstimation": ['true'],
"AutomaticTransformInitialization": ['false'],
"BSplineInterpolationOrder": ['1'],
"CompressResultImage": ['true'],
"DefaultPixelValue": ['0'],
"ErodeMask": ['false'],
"FinalBSplineInterpolationOrder": ['1'],
"FinalGridSpacingInPhysicalUnits": ['200'],
"FixedImageDimension": ['2'],
"FixedImagePyramid": ['FixedRecursiveImagePyramid'],
"FixedImagePyramidSchedule": [str(int(2**2)), str(int(2**2))],
"FixedInternalImagePixelType": ['float'],
"GridSpacingSchedule": [
'8',
'8',
],
"HowToCombineTransforms": ['Compose'],
"ImageSampler": ['Random'],
"Interpolator": ['LinearInterpolator'],
"MaximumNumberOfIterations": ['1000'],
"MaximumNumberOfSamplingAttempts": [
'10',
],
"MaximumStepLength": [
'10',
],
"Metric": ['AdvancedMattesMutualInformation'],
"MovingImageDimension": ['2'],
"MovingImagePyramid": ['MovingRecursiveImagePyramid'],
"MovingImagePyramidSchedule": [str(int(2**2)), str(int(2**2))],
"MovingInternalImagePixelType": ['float'],
"NewSamplesEveryIteration": ['true'],
"NumberOfHistogramBins": ['32'],
"NumberOfResolutions": ['1'],
"NumberOfSpatialSamples": ['125000'],
"Optimizer": ['AdaptiveStochasticGradientDescent'],
"Registration": ['MultiResolutionRegistration'],
"RequiredRatioOfValidSamples": ['0.05'],
"ResampleInterpolator": ['FinalNearestNeighborInterpolator'],
"Resampler": ['DefaultResampler'],
"ResultImageFormat": ['mha'],
"ResultImagePixelType": ['short'],
"Transform": ['BSplineTransform'],
"UseDirectionCosines": ['true'],
"WriteResultImage": ['false'],
"WriteTransformParametersEachResolution": ['true'],
},
'nl2': {
"AutomaticScalesEstimation": ['true'],
"AutomaticTransformInitialization": ['false'],
"BSplineInterpolationOrder": ['1'],
"CompressResultImage": ['true'],
"DefaultPixelValue": ['0'],
"ErodeMask": ['false'],
"FinalBSplineInterpolationOrder": ['1'],
"FinalGridSpacingInPhysicalUnits": ['75'],
"FixedImageDimension": ['2'],
"FixedImagePyramid": ['FixedRecursiveImagePyramid'],
"FixedInternalImagePixelType": ['float'],
"GridSpacingSchedule": [
'512',
'512',
'392',
'392',
'256',
'256',
'128',
'128',
'64',
'64',
'32',
'32',
'16',
'16',
'4',
'4',
'2',
'2',
'1',
'1',
],
"HowToCombineTransforms": ['Compose'],
"ImageSampler": ['Random'],
"Interpolator": ['LinearInterpolator'],
"MaximumNumberOfIterations": ['200'],
"MaximumNumberOfSamplingAttempts": [
'10',
],
"MaximumStepLength": [
'25',
'20',
'15',
'10',
'10',
'8',
'5',
'5',
'5',
'2',
],
# "MaximumStepLength": [
# '50',
# '45',
# '40',
# '35',
# '30',
# '25',
# '5',
# '5',
# '5',
# '2',
# ],
"Metric": ['AdvancedMattesMutualInformation'],
"MovingImageDimension": ['2'],
"MovingImagePyramid": ['MovingRecursiveImagePyramid'],
"MovingInternalImagePixelType": ['float'],
"NewSamplesEveryIteration": ['true'],
"NumberOfHistogramBins": ['32'],
"NumberOfResolutions": ['10'],
"NumberOfSpatialSamples": ['15000'],
"Optimizer": ['AdaptiveStochasticGradientDescent'],
"Registration": ['MultiResolutionRegistration'],
"RequiredRatioOfValidSamples": ['0.05'],
"ResampleInterpolator": ['FinalNearestNeighborInterpolator'],
"Resampler": ['DefaultResampler'],
"ResultImageFormat": ['mha'],
"ResultImagePixelType": ['short'],
"Transform": ['BSplineTransform'],
"UseDirectionCosines": ['true'],
"WriteResultImage": ['false'],
"WriteTransformParametersEachResolution": ['true'],
},
}
# Quick-test variants: same parameter maps with MaximumNumberOfIterations
# capped at 10 so a pipeline can be smoke-tested quickly.
# NOTE: .copy() is shallow — variants share their value lists with the base
# maps except for the keys reassigned (with brand-new lists) below.
test_rig = DEFAULT_REG_PARAM_MAPS["rigid"].copy()
test_aff = DEFAULT_REG_PARAM_MAPS["affine"].copy()
test_sim = DEFAULT_REG_PARAM_MAPS["similarity"].copy()
test_nl = DEFAULT_REG_PARAM_MAPS["nl"].copy()
test_rig["MaximumNumberOfIterations"] = ["10"]
test_aff["MaximumNumberOfIterations"] = ["10"]
test_sim["MaximumNumberOfIterations"] = ["10"]
test_nl["MaximumNumberOfIterations"] = ["10"]
DEFAULT_REG_PARAM_MAPS["rigid_test"] = test_rig
DEFAULT_REG_PARAM_MAPS["affine_test"] = test_aff
DEFAULT_REG_PARAM_MAPS["similarity_test"] = test_sim
DEFAULT_REG_PARAM_MAPS["nl_test"] = test_nl
# advanced mean squares: variants using the AdvancedMeanSquares metric
ams_rig = DEFAULT_REG_PARAM_MAPS["rigid"].copy()
ams_aff = DEFAULT_REG_PARAM_MAPS["affine"].copy()
ams_sim = DEFAULT_REG_PARAM_MAPS["similarity"].copy()
ams_nl = DEFAULT_REG_PARAM_MAPS["nl"].copy()
ams_rig["Metric"] = ["AdvancedMeanSquares"]
ams_aff["Metric"] = ["AdvancedMeanSquares"]
ams_sim["Metric"] = ["AdvancedMeanSquares"]
ams_nl["Metric"] = ["AdvancedMeanSquares"]
DEFAULT_REG_PARAM_MAPS["rigid_ams"] = ams_rig
DEFAULT_REG_PARAM_MAPS["affine_ams"] = ams_aff
DEFAULT_REG_PARAM_MAPS["similarity_ams"] = ams_sim
DEFAULT_REG_PARAM_MAPS["nl_ams"] = ams_nl
# normalized correlation: variants using AdvancedNormalizedCorrelation
anc_rig = DEFAULT_REG_PARAM_MAPS["rigid"].copy()
anc_aff = DEFAULT_REG_PARAM_MAPS["affine"].copy()
anc_sim = DEFAULT_REG_PARAM_MAPS["similarity"].copy()
anc_nl = DEFAULT_REG_PARAM_MAPS["nl"].copy()
anc_rig["Metric"] = ["AdvancedNormalizedCorrelation"]
anc_aff["Metric"] = ["AdvancedNormalizedCorrelation"]
anc_sim["Metric"] = ["AdvancedNormalizedCorrelation"]
anc_nl["Metric"] = ["AdvancedNormalizedCorrelation"]
DEFAULT_REG_PARAM_MAPS["rigid_anc"] = anc_rig
DEFAULT_REG_PARAM_MAPS["affine_anc"] = anc_aff
DEFAULT_REG_PARAM_MAPS["similarity_anc"] = anc_sim
DEFAULT_REG_PARAM_MAPS["nl_anc"] = anc_nl
| 22,148 | 35.250409 | 73 | py |
wsireg | wsireg-master/wsireg/parameter_maps/transformations.py | BASE_RIG_TFORM = dict(
{
"Transform": ["EulerTransform"],
"NumberOfParameters": ["3"],
"TransformParameters": ["0", "0", "0"],
"InitialTransformParametersFileName": ["NoInitialTransform"],
"HowToCombineTransforms": ["Compose"],
"FixedImageDimension": ["2"],
"MovingImageDimension": ["2"],
"FixedInternalImagePixelType": ["float"],
"MovingInternalImagePixelType": ["float"],
"Size": ["0", "0"],
"Index": ["0", "0"],
"Spacing": ["", ""],
"Origin": ["0.0000", "0.0000"],
"Direction": [
"1.0000000000",
"0.0000000000",
"0.0000000000",
"1.0000000000",
],
"UseDirectionCosines": ["true"],
"CenterOfRotationPoint": ["0", "0"],
"ResampleInterpolator": ["FinalNearestNeighborInterpolator"],
"Resampler": ["DefaultResampler"],
"DefaultPixelValue": ["0.000000"],
"ResultImageFormat": ["mha"],
"ResultImagePixelType": ["float"],
"CompressResultImage": ["true"],
}
)
BASE_TRANSLATION_TFORM = dict(
{
"Transform": ["TranslationTransform"],
"NumberOfParameters": ["2"],
"TransformParameters": ["0", "0"],
"InitialTransformParametersFileName": ["NoInitialTransform"],
"HowToCombineTransforms": ["Compose"],
"FixedImageDimension": ["2"],
"MovingImageDimension": ["2"],
"FixedInternalImagePixelType": ["float"],
"MovingInternalImagePixelType": ["float"],
"Size": ["0", "0"],
"Index": ["0", "0"],
"Spacing": ["", ""],
"Origin": ["0.0000", "0.0000"],
"Direction": [
"1.0000000000",
"0.0000000000",
"0.0000000000",
"1.0000000000",
],
"UseDirectionCosines": ["true"],
"CenterOfRotationPoint": ["0", "0"],
"ResampleInterpolator": ["FinalNearestNeighborInterpolator"],
"Resampler": ["DefaultResampler"],
"DefaultPixelValue": ["0.000000"],
"ResultImageFormat": ["mha"],
"ResultImagePixelType": ["float"],
"CompressResultImage": ["true"],
}
)
BASE_AFF_TFORM = dict(
{
"Transform": ["AffineTransform"],
"NumberOfParameters": ["6"],
"TransformParameters": ["1", "0", "0", "1", "0", "0"],
"InitialTransformParametersFileName": ["NoInitialTransform"],
"HowToCombineTransforms": ["Compose"],
"FixedImageDimension": ["2"],
"MovingImageDimension": ["2"],
"FixedInternalImagePixelType": ["float"],
"MovingInternalImagePixelType": ["float"],
"Size": ["0", "0"],
"Index": ["0", "0"],
"Spacing": ["0", "0"],
"Origin": ["0.0000", "0.0000"],
"Direction": [
"1.0000000000",
"0.0000000000",
"0.0000000000",
"1.0000000000",
],
"UseDirectionCosines": ["true"],
"CenterOfRotationPoint": ["0", "0"],
"ResampleInterpolator": ["FinalNearestNeighborInterpolator"],
"Resampler": ["DefaultResampler"],
"DefaultPixelValue": ["0.000000"],
"ResultImageFormat": ["mha"],
"ResultImagePixelType": ["float"],
"CompressResultImage": ["true"],
}
)
| 3,312 | 33.154639 | 69 | py |
wsireg | wsireg-master/wsireg/parameter_maps/__init__.py | """Parameter maps."""
__author__ = """Nathan Heath Patterson"""
__email__ = 'heath.patterson@vanderbilt.edu'
__version__ = '0.0.1'
| 132 | 21.166667 | 44 | py |
wsireg | wsireg-master/wsireg/parameter_maps/reg_model.py | from enum import Enum, EnumMeta
from pathlib import Path
from typing import Dict, List, Tuple, Union
from wsireg.parameter_maps.reg_params import DEFAULT_REG_PARAM_MAPS
DEFAULT_REG_PARAM_MAPS.keys()
PATH_LIKE = Union[str, Path]
def _elx_lineparser(
    line: str,
) -> Union[Tuple[str, List[str]], Tuple[None, None]]:
    """Parse a single elastix parameter-file line into a key/value pair.

    Lines of the form ``(Key value1 value2)`` yield
    ``("Key", ["value1", "value2"])``; any line not opening with ``(``
    (comments, etc.) yields ``(None, None)``.
    """
    if line[0] != "(":
        return None, None
    # strip the surrounding parentheses, quotes and trailing newline
    cleaned = (
        line.replace("(", "")
        .replace(")", "")
        .replace("\n", "")
        .replace('"', "")
    )
    pieces = cleaned.split(" ", 1)
    key, raw_value = pieces[0], pieces[1]
    if " " in raw_value:
        # multi-valued parameter: split on spaces, dropping empty tokens
        values = [tok for tok in raw_value.split(" ") if tok != ""]
    else:
        values = [raw_value]
    return key, values
def _read_elastix_parameter_file(
    elx_param_fp: PATH_LIKE,
) -> Dict[str, List[str]]:
    """Read an elastix parameter file into a ``{key: [values]}`` dict.

    Unparseable lines (comments etc.) are silently skipped; if a key
    appears more than once the last occurrence wins.
    """
    parameters: Dict[str, List[str]] = {}
    with open(
        elx_param_fp,
        "r",
    ) as f:
        for raw_line in f:
            key, value = _elx_lineparser(raw_line)
            if key is not None:
                parameters[key] = value
    return parameters
class _RegModelMeta(EnumMeta):
    """Enum metaclass that falls back to reading elastix parameter files.

    ``RegModel["name"]`` first resolves a named default parameter map; if
    the member lookup fails but ``name`` is an existing file path, the file
    is parsed as an elastix parameter file and returned instead.
    """

    def __getitem__(self, name):
        try:
            return super().__getitem__(name)
        except (TypeError, KeyError):
            # not a member name: accept a path to an elastix parameter file
            if isinstance(name, (str, Path)) and Path(name).exists():
                return _read_elastix_parameter_file(name)
            else:
                # bug fix: the two concatenated string literals previously
                # rendered as "please providefile path" (missing space)
                raise ValueError(
                    "unrecognized registration parameter, please provide "
                    "file path to elastix transform parameters or specify one of "
                    f"{[i.name for i in self]}"
                )
class RegModel(dict, Enum, metaclass=_RegModelMeta):
    """
    Default registration parameters. Can also pass a filepath of elastix transforms and these
    will be used.

    Each member's value is an elastix parameter map (``{name: [values]}``)
    drawn from ``DEFAULT_REG_PARAM_MAPS``.  Lookup of an unknown name falls
    back to parsing an elastix parameter file from disk (see
    ``_RegModelMeta.__getitem__``).
    """

    # baseline transform models
    rigid: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS["rigid"]
    affine: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS["affine"]
    similarity: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS["similarity"]
    nl: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS["nl"]
    fi_correction: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS[
        "fi_correction"
    ]
    # non-linear / expanded variants (note: some map keys use hyphens while
    # member names must use underscores)
    nl_reduced: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS["nl-reduced"]
    nl_mid: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS["nl-mid"]
    nl2: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS["nl2"]
    rigid_expanded: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS[
        "rigid-expanded"
    ]
    # "_test" parameter sets
    rigid_test: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS["rigid_test"]
    affine_test: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS["affine_test"]
    similarity_test: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS[
        "similarity_test"
    ]
    nl_test: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS["nl_test"]
    # "_ams" parameter sets
    rigid_ams: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS["rigid_ams"]
    affine_ams: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS["affine_ams"]
    similarity_ams: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS[
        "similarity_ams"
    ]
    nl_ams: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS["nl_ams"]
    # "_anc" parameter sets
    rigid_anc: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS["rigid_anc"]
    affine_anc: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS["affine_anc"]
    similarity_anc: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS[
        "similarity_anc"
    ]
    nl_anc: Dict[str, List[str]] = DEFAULT_REG_PARAM_MAPS["nl_anc"]

    def __str__(self):
        """Render the member as its bare name."""
        return self.name

    def __deepcopy__(self, _):
        """Deep-copy as the member name rather than the parameter dict."""
        return self.name
| 3,622 | 32.859813 | 93 | py |
wsireg | wsireg-master/wsireg/reg_shapes/reg_shapes.py | import json
from pathlib import Path
from typing import Tuple, Union, List, Dict, Any
import cv2
import numpy as np
from wsireg.reg_transforms.reg_transform_seq import RegTransformSeq
from wsireg.utils.shape_utils import (
get_int_dtype,
insert_transformed_pts_gj,
invert_nonrigid_transforms,
scale_shape_coordinates,
shape_reader,
transform_shapes,
)
class RegShapes:
    """
    Class that holds and manages shape data and its transformations in the
    registration graph.

    Parameters
    ----------
    shape_data: list
        list of np.ndarrays of shape data
        str - file path to GeoJSON file containing shape data
        np.ndarray - single shape
    itk_point_transforms: list
        list of ITK point transforms, appropriately inverted where non-linear.
        Usually generated on-the-fly when points are transformed from wsireg
        transformation data
    source_res: float
        isotropic image resolution of the source image in the registration
        stack, i.e., resolution of the image to which shape data is associated
    target_res: float
        isotropic image resolution of the target image in the registration
        stack, usually generated on-the-fly when points are transformed from
        wsireg transformation data
    kwargs
        keyword arguments passed to wsireg.utils.shape_utils.shape_reader
        (shape_name, shape_type)

    Attributes
    ----------
    shape_data: list
        shape data as list of dicts holding np.ndarrays
    shape_data_gj: list
        shape data as GeoJSON dicts
    transformed_shape_data: list
        numpy shape data that has been transformed
    itk_point_transforms: list
        list of ITK point transforms, appropriately inverted where non-linear
    source_res: float
        isotropic image resolution of the source image
    target_res: float
        isotropic image resolution of the target image
    """

    def __init__(
        self,
        shape_data=None,
        itk_point_transforms=None,
        source_res=1,
        target_res=1,
        **kwargs,
    ):
        self.shape_data = []
        self.shape_data_gj = []
        self.transformed_shape_data = []
        self.itk_point_transforms = itk_point_transforms
        self.source_res = source_res
        self.target_res = target_res
        # derived metadata; populated by update_shapes_gj, None until then
        self._n_shapes = None
        self._shape_types = None
        self._shape_names = None
        if shape_data:
            self.add_shapes(shape_data, **kwargs)

    @property
    def n_shapes(self):
        """Number of shapes loaded (None before any shapes are added)."""
        return self._n_shapes

    @property
    def shape_types(self):
        """List of GeoJSON geometry types in shape data.

        Bug fix: this previously returned ``self._n_shape_types``, an
        attribute that was never initialized in ``__init__``, so accessing
        the property before adding shapes raised AttributeError instead of
        returning None.
        """
        return self._shape_types

    @property
    def shape_names(self):
        """List of GeoJSON shape names in shape data."""
        return self._shape_names

    def add_shapes(self, shape_data, **kwargs):
        """
        Add shapes via shape_reader; extends the current shape lists rather
        than overwriting them.

        Parameters
        ----------
        shape_data
            list of np.ndarrays of shape data
            str - file path to GeoJSON file containing shape data
            np.ndarray - single shape
        kwargs
            keyword arguments passed to shape_reader (shape_name, shape_type)
        """
        gj_shapes, np_shapes = shape_reader(shape_data, **kwargs)
        self.update_shapes(np_shapes)
        self.update_shapes_gj(gj_shapes)

    def update_shapes(
        self, imported_shapes: List[Dict[str, Union[np.ndarray, str]]]
    ):
        """
        Extend the numpy shape list with new shape data.

        Parameters
        ----------
        imported_shapes: list of shape data
            Shapes to add in numpy format
        """
        self.shape_data.extend(imported_shapes)

    def update_shapes_gj(self, imported_shapes: List[Dict[Any, Any]]):
        """
        Extend the GeoJSON shape list with new shape data and refresh the
        derived shape count / types / names.

        Parameters
        ----------
        imported_shapes: list of shape data
            Shapes to add in GeoJSON format
        """
        self.shape_data_gj.extend(imported_shapes)
        self._n_shapes = len(self.shape_data_gj)
        # attribute name kept consistent with __init__ and the shape_types
        # property (was previously stored as the mismatched _n_shape_types)
        self._shape_types = [
            sh["geometry"]["type"] for sh in self.shape_data_gj
        ]
        self._shape_names = [
            sh["properties"]["classification"]["name"]
            for sh in self.shape_data_gj
        ]

    def scale_shapes(self, scale_factor: Union[int, float]):
        """
        Scale coordinates of all loaded shapes by an isotropic factor.

        Parameters
        ----------
        scale_factor: float, int
            isotropic scaling factor for the coordinates
        """
        self.shape_data = [
            scale_shape_coordinates(shape, scale_factor)
            for shape in self.shape_data
        ]

    def transform_shapes(
        self,
        transformations: Union[str, Path, dict, "RegTransformSeq"],
        px_idx: bool = True,
        output_idx: bool = True,
    ):
        """
        Transform shapes using transformation data from wsireg.

        Parameters
        ----------
        transformations
            RegTransformSeq, or anything parsable into one (str/Path/dict)
        px_idx: bool
            whether shape points are specified in physical coordinates
            (i.e., microns) or in pixel indices
        output_idx: bool
            whether transformed shape points should be output in physical
            coordinates (i.e., microns) or in pixel indices
        """
        if isinstance(transformations, (str, Path, dict)):
            transformations_seq = RegTransformSeq(transformations)
        else:
            transformations_seq = transformations
        # non-linear transforms must be inverted before point transformation
        invert_nonrigid_transforms(
            transformations_seq.reg_transforms_itk_order
        )
        self.transformed_shape_data = transform_shapes(
            self.shape_data,
            transformations_seq.reg_transforms_itk_order,
            px_idx=px_idx,
            source_res=self.source_res,
            output_idx=output_idx,
            target_res=transformations_seq.output_spacing[0],
        )

    def save_shape_data(
        self, output_fp: Union[str, Path], transformed: bool = True
    ) -> str:
        """
        Save shape data to GeoJSON on disk.

        Parameters
        ----------
        output_fp: str
            path to the .json file where shape data will be saved
        transformed: bool
            save the transformed shape data or shape data as currently held
            in memory

        Returns
        -------
        output_fp: str
            Path to the saved file
        """
        if transformed is True:
            # updated GeoJSON with transformed points
            out_shapes = insert_transformed_pts_gj(
                self.shape_data_gj, self.transformed_shape_data
            )
        else:
            out_shapes = self.shape_data_gj
        # context manager ensures the handle is closed (previously the file
        # object returned by open() was never closed)
        with open(output_fp, "w") as f:
            json.dump(out_shapes, f, indent=1)
        return str(output_fp)

    def draw_mask(
        self,
        output_size: Tuple[int, int],
        transformed: bool = False,
        labels: bool = False,
    ) -> np.ndarray:
        """
        Draw a binary or label mask using shape data.

        Parameters
        ----------
        output_size: tuple of int
            size of mask in tuple(x, y)
        transformed: bool
            Whether to write transformed shapes to mask or original shapes
        labels: bool
            Whether to write each mask instance as a label (1-n_shapes)
            or to write all at the dtype's maximum value (255 for uint8)

        Returns
        -------
        mask: np.ndarray
            Drawn mask at set output size
        """
        if labels:
            # choose the smallest integer dtype that can hold n_shapes labels
            im_dtype = get_int_dtype(self.n_shapes)
        else:
            im_dtype = np.uint8
        # numpy arrays are indexed (row, col) -> reverse the (x, y) size
        mask = np.zeros(output_size[::-1], dtype=im_dtype)
        shapes = self.transformed_shape_data if transformed else self.shape_data
        for idx, sh in enumerate(shapes):
            mask = cv2.fillPoly(
                mask,
                pts=[sh["array"].astype(np.int32)],
                color=idx + 1 if labels else np.iinfo(im_dtype).max,
            )
        return mask
| 8,937 | 29.505119 | 104 | py |
wsireg | wsireg-master/wsireg/reg_shapes/__init__.py | from .reg_shapes import RegShapes # noqa: F401
| 48 | 23.5 | 47 | py |
wsireg | wsireg-master/tests/test_reg_transform_seq.py | import os
from pathlib import Path
import pytest
from wsireg.reg_transforms.reg_transform_seq import RegTransformSeq
HERE = os.path.dirname(__file__)  # directory containing this test module
FIXTURES_DIR = os.path.join(HERE, "fixtures")  # bundled test fixture files
@pytest.mark.usefixtures("complex_transform_larger")
def test_RegTransformSeq_from_dict(complex_transform_larger):
    """A transform dict expands into the expected six-transform sequence."""
    seq = RegTransformSeq(complex_transform_larger)
    assert seq.transform_seq_idx == [0, 1, 2, 2, 3, 3]
    assert len(seq.reg_transforms) == 6
    assert len(seq.reg_transforms_itk_order) == 6
@pytest.mark.usefixtures("complex_transform_larger")
def test_RegTransformSeq_resize_output_up(complex_transform_larger):
    """Reducing the output spacing to 1um enlarges the output size."""
    seq = RegTransformSeq(complex_transform_larger)
    size_before = seq.output_size
    seq.set_output_spacing((1, 1))
    assert seq.output_size == (2048, 2048)
    assert seq.output_size != size_before
@pytest.mark.usefixtures("complex_transform_larger")
def test_RegTransformSeq_resize_output_down(complex_transform_larger):
    """Increasing the output spacing to 4um shrinks the output size."""
    seq = RegTransformSeq(complex_transform_larger)
    size_before = seq.output_size
    seq.set_output_spacing((4, 4))
    assert seq.output_size == (512, 512)
    assert seq.output_size != size_before
def test_RegTransformSeq_from_json():
    """A serialized transform JSON loads all ten transforms."""
    tform_fp = str(Path(FIXTURES_DIR) / "test-tform.json")
    assert len(RegTransformSeq(tform_fp).reg_transforms) == 10
def test_RegTransformSeq_from_RegTransform():
    """A sequence can be rebuilt from individual RegTransform objects."""
    tform_fp = str(Path(FIXTURES_DIR) / "test-tform.json")
    src = RegTransformSeq(tform_fp)
    rebuilt = RegTransformSeq(
        reg_transforms=src.reg_transforms[:3],
        transform_seq_idx=[0, 1, 1],
    )
    assert len(rebuilt.reg_transforms) == 3
    assert rebuilt.transform_seq_idx == [0, 1, 1]
def test_RegTransformSeq_append():
    """Appending one sub-sequence to another offsets its sequence indices."""
    tform_fp = str(Path(FIXTURES_DIR) / "test-tform.json")
    src = RegTransformSeq(tform_fp)
    first = RegTransformSeq(
        reg_transforms=src.reg_transforms[:3],
        transform_seq_idx=[0, 1, 1],
    )
    second = RegTransformSeq(
        reg_transforms=src.reg_transforms[:3],
        transform_seq_idx=[0, 1, 1],
    )
    first.append(second)
    assert len(first.reg_transforms) == 6
    assert first.transform_seq_idx == [0, 1, 1, 2, 3, 3]
| 2,473 | 27.767442 | 70 | py |
wsireg | wsireg-master/tests/test_im_read.py | import os
import numpy as np
import pytest
from wsireg.parameter_maps.preprocessing import ImagePreproParams
from wsireg.reg_images.loader import reg_image_loader
# private data logic borrowed from https://github.com/cgohlke/tifffile/tests/test_tifffile.py
HERE = os.path.dirname(__file__)  # directory containing this test module
PRIVATE_DIR = os.path.join(HERE, "private_data")  # non-redistributable test images
SKIP_PRIVATE = False
REASON = "private data"
# skip every data-dependent test below when the private image dir is absent
if not os.path.exists(PRIVATE_DIR):
    SKIP_PRIVATE = True
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_czi_read_rgb():
    """RGB CZI loads as an interleaved 3-channel uint8 image."""
    img = reg_image_loader(os.path.join(PRIVATE_DIR, "czi_rgb.czi"), 1)
    assert img.is_rgb is True
    assert img.im_dtype == np.uint8
    assert len(img.shape) == 3
    assert img.shape[2] == 3
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_czi_read_rgb_default_preprocess():
    """Default preprocessing collapses RGB to a single-component image."""
    img = reg_image_loader(os.path.join(PRIVATE_DIR, "czi_rgb.czi"), 1)
    img.read_reg_image()
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_czi_read_rgb_bf_preprocess():
    """Brightfield preprocessing also yields a single-component image."""
    img = reg_image_loader(
        os.path.join(PRIVATE_DIR, "czi_rgb.czi"),
        1,
        preprocessing={"image_type": "BF"},
    )
    img.read_reg_image()
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_czi_read_rgb_bf_preprocess_default():
    """A default ImagePreproParams object behaves like the implicit default."""
    img = reg_image_loader(
        os.path.join(PRIVATE_DIR, "czi_rgb.czi"),
        1,
        preprocessing=ImagePreproParams(),
    )
    img.read_reg_image()
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_czi_read_mc():
    """4-channel CZI loads channel-first as a uint16 non-RGB image."""
    img = reg_image_loader(os.path.join(PRIVATE_DIR, "czi_4ch_16bit.czi"), 1)
    assert img.is_rgb is False
    assert img.im_dtype == np.uint16
    assert len(img.shape) == 3
    assert img.shape[0] == 4
    assert img.shape[2] > 3
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_czi_read_mc_default_preprocess():
    """Default preprocessing yields a single-component 8-bit reg image."""
    img = reg_image_loader(os.path.join(PRIVATE_DIR, "czi_4ch_16bit.czi"), 1)
    img.read_reg_image()
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
    assert img.reg_image.GetPixelID() == 1
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_czi_read_mc_fl_preprocess():
    """Fluorescence preprocessing with uint8 cast gives an 8-bit image."""
    img = reg_image_loader(
        os.path.join(PRIVATE_DIR, "czi_4ch_16bit.czi"),
        1,
        preprocessing={"image_type": "FL", "as_uint8": True},
    )
    img.read_reg_image()
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
    assert img.reg_image.GetPixelID() == 1
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_czi_read_mc_std_preprocess():
    """A default ImagePreproParams object yields a single-component image."""
    img = reg_image_loader(
        os.path.join(PRIVATE_DIR, "czi_4ch_16bit.czi"),
        1,
        preprocessing=ImagePreproParams(),
    )
    img.read_reg_image()
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
    assert img.reg_image.GetPixelID() == 1
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_czi_read_mc_selectch_preprocess_list():
    """Channel selection via a list index yields a single-component image."""
    img = reg_image_loader(
        os.path.join(PRIVATE_DIR, "czi_4ch_16bit.czi"),
        1,
        preprocessing={"ch_indices": [0]},
    )
    img.read_reg_image()
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
    assert img.reg_image.GetPixelID() == 1
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_czi_read_mc_selectch_preprocess_int():
    """Channel selection via a bare int yields a single-component image."""
    img = reg_image_loader(
        os.path.join(PRIVATE_DIR, "czi_4ch_16bit.czi"),
        1,
        preprocessing={"ch_indices": 0},
    )
    img.read_reg_image()
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
    assert img.reg_image.GetPixelID() == 1
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_czi_read_mc_read_channels():
    """Each channel of a 4-channel CZI reads as a distinct uint16 plane."""
    img = reg_image_loader(
        os.path.join(PRIVATE_DIR, "czi_4ch_16bit.czi"), 1
    )
    channels = [img.read_single_channel(i) for i in range(4)]
    for ch in channels:
        assert np.squeeze(ch).shape == img.shape[1:]
        assert np.ndim(ch) == 6
        assert ch.dtype == np.uint16
    # channels beyond the first are expected to carry signal
    for ch in channels[1:]:
        assert np.mean(ch) > 0
    # every pair of channels must differ
    for i in range(4):
        for j in range(i + 1, 4):
            assert np.array_equal(channels[i], channels[j]) is False
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_czi_read_rgb_read_channels():
    """Each channel of an RGB CZI reads as a distinct uint8 plane."""
    img = reg_image_loader(os.path.join(PRIVATE_DIR, "czi_rgb.czi"), 1)
    channels = [img.read_single_channel(i) for i in range(3)]
    for ch in channels:
        assert np.squeeze(ch).shape == img.shape[:2]
        assert np.ndim(ch) == 6
        assert ch.dtype == np.uint8
    # every pair of channels must differ
    for i in range(3):
        for j in range(i + 1, 3):
            assert np.array_equal(channels[i], channels[j]) is False
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_scn_read_rgb():
    """RGB SCN loads as an interleaved 3-channel uint8 image."""
    img = reg_image_loader(os.path.join(PRIVATE_DIR, "scn_rgb.scn"), 1)
    assert img.is_rgb is True
    assert img.im_dtype == np.uint8
    assert len(img.shape) == 3
    assert img.shape[2] == 3
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_scn_read_rgb_default_preprocess():
    """Default preprocessing collapses SCN RGB to one component."""
    img = reg_image_loader(os.path.join(PRIVATE_DIR, "scn_rgb.scn"), 1)
    img.read_reg_image()
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_scn_read_rgb_bf_preprocess():
    """Brightfield preprocessing collapses SCN RGB to one component."""
    img = reg_image_loader(
        os.path.join(PRIVATE_DIR, "scn_rgb.scn"),
        1,
        preprocessing={"image_type": "BF"},
    )
    img.read_reg_image()
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_huron_read_rgb():
    """RGB Huron TIFF loads as an interleaved 3-channel uint8 image."""
    img = reg_image_loader(os.path.join(PRIVATE_DIR, "huron_rgb.tif"), 1)
    assert img.is_rgb is True
    assert img.im_dtype == np.uint8
    assert len(img.shape) == 3
    assert img.shape[2] == 3
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_huron_read_rgb_default_preprocess():
    """Default preprocessing collapses Huron RGB to one component."""
    img = reg_image_loader(os.path.join(PRIVATE_DIR, "huron_rgb.tif"), 1)
    img.read_reg_image()
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_huron_read_rgb_bf_preprocess():
    """Brightfield preprocessing collapses Huron RGB to one component."""
    img = reg_image_loader(
        os.path.join(PRIVATE_DIR, "huron_rgb.tif"),
        1,
        preprocessing={"image_type": "BF"},
    )
    img.read_reg_image()
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
# NOTE(review): byte-for-byte duplicate of test_huron_read_rgb_bf_preprocess
# defined directly above; Python keeps only this later definition, so the
# earlier copy is silently shadowed -- consider deleting one of the two.
def test_huron_read_rgb_bf_preprocess():
    image_fp = os.path.join(PRIVATE_DIR, "huron_rgb.tif")
    preprocessing = {"image_type": "BF"}
    ri = reg_image_loader(image_fp, 1, preprocessing=preprocessing)
    ri.read_reg_image()
    assert ri.reg_image.GetNumberOfComponentsPerPixel() == 1
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_ometiff_read_rgb():
    """RGB OME-TIFF loads as a 3-dimensional uint8 image."""
    img = reg_image_loader(os.path.join(PRIVATE_DIR, "czi_rgb.ome.tiff"), 1)
    assert img.is_rgb is True
    assert img.im_dtype == np.uint8
    assert len(img.shape) == 3
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_ometiff_read_rgb_default_preprocess():
    """Default preprocessing collapses OME-TIFF RGB to one component."""
    img = reg_image_loader(os.path.join(PRIVATE_DIR, "czi_rgb.ome.tiff"), 1)
    img.read_reg_image()
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_ometiff_read_mc():
    """4-channel OME-TIFF loads channel-first as a uint16 non-RGB image."""
    img = reg_image_loader(
        os.path.join(PRIVATE_DIR, "czi_4ch_16bit.ome.tiff"), 1
    )
    assert img.is_rgb is False
    assert img.im_dtype == np.uint16
    assert len(img.shape) == 3
    assert img.shape[0] == 4
    assert img.shape[2] > 3
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_ometiff_read_mc_default_preprocess():
    """Default preprocessing collapses multichannel OME-TIFF to one component."""
    img = reg_image_loader(
        os.path.join(PRIVATE_DIR, "czi_4ch_16bit.ome.tiff"), 1
    )
    img.read_reg_image()
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_ometiff_read_mc_read_channels():
    """Per-channel reads of a multichannel OME-TIFF are distinct uint16 planes.

    Renamed from ``test_czi_read_mc_read_channels``: that name collided with
    the CZI test of the same name earlier in this module, so one of the two
    was silently dropped by pytest's collection.
    """
    img = reg_image_loader(
        os.path.join(PRIVATE_DIR, "czi_4ch_16bit.ome.tiff"), 1
    )
    channels = [img.read_single_channel(i) for i in range(4)]
    for ch in channels:
        assert np.squeeze(ch).shape == img.shape[1:]
        assert np.ndim(ch) == 2
        assert ch.dtype == np.uint16
    # every pair of channels must differ
    for i in range(4):
        for j in range(i + 1, 4):
            assert np.array_equal(channels[i], channels[j]) is False
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_ometiff_read_rgb_read_channels():
    """Per-channel reads of an RGB OME-TIFF are distinct uint8 planes.

    Renamed from ``test_czi_read_rgb_read_channels`` to resolve a name
    collision with the CZI variant earlier in this module, which pytest
    otherwise silently shadowed.
    """
    img = reg_image_loader(os.path.join(PRIVATE_DIR, "czi_rgb.ome.tiff"), 1)
    channels = [img.read_single_channel(i) for i in range(3)]
    for ch in channels:
        assert np.squeeze(ch).shape == img.shape[1:]
        assert np.ndim(ch) == 2
        assert ch.dtype == np.uint8
    # every pair of channels must differ
    for i in range(3):
        for j in range(i + 1, 3):
            assert np.array_equal(channels[i], channels[j]) is False
| 11,016 | 33.53605 | 93 | py |
wsireg | wsireg-master/tests/test_wsireg_config.py | import os
from pathlib import Path
from typing import Union
import cv2
import numpy as np
import pytest
import SimpleITK as sitk
from wsireg.reg_images.loader import reg_image_loader
from wsireg.reg_shapes import RegShapes
from wsireg.utils.config_utils import parse_check_reg_config
from wsireg.wsireg2d import WsiReg2D
HERE = os.path.dirname(__file__)  # directory containing this test module
FIXTURES_DIR = os.path.join(HERE, "fixtures")  # bundled registration configs
PRIVATE_DIR = os.path.join(HERE, "private_data")  # non-redistributable images
# registration-graph configurations exercised by the tests below
config1_fp = str(Path(FIXTURES_DIR) / "test-config1.yaml")
config2_fp = str(Path(FIXTURES_DIR) / "test-config2.yaml")
config3_fp = str(Path(FIXTURES_DIR) / "test-config3.yaml")
config4_fp = str(Path(FIXTURES_DIR) / "test-config4.yaml")
config5_fp = str(Path(FIXTURES_DIR) / "test-config5.yaml")
config_prepro_fp = str(Path(FIXTURES_DIR) / "test-config1-prepro-test.yaml")
SKIP_PRIVATE = False
REASON = "private data"
# skip data-dependent tests when the private image directory is absent
if not os.path.exists(PRIVATE_DIR):
    SKIP_PRIVATE = True
def geojson_to_binary_image(geojson_fp: Union[str, Path]) -> sitk.Image:
    """Rasterize GeoJSON polygons into a padded binary SimpleITK image."""
    shapes = RegShapes(geojson_fp).shape_data
    polygons = [s["array"].astype(np.int32) for s in shapes]
    # canvas sized to the outermost vertex plus a 200 px margin
    width = np.max([np.max(s["array"][:, 0]) for s in shapes]).astype(
        int
    )
    height = np.max([np.max(s["array"][:, 1]) for s in shapes]).astype(
        int
    )
    canvas = np.zeros((height + 200, width + 200), dtype=np.uint8)
    cv2.fillPoly(canvas, polygons, 255)
    return sitk.GetImageFromArray(canvas)
def compute_dice(b1: sitk.Image, b2: sitk.Image):
    """Resample ``b2`` onto ``b1``'s grid and return their Dice coefficient."""
    resampled = sitk.Resample(
        b2,
        b1.GetSize(),
        sitk.Transform(),  # identity transform
        sitk.sitkNearestNeighbor,  # preserve binary labels
        b1.GetOrigin(),
        b1.GetSpacing(),
        b1.GetDirection(),
        0,
        b2.GetPixelID(),
    )
    overlap = sitk.LabelOverlapMeasuresImageFilter()
    overlap.Execute(b1, resampled)
    return overlap.GetDiceCoefficient()
def config_to_WsiReg2D(config_filepath, output_dir):
    """Instantiate a WsiReg2D graph from a parsed wsireg YAML config."""
    cfg = parse_check_reg_config(config_filepath)
    return WsiReg2D(
        cfg.get("project_name"),
        output_dir,
        cfg.get("cache_images"),
    )
@pytest.fixture(scope="session")
def data_out_dir(tmpdir_factory):
    """Session-scoped temporary directory for registration outputs."""
    return tmpdir_factory.mktemp("output")
@pytest.mark.parametrize("config_fp", [(config1_fp), (config2_fp)])
def test_wsireg_configs(config_fp, data_out_dir):
    """Registration from config runs end to end and targets the out dir."""
    graph = config_to_WsiReg2D(config_fp, data_out_dir)
    graph.add_data_from_config(config_fp)
    graph.register_images()
    graph.save_transformations()
    assert graph.output_dir == Path(str(data_out_dir))
# @pytest.mark.parametrize("config_fp", [(config1_fp), (config2_fp)])
# def test_wsireg_configs_fromcache(config_fp, data_out_dir):
# wsi_reg1 = config_to_WsiReg2D(config_fp, data_out_dir)
# wsi_reg1.add_data_from_config(config_fp)
# wsi_reg1.register_images()
#
# wsi_reg2 = config_to_WsiReg2D(config_fp, data_out_dir)
# wsi_reg2.add_data_from_config(config_fp)
# wsi_reg2.register_images()
#
# assert wsi_reg1.output_dir == Path(str(data_out_dir))
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
@pytest.mark.parametrize(
    "config_fp",
    [
        (config3_fp),
    ],
)
def test_wsireg_config_full_exp_DICE(config_fp, data_out_dir):
    """Full registration run: transformed ROIs overlap the unregistered
    ground-truth ROIs with Dice > 0.85."""
    wsi_reg1 = config_to_WsiReg2D(config_fp, data_out_dir)
    wsi_reg1.add_data_from_config(config_fp)
    wsi_reg1.register_images()
    shape_fps = wsi_reg1.transform_shapes()
    # bug fix: anchor the ground-truth path to PRIVATE_DIR (as the skipif
    # guard does) instead of a cwd-relative "private_data/..." string, so
    # the test works regardless of the directory pytest is invoked from
    gt = geojson_to_binary_image(
        os.path.join(
            PRIVATE_DIR,
            "unreg_rois",
            "VAN0006-LK-2-85-AF_preIMS_unregistered.geojson",
        )
    )
    dice_vals = [
        compute_dice(gt, geojson_to_binary_image(shape))
        for shape in shape_fps
    ]
    assert all(np.asarray(dice_vals) > 0.85)
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
@pytest.mark.parametrize(
    "config_fp",
    [
        (config4_fp),
    ],
)
def test_wsireg_config_full_exp_DICE_ds(config_fp, data_out_dir):
    """Downsampled registration run: transformed ROIs overlap the
    unregistered ground-truth ROIs with Dice > 0.8."""
    wsi_reg1 = config_to_WsiReg2D(config_fp, data_out_dir)
    wsi_reg1.add_data_from_config(config_fp)
    wsi_reg1.register_images()
    shape_fps = wsi_reg1.transform_shapes()
    # bug fix: anchor the ground-truth path to PRIVATE_DIR instead of a
    # cwd-relative string, so the test works regardless of invocation dir
    gt = geojson_to_binary_image(
        os.path.join(
            PRIVATE_DIR,
            "unreg_rois",
            "VAN0006-LK-2-85-AF_preIMS_unregistered.geojson",
        )
    )
    dice_vals = [
        compute_dice(gt, geojson_to_binary_image(shape))
        for shape in shape_fps
    ]
    assert all(np.asarray(dice_vals) > 0.8)
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
@pytest.mark.parametrize(
    "config_fp",
    [
        (config5_fp),
    ],
)
def test_wsireg_config_full_merge_rgb_mc(config_fp, data_out_dir):
    """Merged RGB + multichannel output is a 9-channel uint16 image."""
    graph = config_to_WsiReg2D(config_fp, data_out_dir)
    graph.add_data_from_config(config_fp)
    graph.register_images()
    merged_fp = graph.transform_images()[0]
    merged = reg_image_loader(merged_fp, 1)
    assert merged.im_dtype == np.uint16
    assert merged.im_dims == (9, 3993, 3397)
def test_save_config_round_trip(data_out_dir):
    """A saved config reloads into an equivalent registration graph."""
    original = config_to_WsiReg2D(config_prepro_fp, data_out_dir)
    original.add_data_from_config(config_prepro_fp)
    saved_fp = original.save_config()
    reloaded = config_to_WsiReg2D(saved_fp, data_out_dir)
    reloaded.add_data_from_config(saved_fp)
    assert original.modalities == reloaded.modalities
    assert original.reg_paths == reloaded.reg_paths
    assert original.transform_paths == reloaded.transform_paths
| 5,460 | 28.518919 | 80 | py |
wsireg | wsireg-master/tests/conftest.py | import pytest
from tests.fixtures.im_fixtures import (
dask_im_gry_np,
dask_im_mch_np,
dask_im_rgb_np,
disk_im_gry,
disk_im_gry_pyr,
disk_im_mch,
disk_im_mch_notile,
disk_im_mch_pyr,
disk_im_rgb,
disk_im_rgb_pyr,
im_gry_np,
im_mch_np,
im_rgb_np,
im_rgb_np_uneven,
mask_np,
mask_geojson,
zarr_im_gry_np,
zarr_im_mch_np,
zarr_im_rgb_np,
)
from tests.fixtures.transform_fixtures import (
complex_transform,
complex_transform_larger,
complex_transform_larger_padded,
simple_transform_affine,
simple_transform_affine_large_output,
simple_transform_affine_nl,
simple_transform_affine_nl_large_output,
)
| 702 | 20.30303 | 47 | py |
wsireg | wsireg-master/tests/test_reg_image.py | import os
import itk
import pytest
import SimpleITK as sitk
from wsireg.reg_images.loader import reg_image_loader
HERE = os.path.dirname(__file__)  # directory containing this test module
GEOJSON_FP = os.path.join(HERE, "fixtures/polygons.geojson")  # sample polygons
@pytest.mark.usefixtures("disk_im_mch")
def test_reg_image_loader_image_fp_mc_std_prepro(disk_im_mch):
    """Multichannel file loads to a single-component image at 0.65 spacing."""
    img = reg_image_loader(str(disk_im_mch), 0.65)
    img.read_reg_image()
    reg = img.reg_image
    assert reg.GetSize() == (2048, 2048)
    assert reg.GetNumberOfComponentsPerPixel() == 1
    assert reg.GetSpacing() == (0.65, 0.65)
@pytest.mark.usefixtures("disk_im_rgb")
def test_reg_image_loader_image_fp_rgb_std_prepro(disk_im_rgb):
    """RGB file loads to a single-component image at 0.65 spacing."""
    img = reg_image_loader(str(disk_im_rgb), 0.65)
    img.read_reg_image()
    reg = img.reg_image
    assert reg.GetSize() == (2048, 2048)
    assert reg.GetNumberOfComponentsPerPixel() == 1
    assert reg.GetSpacing() == (0.65, 0.65)
@pytest.mark.usefixtures("disk_im_gry")
def test_reg_image_loader_image_fp_gry_std_prepro(disk_im_gry):
    """Grayscale file loads to a single-component image at 0.65 spacing."""
    img = reg_image_loader(str(disk_im_gry), 0.65)
    img.read_reg_image()
    reg = img.reg_image
    assert reg.GetSize() == (2048, 2048)
    assert reg.GetNumberOfComponentsPerPixel() == 1
    assert reg.GetSpacing() == (0.65, 0.65)
@pytest.mark.usefixtures("im_mch_np")
def test_reg_image_loader_image_np_mc_std_prepro(im_mch_np):
    """Multichannel array loads to a single-component 2D image."""
    img = reg_image_loader(im_mch_np, 0.65)
    img.read_reg_image()
    reg = img.reg_image
    assert reg.GetSize() == (2048, 2048)
    assert reg.GetNumberOfComponentsPerPixel() == 1
    assert reg.GetDepth() == 0
@pytest.mark.usefixtures("im_rgb_np")
def test_reg_image_loader_image_np_rgb_std_prepro(im_rgb_np):
    """RGB array loads to a single-component image."""
    img = reg_image_loader(im_rgb_np, 0.65)
    img.read_reg_image()
    reg = img.reg_image
    assert reg.GetSize() == (2048, 2048)
    assert reg.GetNumberOfComponentsPerPixel() == 1
@pytest.mark.usefixtures("im_gry_np")
def test_reg_image_loader_image_np_gry_std_prepro(im_gry_np):
    """Grayscale array loads to a single-component image."""
    img = reg_image_loader(im_gry_np, 0.65)
    img.read_reg_image()
    reg = img.reg_image
    assert reg.GetSize() == (2048, 2048)
    assert reg.GetNumberOfComponentsPerPixel() == 1
@pytest.mark.usefixtures("im_mch_np")
def test_reg_image_loader_image_np_mc_std_prepro_rot(im_mch_np):
    """90-degree rotation of a multichannel array records one pre-reg transform."""
    img = reg_image_loader(im_mch_np, 0.65, preprocessing={"rot_cc": 90})
    img.read_reg_image()
    reg = img.reg_image
    assert reg.GetSize() == (2048, 2048)
    assert reg.GetNumberOfComponentsPerPixel() == 1
    assert reg.GetDepth() == 0
    assert reg.GetSpacing() == (0.65, 0.65)
    assert img.pre_reg_transforms is not None
    assert len(img.pre_reg_transforms) == 1
@pytest.mark.usefixtures("im_rgb_np")
def test_reg_image_loader_image_np_rgb_std_prepro_rot(im_rgb_np):
    """90-degree rotation of an RGB array records one pre-reg transform."""
    img = reg_image_loader(im_rgb_np, 0.65, preprocessing={"rot_cc": 90})
    img.read_reg_image()
    reg = img.reg_image
    assert reg.GetSize() == (2048, 2048)
    assert reg.GetNumberOfComponentsPerPixel() == 1
    assert reg.GetSpacing() == (0.65, 0.65)
    assert img.pre_reg_transforms is not None
    assert len(img.pre_reg_transforms) == 1
@pytest.mark.usefixtures("im_gry_np")
def test_reg_image_loader_image_np_gry_std_prepro_rot(im_gry_np):
    """90-degree rotation of a grayscale array records one pre-reg transform."""
    img = reg_image_loader(im_gry_np, 0.65, preprocessing={"rot_cc": 90})
    img.read_reg_image()
    reg = img.reg_image
    assert reg.GetSize() == (2048, 2048)
    assert reg.GetNumberOfComponentsPerPixel() == 1
    assert reg.GetSpacing() == (0.65, 0.65)
    assert img.pre_reg_transforms is not None
    assert len(img.pre_reg_transforms) == 1
@pytest.mark.usefixtures("im_mch_np")
def test_reg_image_loader_image_np_mc_std_prepro_fliph(im_mch_np):
    """Multichannel input flipped horizontally records one pre-reg transform."""
    img = reg_image_loader(im_mch_np, 0.65, preprocessing={"flip": "h"})
    img.read_reg_image()
    assert img.reg_image.GetSize() == (2048, 2048)
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
    assert img.reg_image.GetDepth() == 0
    assert img.pre_reg_transforms is not None
    assert len(img.pre_reg_transforms) == 1
    assert img.reg_image.GetSpacing() == (0.65, 0.65)


@pytest.mark.usefixtures("im_rgb_np")
def test_reg_image_loader_image_np_rgb_std_prepro_fliph(im_rgb_np):
    """RGB input flipped horizontally records one pre-reg transform."""
    img = reg_image_loader(im_rgb_np, 0.65, preprocessing={"flip": "h"})
    img.read_reg_image()
    assert img.reg_image.GetSize() == (2048, 2048)
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
    assert img.pre_reg_transforms is not None
    assert len(img.pre_reg_transforms) == 1
    assert img.reg_image.GetSpacing() == (0.65, 0.65)


@pytest.mark.usefixtures("im_gry_np")
def test_reg_image_loader_image_np_gry_std_prepro_fliph(im_gry_np):
    """Grayscale input flipped horizontally records one pre-reg transform."""
    img = reg_image_loader(im_gry_np, 0.65, preprocessing={"flip": "h"})
    img.read_reg_image()
    assert img.reg_image.GetSize() == (2048, 2048)
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
    assert img.reg_image.GetSpacing() == (0.65, 0.65)
    assert img.pre_reg_transforms is not None
    assert len(img.pre_reg_transforms) == 1
@pytest.mark.usefixtures("im_mch_np")
def test_reg_image_loader_image_np_mc_std_prepro_flipv(im_mch_np):
    """Multichannel input flipped vertically records one pre-reg transform."""
    img = reg_image_loader(im_mch_np, 0.65, preprocessing={"flip": "v"})
    img.read_reg_image()
    assert img.reg_image.GetSize() == (2048, 2048)
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
    assert img.reg_image.GetDepth() == 0
    assert img.pre_reg_transforms is not None
    assert len(img.pre_reg_transforms) == 1
    assert img.reg_image.GetSpacing() == (0.65, 0.65)


@pytest.mark.usefixtures("im_rgb_np")
def test_reg_image_loader_image_np_rgb_std_prepro_flipv(im_rgb_np):
    """RGB input flipped vertically records one pre-reg transform."""
    img = reg_image_loader(im_rgb_np, 0.65, preprocessing={"flip": "v"})
    img.read_reg_image()
    assert img.reg_image.GetSize() == (2048, 2048)
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
    assert img.pre_reg_transforms is not None
    assert len(img.pre_reg_transforms) == 1
    assert img.reg_image.GetSpacing() == (0.65, 0.65)


@pytest.mark.usefixtures("im_gry_np")
def test_reg_image_loader_image_np_gry_std_prepro_flipv(im_gry_np):
    """Grayscale input flipped vertically records one pre-reg transform."""
    img = reg_image_loader(im_gry_np, 0.65, preprocessing={"flip": "v"})
    img.read_reg_image()
    assert img.reg_image.GetSize() == (2048, 2048)
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
    assert img.pre_reg_transforms is not None
    assert len(img.pre_reg_transforms) == 1
    assert img.reg_image.GetSpacing() == (0.65, 0.65)
@pytest.mark.usefixtures("im_mch_np")
def test_reg_image_loader_image_np_mc_std_prepro_rot_flipv(im_mch_np):
    """Combining rotation and flip yields two pre-registration transforms."""
    img = reg_image_loader(
        im_mch_np, 0.65, preprocessing={"rot_cc": 90, "flip": "v"}
    )
    img.read_reg_image()
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
    assert img.reg_image.GetDepth() == 0
    assert img.pre_reg_transforms is not None
    assert len(img.pre_reg_transforms) == 2
    assert img.reg_image.GetSpacing() == (0.65, 0.65)
@pytest.mark.usefixtures("im_gry_np")
def test_reg_image_loader_image_np_gry_std_prepro_rot_flipv(im_gry_np):
    """Grayscale input with combined 90 CCW rotation and vertical flip.

    Fix: this function previously redefined
    ``test_reg_image_loader_image_np_gry_std_prepro_flipv`` (already defined
    just above with an identical body), so the earlier test was silently
    shadowed and only one of the two ever ran. Renamed and extended to cover
    the rotation+flip combination, matching the multichannel counterpart.
    """
    reg_image = reg_image_loader(
        im_gry_np, 0.65, preprocessing={"rot_cc": 90, "flip": "v"}
    )
    reg_image.read_reg_image()
    assert reg_image.reg_image.GetSize() == (2048, 2048)
    assert reg_image.reg_image.GetNumberOfComponentsPerPixel() == 1
    assert reg_image.pre_reg_transforms is not None
    # rotation + flip -> two pre-registration transforms
    assert len(reg_image.pre_reg_transforms) == 2
    assert reg_image.reg_image.GetSpacing() == (0.65, 0.65)
@pytest.mark.usefixtures("im_gry_np", "mask_np")
def test_reg_image_loader_mask(im_gry_np, mask_np):
    """A numpy mask is attached as a sitk.Image at the image spacing."""
    img = reg_image_loader(im_gry_np, 0.65, mask=mask_np)
    img.read_reg_image()
    assert img.mask is not None
    assert isinstance(img.mask, sitk.Image)
    assert img.mask.GetSpacing() == (0.65, 0.65)


@pytest.mark.usefixtures("im_gry_np", "mask_np")
def test_reg_image_loader_mask_rot(im_gry_np, mask_np):
    """Mask survives rotation preprocessing with spacing intact."""
    img = reg_image_loader(
        im_gry_np, 0.65, preprocessing={"rot_cc": 90}, mask=mask_np
    )
    img.read_reg_image()
    assert img.mask is not None
    assert isinstance(img.mask, sitk.Image)
    assert img.mask.GetSpacing() == (0.65, 0.65)


@pytest.mark.usefixtures("im_gry_np", "mask_np")
def test_reg_image_loader_mask_rot_flip(im_gry_np, mask_np):
    """Mask survives combined rotation and flip with spacing intact."""
    img = reg_image_loader(
        im_gry_np,
        0.65,
        preprocessing={"rot_cc": 90, "flip": "h"},
        mask=mask_np,
    )
    img.read_reg_image()
    assert img.mask is not None
    assert isinstance(img.mask, sitk.Image)
    assert img.mask.GetSpacing() == (0.65, 0.65)


@pytest.mark.usefixtures("im_gry_np", "mask_np")
def test_reg_image_loader_mask_flip(im_gry_np, mask_np):
    """Mask survives flip preprocessing with spacing intact."""
    img = reg_image_loader(
        im_gry_np, 0.65, preprocessing={"flip": "v"}, mask=mask_np
    )
    img.read_reg_image()
    assert img.mask is not None
    assert isinstance(img.mask, sitk.Image)
    assert img.mask.GetSpacing() == (0.65, 0.65)
@pytest.mark.usefixtures("im_gry_np", "mask_np")
def test_reg_image_loader_downsampling(im_gry_np, mask_np):
    """Downsampling by 2 halves size and doubles spacing for image and mask."""
    img = reg_image_loader(
        im_gry_np, 1, preprocessing={"downsampling": 2}, mask=mask_np
    )
    img.read_reg_image()
    for obj in (img.reg_image, img.mask):
        assert obj.GetSize() == (1024, 1024)
        assert obj.GetSpacing() == (2, 2)
@pytest.mark.usefixtures("im_gry_np", "mask_np")
def test_reg_image_loader_to_itk(im_gry_np, mask_np):
    """sitk -> itk conversion preserves spacing for both image and mask."""
    img = reg_image_loader(im_gry_np, 0.65, mask=mask_np)
    img.read_reg_image()
    img.reg_image_sitk_to_itk(cast_to_float32=True)
    for obj in (img.reg_image, img.mask):
        assert isinstance(obj, itk.Image)
        assert obj.GetSpacing() == (0.65, 0.65)
@pytest.mark.usefixtures("im_gry_np", "mask_geojson")
def test_gj_reg_image_loader_mask(im_gry_np, mask_geojson):
    """A GeoJSON mask is rasterized to a sitk.Image at image spacing."""
    img = reg_image_loader(im_gry_np, 0.65, mask=mask_geojson)
    img.read_reg_image()
    assert img.mask is not None
    assert isinstance(img.mask, sitk.Image)
    assert img.mask.GetSpacing() == (0.65, 0.65)


@pytest.mark.usefixtures("im_gry_np", "mask_geojson")
def test_gj_reg_image_loader_mask_rot(im_gry_np, mask_geojson):
    """GeoJSON mask survives rotation preprocessing."""
    img = reg_image_loader(
        im_gry_np, 0.65, preprocessing={"rot_cc": 90}, mask=mask_geojson
    )
    img.read_reg_image()
    assert img.mask is not None
    assert isinstance(img.mask, sitk.Image)
    assert img.mask.GetSpacing() == (0.65, 0.65)


@pytest.mark.usefixtures("im_gry_np", "mask_geojson")
def test_gj_reg_image_loader_mask_rot_flip(im_gry_np, mask_geojson):
    """GeoJSON mask survives combined rotation and flip."""
    img = reg_image_loader(
        im_gry_np,
        0.65,
        preprocessing={"rot_cc": 90, "flip": "h"},
        mask=mask_geojson,
    )
    img.read_reg_image()
    assert img.mask is not None
    assert isinstance(img.mask, sitk.Image)
    assert img.mask.GetSpacing() == (0.65, 0.65)


@pytest.mark.usefixtures("im_gry_np", "mask_geojson")
def test_gj_reg_image_loader_mask_flip(im_gry_np, mask_geojson):
    """GeoJSON mask survives flip preprocessing."""
    img = reg_image_loader(
        im_gry_np, 0.65, preprocessing={"flip": "v"}, mask=mask_geojson
    )
    img.read_reg_image()
    assert img.mask is not None
    assert isinstance(img.mask, sitk.Image)
    assert img.mask.GetSpacing() == (0.65, 0.65)
@pytest.mark.usefixtures("im_gry_np", "mask_geojson")
def test_gj_reg_image_loader_downsampling(im_gry_np, mask_geojson):
    """Downsampling applies equally to the image and the GeoJSON mask."""
    img = reg_image_loader(
        im_gry_np, 1, preprocessing={"downsampling": 2}, mask=mask_geojson
    )
    img.read_reg_image()
    for obj in (img.reg_image, img.mask):
        assert obj.GetSize() == (1024, 1024)
        assert obj.GetSpacing() == (2, 2)
@pytest.mark.usefixtures("im_gry_np", "mask_geojson")
def test_gj_reg_image_loader_to_itk(im_gry_np, mask_geojson):
    """sitk -> itk conversion works with a GeoJSON-derived mask."""
    img = reg_image_loader(im_gry_np, 0.65, mask=mask_geojson)
    img.read_reg_image()
    img.reg_image_sitk_to_itk(cast_to_float32=True)
    for obj in (img.reg_image, img.mask):
        assert isinstance(obj, itk.Image)
        assert obj.GetSpacing() == (0.65, 0.65)
@pytest.mark.usefixtures("dask_im_rgb_np")
def test_reg_image_loader_dask_rgb(dask_im_rgb_np):
    """Dask-backed interleaved RGB arrays are detected as RGB."""
    img = reg_image_loader(dask_im_rgb_np, 0.65)
    img.read_reg_image()
    assert len(img.shape) == 3
    assert img.shape[-1] == 3
    assert img.is_rgb
    assert img.n_ch == 3
    assert img.reg_image.GetSpacing() == (0.65, 0.65)
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1


@pytest.mark.usefixtures("dask_im_gry_np")
def test_reg_image_loader_dask_gry(dask_im_gry_np):
    """Dask-backed grayscale arrays load as single-channel non-RGB."""
    img = reg_image_loader(dask_im_gry_np, 0.65)
    img.read_reg_image()
    assert len(img.shape) == 3
    assert img.shape[0] == 1
    assert img.is_rgb is False
    assert img.n_ch == 1
    assert img.reg_image.GetSpacing() == (0.65, 0.65)
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1


@pytest.mark.usefixtures("dask_im_mch_np")
def test_reg_image_loader_dask_mch(dask_im_mch_np):
    """Dask-backed channel-first arrays load as multichannel non-RGB."""
    img = reg_image_loader(dask_im_mch_np, 0.65)
    img.read_reg_image()
    assert len(img.shape) == 3
    assert img.shape[0] == 3
    assert img.is_rgb is False
    assert img.n_ch == 3
    assert img.reg_image.GetSpacing() == (0.65, 0.65)
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
@pytest.mark.usefixtures("zarr_im_rgb_np")
def test_reg_image_loader_zarr_rgb(zarr_im_rgb_np):
    """Zarr-backed interleaved RGB arrays are detected as RGB."""
    img = reg_image_loader(zarr_im_rgb_np, 0.65)
    img.read_reg_image()
    assert len(img.shape) == 3
    assert img.shape[-1] == 3
    assert img.is_rgb
    assert img.n_ch == 3
    assert img.reg_image.GetSpacing() == (0.65, 0.65)
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1


@pytest.mark.usefixtures("zarr_im_gry_np")
def test_reg_image_loader_zarr_gry(zarr_im_gry_np):
    """Zarr-backed grayscale arrays load as single-channel non-RGB."""
    img = reg_image_loader(zarr_im_gry_np, 0.65)
    img.read_reg_image()
    assert len(img.shape) == 3
    assert img.shape[0] == 1
    assert img.is_rgb is False
    assert img.n_ch == 1
    assert img.reg_image.GetSpacing() == (0.65, 0.65)
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1


@pytest.mark.usefixtures("zarr_im_mch_np")
def test_reg_image_loader_zarr_mch(zarr_im_mch_np):
    """Zarr-backed channel-first arrays load as multichannel non-RGB."""
    img = reg_image_loader(zarr_im_mch_np, 0.65)
    img.read_reg_image()
    assert len(img.shape) == 3
    assert img.shape[0] == 3
    assert img.is_rgb is False
    assert img.n_ch == 3
    assert img.reg_image.GetSpacing() == (0.65, 0.65)
    assert img.reg_image.GetNumberOfComponentsPerPixel() == 1
@pytest.mark.usefixtures("im_rgb_np_uneven")
def test_reg_image_loader_rgb_uneven_geojson_mask(im_rgb_np_uneven):
    """GeoJSON mask is rasterized to match an uneven-sized RGB image.

    Fix: this function was previously named
    ``test_reg_image_loader_zarr_mch``, redefining (and silently shadowing)
    the zarr multichannel test above even though it actually exercises an
    uneven RGB numpy image with a GeoJSON file mask. Renamed so both tests
    are collected and the name reflects the behavior under test.
    """
    reg_image = reg_image_loader(im_rgb_np_uneven, 0.65, mask=GEOJSON_FP)
    reg_image.read_reg_image()
    assert reg_image.reg_image.GetSpacing() == (0.65, 0.65)
    assert reg_image.reg_image.GetNumberOfComponentsPerPixel() == 1
    # the rasterized mask must align exactly with the registration image
    assert reg_image.mask.GetSize() == reg_image.reg_image.GetSize()
    assert reg_image.mask.GetSpacing() == reg_image.reg_image.GetSpacing()
| 16,398 | 37.768322 | 79 | py |
wsireg | wsireg-master/tests/test_shapes.py | import os
from copy import deepcopy
import numpy as np
import pytest
from wsireg.reg_shapes import RegShapes
HERE = os.path.dirname(__file__)
GEOJSON_FP = os.path.join(HERE, "fixtures/polygons.geojson")
@pytest.mark.usefixtures("complex_transform")
def test_RegShapes_transform(complex_transform):
    """Applying a transform changes the shape coordinate arrays."""
    shapes = RegShapes(GEOJSON_FP)
    before = deepcopy(shapes.shape_data[0])
    shapes.transform_shapes(complex_transform)
    after = shapes.transformed_shape_data[0]
    assert np.array_equal(before["array"], after["array"]) is False
def test_RegShapes_shape_types_numpy():
    """Numpy point arrays convert to the matching GeoJSON geometry types."""
    points = np.random.randint(1, 100, (2, 20))

    # one dict per point -> Point geometries with (x, y) coordinate pairs
    point_data = [
        {"array": pt, "shape_type": 'point', "shape_name": 'point'}
        for pt in points.transpose()
    ]
    ri = RegShapes(point_data)
    assert len(ri.shape_data_gj[0]["geometry"]["coordinates"]) == 2
    assert ri.shape_data_gj[0]["geometry"]["type"] == "Point"

    # a single 2x20 array -> one geometry with 20 coordinates
    for shape_type, gj_type in (
        ('multipoint', "MultiPoint"),
        ('linestring', "LineString"),
    ):
        ri = RegShapes(
            [
                {
                    "array": points,
                    "shape_type": shape_type,
                    "shape_name": shape_type,
                }
            ]
        )
        assert len(ri.shape_data_gj[0]["geometry"]["coordinates"]) == 20
        assert ri.shape_data_gj[0]["geometry"]["type"] == gj_type
def test_RegShapes_drawmask():
    """Binary and labelled masks are rasterized from polygon vertices."""
    triangle = np.array([[11, 13], [111, 113], [22, 246]])
    rs = RegShapes([triangle, triangle * 2])
    binary = rs.draw_mask((512, 512), labels=False)
    assert np.sum(binary) > 0
    labelled = rs.draw_mask((512, 512), labels=True)
    # background (0) plus one label per polygon
    assert len(np.unique(labelled)) == 3
| 2,064 | 25.474359 | 74 | py |
wsireg | wsireg-master/tests/test_im_utils.py | import os
import dask.array as da
import numpy as np
import pytest
import zarr
from tifffile import imread
from wsireg.parameter_maps.preprocessing import ImagePreproParams
from wsireg.utils.im_utils import (
CziRegImageReader,
czi_tile_grayscale,
ensure_dask_array,
get_sitk_image_info,
get_tifffile_info,
grayscale,
guess_rgb,
read_preprocess_array,
sitk_backend,
tf_get_largest_series,
tifffile_dask_backend,
tifffile_zarr_backend,
zarr_get_base_pyr_layer,
)
# private data logic borrowed from https://github.com/cgohlke/tifffile/tests/test_tifffile.py
HERE = os.path.dirname(__file__)
PRIVATE_DIR = os.path.join(HERE, "private_data")
REASON = "private data"
# skip tests needing proprietary sample images when the folder is absent
SKIP_PRIVATE = not os.path.exists(PRIVATE_DIR)
@pytest.mark.usefixtures(
    "disk_im_gry_pyr",
    "disk_im_mch_pyr",
    "disk_im_rgb_pyr",
    "disk_im_gry",
    "disk_im_mch",
    "disk_im_rgb",
)
def test_zarr_get_base_pyr_layer(
    disk_im_gry_pyr,
    disk_im_mch_pyr,
    disk_im_rgb_pyr,
    disk_im_gry,
    disk_im_mch,
    disk_im_rgb,
):
    """Base pyramid layer shape is found for pyramidal and flat TIFFs."""

    def _base_layer(fp):
        # open via tifffile's zarr bridge, then select the base resolution
        return zarr_get_base_pyr_layer(zarr.open(imread(fp, aszarr=True)))

    cases = [
        (disk_im_gry_pyr, (2048, 2048)),
        (disk_im_mch_pyr, (3, 2048, 2048)),
        (disk_im_rgb_pyr, (2048, 2048, 3)),
        (disk_im_gry, (2048, 2048)),
        (disk_im_mch, (3, 2048, 2048)),
        (disk_im_rgb, (2048, 2048, 3)),
    ]
    for fp, expected_shape in cases:
        assert _base_layer(fp).shape == expected_shape
def test_ensure_dask_array():
    """numpy, dask and zarr inputs all come back as dask arrays."""
    shape, dtype = (128, 128, 3), np.uint8
    sources = [
        np.zeros(shape, dtype=dtype),
        da.zeros(shape, dtype=dtype),
        zarr.zeros(shape, dtype=dtype),
    ]
    for arr in sources:
        assert isinstance(ensure_dask_array(arr), da.Array)
def test_read_preprocess_array():
    """Channel selection and RGB handling of in-memory arrays."""
    rgb = ensure_dask_array(np.zeros((128, 128, 3), dtype=np.uint8))
    mch = ensure_dask_array(np.zeros((3, 128, 128), dtype=np.uint8))
    gry = ensure_dask_array(np.zeros((128, 128), dtype=np.uint8))

    # brightfield RGB input collapses to a single greyscale plane,
    # whether interleaved or forced from channel-first layout
    std_rgb = read_preprocess_array(
        rgb,
        preprocessing=ImagePreproParams(image_type="BF"),
        force_rgb=None,
    )
    nonstd_rgb = read_preprocess_array(
        mch,
        preprocessing=ImagePreproParams(image_type="BF"),
        force_rgb=True,
    )
    for im in (std_rgb, nonstd_rgb):
        assert im.GetNumberOfComponentsPerPixel() == 1
        assert im.GetSize() == (128, 128)

    # fluorescence with no selection keeps every channel
    all_ch_mc = read_preprocess_array(
        mch, preprocessing=ImagePreproParams(image_type="FL")
    )
    assert all_ch_mc.GetDepth() == 3
    assert all_ch_mc.GetSize() == (128, 128, 3)

    # one selected channel -> 2D image; two channels -> depth-2 stack
    mch_cases = [
        ([0], 0, (128, 128)),
        ([1], 0, (128, 128)),
        ([2], 0, (128, 128)),
        ([0, 1], 2, (128, 128, 2)),
        ([1, 2], 2, (128, 128, 2)),
        ([0, 2], 2, (128, 128, 2)),
    ]
    for indices, depth, size in mch_cases:
        im = read_preprocess_array(
            mch, preprocessing=ImagePreproParams(ch_indices=indices)
        )
        assert im.GetDepth() == depth
        assert im.GetSize() == size

    # channel selection on a single-plane image is a no-op
    gry_cases = [
        read_preprocess_array(gry, ImagePreproParams(image_type="FL")),
        read_preprocess_array(
            gry, preprocessing=ImagePreproParams(ch_indices=[0])
        ),
        read_preprocess_array(
            gry, preprocessing=ImagePreproParams(ch_indices=[0, 1])
        ),
    ]
    for im in gry_cases:
        assert im.GetDepth() == 0
        assert im.GetSize() == (128, 128)
@pytest.mark.usefixtures(
    "disk_im_gry_pyr",
    "disk_im_mch_pyr",
    "disk_im_rgb_pyr",
    "disk_im_gry",
    "disk_im_mch",
    "disk_im_rgb",
)
def test_tifffile_zarr_backend(
    disk_im_gry_pyr,
    disk_im_mch_pyr,
    disk_im_rgb_pyr,
    disk_im_gry,
    disk_im_mch,
    disk_im_rgb,
):
    """zarr backend reads pyramidal and flat TIFFs at full resolution."""
    cases = [
        (disk_im_gry_pyr, "FL", (2048, 2048)),
        (disk_im_mch_pyr, "FL", (2048, 2048, 3)),
        (disk_im_rgb_pyr, "BF", (2048, 2048)),
        (disk_im_gry, "FL", (2048, 2048)),
        (disk_im_mch, "FL", (2048, 2048, 3)),
        (disk_im_rgb, "BF", (2048, 2048)),
    ]
    for fp, image_type, expected_size in cases:
        im = tifffile_zarr_backend(
            fp,
            largest_series=0,
            preprocessing=ImagePreproParams(image_type=image_type),
        )
        assert im.GetSize() == expected_size
@pytest.mark.usefixtures(
    "disk_im_gry_pyr",
    "disk_im_mch_pyr",
    "disk_im_rgb_pyr",
    "disk_im_gry",
    "disk_im_mch",
    "disk_im_rgb",
)
def test_tifffile_dask_backend(
    disk_im_gry_pyr,
    disk_im_mch_pyr,
    disk_im_rgb_pyr,
    disk_im_gry,
    disk_im_mch,
    disk_im_rgb,
):
    """dask backend reads pyramidal and flat TIFFs at full resolution."""
    cases = [
        (disk_im_gry_pyr, "FL", (2048, 2048)),
        (disk_im_mch_pyr, "FL", (2048, 2048, 3)),
        (disk_im_rgb_pyr, "BF", (2048, 2048)),
        (disk_im_gry, "FL", (2048, 2048)),
        (disk_im_mch, "FL", (2048, 2048, 3)),
        (disk_im_rgb, "BF", (2048, 2048)),
    ]
    for fp, image_type, expected_size in cases:
        im = tifffile_dask_backend(
            fp,
            largest_series=0,
            preprocessing=ImagePreproParams(image_type=image_type),
        )
        assert im.GetSize() == expected_size
@pytest.mark.usefixtures(
    "disk_im_gry_pyr",
    "disk_im_mch_pyr",
    "disk_im_rgb_pyr",
    "disk_im_gry",
    "disk_im_mch",
    "disk_im_rgb",
)
def test_tifffile_dask_backend_repeat(
    disk_im_gry_pyr,
    disk_im_mch_pyr,
    disk_im_rgb_pyr,
    disk_im_gry,
    disk_im_mch,
    disk_im_rgb,
):
    """Repeated dask-backend reads return the same image sizes.

    Fix: this function was a byte-for-byte redefinition of
    ``test_tifffile_dask_backend`` above; the duplicate name silently
    shadowed the earlier test so only one copy was ever collected by
    pytest. Renamed so both run. NOTE(review): if a repeat read adds no
    coverage, this copy should simply be deleted.
    """
    cases = [
        (disk_im_gry_pyr, "FL", (2048, 2048)),
        (disk_im_mch_pyr, "FL", (2048, 2048, 3)),
        (disk_im_rgb_pyr, "BF", (2048, 2048)),
        (disk_im_gry, "FL", (2048, 2048)),
        (disk_im_mch, "FL", (2048, 2048, 3)),
        (disk_im_rgb, "BF", (2048, 2048)),
    ]
    for fp, image_type, expected_size in cases:
        im = tifffile_dask_backend(
            fp,
            largest_series=0,
            preprocessing=ImagePreproParams(image_type=image_type),
        )
        assert im.GetSize() == expected_size
@pytest.mark.usefixtures(
    "disk_im_gry",
    "disk_im_mch_notile",
    "disk_im_rgb",
)
def test_sitk_backend(
    disk_im_gry,
    disk_im_mch_notile,
    disk_im_rgb,
):
    """SimpleITK fallback reader returns full-size images.

    NOTE: the multichannel (non-tiled) case is currently disabled in this
    test and only grayscale/RGB inputs are exercised.
    """
    im_gry = sitk_backend(
        str(disk_im_gry), preprocessing=ImagePreproParams(image_type="FL")
    )
    im_rgb = sitk_backend(
        str(disk_im_rgb), preprocessing=ImagePreproParams(image_type="BF")
    )
    assert im_gry.GetSize() == (2048, 2048)
    assert im_rgb.GetSize() == (2048, 2048)
def test_guess_rgb():
    """Shapes ending in 3 or 4 channels are detected as interleaved RGB(A)."""
    expectations = {
        (2048, 2048): False,       # plain grayscale
        (3, 2048, 2048): False,    # channel-first multichannel
        (2048, 2048, 3): True,     # interleaved RGB
        (2048, 2048, 4): True,     # interleaved RGBA
    }
    for shape, expected in expectations.items():
        assert guess_rgb(shape) is expected
def test_greyscale():
    """Interleaved and channel-first RGB collapse to identical 2D images."""
    il_shape, noil_shape = (2048, 2048, 3), (3, 2048, 2048)
    np_il = np.ones(il_shape, dtype=np.uint8)
    np_noil = np.ones(noil_shape, dtype=np.uint8)
    da_il = da.ones(il_shape, dtype=np.uint8, chunks=(512, 512, 3))
    da_noil = da.ones(noil_shape, dtype=np.uint8, chunks=(512, 512, 3))

    np_gs_il = grayscale(np_il, is_interleaved=True)
    np_gs_noil = grayscale(np_noil, is_interleaved=False)
    assert np_gs_il.shape == (2048, 2048)
    assert np_gs_noil.shape == (2048, 2048)
    assert np.array_equal(np_gs_il, np_gs_noil)

    da_gs_il = grayscale(da_il, is_interleaved=True)
    da_gs_noil = grayscale(da_noil, is_interleaved=False)
    assert da_gs_il.shape == (2048, 2048)
    assert da_gs_noil.shape == (2048, 2048)
    assert np.array_equal(da_gs_il.compute(), da_gs_noil.compute())
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_czi_tile_greyscale():
    """RGB CZI subblock tiles collapse to uint8 single-channel tiles."""
    czi = CziRegImageReader(os.path.join(PRIVATE_DIR, "czi_rgb.czi"))
    subblock = czi.filtered_subblock_directory[5].data_segment()
    tile = subblock.data(resize=False, order=0)
    gs_tile = czi_tile_grayscale(tile)
    # leading axes unchanged; channel axis reduced to one uint8 plane
    assert tile.shape[:5] == gs_tile.shape[:5]
    assert gs_tile.shape[-1] == 1
    assert gs_tile.dtype == np.uint8
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_CziRegImageReader_rgb():
    """RGB CZI reads as 2D when greyscaled and 3D otherwise."""
    czi = CziRegImageReader(os.path.join(PRIVATE_DIR, "czi_rgb.czi"))
    assert len(np.squeeze(czi.sub_asarray_rgb(greyscale=True)).shape) == 2
    assert len(np.squeeze(czi.sub_asarray_rgb(greyscale=False)).shape) == 3
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_CziRegImageReader_mc():
    """Channel sub-setting of a multichannel CZI matches slicing the full read.

    Fix: the original asserted the shape of ``ch1_out`` twice (copy-paste
    duplicate); the redundant assertion was removed and the per-channel and
    channel-pair checks were folded into loops.
    """
    image_fp = os.path.join(PRIVATE_DIR, "czi_4ch_16bit.czi")
    czi = CziRegImageReader(image_fp)

    mc = np.squeeze(czi.asarray(max_workers=1))
    assert mc.shape == (4, 4305, 4194)

    # single-channel reads equal the corresponding plane of the full read
    for ch in range(4):
        single = np.squeeze(czi.sub_asarray(channel_idx=[ch], max_workers=1))
        assert single.shape == (4305, 4194)
        assert np.array_equal(mc[ch, :, :], single)

    # two-channel reads equal fancy-indexing the full read
    for pair in ([0, 2], [1, 3], [0, 3], [1, 2], [2, 3]):
        sub = np.squeeze(czi.sub_asarray(channel_idx=pair, max_workers=1))
        assert sub.shape == (2, 4305, 4194)
        assert np.array_equal(sub, mc[pair, :, :])
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_tf_get_largest_series():
    """Largest-series detection across several WSI container formats."""
    expected_series = {
        "czi_4ch_16bit.ome.tiff": 0,
        "czi_rgb.ome.tiff": 0,
        "huron_rgb.tif": 0,
        "scn_rgb.scn": 1,
    }
    for fname, series in expected_series.items():
        fp = os.path.join(PRIVATE_DIR, fname)
        assert tf_get_largest_series(fp) == series
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_get_sitk_image_info():
    """JPEG/PNG RGB files report [450, 750, 3] extent and uint8 dtype."""
    for fname in ("testjpegrgb.jpg", "testpngrgb.png"):
        info = get_sitk_image_info(os.path.join(PRIVATE_DIR, fname))
        assert np.array_equal(info[0], [450, 750, 3])
        assert info[1] == np.uint8
@pytest.mark.skipif(SKIP_PRIVATE, reason=REASON)
def test_get_tifffile_info_private():
    """Proprietary TIFF/SCN files report the expected extent and dtype."""
    cases = [
        ("huron_rgb.tif", [7662, 15778, 3], np.uint8),
        ("scn_rgb.scn", [11776, 18528, 3], np.uint8),
    ]
    for fname, shape, dtype in cases:
        info = get_tifffile_info(os.path.join(PRIVATE_DIR, fname))
        assert np.array_equal(info[0], shape)
        assert info[1] == dtype
@pytest.mark.usefixtures(
    "disk_im_gry_pyr",
    "disk_im_mch_pyr",
    "disk_im_rgb_pyr",
)
def test_get_tifffile_info_public(
    disk_im_gry_pyr,
    disk_im_mch_pyr,
    disk_im_rgb_pyr,
):
    """Generated pyramidal TIFFs report the expected extent and dtype."""
    cases = [
        (disk_im_rgb_pyr, [2048, 2048, 3], np.uint8),
        (disk_im_mch_pyr, [3, 2048, 2048], np.uint16),
        (disk_im_gry_pyr, [1, 2048, 2048], np.uint16),
    ]
    for fp, shape, dtype in cases:
        info = get_tifffile_info(fp)
        assert np.array_equal(info[0], shape)
        assert info[1] == dtype
| 16,740 | 30.706439 | 93 | py |
wsireg | wsireg-master/tests/__init__.py | """Unit test package for wsireg."""
| 36 | 17.5 | 35 | py |
wsireg | wsireg-master/tests/test_writer.py | from pathlib import Path
import os
import random
import string
import numpy as np
import pytest
from tifffile import imread
import dask.array as da
from wsireg.reg_images.loader import reg_image_loader
from wsireg.reg_images.merge_reg_image import MergeRegImage
from wsireg.reg_transforms.reg_transform_seq import RegTransformSeq
from wsireg.writers.merge_ome_tiff_writer import MergeOmeTiffWriter
from wsireg.writers.ome_tiff_writer import OmeTiffWriter
from wsireg.writers.tiled_ome_tiff_writer import OmeTiffTiledWriter
HERE = os.path.dirname(__file__)
TFORM_FP = os.path.join(HERE, "fixtures/complex_linear_reg_transform.json")
def gen_project_name_str():
    """Return a random 10-character lowercase ASCII project name."""
    return ''.join(
        random.choice(string.ascii_lowercase) for _ in range(10)
    )
@pytest.mark.usefixtures("complex_transform")
def test_OmeTiffWriter_by_plane(complex_transform, tmp_path):
    """Plane-wise OME-TIFF writing produces the expected output extent."""
    src = reg_image_loader(np.ones((1024, 1024), dtype=np.uint8), 1)
    writer = OmeTiffWriter(
        src, reg_transform_seq=RegTransformSeq(complex_transform)
    )
    out_fp = writer.write_image_by_plane(
        gen_project_name_str(),
        output_dir=str(tmp_path),
    )
    assert reg_image_loader(out_fp, 2).shape == (1, 1024, 1024)
@pytest.mark.usefixtures("complex_transform")
def test_OmeTiffWriter_by_tile(complex_transform, tmp_path):
    """Tile-wise OME-TIFF writing produces the expected output extent."""
    src = reg_image_loader(np.ones((4096, 4096), dtype=np.uint8), 0.5)
    writer = OmeTiffTiledWriter(
        src, reg_transform_seq=RegTransformSeq(complex_transform)
    )
    out_fp = writer.write_image_by_tile(
        gen_project_name_str(),
        output_dir=str(tmp_path),
        zarr_temp_dir=tmp_path,
    )
    assert reg_image_loader(out_fp, 2).shape == (1, 1024, 1024)
@pytest.mark.usefixtures("simple_transform_affine")
def test_OmeTiffWriter_compare_tile_plane(simple_transform_affine, tmp_path):
    """Tile-wise and plane-wise writers agree on shape and channel data."""
    src = reg_image_loader(np.ones((1024, 1024), dtype=np.uint8), 1)
    rts = RegTransformSeq(simple_transform_affine)
    tile_writer = OmeTiffTiledWriter(src, reg_transform_seq=rts)
    plane_writer = OmeTiffWriter(src, reg_transform_seq=rts)

    tile_fp = tile_writer.write_image_by_tile(
        gen_project_name_str(),
        output_dir=str(tmp_path),
        zarr_temp_dir=tmp_path,
    )
    plane_fp = plane_writer.write_image_by_plane(
        gen_project_name_str(),
        output_dir=str(tmp_path),
    )

    tile_img = reg_image_loader(tile_fp, 2)
    plane_img = reg_image_loader(plane_fp, 2)
    assert plane_img.shape == tile_img.shape
    assert np.array_equal(
        tile_img.read_single_channel(0), plane_img.read_single_channel(0)
    )
@pytest.mark.usefixtures("simple_transform_affine")
def test_OmeTiffWriter_compare_tile_plane_rgb(
    simple_transform_affine, tmp_path
):
    """Tile-wise and plane-wise writers emit identical RGB pixel data.

    Fix: removed a leftover ``Path(by_tile_fp).as_posix()`` expression whose
    result was discarded (dead statement left over from debugging).
    """
    reg_image = reg_image_loader(
        np.random.randint(0, 255, (1024, 1024, 3), dtype=np.uint8), 1
    )
    rts = RegTransformSeq(simple_transform_affine)
    ometiffwriter = OmeTiffWriter(reg_image, reg_transform_seq=rts)
    ometiletiffwriter = OmeTiffTiledWriter(reg_image, reg_transform_seq=rts)
    by_tile_fp = ometiletiffwriter.write_image_by_tile(
        gen_project_name_str(),
        output_dir=str(tmp_path),
        zarr_temp_dir=tmp_path,
    )
    by_plane_fp = ometiffwriter.write_image_by_plane(
        gen_project_name_str(),
        output_dir=str(tmp_path),
    )
    im_tile = imread(by_tile_fp)
    im_plane = imread(by_plane_fp)
    assert np.array_equal(im_tile, im_plane)
@pytest.mark.usefixtures("simple_transform_affine_nl")
def test_OmeTiffWriter_compare_tile_plane_rgb_nl(
    simple_transform_affine_nl, tmp_path
):
    """Tile and plane writers agree on RGB output under a non-linear transform."""
    src = reg_image_loader(
        np.random.randint(0, 255, (1024, 1024, 3), dtype=np.uint8), 1
    )
    rts = RegTransformSeq(simple_transform_affine_nl)
    plane_writer = OmeTiffWriter(src, reg_transform_seq=rts)
    tile_writer = OmeTiffTiledWriter(src, reg_transform_seq=rts)
    tile_fp = tile_writer.write_image_by_tile(
        gen_project_name_str(),
        output_dir=str(tmp_path),
    )
    plane_fp = plane_writer.write_image_by_plane(
        gen_project_name_str(),
        output_dir=str(tmp_path),
    )
    assert np.array_equal(imread(tile_fp), imread(plane_fp))
@pytest.mark.usefixtures("simple_transform_affine")
def test_OmeTiffWriter_compare_tile_plane_mc(
    simple_transform_affine, tmp_path
):
    """Tile-based and plane-based writers agree for a multichannel (CYX)
    image under an affine transform sequence."""
    image = reg_image_loader(
        np.random.randint(0, 255, (3, 1024, 1024), dtype=np.uint8), 1
    )
    tform_seq = RegTransformSeq(simple_transform_affine)
    plane_writer = OmeTiffWriter(image, reg_transform_seq=tform_seq)
    tiled_writer = OmeTiffTiledWriter(image, reg_transform_seq=tform_seq)
    tiled_fp = tiled_writer.write_image_by_tile(
        gen_project_name_str(),
        output_dir=str(tmp_path),
    )
    plane_fp = plane_writer.write_image_by_plane(
        gen_project_name_str(),
        output_dir=str(tmp_path),
    )
    tiled_pixels = imread(tiled_fp)
    plane_pixels = imread(plane_fp)
    assert np.array_equal(tiled_pixels, plane_pixels)
@pytest.mark.usefixtures("simple_transform_affine_nl")
def test_OmeTiffWriter_compare_tile_plane_mc_nl(
    simple_transform_affine_nl, tmp_path
):
    """Tile-based and plane-based writers agree for a multichannel (CYX)
    image under a non-linear transform sequence."""
    reg_image = reg_image_loader(
        np.random.randint(0, 255, (3, 1024, 1024), dtype=np.uint8), 1
    )
    rts = RegTransformSeq(simple_transform_affine_nl)
    ometiffwriter = OmeTiffWriter(reg_image, reg_transform_seq=rts)
    ometiletiffwriter = OmeTiffTiledWriter(reg_image, reg_transform_seq=rts)
    by_tile_fp = ometiletiffwriter.write_image_by_tile(
        gen_project_name_str(),
        output_dir=str(tmp_path),
    )
    by_plane_fp = ometiffwriter.write_image_by_plane(
        gen_project_name_str(),
        output_dir=str(tmp_path),
    )
    im_tile = imread(by_tile_fp)
    im_plane = imread(by_plane_fp)
    assert np.array_equal(im_tile, im_plane)
def test_OmeTiffWriter_compare_tile_plane_mc_nl_large(tmp_path):
    """Tile-based and plane-based writers agree for a large (2 x 8192 x 8192)
    dask-backed multichannel image using the transform at TFORM_FP."""
    im_array = da.from_array(
        np.random.randint(0, 255, (2, 2**13, 2**13), dtype=np.uint8),
        chunks=(1, 1024, 1024),
    )
    reg_image = reg_image_loader(im_array, 0.5)
    rts = RegTransformSeq(TFORM_FP)
    ometiffwriter = OmeTiffWriter(reg_image, reg_transform_seq=rts)
    ometiletiffwriter = OmeTiffTiledWriter(reg_image, reg_transform_seq=rts)
    by_tile_fp = ometiletiffwriter.write_image_by_tile(
        gen_project_name_str(),
        output_dir=str(tmp_path),
    )
    by_plane_fp = ometiffwriter.write_image_by_plane(
        gen_project_name_str(),
        output_dir=str(tmp_path),
    )
    im_tile = imread(by_tile_fp)
    im_plane = imread(by_plane_fp)
    assert np.array_equal(im_tile, im_plane)
def test_OmeTiffWriter_compare_tile_plane_rgb_nl_large(tmp_path):
    """Tile-based and plane-based writers agree for a large (8192 x 8192 x 3)
    dask-backed interleaved RGB image using the transform at TFORM_FP."""
    im_array = da.from_array(
        np.random.randint(0, 255, (2**13, 2**13, 3), dtype=np.uint8),
        chunks=(1024, 1024, 3),
    )
    reg_image = reg_image_loader(im_array, 0.5)
    rts = RegTransformSeq(TFORM_FP)
    ometiffwriter = OmeTiffWriter(reg_image, reg_transform_seq=rts)
    ometiletiffwriter = OmeTiffTiledWriter(reg_image, reg_transform_seq=rts)
    by_tile_fp = ometiletiffwriter.write_image_by_tile(
        gen_project_name_str(),
        output_dir=str(tmp_path),
    )
    by_plane_fp = ometiffwriter.write_image_by_plane(
        gen_project_name_str(),
        output_dir=str(tmp_path),
    )
    im_tile = imread(by_tile_fp)
    im_plane = imread(by_plane_fp)
    assert np.array_equal(im_tile, im_plane)
@pytest.mark.usefixtures("simple_transform_affine_nl")
def test_MergeOmeTiffWriter_mc(simple_transform_affine_nl, tmp_path):
    """A merged write of three 3-channel images equals the plane-wise
    concatenation of the same three images written individually with the
    same transform sequence."""
    reg_image1 = np.random.randint(0, 255, (3, 1024, 1024), dtype=np.uint8)
    reg_image2 = np.random.randint(0, 255, (3, 1024, 1024), dtype=np.uint8)
    reg_image3 = np.random.randint(0, 255, (3, 1024, 1024), dtype=np.uint8)
    mreg_image = MergeRegImage(
        [reg_image1, reg_image2, reg_image3],
        [1, 1, 1],
        channel_names=[["1", "2", "3"], ["1", "2", "3"], ["1", "2", "3"]],
    )
    rts = RegTransformSeq(simple_transform_affine_nl)
    merge_ometiffwriter = MergeOmeTiffWriter(
        mreg_image, reg_transform_seqs=[rts, rts, rts]
    )
    by_plane_fp = merge_ometiffwriter.merge_write_image_by_plane(
        "merge_testimage_by_plane",
        ["1", "2", "3"],
        output_dir=str(tmp_path),
    )
    im_plane = imread(by_plane_fp)
    # write each source image on its own for comparison against the merge
    reg_image1_loaded = reg_image_loader(reg_image1, 1)
    ometiffwriter = OmeTiffWriter(reg_image1_loaded, reg_transform_seq=rts)
    by_plane_fp_s1 = ometiffwriter.write_image_by_plane(
        "testimage_by_plane_s1",
        output_dir=str(tmp_path),
    )
    im_plane_s1 = imread(by_plane_fp_s1)
    reg_image2_loaded = reg_image_loader(reg_image2, 1)
    ometiffwriter = OmeTiffWriter(reg_image2_loaded, reg_transform_seq=rts)
    by_plane_fp_s2 = ometiffwriter.write_image_by_plane(
        "testimage_by_plane_s2",
        output_dir=str(tmp_path),
    )
    im_plane_s2 = imread(by_plane_fp_s2)
    reg_image3_loaded = reg_image_loader(reg_image3, 1)
    ometiffwriter = OmeTiffWriter(reg_image3_loaded, reg_transform_seq=rts)
    by_plane_fp_s3 = ometiffwriter.write_image_by_plane(
        "testimage_by_plane_s3",
        output_dir=str(tmp_path),
    )
    im_plane_s3 = imread(by_plane_fp_s3)
    # merged stack = 3 images x 3 channels = 9 planes, in input order
    assert im_plane.shape[0] == 9
    assert np.array_equal(im_plane[0:3, :, :], im_plane_s1)
    assert np.array_equal(im_plane[3:6, :, :], im_plane_s2)
    assert np.array_equal(im_plane[6:9, :, :], im_plane_s3)
@pytest.mark.usefixtures("simple_transform_affine_nl")
def test_MergeOmeTiffWriter_mix_merge(simple_transform_affine_nl, tmp_path):
    """Merging images with mixed layouts/dtypes (interleaved uint16 RGB,
    planar uint16, planar uint8) still produces a 9-plane stack."""
    reg_image1 = np.random.randint(0, 255, (1024, 1024, 3), dtype=np.uint16)
    reg_image2 = np.random.randint(0, 255, (3, 1024, 1024), dtype=np.uint16)
    reg_image3 = np.random.randint(0, 255, (3, 1024, 1024), dtype=np.uint8)
    mreg_image = MergeRegImage(
        [reg_image1, reg_image2, reg_image3],
        [1, 1, 1],
        channel_names=[["1", "2", "3"], ["1", "2", "3"], ["1", "2", "3"]],
    )
    rts = RegTransformSeq(simple_transform_affine_nl)
    merge_ometiffwriter = MergeOmeTiffWriter(
        mreg_image, reg_transform_seqs=[rts, rts, rts]
    )
    by_plane_fp = merge_ometiffwriter.merge_write_image_by_plane(
        "merge_testimage_by_plane_mix",
        ["1", "2", "3"],
        output_dir=str(tmp_path),
    )
    im_plane = imread(by_plane_fp)
    assert im_plane.shape[0] == 9
| 10,950 | 31.208824 | 78 | py |
wsireg | wsireg-master/tests/test_wsireg.py | import os
import random
import string
from pathlib import Path
import numpy as np
import pytest
from ome_types import from_xml
from tifffile import TiffFile, imread
import dask
from wsireg.parameter_maps.preprocessing import ImagePreproParams
from wsireg.reg_images.loader import reg_image_loader
from wsireg.wsireg2d import WsiReg2D
HERE = os.path.dirname(__file__)
GEOJSON_FP = os.path.join(HERE, "fixtures/polygons.geojson")
def gen_project_name_str():
    """Return a random 10-character lowercase ASCII project name.

    Uses ``random.choices`` instead of a generator with an unused loop
    variable; behavior (a 10-char lowercase string) is unchanged.
    """
    return ''.join(random.choices(string.ascii_lowercase, k=10))
@pytest.fixture(scope="session")
def data_out_dir(tmpdir_factory):
    """Session-scoped temporary directory for registration outputs."""
    out_dir = tmpdir_factory.mktemp("output")
    return out_dir
@pytest.fixture(scope="session")
def data_im_fp(tmpdir_factory):
    """Session-scoped path stub ``image_fp.tiff`` inside a temp directory
    (the file itself is never created)."""
    return tmpdir_factory.mktemp("image").join("image_fp.tiff")
def test_WsiReg2D_instantiation(data_out_dir):
    """WsiReg2D stores the project name and resolves the output directory."""
    pstr = gen_project_name_str()
    wsi_reg = WsiReg2D(pstr, str(data_out_dir))
    assert wsi_reg.project_name == pstr
    assert wsi_reg.output_dir == Path(str(data_out_dir))
    # NOTE(review): global dask scheduler config hidden inside one test —
    # affects every later test in the session; consider moving to conftest.
    dask.config.set(scheduler="single-threaded")
def test_wsireg2d_add_modality_w_fp(data_out_dir, data_im_fp):
    """Adding a modality from a filepath stores the path, resolution,
    channel metadata, default preprocessing, and no mask."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = str(data_im_fp)
    wsi_reg.add_modality(
        "test_mod",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    assert wsi_reg.modalities.get("test_mod").get("image_filepath") == img_fp1
    assert wsi_reg.modalities.get("test_mod").get("image_res") == 0.65
    assert wsi_reg.modalities.get("test_mod").get("channel_names") == ["test"]
    assert wsi_reg.modalities.get("test_mod").get("channel_colors") == ["red"]
    assert (
        wsi_reg.modalities.get("test_mod").get("preprocessing")
        == ImagePreproParams()
    )
    # PEP 8: compare with None by identity, not equality
    assert wsi_reg.modalities.get("test_mod").get("mask") is None
    assert wsi_reg.n_modalities == 1
@pytest.mark.usefixtures("im_mch_np")
def test_wsireg2d_add_modality_w_np(data_out_dir, im_mch_np):
    """Adding a modality from a numpy array stores the array, resolution,
    channel metadata, default preprocessing, and no mask."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    wsi_reg.add_modality(
        "test_mod",
        im_mch_np,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    assert np.array_equal(
        wsi_reg.modalities.get("test_mod").get("image_filepath"), im_mch_np
    )
    assert wsi_reg.modalities.get("test_mod").get("image_res") == 0.65
    assert wsi_reg.modalities.get("test_mod").get("channel_names") == ["test"]
    assert wsi_reg.modalities.get("test_mod").get("channel_colors") == ["red"]
    assert (
        wsi_reg.modalities.get("test_mod").get("preprocessing")
        == ImagePreproParams()
    )
    # PEP 8: compare with None by identity, not equality
    assert wsi_reg.modalities.get("test_mod").get("mask") is None
    assert wsi_reg.n_modalities == 1
def test_wsireg2d_add_modality_check_names(data_out_dir, data_im_fp):
    """Modality names are registered and retained in insertion order."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    image_path = str(data_im_fp)
    first = "test_mod1"
    second = "test_mod2"
    for name in (first, second):
        wsi_reg.add_modality(
            name,
            image_path,
            0.65,
            channel_names=["test"],
            channel_colors=["red"],
        )
    assert first in wsi_reg.modality_names
    assert second in wsi_reg.modality_names
    assert wsi_reg.modality_names == ["test_mod1", "test_mod2"]
    assert wsi_reg.n_modalities == 2
def test_wsireg2d_add_reg_path_single(data_out_dir, data_im_fp):
    """A single registration path records the correct source->target edge."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = str(data_im_fp)
    modality_name1 = "test_mod1"
    modality_name2 = "test_mod2"
    wsi_reg.add_modality(
        modality_name1,
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_modality(
        modality_name2,
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_reg_path(modality_name1, modality_name2, reg_params=["rigid"])
    assert wsi_reg.n_registrations == 1
    assert wsi_reg.reg_paths.get(modality_name1) == [modality_name2]
    assert (
        wsi_reg.reg_graph_edges[0].get("modalities").get("target")
        == modality_name2
    )
def test_wsireg2d_add_modality_duplicated_error(data_out_dir, data_im_fp):
    """Registering the same modality name twice raises ValueError."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    image_path = str(data_im_fp)
    channel_kwargs = dict(channel_names=["test"], channel_colors=["red"])
    wsi_reg.add_modality("test_mod", image_path, 0.65, **channel_kwargs)
    with pytest.raises(ValueError):
        wsi_reg.add_modality("test_mod", image_path, 0.65, **channel_kwargs)
def test_wsireg2d_add_merge_modality_notfound(data_out_dir, data_im_fp):
    """Requesting a merge that references an unknown modality raises
    ValueError."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = str(data_im_fp)
    wsi_reg.add_modality(
        "test_mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    img_fp1 = str(data_im_fp)
    wsi_reg.add_modality(
        "test_mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    # "test_mod3" was never added, so the merge must be rejected
    with pytest.raises(ValueError):
        wsi_reg.add_merge_modalities("mergetest", ["test_mod1", "test_mod3"])
@pytest.mark.usefixtures("disk_im_gry")
def test_wsireg_run_reg(data_out_dir, disk_im_gry):
    """End-to-end: register two modalities, save transforms, and write both
    transformed images to disk."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = str(disk_im_gry)
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_reg_path(
        "mod1", "mod2", reg_params=["rigid_test", "affine_test"]
    )
    wsi_reg.register_images()
    wsi_reg.save_transformations()
    im_fps = wsi_reg.transform_images(transform_non_reg=True)
    # Path.exists() already returns a bool; comparing `is True` is redundant
    assert Path(im_fps[0]).exists()
    assert Path(im_fps[1]).exists()
@pytest.mark.usefixtures("disk_im_gry")
def test_wsireg_run_reg_with_crop(data_out_dir, disk_im_gry):
    """With a target mask bbox crop: transforming to original size yields
    full-size (2048x2048) outputs, otherwise outputs are bbox-size (512x512)."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = str(disk_im_gry)
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={
            "mask_bbox": [512, 512, 512, 512],
            "crop_to_mask_bbox": True,
        },
    )
    wsi_reg.add_reg_path(
        "mod1", "mod2", reg_params=["rigid_test", "affine_test"]
    )
    wsi_reg.register_images()
    # first pass: transform back to the original (uncropped) extent
    im_fps = wsi_reg.transform_images(
        transform_non_reg=True, to_original_size=True
    )
    registered_image_nocrop = reg_image_loader(im_fps[0], 1)
    unregistered_image_nocrop = reg_image_loader(im_fps[1], 1)
    # second pass: keep the cropped (bbox) extent
    im_fps = wsi_reg.transform_images(
        transform_non_reg=True, to_original_size=False
    )
    wsi_reg.save_transformations()
    registered_image_crop = reg_image_loader(im_fps[0], 1)
    unregistered_image_crop = reg_image_loader(im_fps[1], 1)
    assert registered_image_nocrop.shape[1:] == (2048, 2048)
    assert unregistered_image_nocrop.shape[1:] == (2048, 2048)
    assert registered_image_crop.shape[1:] == (512, 512)
    assert unregistered_image_crop.shape[1:] == (512, 512)
@pytest.mark.usefixtures("disk_im_gry")
def test_wsireg_run_reg_with_flip_crop(data_out_dir, disk_im_gry):
    """Crop-to-bbox combined with a horizontal flip preprocessing step still
    yields full-size vs. bbox-size outputs as in the crop-only case."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = str(disk_im_gry)
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={
            "mask_bbox": [512, 512, 512, 512],
            "flip": "h",
            "crop_to_mask_bbox": True,
        },
    )
    wsi_reg.add_reg_path("mod1", "mod2", reg_params=["rigid_test"])
    wsi_reg.register_images()
    # first pass: transform back to the original (uncropped) extent
    im_fps = wsi_reg.transform_images(
        transform_non_reg=True, to_original_size=True
    )
    registered_image_nocrop = reg_image_loader(im_fps[0], 1)
    unregistered_image_nocrop = reg_image_loader(im_fps[1], 1)
    # second pass: keep the cropped (bbox) extent
    im_fps = wsi_reg.transform_images(
        transform_non_reg=True, to_original_size=False
    )
    wsi_reg.save_transformations()
    registered_image_crop = reg_image_loader(im_fps[0], 1)
    unregistered_image_crop = reg_image_loader(im_fps[1], 1)
    assert registered_image_nocrop.shape[1:] == (2048, 2048)
    assert unregistered_image_nocrop.shape[1:] == (2048, 2048)
    assert registered_image_crop.shape[1:] == (512, 512)
    assert unregistered_image_crop.shape[1:] == (512, 512)
@pytest.mark.usefixtures("disk_im_gry")
def test_wsireg_run_reg_with_crop_merge(data_out_dir, disk_im_gry):
    """Merged output of three modalities registered to a cropped target is
    full-size when restored to original extent and bbox-size otherwise."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = str(disk_im_gry)
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_modality(
        "mod3",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={
            "mask_bbox": [512, 512, 512, 512],
            "crop_to_mask_bbox": True,
        },
    )
    wsi_reg.add_reg_path("mod1", "mod3", reg_params=["rigid_test"])
    wsi_reg.add_reg_path("mod2", "mod3", reg_params=["rigid_test"])
    wsi_reg.register_images()
    wsi_reg.add_merge_modalities("merge", ["mod1", "mod2", "mod3"])
    # first pass: transform back to the original (uncropped) extent
    im_fps = wsi_reg.transform_images(
        transform_non_reg=True, to_original_size=True
    )
    registered_image_nocrop = reg_image_loader(im_fps[0], 1)
    # second pass: keep the cropped (bbox) extent
    im_fps = wsi_reg.transform_images(
        transform_non_reg=True, to_original_size=False
    )
    wsi_reg.save_transformations()
    registered_image_crop = reg_image_loader(im_fps[0], 1)
    assert registered_image_nocrop.shape[1:] == (2048, 2048)
    assert registered_image_crop.shape[1:] == (512, 512)
def test_wsireg_run_reg_wmerge(data_out_dir, disk_im_gry):
    """Two single-channel modalities merged after registration produce one
    2-channel output file."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = str(disk_im_gry)
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_reg_path(
        "mod1", "mod2", reg_params=["rigid_test", "affine_test"]
    )
    wsi_reg.add_merge_modalities("test_merge", ["mod1", "mod2"])
    wsi_reg.register_images()
    wsi_reg.save_transformations()
    im_fps = wsi_reg.transform_images(transform_non_reg=True)
    merged_im = reg_image_loader(im_fps[0], 0.65)
    # Path.exists() already returns a bool; comparing `is True` is redundant
    assert Path(im_fps[0]).exists()
    assert merged_im.shape == (2, 2048, 2048)
def test_wsireg_run_reg_wmerge_and_indiv(data_out_dir, disk_im_gry):
    """With a partial merge (mod1+mod2) and `remove_merged=False`, both the
    merged file and the individual outputs are written."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = str(disk_im_gry)
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_modality(
        "mod3",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_reg_path(
        "mod2", "mod1", reg_params=["rigid_test", "affine_test"]
    )
    wsi_reg.add_reg_path(
        "mod3", "mod1", reg_params=["rigid_test", "affine_test"]
    )
    wsi_reg.add_merge_modalities("test_merge", ["mod1", "mod2"])
    wsi_reg.register_images()
    im_fps = wsi_reg.transform_images(
        remove_merged=False, transform_non_reg=True
    )
    merged_im = reg_image_loader(im_fps[-2], 0.65)
    wsi_reg.save_transformations()
    assert len(im_fps) == 4
    # Path.exists() already returns a bool; comparing `is True` is redundant
    assert Path(im_fps[0]).exists()
    assert Path(im_fps[1]).exists()
    assert Path(im_fps[2]).exists()
    assert Path(im_fps[3]).exists()
    assert merged_im.shape == (2, 2048, 2048)
def test_wsireg_run_reg_wattachment(data_out_dir, disk_im_gry):
    """Attachment images follow their host modality's transform: an image
    attached to the moving modality matches its registered output, and one
    attached to the fixed modality passes through unchanged."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    im1 = np.random.randint(0, 255, (2048, 2048), dtype=np.uint16)
    im2 = np.random.randint(0, 255, (2048, 2048), dtype=np.uint16)
    wsi_reg.add_modality(
        "mod1",
        im1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_modality(
        "mod2",
        im2,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_attachment_images("mod2", "attached", im2, image_res=0.65)
    wsi_reg.add_attachment_images("mod1", "attached2", im1, image_res=0.65)
    wsi_reg.add_reg_path(
        "mod2", "mod1", reg_params=["rigid_test", "affine_test"]
    )
    wsi_reg.register_images()
    im_fps = wsi_reg.transform_images(transform_non_reg=False)
    wsi_reg.save_transformations()
    regim = reg_image_loader(im_fps[0], 0.65)
    attachim = reg_image_loader(im_fps[1], 0.65)
    attachim2 = reg_image_loader(im_fps[2], 0.65)
    assert np.array_equal(
        np.squeeze(regim.dask_image.compute()),
        np.squeeze(attachim.dask_image.compute()),
    )
    assert np.array_equal(
        np.squeeze(im1), np.squeeze(attachim2.dask_image.compute())
    )
def test_wsireg_run_reg_wattachment_ds2(data_out_dir, disk_im_gry):
    """Same attachment-image guarantees as test_wsireg_run_reg_wattachment,
    but with 2x downsampling applied during registration preprocessing."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    im1 = np.random.randint(0, 255, (2048, 2048), dtype=np.uint16)
    im2 = np.random.randint(0, 255, (2048, 2048), dtype=np.uint16)
    wsi_reg.add_modality(
        "mod1",
        im1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"downsampling": 2},
    )
    wsi_reg.add_modality(
        "mod2",
        im2,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"downsampling": 2},
    )
    wsi_reg.add_attachment_images("mod2", "attached", im2, image_res=0.65)
    wsi_reg.add_attachment_images("mod1", "attached2", im1, image_res=0.65)
    wsi_reg.add_reg_path(
        "mod2", "mod1", reg_params=["rigid_test", "affine_test"]
    )
    wsi_reg.register_images()
    im_fps = wsi_reg.transform_images(transform_non_reg=False)
    wsi_reg.save_transformations()
    regim = reg_image_loader(im_fps[0], 0.65)
    attachim = reg_image_loader(im_fps[1], 0.65)
    attachim2 = reg_image_loader(im_fps[2], 0.65)
    assert np.array_equal(
        np.squeeze(regim.dask_image.compute()),
        np.squeeze(attachim.dask_image.compute()),
    )
    assert np.array_equal(
        np.squeeze(im1), np.squeeze(attachim2.dask_image.compute())
    )
@pytest.mark.usefixtures("im_gry_np")
def test_wsireg_run_reg_shapes(data_out_dir, im_gry_np):
    """Attached shape sets are transformed and written as GeoJSON next to
    the transformed image outputs."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = im_gry_np
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_reg_path(
        "mod1", "mod2", reg_params=["rigid_test", "affine_test"]
    )
    wsi_reg.add_attachment_shapes("mod1", "shapeset", GEOJSON_FP)
    wsi_reg.register_images()
    wsi_reg.transform_shapes()
    wsi_reg.save_transformations()
    im_fps = wsi_reg.transform_images(transform_non_reg=False)
    gj_files = sorted(Path(im_fps[0]).parent.glob("*.geojson"))
    # Path.exists() already returns a bool; comparing `is True` is redundant
    assert Path(im_fps[0]).exists()
    assert len(gj_files) > 0
@pytest.mark.usefixtures("disk_im_gry")
def test_wsireg_run_reg_changeres(data_out_dir, disk_im_gry):
    """Halving the output resolution (0.65 -> 0.325 um/px) doubles the
    transformed image's pixel dimensions (2048 -> 4096)."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = str(disk_im_gry)
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        output_res=0.325,
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_reg_path(
        "mod1", "mod2", reg_params=["rigid_test", "affine_test"]
    )
    wsi_reg.register_images()
    im_fps = wsi_reg.transform_images(transform_non_reg=False)
    regim = reg_image_loader(im_fps[0], 0.325)
    assert regim.shape[1:] == (4096, 4096)
@pytest.mark.usefixtures("disk_im_gry")
def test_wsireg_run_reg_downsampling_m1(data_out_dir, disk_im_gry):
    """Downsampling the moving modality for registration does not change the
    full-resolution size of the transformed output."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = str(disk_im_gry)
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"downsampling": 2},
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_reg_path(
        "mod1", "mod2", reg_params=["rigid_test", "affine_test"]
    )
    wsi_reg.register_images()
    im_fps = wsi_reg.transform_images(transform_non_reg=False)
    regim = reg_image_loader(im_fps[0], 0.65)
    assert regim.shape[1:] == (2048, 2048)
@pytest.mark.usefixtures("disk_im_gry")
def test_wsireg_run_reg_downsampling_m1_prepro(data_out_dir, disk_im_gry):
    """Downsampling plus a rotation preprocessing step on the moving modality
    still yields a full-resolution transformed output."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = str(disk_im_gry)
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"downsampling": 2, "rot_cc": 90},
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_reg_path(
        "mod1", "mod2", reg_params=["rigid_test", "affine_test"]
    )
    wsi_reg.register_images()
    im_fps = wsi_reg.transform_images(transform_non_reg=False)
    regim = reg_image_loader(im_fps[0], 0.65)
    assert regim.shape[1:] == (2048, 2048)
@pytest.mark.usefixtures("disk_im_gry")
def test_wsireg_run_reg_downsampling_m1m2(data_out_dir, disk_im_gry):
    """Downsampling both modalities during registration still yields a
    full-resolution transformed output."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = str(disk_im_gry)
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"downsampling": 2},
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"downsampling": 2},
    )
    wsi_reg.add_reg_path("mod1", "mod2", reg_params=["rigid_test"])
    wsi_reg.register_images()
    im_fps = wsi_reg.transform_images(transform_non_reg=False)
    regim = reg_image_loader(im_fps[0], 0.65)
    assert regim.shape[1:] == (2048, 2048)
@pytest.mark.usefixtures("disk_im_gry")
def test_wsireg_run_reg_downsampling_m1m2_changeores(
    data_out_dir, disk_im_gry
):
    """Doubling the output resolution (0.65 -> 1.3 um/px) halves the output
    pixel dimensions, independent of registration downsampling."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = str(disk_im_gry)
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"downsampling": 2},
        output_res=(1.3, 1.3),
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"downsampling": 2},
    )
    wsi_reg.add_reg_path("mod1", "mod2", reg_params=["rigid_test"])
    wsi_reg.register_images()
    im_fps = wsi_reg.transform_images(transform_non_reg=False)
    regim = reg_image_loader(im_fps[0], 0.65)
    assert regim.shape[1:] == (1024, 1024)
@pytest.mark.usefixtures("disk_im_gry")
def test_wsireg_run_reg_downsampling_m2_prepro(data_out_dir, disk_im_gry):
    """Rotation + downsampling preprocessing on the fixed modality: the
    non-registered (target) output is still written at full resolution."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = str(disk_im_gry)
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"downsampling": 2},
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"rot_cc": 90, "downsampling": 2},
    )
    wsi_reg.add_reg_path("mod1", "mod2", reg_params=["rigid_test"])
    wsi_reg.register_images()
    im_fps = wsi_reg.transform_images(transform_non_reg=True)
    # index 1 is the non-registered (fixed) modality's output
    regim = reg_image_loader(im_fps[1], 0.65)
    assert regim.shape[1:] == (2048, 2048)
@pytest.mark.usefixtures("disk_im_gry")
def test_wsireg_run_reg_downsampling_m1m2_merge(data_out_dir, disk_im_gry):
    """Merging two downsampled-and-rotated modalities produces a 2-channel
    full-resolution output."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = str(disk_im_gry)
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"downsampling": 2, "rot_cc": 90},
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"rot_cc": 90, "downsampling": 2},
    )
    wsi_reg.add_reg_path("mod1", "mod2", reg_params=["rigid_test"])
    wsi_reg.add_merge_modalities("mod12-merge", ["mod1", "mod2"])
    wsi_reg.register_images()
    im_fps = wsi_reg.transform_images(
        transform_non_reg=True, remove_merged=True
    )
    regim = reg_image_loader(im_fps[0], 0.65)
    assert regim.shape == (2, 2048, 2048)
@pytest.mark.usefixtures("disk_im_gry")
def test_wsireg_run_reg_downsampling_m1m2_merge_no_prepro(
    data_out_dir, disk_im_gry
):
    """Merging two downsampled modalities (no other preprocessing) produces
    a 2-channel full-resolution output."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = str(disk_im_gry)
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"downsampling": 2},
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"downsampling": 2},
    )
    wsi_reg.add_reg_path("mod1", "mod2", reg_params=["rigid_test"])
    wsi_reg.add_merge_modalities("mod12-merge", ["mod1", "mod2"])
    wsi_reg.register_images()
    im_fps = wsi_reg.transform_images(
        transform_non_reg=False, remove_merged=True
    )
    regim = reg_image_loader(im_fps[0], 0.65)
    assert regim.shape == (2, 2048, 2048)
@pytest.mark.usefixtures("disk_im_gry")
def test_wsireg_run_reg_downsampling_m1m2_merge_ds_attach(
    data_out_dir, disk_im_gry
):
    """A merge including an attachment modality yields a 3-channel output
    whose OME-XML metadata records the correct size and pixel spacing."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = str(disk_im_gry)
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"downsampling": 2},
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"downsampling": 2},
    )
    wsi_reg.add_attachment_images(
        "mod2",
        "mod3",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_reg_path("mod1", "mod2", reg_params=["rigid_test"])
    wsi_reg.add_merge_modalities("mod12-merge", ["mod1", "mod2", "mod3"])
    wsi_reg.register_images()
    im_fps = wsi_reg.transform_images(
        transform_non_reg=False, remove_merged=True
    )
    regim = reg_image_loader(im_fps[0], 0.65)
    ome_data = from_xml(TiffFile(im_fps[0]).ome_metadata)
    assert regim.shape == (3, 2048, 2048)
    assert ome_data.images[0].pixels.physical_size_x == 0.65
    assert ome_data.images[0].pixels.physical_size_y == 0.65
    assert ome_data.images[0].pixels.size_x == 2048
    assert ome_data.images[0].pixels.size_y == 2048
    assert ome_data.images[0].pixels.size_c == 3
@pytest.mark.usefixtures("disk_im_gry")
def test_wsireg_run_reg_downsampling_from_cache(data_out_dir, disk_im_gry):
    """Re-running an identical project reloads cached registration data and
    produces outputs with the same shapes as the first (uncached) run."""
    pstr = gen_project_name_str()
    wsi_reg = WsiReg2D(pstr, str(data_out_dir))
    img_fp1 = str(disk_im_gry)
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"downsampling": 2},
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"downsampling": 2},
    )
    wsi_reg.add_attachment_images(
        "mod2",
        "mod3",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_reg_path("mod1", "mod2", reg_params=["rigid_test"])
    wsi_reg.register_images()
    im_fps_nocache = wsi_reg.transform_images(
        transform_non_reg=True, remove_merged=True
    )
    regim_nocache = reg_image_loader(im_fps_nocache[0], 0.65)
    regim_nocache_attach = reg_image_loader(im_fps_nocache[1], 0.65)
    regim_nocache_br = reg_image_loader(im_fps_nocache[2], 0.65)
    # second project with the same name: registration should load from cache
    wsi_reg2 = WsiReg2D(pstr, str(data_out_dir))
    img_fp1 = str(disk_im_gry)
    wsi_reg2.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"downsampling": 2},
    )
    wsi_reg2.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"downsampling": 2},
    )
    wsi_reg2.add_attachment_images(
        "mod2",
        "mod3",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg2.add_reg_path("mod1", "mod2", reg_params=["rigid_test"])
    wsi_reg2.register_images()
    # BUG FIX: previously this called `wsi_reg.transform_images` again, so
    # the cached (second) project was never exercised
    im_fps_cache = wsi_reg2.transform_images(
        transform_non_reg=True, remove_merged=True
    )
    regim_cache = reg_image_loader(im_fps_cache[0], 0.65)
    regim_cache_attach = reg_image_loader(im_fps_cache[1], 0.65)
    regim_cache_br = reg_image_loader(im_fps_cache[2], 0.65)
    assert regim_cache.shape == regim_nocache.shape
    assert regim_cache_br.shape == regim_nocache_br.shape
    assert regim_cache_attach.shape == regim_nocache_attach.shape
def test_wsireg_remove_modality(data_out_dir):
    """remove_modality drops the modality together with its registrations,
    attachment images/shapes, and any merge plan that referenced it."""
    pstr = gen_project_name_str()
    wsi_reg = WsiReg2D(pstr, str(data_out_dir))
    wsi_reg.add_modality("preAF-IMS", "", 0.65)
    wsi_reg.add_modality("preAF-MxIF", "", 0.65)
    wsi_reg.add_modality("mxif1", "", 0.65)
    wsi_reg.add_modality("mxif2", "", 0.65)
    wsi_reg.add_modality("mxif3", "", 0.65)
    wsi_reg.add_modality("pas", "", 0.65)
    wsi_reg.add_reg_path("preAF-MxIF", "preAF-IMS", reg_params=["rigid"])
    wsi_reg.add_reg_path(
        "mxif1", "preAF-IMS", thru_modality="preAF-MxIF", reg_params=["rigid"]
    )
    wsi_reg.add_reg_path(
        "mxif2", "preAF-IMS", thru_modality="mxif1", reg_params=["rigid"]
    )
    wsi_reg.add_reg_path(
        "mxif3", "preAF-IMS", thru_modality="mxif1", reg_params=["rigid"]
    )
    wsi_reg.add_reg_path("pas", "preAF-IMS", reg_params=["rigid"])
    wsi_reg.add_attachment_images("pas", "pas-attach", "", 0.65)
    wsi_reg.add_attachment_shapes("pas", "pas-shape", "")
    wsi_reg.add_merge_modalities("mxif-merge-1-2", ["mxif1", "mxif2"])
    assert len(wsi_reg.merge_modalities.keys()) == 1
    wsi_reg.remove_modality("pas")
    assert wsi_reg.n_registrations == 4
    assert wsi_reg.n_modalities == 6
    # BUG FIX: the modality was added as lowercase "pas"; the old assertion
    # checked "PAS", which passed vacuously whether or not removal worked
    assert "pas" not in wsi_reg.reg_paths.keys()
    wsi_reg.remove_modality("mxif1")
    assert wsi_reg.n_registrations == 1
    assert wsi_reg.n_modalities == 5
    assert "mxif1" not in wsi_reg.reg_paths.keys()
    assert len(wsi_reg.merge_modalities.keys()) == 0
    wsi_reg.remove_modality("pas-attach")
    assert wsi_reg.n_modalities == 4
    assert "pas-attach" not in wsi_reg.modality_names
    assert "pas-attach" not in wsi_reg.attachment_images.keys()
    wsi_reg.remove_modality("pas-shape")
    assert len(wsi_reg.shape_set_names) == 0
    assert wsi_reg.shape_sets.get("pas-shape") is None
@pytest.mark.usefixtures("im_mch_np")
def test_wsireg_run_reg_w_override(data_out_dir, im_mch_np):
    """Per-path preprocessing overrides produce cached preprocessed images
    that differ from the modality's default preprocessed images."""
    wsi_reg = WsiReg2D(gen_project_name_str(), str(data_out_dir))
    img_fp1 = im_mch_np
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_modality(
        "mod3",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_modality(
        "mod4",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_reg_path("mod1", "mod2", reg_params=["rigid_test"])
    wsi_reg.add_reg_path("mod4", "mod3", reg_params=["rigid_test"])
    wsi_reg.add_reg_path(
        "mod2",
        "mod3",
        reg_params=["rigid_test"],
        override_prepro={
            "source": {"ch_indices": [1]},
            "target": {"ch_indices": [1]},
        },
    )
    wsi_reg.register_images()
    # override-preprocessed cache files for the mod2<->mod3 path
    or_mod2 = imread(wsi_reg.image_cache / "mod2-mod3-override_prepro.tiff")
    or_mod3 = imread(wsi_reg.image_cache / "mod3-mod2-override_prepro.tiff")
    pp_mod2 = imread(wsi_reg.image_cache / "mod2_prepro.tiff")
    pp_mod3 = imread(wsi_reg.image_cache / "mod3_prepro.tiff")
    assert not np.array_equal(or_mod2, pp_mod2)
    assert not np.array_equal(or_mod3, pp_mod3)
@pytest.mark.usefixtures("im_mch_np")
def test_wsireg_run_reg_reload_from_cache(data_out_dir, im_mch_np):
    """Re-running the same project with different preprocessing regenerates
    the cached preprocessed images rather than reusing stale ones."""
    img_fp1 = im_mch_np
    output_dir = str(data_out_dir)
    pname = gen_project_name_str()
    wsi_reg = WsiReg2D(pname, output_dir)
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
    )
    wsi_reg.add_reg_path("mod1", "mod2", reg_params=["rigid_test"])
    wsi_reg.register_images()
    pp_mod1_r1 = imread(wsi_reg.image_cache / "mod1_prepro.tiff")
    pp_mod2_r1 = imread(wsi_reg.image_cache / "mod2_prepro.tiff")
    # run registration again, loading data from cache
    wsi_reg = WsiReg2D(pname, output_dir)
    wsi_reg.add_modality(
        "mod1",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"ch_indices": [1]},
    )
    wsi_reg.add_modality(
        "mod2",
        img_fp1,
        0.65,
        channel_names=["test"],
        channel_colors=["red"],
        preprocessing={"ch_indices": [1]},
    )
    wsi_reg.add_reg_path("mod1", "mod2", reg_params=["rigid_test"])
    wsi_reg.register_images()
    pp_mod1_r2 = imread(wsi_reg.image_cache / "mod1_prepro.tiff")
    pp_mod2_r2 = imread(wsi_reg.image_cache / "mod2_prepro.tiff")
    # changed preprocessing must invalidate the cached images
    assert not np.array_equal(pp_mod1_r1, pp_mod1_r2)
    assert not np.array_equal(pp_mod2_r1, pp_mod2_r2)
# --- wsireg-master/tests/test_console_scripts.py ---
import os
from pathlib import Path
HERE = os.path.dirname(__file__)
FIXTURES_DIR = os.path.join(HERE, "fixtures")
PRIVATE_DIR = os.path.join(HERE, "private_data")
config1_fp = str(Path(FIXTURES_DIR) / "test-config1-cmd-line.yaml")
def test_wsireg2d_entrypoint():
    """The installed wsireg2d console script should run --help cleanly."""
    status = os.system('wsireg2d --help')
    assert status == 0
def test_wsireg2d_run():
    """Run a full registration from a YAML config in --testing mode."""
    cmd = f'wsireg2d "{str(config1_fp)}" --testing'
    assert os.system(cmd) == 0
# --- wsireg-master/tests/fixtures/im_fixtures.py ---
import pytest
import os
import dask.array as da
import numpy as np
import zarr
from tifffile import TiffWriter, imwrite
HERE = os.path.dirname(__file__)
GEOJSON_FP = os.path.join(HERE, "polygons.geojson")
@pytest.fixture
def im_gry_np():
    """Random single-channel uint16 image, 2048 x 2048."""
    shape = (2048, 2048)
    return np.random.randint(0, 255, shape, dtype=np.uint16)
@pytest.fixture
def mask_np():
    """Binary uint8 mask: a centered 1536 x 1536 foreground square (255)
    inside a 2048 x 2048 zero background."""
    mask = np.zeros((2048, 2048), dtype=np.uint8)
    fg = slice(256, 1792)
    mask[fg, fg] = 255
    return mask
@pytest.fixture
def mask_geojson():
    """Path to the bundled polygons.geojson fixture file."""
    return GEOJSON_FP
@pytest.fixture
def im_mch_np():
    """Random 3-channel (CYX) uint16 image, 3 x 2048 x 2048."""
    shape = (3, 2048, 2048)
    return np.random.randint(0, 255, shape, dtype=np.uint16)
@pytest.fixture
def im_rgb_np():
    """Random interleaved RGB uint8 image, 2048 x 2048 x 3."""
    shape = (2048, 2048, 3)
    return np.random.randint(0, 255, shape, dtype=np.uint8)
@pytest.fixture
def im_rgb_np_uneven():
    """Random RGB uint8 image with unequal height/width, 3096 x 2048 x 3."""
    shape = (3096, 2048, 3)
    return np.random.randint(0, 255, shape, dtype=np.uint8)
@pytest.fixture
def dask_im_gry_np():
    """Random single-channel uint16 image wrapped in a dask array."""
    arr = np.random.randint(0, 255, (2048, 2048), dtype=np.uint16)
    return da.from_array(arr)
@pytest.fixture
def dask_im_mch_np():
    """Random 3-channel uint16 image wrapped in a dask array."""
    arr = np.random.randint(0, 255, (3, 2048, 2048), dtype=np.uint16)
    return da.from_array(arr)
@pytest.fixture
def dask_im_rgb_np():
    """Random interleaved RGB uint8 image wrapped in a dask array."""
    arr = np.random.randint(0, 255, (2048, 2048, 3), dtype=np.uint8)
    return da.from_array(arr)
@pytest.fixture
def zarr_im_gry_np():
    """Random single-channel uint16 image stored as a zarr array."""
    arr = np.random.randint(0, 255, (2048, 2048), dtype=np.uint16)
    return zarr.array(arr)
@pytest.fixture
def zarr_im_mch_np():
    """Random 3-channel uint16 image stored as a zarr array."""
    arr = np.random.randint(0, 255, (3, 2048, 2048), dtype=np.uint16)
    return zarr.array(arr)
@pytest.fixture
def zarr_im_rgb_np():
    """Random interleaved RGB uint8 image stored as a zarr array."""
    arr = np.random.randint(0, 255, (2048, 2048, 3), dtype=np.uint8)
    return zarr.array(arr)
@pytest.fixture
def disk_im_mch(tmpdir_factory, im_mch_np):
    """Multichannel fixture image written to a tiled TIFF on disk."""
    target = tmpdir_factory.mktemp("image").join("image_fp_mch.tiff")
    imwrite(target, im_mch_np, tile=(256, 256))
    return target
@pytest.fixture
def disk_im_mch_notile(tmpdir_factory, im_mch_np):
    """Multichannel fixture image written to disk with explicit
    MINISBLACK photometric interpretation."""
    target = tmpdir_factory.mktemp("image").join("image_fp_mch_nt.tiff")
    imwrite(target, im_mch_np, photometric="MINISBLACK", tile=(256, 256))
    return target
@pytest.fixture
def disk_im_rgb(tmpdir_factory, im_rgb_np):
    """RGB fixture image written to a tiled TIFF on disk."""
    target = tmpdir_factory.mktemp("image").join("image_fp_rgb.tiff")
    imwrite(target, im_rgb_np, tile=(256, 256))
    return target
@pytest.fixture
def disk_im_gry(tmpdir_factory, im_gry_np):
    """Grayscale fixture image written to a tiled TIFF on disk."""
    target = tmpdir_factory.mktemp("image").join("image_fp_gry.tiff")
    imwrite(target, im_gry_np, tile=(256, 256))
    return target
@pytest.fixture
def disk_im_mch_pyr(tmpdir_factory):
    """Pyramidal multichannel TIFF on disk: per-channel base level plus
    two 2x-downsampled sub-resolution levels (subifds)."""
    out_im = tmpdir_factory.mktemp("image").join("image_fp_mch_pyr.tiff")
    n_sub = 2
    full_im = np.random.randint(0, 255, (3, 2048, 2048), dtype=np.uint16)
    options = dict(
        tile=(256, 256),
        compression="deflate",
        photometric="minisblack",
        metadata=None,
    )
    with TiffWriter(out_im) as tif:
        for ch in range(full_im.shape[0]):
            tif.write(full_im[ch, :, :], subifds=n_sub, **options)
            # successive 2x decimation for each sub-resolution level
            level = full_im[ch]
            for _ in range(n_sub):
                level = level[::2, ::2]
                tif.write(level, **options, subfiletype=1)
    return out_im
@pytest.fixture
def disk_im_rgb_pyr(tmpdir_factory):
    """Pyramidal RGB TIFF on disk: base level plus two 2x-downsampled
    sub-resolution levels (subifds)."""
    out_im = tmpdir_factory.mktemp("image").join("image_fp_rgb_pyr.tiff")
    n_sub = 2
    full_im = np.random.randint(0, 255, (2048, 2048, 3), dtype=np.uint8)
    options = dict(
        tile=(128, 128),
        compression="deflate",
        photometric="rgb",
        metadata=None,
    )
    with TiffWriter(out_im) as tif:
        tif.write(full_im, subifds=n_sub, **options)
        # successive 2x decimation of rows/cols; channels untouched
        level = full_im
        for _ in range(n_sub):
            level = level[::2, ::2, :]
            tif.write(level, **options, subfiletype=1)
    return out_im
@pytest.fixture
def disk_im_gry_pyr(tmpdir_factory):
    """Pyramidal grayscale TIFF on disk: base level plus two
    2x-downsampled sub-resolution levels (subifds)."""
    out_im = tmpdir_factory.mktemp("image").join("image_fp_gry_pyr.tiff")
    n_sub = 2
    full_im = np.random.randint(0, 255, (2048, 2048), dtype=np.uint16)
    options = dict(
        tile=(256, 256),
        compression="deflate",
        photometric="minisblack",
        metadata=None,
    )
    with TiffWriter(out_im) as tif:
        tif.write(full_im, subifds=n_sub, **options)
        # successive 2x decimation for each sub-resolution level
        level = full_im
        for _ in range(n_sub):
            level = level[::2, ::2]
            tif.write(level, **options, subfiletype=1)
    return out_im
# --- wsireg-master/tests/fixtures/transform_fixtures.py ---
import pickle
import numpy as np
import pytest
@pytest.fixture
def complex_transform():
    """Transform-sequence fixture with elastix-style parameter maps.

    Keys: 'initial' (euler + affine pre-transforms) and registration
    stages '0' (euler + bspline, target 1024x1024) and '1'
    (euler + affine, target 1024x1024). BSpline parameters are random
    on each use, so this fixture is not value-deterministic.
    """
    return {
        'initial': [
            {
                'Transform': ['EulerTransform'],
                'NumberOfParameters': ['3'],
                'TransformParameters': [
                    '1.5707963267948966',
                    '601.5749999999998',
                    '-601.5749999999998',
                ],
                'InitialTransformParametersFileName': ['NoInitialTransform'],
                'HowToCombineTransforms': ['Compose'],
                'FixedImageDimension': ['2'],
                'MovingImageDimension': ['2'],
                'FixedInternalImagePixelType': ['float'],
                'MovingInternalImagePixelType': ['float'],
                'Size': ['6769', '7386'],
                'Index': ['0', '0'],
                'Spacing': ['1.95', '1.95'],
                'Origin': ['0.0000', '0.0000'],
                'Direction': [
                    '1.0000000000',
                    '0.0000000000',
                    '0.0000000000',
                    '1.0000000000',
                ],
                'UseDirectionCosines': ['true'],
                'CenterOfRotationPoint': ['6598.8', '7200.375'],
                'ResampleInterpolator': ['FinalNearestNeighborInterpolator'],
                'Resampler': ['DefaultResampler'],
                'DefaultPixelValue': ['0.000000'],
                'ResultImageFormat': ['mha'],
                'ResultImagePixelType': ['float'],
                'CompressResultImage': ['true'],
            },
            {
                'Transform': ['AffineTransform'],
                'NumberOfParameters': ['6'],
                'TransformParameters': ['-1', '0', '0', '1', '0', '0'],
                'InitialTransformParametersFileName': ['NoInitialTransform'],
                'HowToCombineTransforms': ['Compose'],
                'FixedImageDimension': ['2'],
                'MovingImageDimension': ['2'],
                'FixedInternalImagePixelType': ['float'],
                'MovingInternalImagePixelType': ['float'],
                'Size': ['6769', '7386'],
                'Index': ['0', '0'],
                'Spacing': ['1.95', '1.95'],
                'Origin': ['0.0000', '0.0000'],
                'Direction': [
                    '1.0000000000',
                    '0.0000000000',
                    '0.0000000000',
                    '1.0000000000',
                ],
                'UseDirectionCosines': ['true'],
                'CenterOfRotationPoint': ['6598.8', '7200.375'],
                'ResampleInterpolator': ['FinalNearestNeighborInterpolator'],
                'Resampler': ['DefaultResampler'],
                'DefaultPixelValue': ['0.000000'],
                'ResultImageFormat': ['mha'],
                'ResultImagePixelType': ['float'],
                'CompressResultImage': ['true'],
            },
        ],
        '0': [
            {
                'CenterOfRotationPoint': ['6622.2', '7784.4'],
                'CompressResultImage': ['true'],
                'DefaultPixelValue': ['0'],
                'Direction': ['1', '0', '0', '1'],
                'FixedImageDimension': ['2'],
                'FixedInternalImagePixelType': ['float'],
                'HowToCombineTransforms': ['Compose'],
                'Index': ['0', '0'],
                'InitialTransformParametersFileName': ['NoInitialTransform'],
                'MovingImageDimension': ['2'],
                'MovingInternalImagePixelType': ['float'],
                'NumberOfParameters': ['3'],
                'Origin': ['0', '0'],
                'ResampleInterpolator': ['FinalNearestNeighborInterpolator'],
                'Resampler': ['DefaultResampler'],
                'ResultImageFormat': ['mha'],
                'ResultImagePixelType': ['float'],
                'Size': ['6793', '7985'],
                'Spacing': ['1.95', '1.95'],
                'Transform': ['EulerTransform'],
                'TransformParameters': [
                    '-0.08284214564408873',
                    '-1306.3427228105772',
                    '-991.9182706690468',
                ],
                'UseDirectionCosines': ['true'],
            },
            {
                'BSplineTransformSplineOrder': ['3'],
                'CompressResultImage': ['true'],
                'DefaultPixelValue': ['0'],
                'Direction': ['1', '0', '0', '1'],
                'FixedImageDimension': ['2'],
                'FixedInternalImagePixelType': ['float'],
                'GridDirection': ['1', '0', '0', '1'],
                'GridIndex': ['0', '0'],
                'GridOrigin': ['-2067.5640452749403', '-1636.5330828131607'],
                'GridSize': ['136', '159'],
                'GridSpacing': ['109.38401959206463', '106.69639002714067'],
                'HowToCombineTransforms': ['Compose'],
                'Index': ['0', '0'],
                'InitialTransformParametersFileName': ['0'],
                'MovingImageDimension': ['2'],
                'MovingInternalImagePixelType': ['float'],
                'NumberOfParameters': ['43248'],
                'Origin': ['0', '0'],
                'ResampleInterpolator': ['FinalNearestNeighborInterpolator'],
                'Resampler': ['DefaultResampler'],
                'ResultImageFormat': ['mha'],
                'ResultImagePixelType': ['float'],
                'Size': ['1024', '1024'],
                'Spacing': ['1.95', '1.95'],
                'Transform': ['BSplineTransform'],
                'TransformParameters': np.random.random((43248,)).tolist(),
                'UseCyclicTransform': ['false'],
                'UseDirectionCosines': ['true'],
            },
        ],
        '1': [
            {
                'CenterOfRotationPoint': ['5559', '7609'],
                'CompressResultImage': ['true'],
                'DefaultPixelValue': ['0'],
                'Direction': ['1', '0', '0', '1'],
                'FixedImageDimension': ['2'],
                'FixedInternalImagePixelType': ['float'],
                'HowToCombineTransforms': ['Compose'],
                'Index': ['0', '0'],
                'InitialTransformParametersFileName': ['NoInitialTransform'],
                'MovingImageDimension': ['2'],
                'MovingInternalImagePixelType': ['float'],
                'NumberOfParameters': ['3'],
                'Origin': ['0', '0'],
                'ResampleInterpolator': ['FinalNearestNeighborInterpolator'],
                'Resampler': ['DefaultResampler'],
                'ResultImageFormat': ['mha'],
                'ResultImagePixelType': ['float'],
                'Size': ['1024', '1024'],
                'Spacing': ['2', '2'],
                'Transform': ['EulerTransform'],
                'TransformParameters': [
                    '-0.0042257214326670706',
                    '1485.2882969318628',
                    '1562.9184348320616',
                ],
                'UseDirectionCosines': ['true'],
            },
            {
                'CenterOfRotationPoint': [
                    '7044.288296931862',
                    '9171.918434832063',
                ],
                'CompressResultImage': ['true'],
                'DefaultPixelValue': ['0'],
                'Direction': ['1', '0', '0', '1'],
                'FixedImageDimension': ['2'],
                'FixedInternalImagePixelType': ['float'],
                'HowToCombineTransforms': ['Compose'],
                'Index': ['0', '0'],
                'InitialTransformParametersFileName': ['0'],
                'MovingImageDimension': ['2'],
                'MovingInternalImagePixelType': ['float'],
                'NumberOfParameters': ['6'],
                'Origin': ['0', '0'],
                'ResampleInterpolator': ['FinalNearestNeighborInterpolator'],
                'Resampler': ['DefaultResampler'],
                'ResultImageFormat': ['mha'],
                'ResultImagePixelType': ['float'],
                'Size': ['1024', '1024'],
                'Spacing': ['2', '2'],
                'Transform': ['AffineTransform'],
                'TransformParameters': [
                    '0.9950415227069288',
                    '-0.00010257497254207552',
                    '-0.0011168508123054915',
                    '0.994578583104989',
                    '0.42502362300245433',
                    '-7.2389671174396275',
                ],
                'UseDirectionCosines': ['true'],
            },
        ],
    }
@pytest.fixture
def complex_transform_larger():
    """Variant of the complex transform sequence where stage '1' has a
    larger euler target extent ('Size': 2099 x 3099) while its final
    affine stage remains 1024x1024. BSpline parameters are random on
    each use, so this fixture is not value-deterministic.
    """
    return {
        'initial': [
            {
                'Transform': ['EulerTransform'],
                'NumberOfParameters': ['3'],
                'TransformParameters': [
                    '1.5707963267948966',
                    '601.5749999999998',
                    '-601.5749999999998',
                ],
                'InitialTransformParametersFileName': ['NoInitialTransform'],
                'HowToCombineTransforms': ['Compose'],
                'FixedImageDimension': ['2'],
                'MovingImageDimension': ['2'],
                'FixedInternalImagePixelType': ['float'],
                'MovingInternalImagePixelType': ['float'],
                'Size': ['6769', '7386'],
                'Index': ['0', '0'],
                'Spacing': ['1.95', '1.95'],
                'Origin': ['0.0000', '0.0000'],
                'Direction': [
                    '1.0000000000',
                    '0.0000000000',
                    '0.0000000000',
                    '1.0000000000',
                ],
                'UseDirectionCosines': ['true'],
                'CenterOfRotationPoint': ['6598.8', '7200.375'],
                'ResampleInterpolator': ['FinalNearestNeighborInterpolator'],
                'Resampler': ['DefaultResampler'],
                'DefaultPixelValue': ['0.000000'],
                'ResultImageFormat': ['mha'],
                'ResultImagePixelType': ['float'],
                'CompressResultImage': ['true'],
            },
            {
                'Transform': ['AffineTransform'],
                'NumberOfParameters': ['6'],
                'TransformParameters': ['-1', '0', '0', '1', '0', '0'],
                'InitialTransformParametersFileName': ['NoInitialTransform'],
                'HowToCombineTransforms': ['Compose'],
                'FixedImageDimension': ['2'],
                'MovingImageDimension': ['2'],
                'FixedInternalImagePixelType': ['float'],
                'MovingInternalImagePixelType': ['float'],
                'Size': ['6769', '7386'],
                'Index': ['0', '0'],
                'Spacing': ['1.95', '1.95'],
                'Origin': ['0.0000', '0.0000'],
                'Direction': [
                    '1.0000000000',
                    '0.0000000000',
                    '0.0000000000',
                    '1.0000000000',
                ],
                'UseDirectionCosines': ['true'],
                'CenterOfRotationPoint': ['6598.8', '7200.375'],
                'ResampleInterpolator': ['FinalNearestNeighborInterpolator'],
                'Resampler': ['DefaultResampler'],
                'DefaultPixelValue': ['0.000000'],
                'ResultImageFormat': ['mha'],
                'ResultImagePixelType': ['float'],
                'CompressResultImage': ['true'],
            },
        ],
        '0': [
            {
                'CenterOfRotationPoint': ['6622.2', '7784.4'],
                'CompressResultImage': ['true'],
                'DefaultPixelValue': ['0'],
                'Direction': ['1', '0', '0', '1'],
                'FixedImageDimension': ['2'],
                'FixedInternalImagePixelType': ['float'],
                'HowToCombineTransforms': ['Compose'],
                'Index': ['0', '0'],
                'InitialTransformParametersFileName': ['NoInitialTransform'],
                'MovingImageDimension': ['2'],
                'MovingInternalImagePixelType': ['float'],
                'NumberOfParameters': ['3'],
                'Origin': ['0', '0'],
                'ResampleInterpolator': ['FinalNearestNeighborInterpolator'],
                'Resampler': ['DefaultResampler'],
                'ResultImageFormat': ['mha'],
                'ResultImagePixelType': ['float'],
                'Size': ['6793', '7985'],
                'Spacing': ['1.95', '1.95'],
                'Transform': ['EulerTransform'],
                'TransformParameters': [
                    '-0.08284214564408873',
                    '-1306.3427228105772',
                    '-991.9182706690468',
                ],
                'UseDirectionCosines': ['true'],
            },
            {
                'BSplineTransformSplineOrder': ['3'],
                'CompressResultImage': ['true'],
                'DefaultPixelValue': ['0'],
                'Direction': ['1', '0', '0', '1'],
                'FixedImageDimension': ['2'],
                'FixedInternalImagePixelType': ['float'],
                'GridDirection': ['1', '0', '0', '1'],
                'GridIndex': ['0', '0'],
                'GridOrigin': ['-2067.5640452749403', '-1636.5330828131607'],
                'GridSize': ['136', '159'],
                'GridSpacing': ['109.38401959206463', '106.69639002714067'],
                'HowToCombineTransforms': ['Compose'],
                'Index': ['0', '0'],
                'InitialTransformParametersFileName': ['0'],
                'MovingImageDimension': ['2'],
                'MovingInternalImagePixelType': ['float'],
                'NumberOfParameters': ['43248'],
                'Origin': ['0', '0'],
                'ResampleInterpolator': ['FinalNearestNeighborInterpolator'],
                'Resampler': ['DefaultResampler'],
                'ResultImageFormat': ['mha'],
                'ResultImagePixelType': ['float'],
                'Size': ['1024', '1024'],
                'Spacing': ['1.95', '1.95'],
                'Transform': ['BSplineTransform'],
                'TransformParameters': np.random.random((43248,)).tolist(),
                'UseCyclicTransform': ['false'],
                'UseDirectionCosines': ['true'],
            },
        ],
        '1': [
            {
                'CenterOfRotationPoint': ['5559', '7609'],
                'CompressResultImage': ['true'],
                'DefaultPixelValue': ['0'],
                'Direction': ['1', '0', '0', '1'],
                'FixedImageDimension': ['2'],
                'FixedInternalImagePixelType': ['float'],
                'HowToCombineTransforms': ['Compose'],
                'Index': ['0', '0'],
                'InitialTransformParametersFileName': ['NoInitialTransform'],
                'MovingImageDimension': ['2'],
                'MovingInternalImagePixelType': ['float'],
                'NumberOfParameters': ['3'],
                'Origin': ['0', '0'],
                'ResampleInterpolator': ['FinalNearestNeighborInterpolator'],
                'Resampler': ['DefaultResampler'],
                'ResultImageFormat': ['mha'],
                'ResultImagePixelType': ['float'],
                'Size': ['2099', '3099'],
                'Spacing': ['2', '2'],
                'Transform': ['EulerTransform'],
                'TransformParameters': [
                    '-0.0042257214326670706',
                    '1485.2882969318628',
                    '1562.9184348320616',
                ],
                'UseDirectionCosines': ['true'],
            },
            {
                'CenterOfRotationPoint': [
                    '7044.288296931862',
                    '9171.918434832063',
                ],
                'CompressResultImage': ['true'],
                'DefaultPixelValue': ['0'],
                'Direction': ['1', '0', '0', '1'],
                'FixedImageDimension': ['2'],
                'FixedInternalImagePixelType': ['float'],
                'HowToCombineTransforms': ['Compose'],
                'Index': ['0', '0'],
                'InitialTransformParametersFileName': ['0'],
                'MovingImageDimension': ['2'],
                'MovingInternalImagePixelType': ['float'],
                'NumberOfParameters': ['6'],
                'Origin': ['0', '0'],
                'ResampleInterpolator': ['FinalNearestNeighborInterpolator'],
                'Resampler': ['DefaultResampler'],
                'ResultImageFormat': ['mha'],
                'ResultImagePixelType': ['float'],
                'Size': ['1024', '1024'],
                'Spacing': ['2', '2'],
                'Transform': ['AffineTransform'],
                'TransformParameters': [
                    '0.9950415227069288',
                    '-0.00010257497254207552',
                    '-0.0011168508123054915',
                    '0.994578583104989',
                    '0.42502362300245433',
                    '-7.2389671174396275',
                ],
                'UseDirectionCosines': ['true'],
            },
        ],
    }
@pytest.fixture
def complex_transform_larger_padded():
    """Variant of the larger complex transform sequence where stage '1'
    has a 2099 x 3099 euler target and a padded 3099 x 2099 final
    affine target. BSpline parameters are random on each use, so this
    fixture is not value-deterministic.
    """
    return {
        'initial': [
            {
                'Transform': ['EulerTransform'],
                'NumberOfParameters': ['3'],
                'TransformParameters': [
                    '1.5707963267948966',
                    '601.5749999999998',
                    '-601.5749999999998',
                ],
                'InitialTransformParametersFileName': ['NoInitialTransform'],
                'HowToCombineTransforms': ['Compose'],
                'FixedImageDimension': ['2'],
                'MovingImageDimension': ['2'],
                'FixedInternalImagePixelType': ['float'],
                'MovingInternalImagePixelType': ['float'],
                'Size': ['6769', '7386'],
                'Index': ['0', '0'],
                'Spacing': ['1.95', '1.95'],
                'Origin': ['0.0000', '0.0000'],
                'Direction': [
                    '1.0000000000',
                    '0.0000000000',
                    '0.0000000000',
                    '1.0000000000',
                ],
                'UseDirectionCosines': ['true'],
                'CenterOfRotationPoint': ['6598.8', '7200.375'],
                'ResampleInterpolator': ['FinalNearestNeighborInterpolator'],
                'Resampler': ['DefaultResampler'],
                'DefaultPixelValue': ['0.000000'],
                'ResultImageFormat': ['mha'],
                'ResultImagePixelType': ['float'],
                'CompressResultImage': ['true'],
            },
            {
                'Transform': ['AffineTransform'],
                'NumberOfParameters': ['6'],
                'TransformParameters': ['-1', '0', '0', '1', '0', '0'],
                'InitialTransformParametersFileName': ['NoInitialTransform'],
                'HowToCombineTransforms': ['Compose'],
                'FixedImageDimension': ['2'],
                'MovingImageDimension': ['2'],
                'FixedInternalImagePixelType': ['float'],
                'MovingInternalImagePixelType': ['float'],
                'Size': ['6769', '7386'],
                'Index': ['0', '0'],
                'Spacing': ['1.95', '1.95'],
                'Origin': ['0.0000', '0.0000'],
                'Direction': [
                    '1.0000000000',
                    '0.0000000000',
                    '0.0000000000',
                    '1.0000000000',
                ],
                'UseDirectionCosines': ['true'],
                'CenterOfRotationPoint': ['6598.8', '7200.375'],
                'ResampleInterpolator': ['FinalNearestNeighborInterpolator'],
                'Resampler': ['DefaultResampler'],
                'DefaultPixelValue': ['0.000000'],
                'ResultImageFormat': ['mha'],
                'ResultImagePixelType': ['float'],
                'CompressResultImage': ['true'],
            },
        ],
        '0': [
            {
                'CenterOfRotationPoint': ['6622.2', '7784.4'],
                'CompressResultImage': ['true'],
                'DefaultPixelValue': ['0'],
                'Direction': ['1', '0', '0', '1'],
                'FixedImageDimension': ['2'],
                'FixedInternalImagePixelType': ['float'],
                'HowToCombineTransforms': ['Compose'],
                'Index': ['0', '0'],
                'InitialTransformParametersFileName': ['NoInitialTransform'],
                'MovingImageDimension': ['2'],
                'MovingInternalImagePixelType': ['float'],
                'NumberOfParameters': ['3'],
                'Origin': ['0', '0'],
                'ResampleInterpolator': ['FinalNearestNeighborInterpolator'],
                'Resampler': ['DefaultResampler'],
                'ResultImageFormat': ['mha'],
                'ResultImagePixelType': ['float'],
                'Size': ['6793', '7985'],
                'Spacing': ['1.95', '1.95'],
                'Transform': ['EulerTransform'],
                'TransformParameters': [
                    '-0.08284214564408873',
                    '-1306.3427228105772',
                    '-991.9182706690468',
                ],
                'UseDirectionCosines': ['true'],
            },
            {
                'BSplineTransformSplineOrder': ['3'],
                'CompressResultImage': ['true'],
                'DefaultPixelValue': ['0'],
                'Direction': ['1', '0', '0', '1'],
                'FixedImageDimension': ['2'],
                'FixedInternalImagePixelType': ['float'],
                'GridDirection': ['1', '0', '0', '1'],
                'GridIndex': ['0', '0'],
                'GridOrigin': ['-2067.5640452749403', '-1636.5330828131607'],
                'GridSize': ['136', '159'],
                'GridSpacing': ['109.38401959206463', '106.69639002714067'],
                'HowToCombineTransforms': ['Compose'],
                'Index': ['0', '0'],
                'InitialTransformParametersFileName': ['0'],
                'MovingImageDimension': ['2'],
                'MovingInternalImagePixelType': ['float'],
                'NumberOfParameters': ['43248'],
                'Origin': ['0', '0'],
                'ResampleInterpolator': ['FinalNearestNeighborInterpolator'],
                'Resampler': ['DefaultResampler'],
                'ResultImageFormat': ['mha'],
                'ResultImagePixelType': ['float'],
                'Size': ['1024', '1024'],
                'Spacing': ['1.95', '1.95'],
                'Transform': ['BSplineTransform'],
                'TransformParameters': np.random.random((43248,)).tolist(),
                'UseCyclicTransform': ['false'],
                'UseDirectionCosines': ['true'],
            },
        ],
        '1': [
            {
                'CenterOfRotationPoint': ['5559', '7609'],
                'CompressResultImage': ['true'],
                'DefaultPixelValue': ['0'],
                'Direction': ['1', '0', '0', '1'],
                'FixedImageDimension': ['2'],
                'FixedInternalImagePixelType': ['float'],
                'HowToCombineTransforms': ['Compose'],
                'Index': ['0', '0'],
                'InitialTransformParametersFileName': ['NoInitialTransform'],
                'MovingImageDimension': ['2'],
                'MovingInternalImagePixelType': ['float'],
                'NumberOfParameters': ['3'],
                'Origin': ['0', '0'],
                'ResampleInterpolator': ['FinalNearestNeighborInterpolator'],
                'Resampler': ['DefaultResampler'],
                'ResultImageFormat': ['mha'],
                'ResultImagePixelType': ['float'],
                'Size': ['2099', '3099'],
                'Spacing': ['2', '2'],
                'Transform': ['EulerTransform'],
                'TransformParameters': [
                    '-0.0042257214326670706',
                    '1485.2882969318628',
                    '1562.9184348320616',
                ],
                'UseDirectionCosines': ['true'],
            },
            {
                'CenterOfRotationPoint': [
                    '7044.288296931862',
                    '9171.918434832063',
                ],
                'CompressResultImage': ['true'],
                'DefaultPixelValue': ['0'],
                'Direction': ['1', '0', '0', '1'],
                'FixedImageDimension': ['2'],
                'FixedInternalImagePixelType': ['float'],
                'HowToCombineTransforms': ['Compose'],
                'Index': ['0', '0'],
                'InitialTransformParametersFileName': ['0'],
                'MovingImageDimension': ['2'],
                'MovingInternalImagePixelType': ['float'],
                'NumberOfParameters': ['6'],
                'Origin': ['0', '0'],
                'ResampleInterpolator': ['FinalNearestNeighborInterpolator'],
                'Resampler': ['DefaultResampler'],
                'ResultImageFormat': ['mha'],
                'ResultImagePixelType': ['float'],
                'Size': ['3099', '2099'],
                'Spacing': ['2', '2'],
                'Transform': ['AffineTransform'],
                'TransformParameters': [
                    '0.9950415227069288',
                    '-0.00010257497254207552',
                    '-0.0011168508123054915',
                    '0.994578583104989',
                    '0.42502362300245433',
                    '-7.2389671174396275',
                ],
                'UseDirectionCosines': ['true'],
            },
        ],
    }
@pytest.fixture
def simple_transform_affine():
    """Minimal transform sequence: no initial transforms and a single
    affine stage targeting a 1024x1024 image at spacing 2."""
    return {
        'initial': [],
        '0': [
            {
                'CenterOfRotationPoint': ['1023', '1023'],
                'CompressResultImage': ['true'],
                'DefaultPixelValue': ['0'],
                'Direction': ['1', '0', '0', '1'],
                'FixedImageDimension': ['2'],
                'FixedInternalImagePixelType': ['float'],
                'HowToCombineTransforms': ['Compose'],
                'Index': ['0', '0'],
                'InitialTransformParametersFileName': ['NoInitialTransform'],
                'MovingImageDimension': ['2'],
                'MovingInternalImagePixelType': ['float'],
                'NumberOfParameters': ['6'],
                'Origin': ['0', '0'],
                'ResampleInterpolator': ['FinalNearestNeighborInterpolator'],
                'Resampler': ['DefaultResampler'],
                'ResultImageFormat': ['mha'],
                'ResultImagePixelType': ['float'],
                'Size': ['1024', '1024'],
                'Spacing': ['2', '2'],
                'Transform': ['AffineTransform'],
                'TransformParameters': [
                    '0.7567764805571071',
                    '0.03837669141118439',
                    '-0.13607342366687353',
                    '1.0305545112711545',
                    '469.55900326093416',
                    '156.0908167708924',
                ],
                'UseDirectionCosines': ['true'],
            }
        ],
    }
@pytest.fixture
def simple_transform_affine_nl():
return {
"initial": [],
"0": [
{
"CenterOfRotationPoint": ["1549", "1049"],
"CompressResultImage": ["true"],
"DefaultPixelValue": ["0"],
"Direction": ["1", "0", "0", "1"],
"FixedImageDimension": ["2"],
"FixedInternalImagePixelType": ["float"],
"HowToCombineTransforms": ["Compose"],
"Index": ["0", "0"],
"InitialTransformParametersFileName": ["NoInitialTransform"],
"MovingImageDimension": ["2"],
"MovingInternalImagePixelType": ["float"],
"NumberOfParameters": ["6"],
"Origin": ["0", "0"],
"ResampleInterpolator": ["FinalNearestNeighborInterpolator"],
"Resampler": ["DefaultResampler"],
"ResultImageFormat": ["mha"],
"ResultImagePixelType": ["float"],
"Size": ["3099", "2099"],
"Spacing": ["1", "1"],
"Transform": ["AffineTransform"],
"TransformParameters": [
"0.6952751205730702",
"-0.03563788730868529",
"-0.05465117691985885",
"0.9017096836910463",
"-451.277133969974",
"-8.122965475836498",
],
"UseDirectionCosines": ["true"],
},
{
"BSplineTransformSplineOrder": ["3"],
"CompressResultImage": ["true"],
"DefaultPixelValue": ["0"],
"Direction": ["1", "0", "0", "1"],
"FixedImageDimension": ["2"],
"FixedInternalImagePixelType": ["float"],
"GridDirection": ["1", "0", "0", "1"],
"GridIndex": ["0", "0"],
"GridOrigin": ["-89.30279837599048", "-88.89451068735627"],
"GridSize": ["34", "24"],
"GridSpacing": ["71.94094935794038", "98.24100393143651"],
"HowToCombineTransforms": ["Compose"],
"Index": ["0", "0"],
"InitialTransformParametersFileName": ["0"],
"MovingImageDimension": ["2"],
"MovingInternalImagePixelType": ["float"],
"NumberOfParameters": ["1632"],
"Origin": ["0", "0"],
"ResampleInterpolator": ["FinalNearestNeighborInterpolator"],
"Resampler": ["DefaultResampler"],
"ResultImageFormat": ["mha"],
"ResultImagePixelType": ["float"],
"Size": ["3099", "2099"],
"Spacing": ["1", "1"],
"Transform": ["BSplineTransform"],
"TransformParameters": [
"346.1685176801543",
"327.4731409437514",
"324.41761059730425",
"319.55188298376845",
"315.5054982266721",
"311.8090280751891",
"308.63574902154454",
"305.5677365141201",
"302.451898481584",
"299.22134936237245",
"295.82550917191196",
"292.25256246884754",
"288.47978395143554",
"284.43127946856777",
"280.2309433328638",
"276.20482640671565",
"272.6266495821223",
"269.9344130037499",
"267.9484273671518",
"265.85911655701136",
"263.06367432655617",
"258.7900740668137",
"253.05783153035833",
"246.7041920051949",
"240.33509647662493",
"234.69970667906586",
"230.33279720420848",
"227.38273335915315",
"226.20062413251713",
"227.2725832160369",
"230.22158168410098",
"234.81850288299557",
"238.2380783068482",
"254.36124363615576",
"338.1333623132271",
"318.75963497528016",
"315.5674618149036",
"310.5467403438256",
"306.46668114194364",
"302.94267435129007",
"300.10818185759285",
"297.40340174210337",
"294.5209443579451",
"291.492297899927",
"288.396090253388",
"285.2085240000449",
"281.6997903965263",
"277.8917158193537",
"273.59786018177",
"270.1037678766348",
"267.6944806106202",
"265.7322236905214",
"263.9648320441369",
"261.4738922644175",
"256.91965254089087",
"249.08185693874574",
"237.60841957409198",
"225.52341695598344",
"212.57513470156124",
"200.42677009383195",
"192.22277812853812",
"186.95443927401362",
"185.4011423672134",
"188.82398206664433",
"196.1240675014792",
"205.9592026844881",
"213.49787256385952",
"236.72223737708487",
"338.02048118896914",
"318.546972581579",
"315.53186498826415",
"310.6242122256941",
"305.6737918954718",
"302.73209154716415",
"299.2985891195221",
"297.1329682815004",
"293.8452502540424",
"289.43209880875685",
"287.8140140881465",
"285.9707404466337",
"281.9732477443762",
"279.81221103348605",
"274.96152042194996",
"272.34314215423296",
"270.29862277661874",
"267.3027820794648",
"265.6527475354288",
"263.31453233248794",
"258.3223614514568",
"249.76961578484517",
"234.84825180323256",
"221.30639972307043",
"204.61166324295954",
"190.3974716361549",
"181.5993914816903",
"175.84373064258494",
"173.81706126013896",
"178.1931430199949",
"187.57449487223244",
"199.72289436206358",
"208.98347358595103",
"234.92722426762512",
"336.6399088966055",
"316.93316476047534",
"313.76904503025014",
"307.6796326215777",
"301.0778380342201",
"301.22876388094056",
"298.96922053989107",
"295.24111962712294",
"291.65107390778826",
"287.81515673702233",
"288.0521662324887",
"282.686487362343",
"278.2996104470501",
"277.75773099961094",
"274.652136491453",
"274.3626387218978",
"270.5024029968783",
"268.0890254853293",
"269.55195011264345",
"269.7958598669742",
"261.93222045998397",
"246.97761702760496",
"229.28842913432882",
"215.007854898739",
"197.09789223830046",
"179.49290661628194",
"168.61037818154202",
"159.08914219426157",
"157.98588966146505",
"164.36059201134609",
"176.01016668032045",
"190.7362845487527",
"201.9545524921357",
"231.0845811889134",
"334.92667911111243",
"315.0037778788224",
"311.2791377850094",
"304.9716050556274",
"301.3679880097212",
"298.688581995427",
"298.6629116786487",
"296.6162472896863",
"292.09658808009334",
"288.180592768034",
"283.67968534533674",
"277.3219446193805",
"274.7172190263485",
"273.13079145167336",
"271.6453852010701",
"272.30291166223503",
"270.1957199302014",
"268.6382658745035",
"270.9055375169434",
"271.7145387539127",
"261.31771386162575",
"244.4756294759902",
"228.85499362412753",
"208.73474211987568",
"190.16886487744472",
"169.67610987236336",
"156.9816098888557",
"148.39534823009677",
"147.1128193407157",
"154.2124093909722",
"167.2845822822831",
"183.69920268081617",
"196.2317249048742",
"227.56372600096137",
"333.0992865052926",
"313.22632048260033",
"309.2855448211806",
"305.17747061563324",
"302.25851257041603",
"296.25812410233084",
"296.1535842931649",
"296.1408805688194",
"291.68805574397817",
"285.4270699956124",
"280.2220747925449",
"274.5739513217589",
"271.92524214886566",
"268.83988366801765",
"270.968364882148",
"273.2047986310636",
"271.6143729252179",
"267.68059249405843",
"268.6429135282244",
"269.8133978542261",
"261.00130548354053",
"241.37068571103976",
"223.3083697066741",
"201.3107468908836",
"181.8045021823426",
"162.88026677917887",
"148.87969917005864",
"141.4979622752536",
"140.00121689945934",
"147.79870344047518",
"161.52458419898358",
"178.722228745679",
"191.91677004316315",
"224.44653945005643",
"331.7254007783348",
"311.88681691644103",
"308.43295604807946",
"304.55093088424616",
"303.16043931739966",
"299.87206374602073",
"298.9415344320633",
"295.98347069790196",
"291.10690011598933",
"283.58833401077214",
"277.5516097799515",
"273.17754331545524",
"270.244064134124",
"264.58485466383775",
"264.9707054604393",
"269.5647342626765",
"269.8389641186398",
"267.63709067522285",
"271.4807169086873",
"268.64963990961957",
"256.3111829811734",
"236.86600020387905",
"216.88524555926654",
"197.12478793744543",
"177.2046703912743",
"156.85390385400723",
"142.5275710053363",
"138.5455356733213",
"137.76701744502114",
"145.42450293271443",
"159.06401203643813",
"176.19641244159678",
"189.42456619300987",
"222.15184235200078",
"331.4755672429598",
"312.3034032663087",
"309.4430056464251",
"306.6207353233411",
"305.0924491021132",
"301.28387883370664",
"302.69022435986085",
"299.6016461677884",
"292.2867972287593",
"280.2880457275338",
"273.92064801170704",
"271.99643651153747",
"266.60397389608517",
"260.5104402039107",
"259.95238934554305",
"265.76387995069354",
"268.01380290028044",
"268.92866615475583",
"271.3868695228665",
"265.3024650868406",
"251.53351839497944",
"233.93664732546065",
"213.96317771012409",
"192.20406211114064",
"174.98372636042916",
"158.22627655258518",
"143.43355197371275",
"136.3114195715595",
"136.90324527711027",
"144.64411849598588",
"157.98285303109225",
"174.82142449288943",
"187.8777905025387",
"220.42930383589717",
"332.6382177675383",
"315.1409598494502",
"313.69615827193576",
"310.3951917531788",
"306.5748927516067",
"303.0649982771692",
"304.14815464356343",
"299.95762718861874",
"293.42980235760905",
"287.6208906501058",
"280.12873774101206",
"272.11774676931464",
"264.6050942956283",
"257.15240036301765",
"256.02257046513745",
"260.49094194325454",
"263.35582676198493",
"267.2501089762176",
"266.82292169414086",
"261.11222308701184",
"248.51924063725278",
"230.6184009467372",
"215.2701903013648",
"192.5197508166504",
"170.95632305674528",
"157.29943739717342",
"145.31292005301992",
"138.1191122595529",
"136.36863743026382",
"144.44735886122348",
"157.66387937155338",
"174.2035380769798",
"187.04182003906985",
"219.27940554360686",
"335.23148261255943",
"318.95412063712894",
"318.102201541211",
"317.02695453664046",
"314.77800785070997",
"313.02470208993395",
"308.66474805999906",
"303.9890380249832",
"296.46900011209146",
"291.0880997825945",
"283.59749721641356",
"273.31960634985364",
"264.82007144496293",
"256.3340297291796",
"253.4767485257203",
"254.95045215382947",
"258.25768902864706",
"261.88943244446966",
"260.61667543048713",
"254.454410926501",
"242.36585846017428",
"227.36519849630673",
"211.47389013903722",
"190.857927091437",
"165.36301018041814",
"150.7810092701164",
"144.3438087510288",
"138.4901282210333",
"136.83887309983456",
"145.09044751814506",
"158.1276907668684",
"174.35762474720715",
"186.951752571121",
"218.77807357649013",
"338.93859147647794",
"324.72446232238264",
"323.71873613097205",
"323.9142130359808",
"323.4491692979075",
"322.290899060616",
"314.4945800369632",
"313.37876979296595",
"304.8017625443029",
"294.0097620865666",
"284.22316701213697",
"274.2142822301352",
"265.7632866251417",
"257.70181194370645",
"251.4709200185006",
"248.95462864479225",
"253.35203930961555",
"255.1741818631165",
"254.11041559490712",
"246.61017218015814",
"237.00303605435914",
"222.2571717326766",
"205.5477670526594",
"185.19658953809866",
"164.00596461088375",
"149.5028749938405",
"141.93040459607658",
"138.3397998313917",
"139.6678854379909",
"146.82514023324794",
"159.5316117189098",
"175.38890305454987",
"187.67666364088808",
"218.93967668907146",
"342.9353248883213",
"331.19226914298383",
"330.3248143130418",
"330.75506284252185",
"332.0844750987303",
"328.3705873590471",
"320.66706596125084",
"317.1316365816014",
"309.538961550857",
"295.1794874968513",
"287.6417581211964",
"280.13685640801805",
"268.44240669278224",
"260.89215505838774",
"256.1820990752209",
"249.92481812282594",
"247.5583281904001",
"248.24896161499407",
"245.85460102981943",
"235.8092844807058",
"229.96954369756577",
"219.24335098840427",
"199.41062204679352",
"181.03178925608614",
"165.0734921497009",
"150.05637787357634",
"143.1695029690003",
"142.1741294622833",
"141.7562504905459",
"150.00003663937085",
"162.44382454313362",
"177.67157747211155",
"189.43354035324614",
"219.73478854132128",
"346.76031006404037",
"336.0399548017178",
"335.5449056763668",
"335.0783469393114",
"336.1863658528581",
"332.33016857846894",
"324.6631415792094",
"316.5289730223695",
"309.3581897453022",
"294.89859126616346",
"287.8113970230678",
"281.5862299549016",
"271.89276216389766",
"263.7626731275445",
"256.00002181968085",
"249.9607438938724",
"247.54287560437015",
"245.36047907844596",
"241.62198803031833",
"232.85816339859826",
"225.57573173126755",
"215.96630882543306",
"197.8497872985846",
"181.46796338312225",
"169.0543866028749",
"155.60454567397846",
"148.21528339622586",
"145.5859486825331",
"146.4786307194544",
"155.26654906815241",
"167.00600545180734",
"181.2920067935089",
"192.26164398445397",
"221.13218464521427",
"350.20151892581157",
"340.8477353294608",
"340.93546055564946",
"338.43964020038004",
"335.6277990145745",
"332.7938685798357",
"327.1351170967076",
"320.3570283306046",
"314.23672810691687",
"299.13214382350657",
"288.7073944247829",
"282.3783987030363",
"276.48757638181524",
"269.052422065156",
"259.73409174007503",
"254.02801944657608",
"250.6394632771059",
"244.46408484503254",
"238.17699352582747",
"231.3832734957137",
"221.77566075686084",
"215.0539340269202",
"202.370746279784",
"185.5438380151674",
"172.80593833976283",
"163.7547581277045",
"154.70581666771338",
"151.09869967829684",
"154.0579877284035",
"162.30421441978535",
"173.0813221720055",
"186.15816207790584",
"196.08894931567335",
"223.09167663822518",
"353.16534118641334",
"344.9890142064684",
"344.1605091553746",
"339.37048806406744",
"338.7127226080233",
"336.98738040393084",
"328.53962090373415",
"320.2978308284991",
"313.5657094290768",
"303.7608715998435",
"293.14312104095836",
"283.0579481986293",
"278.51459546048756",
"272.9475650146303",
"265.27241895908617",
"258.06274682598104",
"249.6780569393401",
"240.46182622458474",
"234.1726619633555",
"227.47361141120751",
"220.61871150660193",
"215.9119064544713",
"204.04251561599304",
"190.17579826605123",
"178.66404209773697",
"172.7809423650138",
"164.42337436156305",
"158.93573718633556",
"162.9350603731196",
"170.77147039752924",
"180.39487160524342",
"192.03905671253588",
"200.74385373593006",
"225.55754367766306",
"355.69418712923294",
"347.0893095138234",
"345.8814667800384",
"339.6548089217322",
"339.06635162308436",
"336.2380171849185",
"324.89788669850327",
"318.6545560620989",
"312.7661483120709",
"304.89607912557454",
"295.73490350717367",
"286.96881925791325",
"281.5658370799079",
"274.61266354455995",
"266.1220560422612",
"259.9978788759085",
"250.2121777082621",
"243.04894310078086",
"236.53303476216036",
"231.9345628879305",
"227.4097223596231",
"220.31288271452829",
"210.14126539054686",
"199.29314505936068",
"188.34456475539724",
"182.63565050471493",
"179.3440243664899",
"172.7989458450605",
"174.42929245043786",
"180.24398519203248",
"188.44266932600192",
"198.48626305739091",
"205.9043169458767",
"228.46222784499298",
"357.78579031324404",
"346.65985492621706",
"343.2946880701661",
"341.3655934619139",
"340.0443363800775",
"332.2686165762213",
"320.94042425871186",
"315.3597959702061",
"311.2551694846061",
"306.2025018868408",
"298.8873941850425",
"291.07391379104934",
"287.0620533270965",
"276.9551673947378",
"265.6729077524117",
"263.1824505253691",
"256.953888987858",
"250.59426831900362",
"245.45607572462285",
"237.16020225471476",
"232.98925449084086",
"225.35115765246837",
"218.55879541460263",
"207.84539129399568",
"196.92673412770324",
"190.69922201537418",
"188.85263459761174",
"185.24741493408624",
"186.1331556441477",
"190.13238120342444",
"196.787237747054",
"205.17059006274167",
"211.32952302453336",
"231.71418254611095",
"359.44600836467725",
"347.10263447542695",
"345.29729566310243",
"343.1622718194273",
"337.03412219662897",
"327.61877414677366",
"319.0025745803201",
"313.17135222344706",
"308.31188515807224",
"306.455083089386",
"302.5356244885869",
"295.8292151952945",
"291.2923510527841",
"281.51717163487695",
"269.10419353772664",
"265.5449901548181",
"262.14770124629496",
"257.7043032365323",
"252.91782047441743",
"247.95132009339503",
"243.27484093631557",
"236.04268616205727",
"224.31121441106612",
"215.87475431760538",
"207.29816144923515",
"198.4519839083223",
"194.85434619918027",
"193.6837225233743",
"196.4338064731942",
"199.65370894911763",
"204.93117022384186",
"211.79299500804737",
"216.8001366739762",
"235.18587931244068",
"360.69969091421433",
"347.308186172071",
"344.7053377705889",
"337.6076157253246",
"330.15752256337305",
"323.7317106027419",
"318.4368308839275",
"313.0977833911863",
"306.72217443132286",
"300.14157306923033",
"298.1262317957231",
"291.19445087763256",
"285.6446341315995",
"284.5116989613047",
"278.30883478141317",
"272.2578248313816",
"268.9615294265805",
"266.80596791287036",
"263.8311274424012",
"257.52151491638955",
"252.05977701834647",
"246.7021001527828",
"237.34741070189222",
"226.40618441570822",
"218.62884265414272",
"211.5313229127594",
"207.23699212248664",
"207.04454739668802",
"206.20554315811725",
"208.49062676770933",
"212.62830193869954",
"218.1771651418718",
"222.1766297676919",
"238.78908754337593",
"361.64926451927266",
"347.75808605641475",
"343.83664464551345",
"336.450427611368",
"330.4092881779977",
"326.77551909277776",
"317.3023315961222",
"307.94054254733027",
"301.3112000286185",
"295.4451927702177",
"294.1961280038609",
"291.198717324441",
"287.17395479411556",
"287.9593125780242",
"284.4890250022703",
"276.9631414015477",
"276.3588799328678",
"272.00757677991504",
"269.021231312163",
"265.20009751309897",
"261.0307307869228",
"256.40570120468954",
"251.2787761734925",
"239.1945588214004",
"230.3514097002957",
"222.08837496414438",
"219.07956446733667",
"216.65037940724076",
"215.24555148826394",
"216.91500611141288",
"220.02289627044271",
"224.4130772174371",
"227.53197376170073",
"242.57607056506328",
"362.65835632793784",
"351.3141331671815",
"347.1836065184676",
"339.41421676463983",
"331.42240861637765",
"323.95825939689104",
"317.7232922446654",
"308.3961169797909",
"300.97784103506035",
"295.11894976614235",
"291.3738172188519",
"291.5369370823008",
"291.1725683922608",
"289.46394101040244",
"287.35500271914424",
"281.37337040214686",
"281.8569343243541",
"275.25642158852236",
"270.4457560367746",
"269.96249278174054",
"264.54513698754295",
"257.29196034575006",
"253.3631883276376",
"245.95003520910393",
"238.11238822660954",
"231.67044719149783",
"228.6153953243655",
"224.9587014780344",
"223.5323556574065",
"224.52270470308608",
"226.71667158557815",
"230.11971945401925",
"232.49942907923017",
"246.217351241248",
"362.9388511375365",
"351.5128297256764",
"349.66450720294034",
"342.50910282352214",
"332.08943226684727",
"324.8526574823578",
"319.5281398171725",
"310.7567476051302",
"302.8566925151762",
"296.33117375948336",
"290.4243305175619",
"290.68028637852126",
"290.9926682811091",
"290.79980711561467",
"292.58817205745277",
"285.95650030359207",
"283.17713979559346",
"279.32981937882033",
"274.6623066868781",
"271.44830349539234",
"266.82823539998327",
"262.19517916111636",
"257.49000364815583",
"250.795655865449",
"244.01670790760178",
"238.60537262576875",
"234.28186792684937",
"231.69626774366748",
"230.7757313615746",
"231.21238372055515",
"232.67282307648864",
"235.27576467921585",
"237.05392301735753",
"249.67138258669007",
"362.65307624225073",
"350.2124968904047",
"347.59507610328524",
"341.1108899677573",
"334.2223828497661",
"326.4492673813734",
"318.23123824485515",
"309.8471083358216",
"302.1124591405119",
"296.6510437063646",
"293.27131300319957",
"292.01522939491315",
"291.5373361357364",
"290.23207303937073",
"289.07768548946353",
"285.5336315685959",
"282.137222881234",
"278.94303550029025",
"275.97067453139925",
"272.76392195545617",
"268.9542577299965",
"264.2278288081784",
"258.5326764808018",
"252.37216476884504",
"246.59677347310986",
"241.80708569671484",
"238.23092046526227",
"235.88211689565549",
"234.76806851881886",
"234.8646361214987",
"235.87836355466155",
"237.98882236108832",
"239.39335286997087",
"251.35935754327042",
"362.2962192116288",
"349.3640344258009",
"346.01249581420785",
"340.73987251000193",
"335.3791532162943",
"328.96097839286136",
"321.93510942584794",
"314.97252625553375",
"308.669212920541",
"303.91668363511445",
"300.80186730123074",
"298.71981184880156",
"297.1551426583149",
"295.31583101752653",
"292.95183260086856",
"290.2874861382837",
"287.4088445167523",
"284.3752077725106",
"281.1962627040185",
"277.8202664700321",
"274.22297173019825",
"270.347040437093",
"266.30154923030017",
"262.3661823984711",
"258.7734638357227",
"255.8010550736447",
"253.54818244382082",
"251.92494547453535",
"250.9336664920943",
"250.69709645490033",
"251.07943168489817",
"252.36530119126206",
"253.12805862540165",
"263.40204264037044",
"40.78409505994377",
"55.64044148341673",
"59.12163602688054",
"64.49190288980711",
"69.56262100872526",
"74.66593304992192",
"79.39464248880007",
"83.55501409331552",
"86.86787204626874",
"88.8737461577972",
"89.73845357971378",
"90.26310965579135",
"91.04535615921326",
"92.83772733671465",
"95.67343322177996",
"98.84203365991378",
"101.8822863587347",
"104.2679497621648",
"105.91692456019965",
"107.21390766404406",
"108.37770128521899",
"109.6414034829343",
"111.02797119778506",
"112.33839043990986",
"113.46060323511716",
"114.30187955218449",
"114.88407567682422",
"115.4356296461799",
"116.08362641002032",
"116.76405373378769",
"117.45473293754947",
"117.76902686975791",
"118.16596009560062",
"113.42781038826207",
"37.562429670518156",
"51.3234302376081",
"55.138965884880285",
"60.92701126587394",
"66.71502174709276",
"73.15609913884134",
"79.76144514728351",
"85.65591296692314",
"90.24850599371678",
"92.79657147223415",
"93.61725998413577",
"93.95924777998614",
"94.7273095161669",
"96.52858034200312",
"99.43014793908812",
"102.83681691721003",
"106.1486290049596",
"107.90624629550112",
"108.39781189855724",
"108.85075427205668",
"109.10706483310543",
"109.80825043190053",
"111.11061238090102",
"113.04888538922829",
"112.81504179637608",
"113.23430238896643",
"113.28676107557862",
"113.09333038967777",
"114.14143226587142",
"115.3506926904547",
"116.94973291094627",
"118.34074474819704",
"119.59318296747956",
"116.07188796751066",
"34.91709083175705",
"47.52854926450234",
"51.262921380255825",
"56.719147312361144",
"62.609381984804024",
"69.8466369710507",
"77.98302948413506",
"84.42732620037062",
"89.62154989226144",
"91.16360901351588",
"91.3538927059894",
"92.9332205009229",
"94.72220006620468",
"96.41451592228779",
"99.67759129654624",
"102.91572589103642",
"108.1923229407107",
"111.1761256400042",
"110.53855997020489",
"110.66204059571386",
"109.75172204842305",
"109.44190008676716",
"110.36401648884356",
"112.39666369777194",
"110.4195109662743",
"112.02000163716119",
"113.37543779897473",
"111.44138402571313",
"112.07578110869088",
"113.1763033961007",
"115.02880951375097",
"116.79791092413923",
"118.37117875302029",
"115.40605688033862",
"31.30289370782706",
"42.03587120063301",
"44.50077994187367",
"48.37226083312055",
"55.873060018185065",
"66.8018546904913",
"74.86354072881501",
"80.72033287493596",
"89.82569309767875",
"92.25511785423316",
"90.06446336388032",
"94.89041557841483",
"97.67433742842684",
"99.37070102479883",
"103.79413513560264",
"106.37358944533096",
"109.83980184172565",
"114.08649827453614",
"112.84885868540488",
"109.5118408150616",
"106.50416837502401",
"109.16345157005756",
"110.29079231715292",
"107.94035891669327",
"109.5351945458306",
"111.87016095420996",
"113.26680383931807",
"111.5275819415621",
"110.467240469935",
"111.21305122865523",
"113.25507651185215",
"115.38415957488434",
"117.28958443038984",
"114.91806407692489",
"27.445549747932212",
"35.86995189088632",
"35.221834830356016",
"38.378418287517434",
"48.0776677902057",
"56.505967729912236",
"67.35476226927632",
"73.77849556534922",
"85.16543722179588",
"96.70246415390373",
"95.66398901354643",
"99.08814462730906",
"104.2646790774392",
"106.39358319679587",
"104.74800098639764",
"105.42332347740064",
"108.95754023466857",
"111.17480673486578",
"108.43497444808926",
"106.48800803918634",
"103.94359743185721",
"108.09964226097183",
"109.9192236405246",
"108.2691290029947",
"113.25376247271782",
"112.8720246388863",
"111.59520067543036",
"110.10788748435486",
"108.32794733817417",
"109.98130464362217",
"112.03679140788861",
"114.30245974759636",
"116.37879818591516",
"114.37664585520655",
"23.399518303861964",
"29.792865934590477",
"31.03400616890889",
"34.01257852157038",
"41.60491888475505",
"50.23211819891462",
"60.029198300495345",
"70.88038862861184",
"81.07427632744603",
"92.42051258405787",
"97.38862767981121",
"101.87656603521333",
"106.37879760357366",
"105.88466649251072",
"107.15118900479024",
"105.90237373058166",
"108.22734465478834",
"111.36328245937332",
"109.92933331254551",
"106.65369936837466",
"108.10160916058427",
"110.98361431545108",
"111.5474094948179",
"110.54674149188347",
"114.0549753459099",
"111.02918539600881",
"109.79249134451514",
"107.7192737552683",
"106.59569529428208",
"109.17377036484488",
"111.19650190065842",
"113.45966427977874",
"115.57314439995939",
"113.74177884398026",
"19.47217503164552",
"24.003786000689633",
"27.431440962987168",
"30.03429121490335",
"33.83670020552386",
"42.5766685761927",
"55.87402193474221",
"66.90266613107988",
"73.89211645839666",
"87.22080901911721",
"100.08747782822914",
"106.77827561295229",
"108.7503001190677",
"108.4640177355942",
"113.0855825952221",
"113.115437330604",
"113.82106367570248",
"114.60587709634675",
"111.54517273591003",
"108.18349661103856",
"115.91923540086981",
"116.08508492809597",
"113.66791237388816",
"113.46705182199116",
"115.70010776141297",
"114.54379906041893",
"110.05388890511529",
"108.25898570716956",
"107.90886711519002",
"108.62611016165606",
"110.44730601319368",
"112.62793520223107",
"114.69138023726482",
"112.90615850508715",
"16.07721860724766",
"18.720853195717183",
"20.2130087750242",
"22.702905231126653",
"29.84255992536889",
"38.98499446991596",
"52.14186096075404",
"67.25498483311672",
"75.36577868374867",
"86.96437025107286",
"100.63664924687308",
"107.84136116390702",
"114.66310633668552",
"117.60032694092993",
"117.15374396633395",
"115.01486449568924",
"114.49628453974945",
"116.18320896067324",
"114.66650239133315",
"114.53792806163642",
"118.63889417908075",
"113.72778308445655",
"111.89334856123315",
"114.65817264905306",
"117.25782243251697",
"114.66020082658831",
"108.11685334697104",
"106.69139140361219",
"105.93261564709948",
"107.32538212471023",
"109.13708092636706",
"111.30595353665875",
"113.39189363576618",
"111.79217692579067",
"13.349111366197182",
"15.20233275304212",
"15.272351980011244",
"17.291100388558",
"26.27497559195861",
"36.56689091158289",
"50.091012710512935",
"64.24738942819558",
"72.14128443933994",
"83.31547701512937",
"98.56437832513515",
"108.23228386496373",
"112.68853704023508",
"117.03600109698218",
"119.08846626503481",
"115.08972868189002",
"112.72894691207284",
"114.97065780783542",
"115.98948961153553",
"114.85594291808046",
"118.37211350854984",
"115.70806396130736",
"113.48726995582298",
"113.0198232836689",
"114.48639514836688",
"110.87651045547211",
"110.91896609791036",
"109.41179370372701",
"106.01741080865658",
"105.37045543749836",
"107.01660196266302",
"109.31038946546786",
"111.54955068721891",
"110.36392998308851",
"11.174812961076594",
"12.882544711236244",
"14.040904486766204",
"16.937029141337845",
"21.45600877630881",
"31.645683366664727",
"46.063552718737824",
"57.6813015114083",
"68.33449612165073",
"81.5358616616035",
"95.07277457504652",
"102.6306981854329",
"107.449674441334",
"112.11512254922842",
"116.03005622927753",
"115.01250178318766",
"114.324724816112",
"115.73617146161092",
"113.65262277340686",
"114.06788267454226",
"117.74390995380654",
"115.86285899198943",
"116.82349752578972",
"115.11571265515607",
"111.88034512044189",
"109.21177018459356",
"109.47745695172252",
"105.73382743955199",
"102.81618629794305",
"102.34950130036134",
"104.07289983014284",
"106.64751631528767",
"109.17192916674594",
"108.61979850207726",
"9.412048283711773",
"12.093749954509567",
"15.490163262518628",
"15.669234291820713",
"19.19406432377831",
"31.771819793780626",
"41.621201252131804",
"53.08035100670346",
"62.192278630045664",
"75.6442894419023",
"89.34119349635537",
"95.99430739430927",
"104.33111746050297",
"110.55778461648526",
"115.80874699411929",
"113.76483333164181",
"112.84934166350968",
"112.9420742438095",
"111.0758011852561",
"112.61833482007468",
"115.76151695915723",
"113.38481598539171",
"116.24935488203654",
"116.95350297034454",
"112.31961921188181",
"109.68535205651519",
"102.80295297553738",
"99.73031026296128",
"98.48087080582063",
"98.51747458761977",
"100.5504589857132",
"103.53443569692566",
"106.42555369696427",
"106.62317421210496",
"7.7853580258660315",
"12.124020100049476",
"15.393058672723937",
"17.007359965839164",
"23.48378398222911",
"33.396302184156404",
"41.72667374060113",
"51.349911200956306",
"59.2925305722736",
"70.34034690524946",
"80.61081301918006",
"89.88894702907041",
"97.60949586112758",
"104.70308826084434",
"109.07414063523129",
"108.69537528028437",
"106.17517117986569",
"109.280678769615",
"112.2657172310045",
"108.9840497620102",
"110.5298771109884",
"108.50320570825399",
"108.28184440440991",
"110.34578062942116",
"108.00765141063967",
"105.63369418574338",
"99.87693217186067",
"96.02911355739377",
"95.16056269326991",
"94.67569670852531",
"97.08342402929031",
"100.52518586621667",
"103.73226710579722",
"104.53665774213502",
"6.107988320777217",
"11.267586309479961",
"12.417198384297366",
"16.528572630845",
"24.71546892533934",
"32.11594318842096",
"42.25966729088185",
"48.18811475861578",
"58.068508395154595",
"66.53001046626133",
"76.64156796779726",
"85.96741505354335",
"92.38994714250678",
"95.15483099018425",
"96.58364053311499",
"98.79410784755652",
"98.05958055555088",
"103.51338259406181",
"104.13175129069685",
"103.48039868651152",
"104.40349970436263",
"102.7046270091542",
"99.7329486077021",
"101.98752822409546",
"101.69456147827108",
"101.55364007137786",
"97.59534388812003",
"92.58706271627923",
"90.53311641694242",
"91.0524640350529",
"94.04322819296735",
"97.8870587688993",
"101.28326844545182",
"102.42979808356574",
"4.209860697403578",
"10.755906124805305",
"14.247794599948723",
"19.58229275368605",
"29.078795849408056",
"31.881123563174572",
"39.12506239722409",
"48.33766300919947",
"57.11121162742156",
"62.901215181969604",
"69.82128215144539",
"76.5016182941044",
"85.43572776853308",
"88.99696950417977",
"90.15491925252812",
"92.35114188975915",
"90.38855155969411",
"95.85666814642457",
"96.49811228479204",
"98.36126109800105",
"100.00274235286454",
"97.14185594343533",
"93.82082239464715",
"94.59272462866906",
"93.77392325104671",
"94.99244256183509",
"89.70997546363037",
"85.13476709507344",
"87.09179014112112",
"88.28699953837676",
"91.71386933904203",
"95.7300644169776",
"99.13573353076345",
"100.31991657454236",
"2.2348713801433147",
"9.128143162740393",
"12.564195379982106",
"16.075210034111986",
"25.475017731915518",
"30.874567111702248",
"36.20674927772434",
"46.4007112105667",
"55.45791927899421",
"61.885417960787954",
"64.86375119533862",
"67.19394067025405",
"75.76907548917492",
"81.87404990649867",
"81.1818520505565",
"80.98305106464036",
"80.83297827166932",
"83.93661422047697",
"85.6095444708237",
"89.23965042215868",
"89.84359267516689",
"89.44234040578247",
"86.29713184597817",
"84.14653512763354",
"85.85354219045774",
"86.37135770818291",
"84.38428290705407",
"82.02441176812843",
"84.64092261778138",
"86.5350294786016",
"90.11150979233378",
"94.01506922679053",
"97.23877392379195",
"98.16902276428065",
"0.7622497246229948",
"10.145351794935838",
"14.648348868798864",
"16.955065919682887",
"26.17241878787025",
"30.872516567958005",
"34.94419297683579",
"40.85312209750345",
"48.595339705948994",
"56.92946264953949",
"61.310752069374615",
"64.32916862701296",
"68.6515021857355",
"70.59422485051316",
"69.89225924211136",
"69.28052662587197",
"69.27000966040119",
"72.32225312035199",
"73.3872488711495",
"77.0475875506685",
"78.17171791537592",
"80.58168362392253",
"83.17061809222709",
"79.52001606986326",
"81.43654693996238",
"82.15609731670513",
"80.67629975365531",
"80.17878167020622",
"82.54058382456478",
"85.48101958763442",
"88.93611556311927",
"92.48182091824273",
"95.3663779380643",
"95.84427495702981",
"-0.2631493834957937",
"8.837498509948958",
"15.179067212200483",
"17.701081199051877",
"23.449286559556878",
"26.48414802981159",
"32.363666166618806",
"39.30331057269841",
"43.578180215254775",
"49.43582307495616",
"55.19511485294549",
"56.093634755754216",
"56.91173027981305",
"56.49787245605454",
"57.043061993307134",
"58.386351795791775",
"58.60804490632406",
"61.16361890436565",
"63.69519018745116",
"66.75231368388025",
"70.72585990962664",
"71.69401617737682",
"74.47488890536704",
"74.82112242241006",
"74.90638876631544",
"76.99987876110502",
"76.71783569484211",
"78.59930904127312",
"82.01803031862157",
"84.90680311608368",
"87.98795287720989",
"90.989138848023",
"93.41506208124913",
"93.31095097611677",
"-0.9014013038925366",
"5.606105927998476",
"8.968307109215885",
"13.172920515083264",
"19.21625390811881",
"22.35232374693543",
"27.982862268646443",
"36.40212560860543",
"37.92931202635785",
"42.79475879924985",
"48.24593827372131",
"48.01346777329906",
"45.74198949744935",
"46.560114408566825",
"48.73248035488485",
"49.05237024408965",
"48.654509817233034",
"51.7969168176192",
"56.556483720250384",
"63.27332677275956",
"68.03203540678429",
"69.54341268395851",
"71.6354047274178",
"73.39624054236164",
"74.26930780298771",
"75.38503058855264",
"75.73730735015744",
"77.68405724480323",
"81.62299577437649",
"84.45519189504813",
"87.08301987351993",
"89.46550705020135",
"91.36516457255485",
"90.6091902710927",
"-1.193401225675339",
"4.7194307015090295",
"6.1582742141684195",
"13.598608471698652",
"19.409347886057496",
"18.871912988672676",
"26.061417403091887",
"32.754532091322986",
"34.390238139812894",
"38.59924613282907",
"41.02370487062378",
"42.35705299257019",
"42.61539520902655",
"44.97692980942836",
"46.58528155482053",
"44.55118126350949",
"42.59703229528825",
"47.77357381243934",
"52.99307516954724",
"59.23029234222547",
"64.33682657367562",
"69.39733788815438",
"73.37461298733876",
"71.60746458245222",
"72.10343470209128",
"75.02123121715442",
"77.22735650844814",
"77.93981572296265",
"81.36462163681443",
"83.87277556973892",
"86.0305841681746",
"87.79454812243851",
"89.15879281874818",
"87.76561342536628",
"-1.6902526676185854",
"5.591331728219388",
"10.150837965365374",
"12.469566204363113",
"15.822979113625832",
"22.51595223807594",
"24.39162715144879",
"27.90391104396376",
"31.12022623209653",
"34.11064394772934",
"35.817693441783696",
"37.79270393325347",
"37.15612317056881",
"38.54747379135644",
"37.768851062299326",
"40.81166853536552",
"42.507962204504054",
"44.18716037248709",
"48.229227677819516",
"61.05104628768841",
"67.11225289211804",
"71.74661551548482",
"76.93837384095485",
"75.29418876923302",
"76.1214469634111",
"78.04137700660067",
"76.04785876914313",
"76.53002452994095",
"80.65463787452498",
"82.84099073984775",
"84.50128331775508",
"85.69995576772625",
"86.59890544264923",
"84.7153077170198",
"-2.1268579731609343",
"6.825498931560021",
"9.508100710092696",
"12.176389928366563",
"15.191788664164319",
"20.20305376087",
"22.846867304807816",
"24.65397818504794",
"26.031854307257742",
"29.346523848806903",
"31.585515769347335",
"31.002382436249622",
"30.982463054982784",
"35.76502640637854",
"36.789337966355596",
"37.5379441415702",
"39.4941526622382",
"43.872080890447435",
"48.8595274140996",
"58.59935971509011",
"63.64430016332763",
"67.61334494180835",
"73.22535286617702",
"76.58347244177472",
"76.48125630189155",
"77.47399502264918",
"78.40413040921696",
"77.99392963243871",
"79.82040770028325",
"81.46236343546163",
"82.63825007776701",
"83.37064180564067",
"83.91575495185106",
"81.72946971582576",
"-2.9589342896335533",
"7.4923628177534125",
"7.366464424577311",
"10.78169916104005",
"15.54066003742132",
"15.098731075517636",
"19.43051950620253",
"24.091379773703796",
"24.419276241578025",
"25.331412700394946",
"29.419750911347688",
"29.235891714201607",
"30.98273056329845",
"35.3162065558623",
"36.49516664723568",
"33.70945067795281",
"35.27591783750032",
"41.45570843493861",
"48.60210313581975",
"56.001626063627306",
"62.64280621603272",
"66.03154413141455",
"69.52972637180316",
"74.12764374957487",
"74.6683778166432",
"74.98709821478782",
"76.32106154640506",
"77.17532032821889",
"78.4839357010033",
"79.60894514354051",
"80.39425135053283",
"80.80551787574791",
"81.12567127725566",
"78.83613928984283",
"-3.3003009984381135",
"7.3206885748945725",
"8.250232198168264",
"11.059214660216199",
"14.04434844291037",
"15.257978985059456",
"18.39590417316893",
"22.613820797725825",
"24.47861652409641",
"25.892059032646696",
"28.6928872635443",
"30.349343849958515",
"31.80474704656206",
"33.327848183389946",
"34.40961718379498",
"35.49372363141536",
"38.260613516369716",
"43.240009930357026",
"49.64328685158372",
"56.272283339440534",
"62.2581550218691",
"66.8431540820915",
"70.18477281405967",
"72.73739042463214",
"74.38088161445047",
"75.4964082123291",
"76.29474399991388",
"77.10764429949825",
"78.00639842085587",
"78.74713894675355",
"79.28306939263481",
"79.52030023440179",
"79.73305825225668",
"77.41780471326928",
"-11.806863266530803",
"-1.1515412043384559",
"0.17260435553505427",
"2.548292912542326",
"4.650562390699587",
"7.01885392120895",
"9.634390835319738",
"12.432234263269843",
"15.196020054061108",
"17.746700496649247",
"20.095816311630003",
"22.324649608799586",
"24.511228685961726",
"26.719490874398343",
"29.07175044218383",
"31.816095014636705",
"35.122154867406344",
"39.207937193524764",
"43.91303342564869",
"48.66202900673857",
"53.06534336817004",
"56.732288798033046",
"59.56233780727515",
"61.77031599542492",
"63.469056914464296",
"64.75290314946922",
"65.71769772598769",
"66.46432346218911",
"67.06692018724564",
"67.58152320908655",
"68.03046725686066",
"68.28893625083548",
"68.51860056059424",
"66.92700036052011",
],
"UseCyclicTransform": ["false"],
"UseDirectionCosines": ["true"],
},
],
}
@pytest.fixture
def simple_transform_affine_large_output():
    """Expected elastix-style transform map for a single affine transform.

    The map mirrors what the registration pipeline emits for a large
    (14021 x 18012) output image: an empty ``"initial"`` transform list and
    one fully resolved affine parameter map under key ``"0"``.  All values
    are strings (lists of strings), matching elastix parameter-file syntax.
    """
    # Build the single resolved transform as a named local for readability;
    # the returned structure is identical to the inline-literal form.
    affine_params = {
        "CenterOfRotationPoint": ["7010", "9005.5"],
        "CompressResultImage": ["true"],
        "DefaultPixelValue": ["0"],
        "Direction": ["1", "0", "0", "1"],
        "FixedImageDimension": ["2"],
        "FixedInternalImagePixelType": ["float"],
        "HowToCombineTransforms": ["Compose"],
        "Index": ["0", "0"],
        "InitialTransformParametersFileName": ["NoInitialTransform"],
        "MovingImageDimension": ["2"],
        "MovingInternalImagePixelType": ["float"],
        "NumberOfParameters": ["6"],
        "Origin": ["0", "0"],
        "ResampleInterpolator": ["FinalNearestNeighborInterpolator"],
        "Resampler": ["DefaultResampler"],
        "ResultImageFormat": ["mha"],
        "ResultImagePixelType": ["float"],
        "Size": ["14021", "18012"],
        "Spacing": ["1", "1"],
        "Transform": ["AffineTransform"],
        # 2x2 affine matrix entries followed by the translation vector,
        # serialized as strings exactly as elastix writes them.
        "TransformParameters": [
            "0.9737116526894503",
            "-0.02360806278113504",
            "0.0040156561848048705",
            "1.019349427711173",
            "-260.6092127223852",
            "-2498.1235259464324",
        ],
        "UseDirectionCosines": ["true"],
    }
    return {"initial": [], "0": [affine_params]}
@pytest.fixture
def simple_transform_affine_nl_large_output():
    """Expected affine + non-linear transform data for a large output image.

    The fixture data is too large to inline, so it is stored as a pickle
    file resolved relative to the test working directory.

    Returns
    -------
    object
        The unpickled transform data.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the previous ``pickle.load(open(...))`` left the handle open until GC.
    # NOTE: pickle is acceptable here only because the fixture file is
    # repository-controlled, trusted data — never unpickle untrusted input.
    with open("./fixtures/large_nl_tform.pkl", "rb") as f:
        return pickle.load(f)
| 99,567 | 40.923368 | 77 | py |
wsireg | wsireg-master/tests/fixtures/__init__.py | 0 | 0 | 0 | py | |
wsireg | wsireg-master/docs/conf.py | #!/usr/bin/env python
#
# wsireg documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import wsireg
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
"sphinx_rtd_theme",
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'm2r2',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
autosectionlabel_prefix_document = True
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'wsireg'
copyright = "2020-2023, Nathan Heath Patterson"
author = "Nathan Heath Patterson"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = wsireg.__version__
# The full version, including alpha/beta/rc tags.
release = wsireg.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'wsiregdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(
master_doc,
'wsireg.tex',
'wsireg Documentation',
'Nathan Heath Patterson',
'manual',
),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'wsireg', 'wsireg Documentation', [author], 1)]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
'wsireg',
'wsireg Documentation',
author,
'wsireg',
'One line description of project.',
'Miscellaneous',
),
]
| 4,974 | 28.790419 | 77 | py |
AT-on-AD | AT-on-AD-main/test_fmnist.py | import argparse
import logging
import numpy as np
import os
import os.path as osp
import torch
import torch.nn as nn
class MyLR(nn.Module):
    """Multinomial logistic regression: one linear layer from input
    features to class logits (softmax is applied by the loss)."""

    def __init__(self, input_size, num_classes):
        super().__init__()
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, x):
        return self.linear(x)
class MnistDataset(torch.utils.data.Dataset):
    """Binary FMNIST wrapper: raw label 1 maps to class 0, anything
    else to class 1."""

    def __init__(self, X, y):
        self.X = torch.FloatTensor(X)
        self.y = torch.LongTensor(y)

    def __len__(self):
        return len(self.y)

    def __getitem__(self, index):
        label = 0 if self.y[index] == 1 else 1
        return self.X[index], label
def setup_logger(logger_name, log_file, level=logging.INFO):
    """Configure the named logger to write to *log_file* (truncated)
    and to the console, with a timestamped message format."""
    fmt = logging.Formatter('%(asctime)s : %(message)s')
    file_handler = logging.FileHandler(log_file, mode='w')
    file_handler.setFormatter(fmt)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(fmt)
    target = logging.getLogger(logger_name)
    target.setLevel(level)
    target.addHandler(file_handler)
    target.addHandler(stream_handler)
def parse_args():
    """Command-line options for the FMNIST evaluation script."""
    cli = argparse.ArgumentParser()
    # Registration order matches the original so --help output is stable.
    for flag, kwargs in [
        ('--seed', dict(type=int, default=42)),
        ('--R', dict(type=int, default=1)),
        ('--lr', dict(type=float, default=0.001)),
        ('--bs', dict(type=int, default=64)),
        ('--n-epochs', dict(type=int, default=20)),
        ('--adv', dict(action='store_true', default=False)),
        ('--norm-type', dict(type=str, default='linf', choices=['linf', 'l2'])),
        ('--norm-scale', dict(type=float)),
    ]:
        cli.add_argument(flag, **kwargs)
    return cli.parse_args()
# Script body: locate the trained model's output folder, set up logging,
# load the held-out test split, and restore the saved weights.
args = parse_args()
folder_main = 'record_fmnist'
# The sub-folder name encodes the full training configuration; it must match
# the name the training script used, or the saved model will not be found.
if args.adv:
    folder_sub = osp.join(folder_main, f'train-adv_R-{args.R}_lr-{args.lr}_normtype-{args.norm_type}_normscale-{args.norm_scale}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
else:
    folder_sub = osp.join(folder_main, f'train-std_R-{args.R}_lr-{args.lr}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
if not osp.exists(folder_sub):
    os.makedirs(folder_sub)
task_name = 'test'
setup_logger(task_name, os.path.join(folder_sub, f'test.log'))
logger = logging.getLogger(task_name)
logger.info(f'args = {args}')
# Seed both torch and numpy for reproducibility.
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. data loading
# NOTE(review): always evaluates on the R=1 test split regardless of args.R —
# presumably intentional (a common balanced test set); confirm.
data = torch.load(osp.join('data_fmnist', f'data_R_1.pth'))
X_test, y_test = data['X_test'], data['y_test']
test_set = MnistDataset(X_test, y_test)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False)
# NOTE(review): `batch_size` appears unused — the loader above uses args.bs.
batch_size = 64
input_size = 784  # flattened 28x28 image
num_classes = 2
model = MyLR(input_size, num_classes)
model.load_state_dict(torch.load(osp.join(folder_sub, 'model.pth')))
model.cuda()  # requires a CUDA-capable GPU
model.eval()
criterion = nn.CrossEntropyLoss()
def test(data_loader):
    """Evaluate the module-level `model` on `data_loader`.

    Flattens each batch to 784 features, moves it to the GPU, and
    returns (correct, [correct_cls0, correct_cls1], total, mean_loss).
    """
    batch_losses = []
    n_correct, n_total = 0, 0
    per_class = [0, 0]
    for X, y in data_loader:
        X = X.view(-1, 784).cuda()
        y = y.cuda()
        logits = model(X)
        batch_losses.append(criterion(logits, y).item())
        preds = logits.data.max(1)[1]
        hits = preds == y
        n_total += y.size(0)
        n_correct += hits.sum().item()
        per_class[0] += (hits & (y == 0)).sum().item()
        per_class[1] += (hits & (y == 1)).sum().item()
    return n_correct, per_class, n_total, np.mean(batch_losses)
test_correct, test_correct_class, test_total, test_loss = test(test_loader)
logger.info(f'test_correct = {test_correct}, test_correct_class = {test_correct_class}, test_total = {test_total}, test_loss = {test_loss}')
| 3,685 | 28.023622 | 182 | py |
AT-on-AD | AT-on-AD-main/test_cifar_vgg.py | import argparse
import logging
import numpy as np
import os
import os.path as osp
import torch
import torch.nn as nn
# Per-architecture VGG layer specs: ints are conv output channels,
# 'M' marks a 2x2 max-pool.
cfg = {
    'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class CifarNet(nn.Module):
    """VGG-style convnet with a 2-way linear classifier head."""

    def __init__(self, vgg_name):
        super().__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Linear(512, 2)

    def forward(self, x):
        feats = self.features(x)
        return self.classifier(feats.view(feats.size(0), -1))

    def _make_layers(self, layer_cfg):
        """Build the conv stack described by *layer_cfg* (conv+BN+ReLU per
        int entry, max-pool per 'M', trailing 1x1 average pool)."""
        blocks = []
        in_ch = 3
        for spec in layer_cfg:
            if spec == 'M':
                blocks.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                blocks.extend([
                    nn.Conv2d(in_ch, spec, kernel_size=3, padding=1),
                    nn.BatchNorm2d(spec),
                    nn.ReLU(inplace=True),
                ])
                in_ch = spec
        blocks.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*blocks)
class CifarDataset(torch.utils.data.Dataset):
    """Binary CIFAR wrapper: raw label 3 maps to class 0, anything
    else to class 1."""

    def __init__(self, X, y):
        self.X = torch.FloatTensor(X)
        self.y = torch.LongTensor(y)

    def __len__(self):
        return len(self.y)

    def __getitem__(self, index):
        label = 0 if self.y[index] == 3 else 1
        return self.X[index], label
def setup_logger(logger_name, log_file, level=logging.INFO):
    """Configure the named logger to write to *log_file* (truncated)
    and to the console, with a timestamped message format."""
    fmt = logging.Formatter('%(asctime)s : %(message)s')
    file_handler = logging.FileHandler(log_file, mode='w')
    file_handler.setFormatter(fmt)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(fmt)
    target = logging.getLogger(logger_name)
    target.setLevel(level)
    target.addHandler(file_handler)
    target.addHandler(stream_handler)
def parse_args():
    """Command-line options for the CIFAR (VGG) evaluation script."""
    cli = argparse.ArgumentParser()
    # Registration order matches the original so --help output is stable.
    for flag, kwargs in [
        ('--seed', dict(type=int, default=42)),
        ('--R', dict(type=int, default=1)),
        ('--hidden', dict(type=int, default=200)),
        ('--lr', dict(type=float, default=0.001)),
        ('--bs', dict(type=int, default=64)),
        ('--n-epochs', dict(type=int, default=20)),
        ('--adv', dict(action='store_true', default=False)),
        ('--norm-type', dict(type=str, default='linf', choices=['linf', 'l2'])),
        ('--norm-scale', dict(type=float)),
    ]:
        cli.add_argument(flag, **kwargs)
    return cli.parse_args()
args = parse_args()
folder_main = 'record_cifar'
if args.adv:
folder_sub = osp.join(folder_main, f'train-adv_R-{args.R}_lr-{args.lr}_normtype-{args.norm_type}_normscale-{args.norm_scale}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}_vgg')
else:
folder_sub = osp.join(folder_main, f'train-std_R-{args.R}_lr-{args.lr}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}_vgg')
if not osp.exists(folder_sub):
os.makedirs(folder_sub)
task_name = 'test'
setup_logger(task_name, os.path.join(folder_sub, f'test.log'))
logger = logging.getLogger(task_name)
logger.info(f'args = {args}')
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. data loading
data = torch.load(osp.join('data_cifar', f'data_R_1.pth'))
X_test, y_test = data['X_test'], data['y_test']
test_set = CifarDataset(X_test, y_test)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False)
batch_size = 64
input_size = 3072
num_classes = 2
model = CifarNet(vgg_name='VGG11')
model.load_state_dict(torch.load(osp.join(folder_sub, 'model.pth')))
model.cuda()
model.eval()
criterion = nn.CrossEntropyLoss()
def test(data_loader):
    """Evaluate the module-level `model` on `data_loader` (image batches
    are moved to the GPU unflattened, for the conv network).

    Returns (correct, [correct_cls0, correct_cls1], total, mean_loss).
    """
    batch_losses = []
    n_correct, n_total = 0, 0
    per_class = [0, 0]
    for X, y in data_loader:
        X = X.cuda()
        y = y.cuda()
        logits = model(X)
        batch_losses.append(criterion(logits, y).item())
        preds = logits.data.max(1)[1]
        hits = preds == y
        n_total += y.size(0)
        n_correct += hits.sum().item()
        per_class[0] += (hits & (y == 0)).sum().item()
        per_class[1] += (hits & (y == 1)).sum().item()
    return n_correct, per_class, n_total, np.mean(batch_losses)
test_correct, test_correct_class, test_total, test_loss = test(test_loader)
logger.info(f'test_correct = {test_correct}, test_correct_class = {test_correct_class}, test_total = {test_total}, test_loss = {test_loss}')
| 4,759 | 29.709677 | 186 | py |
AT-on-AD | AT-on-AD-main/test_syn.py | import argparse
import logging
import numpy as np
import os
import os.path as osp
import torch
import torch.nn as nn
class MyLR(nn.Module):
    """Multinomial logistic regression: one linear layer from input
    features to class logits (softmax is applied by the loss)."""

    def __init__(self, input_size, num_classes):
        super().__init__()
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, x):
        return self.linear(x)
class Dataset(torch.utils.data.Dataset):
    """Thin tensor wrapper over (X, y); labels are passed through as-is."""

    def __init__(self, X, y):
        self.X = torch.FloatTensor(X)
        self.y = torch.LongTensor(y)

    def __len__(self):
        return len(self.y)

    def __getitem__(self, index):
        return self.X[index], self.y[index]
def setup_logger(logger_name, log_file, level=logging.INFO):
    """Configure the named logger to write to *log_file* (truncated)
    and to the console, with a timestamped message format."""
    fmt = logging.Formatter('%(asctime)s : %(message)s')
    file_handler = logging.FileHandler(log_file, mode='w')
    file_handler.setFormatter(fmt)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(fmt)
    target = logging.getLogger(logger_name)
    target.setLevel(level)
    target.addHandler(file_handler)
    target.addHandler(stream_handler)
def parse_args():
    """Command-line options for the synthetic-data evaluation script."""
    cli = argparse.ArgumentParser()
    # Registration order matches the original so --help output is stable.
    for flag, kwargs in [
        ('--seed', dict(type=int, default=42)),
        ('--R', dict(type=int, default=1)),
        ('--lr', dict(type=float, default=0.001)),
        ('--bs', dict(type=int, default=64)),
        ('--n-epochs', dict(type=int, default=20)),
        ('--adv', dict(action='store_true', default=False)),
        ('--norm-type', dict(type=str, default='linf', choices=['linf', 'l2'])),
        ('--data-type', dict(type=str, default='syn',
                             choices=['syn', 'cauchy', 'cauchy_2', 'levy_1.5'])),
        ('--norm-scale', dict(type=float)),
    ]:
        cli.add_argument(flag, **kwargs)
    return cli.parse_args()
args = parse_args()
if args.data_type == 'syn':
folder_main = 'record'
else:
folder_main = f'record_{args.data_type}'
if args.adv:
folder_sub = osp.join(folder_main, f'train-adv_R-{args.R}_lr-{args.lr}_normtype-{args.norm_type}_normscale-{args.norm_scale}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
else:
folder_sub = osp.join(folder_main, f'train-std_R-{args.R}_lr-{args.lr}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
if not osp.exists(folder_sub):
os.makedirs(folder_sub)
task_name = 'test'
setup_logger(task_name, os.path.join(folder_sub, f'test.log'))
logger = logging.getLogger(task_name)
logger.info(f'args = {args}')
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. data loading
data = torch.load(osp.join(f'data_{args.data_type}', f'data_R_1.pth'))
X_test, y_test = data['X_test'], data['y_test']
test_set = Dataset(X_test, y_test)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False)
batch_size = 64
input_size = 100
num_classes = 2
model = MyLR(input_size, num_classes)
model.load_state_dict(torch.load(osp.join(folder_sub, 'model.pth')))
model.cuda()
model.eval()
criterion = nn.CrossEntropyLoss()
def test(data_loader):
    """Evaluate the module-level `model` on `data_loader`.

    Returns (correct, [correct_cls0, correct_cls1], total, mean_loss).
    """
    batch_losses = []
    n_correct, n_total = 0, 0
    per_class = [0, 0]
    for X, y in data_loader:
        X = X.cuda()
        y = y.cuda()
        logits = model(X)
        batch_losses.append(criterion(logits, y).item())
        preds = logits.data.max(1)[1]
        hits = preds == y
        n_total += y.size(0)
        n_correct += hits.sum().item()
        per_class[0] += (hits & (y == 0)).sum().item()
        per_class[1] += (hits & (y == 1)).sum().item()
    return n_correct, per_class, n_total, np.mean(batch_losses)
test_correct, test_correct_class, test_total, test_loss = test(test_loader)
logger.info(f'test_correct = {test_correct}, test_correct_class = {test_correct_class}, test_total = {test_total}, test_loss = {test_loss}')
| 3,879 | 28.846154 | 182 | py |
AT-on-AD | AT-on-AD-main/train_syn.py | import argparse
import copy
import logging
import numpy as np
import os
import os.path as osp
import time
from tqdm import tqdm
import torch
import torch.nn as nn
from advertorch.attacks.one_step_gradient import GradientSignAttack, GradientAttack
from advertorch.attacks.iterative_projected_gradient import LinfPGDAttack, L2PGDAttack
class MyLR(nn.Module):
    """Multinomial logistic regression: one linear layer from input
    features to class logits (softmax is applied by the loss)."""

    def __init__(self, input_size, num_classes):
        super().__init__()
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, x):
        return self.linear(x)
class Dataset(torch.utils.data.Dataset):
    """Thin tensor wrapper over (X, y); labels are passed through as-is."""

    def __init__(self, X, y):
        self.X = torch.FloatTensor(X)
        self.y = torch.LongTensor(y)

    def __len__(self):
        return len(self.y)

    def __getitem__(self, index):
        return self.X[index], self.y[index]
def setup_logger(logger_name, log_file, level=logging.INFO):
    """Configure the named logger to write to *log_file* (truncated)
    and to the console, with a timestamped message format."""
    fmt = logging.Formatter('%(asctime)s : %(message)s')
    file_handler = logging.FileHandler(log_file, mode='w')
    file_handler.setFormatter(fmt)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(fmt)
    target = logging.getLogger(logger_name)
    target.setLevel(level)
    target.addHandler(file_handler)
    target.addHandler(stream_handler)
def parse_args():
    """Command-line options for the synthetic-data training script."""
    cli = argparse.ArgumentParser()
    # Registration order matches the original so --help output is stable.
    for flag, kwargs in [
        ('--seed', dict(type=int, default=42)),
        ('--R', dict(type=int, default=1)),
        ('--lr', dict(type=float, default=0.001)),
        ('--bs', dict(type=int, default=64)),
        ('--n-epochs', dict(type=int, default=20)),
        ('--adv', dict(action='store_true', default=False)),
        ('--norm-type', dict(type=str, default='linf', choices=['linf', 'l2'])),
        ('--data-type', dict(type=str, default='syn',
                             choices=['syn', 'cauchy', 'cauchy_2', 'levy_1.5'])),
        ('--norm-scale', dict(type=float)),
    ]:
        cli.add_argument(flag, **kwargs)
    return cli.parse_args()
# TODO:
# 5. may add support for lr scheduler
# 1. set up logging
args = parse_args()
if args.data_type == 'syn':
folder_main = 'record'
else:
folder_main = f'record_{args.data_type}'
if args.adv:
folder_sub = osp.join(folder_main, f'train-adv_R-{args.R}_lr-{args.lr}_normtype-{args.norm_type}_normscale-{args.norm_scale}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
else:
folder_sub = osp.join(folder_main, f'train-std_R-{args.R}_lr-{args.lr}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
if not osp.exists(folder_sub):
os.makedirs(folder_sub)
task_name = 'train'
setup_logger(task_name, os.path.join(folder_sub, f'train.log'))
logger = logging.getLogger(task_name)
logger.info(f'args = {args}')
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. data loading
data = torch.load(osp.join(f'data_{args.data_type}', f'data_R_{args.R}.pth'))
X_train, X_val, X_test, y_train, y_val, y_test = data['X_train'], data['X_valid'], data['X_test'], data['y_train'], data['y_valid'], data['y_test']
train_set = Dataset(X_train, y_train)
val_set = Dataset(X_val, y_val)
test_set = Dataset(X_test, y_test)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.bs, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.bs, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False)
batch_size = 64
input_size = 100
num_classes = 2
model = MyLR(input_size, num_classes).cuda()
criterion = nn.CrossEntropyLoss()
# optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
losses = []
def test(data_loader):
    """Evaluate the module-level `model` on `data_loader`.

    Returns (correct, [correct_cls0, correct_cls1], total, mean_loss).
    """
    batch_losses = []
    n_correct, n_total = 0, 0
    per_class = [0, 0]
    for X, y in data_loader:
        X = X.cuda()
        y = y.cuda()
        logits = model(X)
        batch_losses.append(criterion(logits, y).item())
        preds = logits.data.max(1)[1]
        hits = preds == y
        n_total += y.size(0)
        n_correct += hits.sum().item()
        per_class[0] += (hits & (y == 0)).sum().item()
        per_class[1] += (hits & (y == 1)).sum().item()
    return n_correct, per_class, n_total, np.mean(batch_losses)
# Adversarial-training setup: both attacks are single-step gradient attacks
# from advertorch (FGSM for l-inf, normalized-gradient step for l2); no
# clipping is applied since the features are not pixel-bounded.
if args.adv:
    if args.norm_type == 'linf':
        adversary = GradientSignAttack(model, loss_fn=criterion, eps=args.norm_scale, clip_min=None, clip_max=None)
    elif args.norm_type == 'l2':
        adversary = GradientAttack(model, loss_fn=criterion, eps=args.norm_scale, clip_min=None, clip_max=None)
# Track the best model by validation loss (early stopping, patience 50).
best_val_loss = 1e10
best_model = None
t = time.time()
for epoch in (pbar := tqdm(range(args.n_epochs))):
    # 1. training
    model.train()
    for i, (X, y) in enumerate(train_loader):
        X = X.cuda()
        y = y.cuda()
        optimizer.zero_grad()
        if args.adv:
            # Perturb the batch against the current model, then train on it.
            X_adv = adversary.perturb(X, y)
            output = model(X_adv)
        else:
            output = model(X)
        loss = criterion(output, y)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
        # if (i+1) % 50 == 0:
        #     print ('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f'
        #            % (epoch+1, args.n_epochs, i+1, len(train_set)//batch_size, loss.item()))
    # 2. validation
    model.eval()
    val_correct, val_correct_class, val_total, val_loss = test(val_loader)
    pbar.set_description(f"val_loss = {val_loss : .4f}, val_acc = {val_correct / val_total : .4f}")
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        best_model = copy.deepcopy(model)
        best_epoch = epoch
    # NOTE(review): best_epoch is only bound inside the branch above; this is
    # safe because best_val_loss starts at 1e10, so epoch 0 always improves.
    if epoch - best_epoch == 50:
        break
logger.info(f'training finishes using {time.time() - t} seconds!')
# Restore the best checkpoint and log final metrics on both splits.
model = copy.deepcopy(best_model)
val_correct, val_correct_class, val_total, val_loss = test(val_loader)
logger.info(f'val_correct = {val_correct}, val_correct_class = {val_correct_class}, val_total = {val_total}, val_loss = {val_loss}')
test_correct, test_correct_class, test_total, test_loss = test(test_loader)
logger.info(f'test_correct = {test_correct}, test_correct_class = {test_correct_class}, test_total = {test_total}, test_loss = {test_loss}')
# Move to CPU before serializing so the checkpoint loads on CPU-only hosts.
model.cpu()
model_path = osp.join(folder_sub, 'model.pth')
torch.save(model.state_dict(), model_path)
torch.save(losses, osp.join(folder_sub, 'train_losses.pth'))
logger.info(f'model at epoch {best_epoch} saved to {model_path}')
logger.info(f'model at epoch {best_epoch} saved to {model_path}') | 6,544 | 29.584112 | 182 | py |
AT-on-AD | AT-on-AD-main/process_data.py | import argparse
import numpy as np
import os
import os.path as osp
import torch
def parse_args():
    """Select which dataset's experiment logs to aggregate."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--dataset', type=str, default='syn',
                     choices=['syn', 'mnist', 'cifar', 'fmnist', 'cauchy', 'cauchy_2', 'levy_1.5'])
    return cli.parse_args()
def parse(s, mode='val'):
    """Extract metrics from one evaluation log line.

    The line is expected to look like
    ``{mode}_correct = A, {mode}_correct_class = [B, C], {mode}_total = D, {mode}_loss = E``.
    Returns ``((A, B, C, D), E)``.
    """
    keys = [f'{mode}_correct = ', f'{mode}_correct_class = [',
            f'{mode}_total = ', f'{mode}_loss = ']
    pos = [s.find(k) for k in keys]
    correct = int(s[pos[0] + len(keys[0]): pos[1] - 2])
    cls_parts = s[pos[1] + len(keys[1]): pos[2] - 3].split(',')
    cls0 = int(cls_parts[0])
    cls1 = int(cls_parts[1])
    total = int(s[pos[2] + len(keys[2]): pos[3] - 2])
    loss = float(s[pos[3] + len(keys[3]):])
    return (correct, cls0, cls1, total), loss
epoch = 500
bs = 64
R_list = [1,2,5,10]
seed_list = [2,22,42,62,82]
lr_std_list = [0.1, 0.05, 0.01, 0.005, 0.001,0.0005,0.0002,0.0001]
lr_adv_list = [0.1, 0.05, 0.01, 0.005, 0.001,0.0005,0.0002,0.0001]
total_imb_class = np.asarray([[1000,1000],[1000,500],[1000,200],[1000,100]])
total_b_class = np.asarray([[1000,1000],[1000,1000],[1000,1000],[1000,1000]])
total_all = np.asarray([2000, 1500, 1200, 1100])
normtype_list = ['l2', 'linf']
args = parse_args()
if args.dataset == 'syn':
suffix = ''
norm_scale_list = {'l2': [5.0, 3.75, 2.5], 'linf': [1.0, 0.75, 0.5]} # synthetic dataset
elif args.dataset == 'cauchy':
suffix = '_cauchy'
norm_scale_list = {'l2': [23.425, 17.5688, 11.7125], 'linf': [14.403, 10.8023, 7.2015]} # synthetic dataset
elif args.dataset == 'cauchy_2':
suffix = '_cauchy_2'
norm_scale_list = {'l2': [70.0, 52.5, 35.0], 'linf': [53.0, 39.75, 26.5]}
elif args.dataset == 'levy_1.5':
suffix = '_levy_1.5'
norm_scale_list = {'l2': [2.94, 2.205, 1.47], 'linf': [0.555, 0.4163, 0.2775]}
elif args.dataset == 'mnist':
suffix = '_mnist'
norm_scale_list = {'l2': [2.6956,2.0217,1.3478], 'linf': [0.4341, 0.3256, 0.2171]} #
elif args.dataset == 'cifar':
suffix = '_cifar'
# norm_scale_list = {'l2': [0.7822, 0.5867, 0.3911, ], 'linf': [ 0.0305, 0.0228, 0.0152, ]}
# norm_scale_list = {'l2': [2.3468, 1.7601, 1.1734, ], 'linf': [ 0.0305, 0.0228, 0.0152, ]}
# norm_scale_list = {'l2': [1.7601, 1.1734, 0.7822], 'linf': [ 0.0305, 0.0228, 0.0152, ]}
# norm_scale_list = {'l2': [1.1734, 0.9778, 0.7822], 'linf': [ 0.0305, 0.0228, 0.0152, ]}
norm_scale_list = {'l2': [0.9778, 0.8800, 0.7822], 'linf': [ 0.0305, 0.0228, 0.0152, ]}
lr_adv_list = [0.005, 0.001,0.0005,0.0002,0.0001]
elif args.dataset == 'fmnist':
suffix = '_fmnist'
norm_scale_list = {'l2': [3.2306,2.4230,1.6153], 'linf': [0.2889,0.2166,0.1444]}
total_imb_class = np.asarray([[1000,1000],[500,1000],[200,1000],[100,1000]])
else:
raise NotImplementedError(f'dataset = {args.dataset} not implemented!')
# 1. collect raw data for std training
len_lr_std = len(lr_std_list)
len_lr_adv = len(lr_adv_list)
val_result = np.zeros((4,5,len_lr_std,4), dtype=np.int64)
test_imb_result = np.zeros((4,5,len_lr_std,4), dtype=np.int64)
test_b_result = np.zeros((4,5,len_lr_std,4), dtype=np.int64)
val_loss = np.zeros((4,5,len_lr_std))
test_imb_loss = np.zeros((4,5,len_lr_std))
test_b_loss = np.zeros((4,5,len_lr_std))
for i, R in enumerate(R_list):
for k, seed in enumerate(seed_list):
for j, lr in enumerate(lr_std_list):
file_name = osp.join(osp.join(f'record{suffix}', f'train-std_R-{R}_lr-{lr}_epoch-{epoch}_bs-{bs}_seed-{seed}'), 'train.log')
print(i,j,k, file_name)
with open(file_name) as f:
lines = f.readlines()
r1, r2 = parse(lines[-3], 'val')
val_result[i][k][j] = r1
val_loss[i][k][j] = r2
r1, r2 = parse(lines[-2], 'test')
test_imb_result[i][k][j] = r1
test_imb_loss[i][k][j] = r2
file_name = osp.join(osp.join(f'record{suffix}', f'train-std_R-{R}_lr-{lr}_epoch-{epoch}_bs-{bs}_seed-{seed}'), 'test.log')
with open(file_name) as f:
lines = f.readlines()
r1, r2 = parse(lines[-1], 'test')
test_b_result[i][k][j] = r1
test_b_loss[i][k][j] = r2
indmin = np.argmin(val_loss, axis=-1)
def get_std_accs(indmin, test_result, total_class):
    """For each (R, seed) cell pick the learning rate with the lowest
    validation loss (`indmin`) and compute test accuracies.

    Returns (per-class accuracy, per-class correct counts, overall
    accuracy), each indexed [R_idx, seed_idx, ...].  Relies on the
    module-level `total_all` array for overall test-set sizes.
    """
    per_class_acc = np.zeros((4, 5, 2))
    per_class_correct = np.zeros((4, 5, 2), dtype=np.int64)
    overall_acc = np.zeros((4, 5))
    for r_idx in range(4):
        for seed_idx in range(5):
            best = indmin[r_idx][seed_idx]
            row = test_result[r_idx][seed_idx][best]
            print(row)
            print(row[1:-1] / total_class[r_idx])
            print(row[0] / total_all[r_idx])
            per_class_correct[r_idx][seed_idx] = row[1:-1]
            per_class_acc[r_idx][seed_idx] = row[1:-1] / total_class[r_idx]
            overall_acc[r_idx][seed_idx] = row[0] / total_all[r_idx]
    return per_class_acc, per_class_correct, overall_acc
acc_imb_class, correct_imb_class, acc_imb = get_std_accs(indmin, test_imb_result, total_imb_class)
acc_b_class, correct_b_class, acc_b = get_std_accs(indmin, test_b_result, total_b_class)
# 2. adversarial training
adv_val_result = np.zeros((2,3,4,5,len_lr_adv,4), dtype=np.int64)
adv_test_imb_result = np.zeros((2,3,4,5,len_lr_adv,4), dtype=np.int64)
adv_test_b_result = np.zeros((2,3,4,5,len_lr_adv,4), dtype=np.int64)
adv_val_loss = np.zeros((2,3,4,5,len_lr_adv))
adv_test_imb_loss = np.zeros((2,3,4,5,len_lr_adv))
adv_test_b_loss = np.zeros((2,3,4,5,len_lr_adv))
for k, normtype in enumerate(normtype_list):
for l, normscale in enumerate(norm_scale_list[normtype]):
for i, R in enumerate(R_list):
for p, seed in enumerate(seed_list):
for j, lr in enumerate(lr_adv_list):
print(k,l,i,p,j)
file_name = osp.join(osp.join(f'record{suffix}', f'train-adv_R-{R}_lr-{lr}_normtype-{normtype}_normscale-{normscale}_epoch-{epoch}_bs-{bs}_seed-{seed}'), 'train.log')
print(file_name)
with open(file_name) as f:
lines = f.readlines()
r1, r2 = parse(lines[-3], 'val')
adv_val_result[k][l][i][p][j] = r1
adv_val_loss[k][l][i][p][j] = r2
r1, r2 = parse(lines[-2], 'test')
adv_test_imb_result[k][l][i][p][j] = r1
adv_test_imb_loss[k][l][i][p][j] = r2
file_name = osp.join(osp.join(f'record{suffix}', f'train-adv_R-{R}_lr-{lr}_normtype-{normtype}_normscale-{normscale}_epoch-{epoch}_bs-{bs}_seed-{seed}'), 'test.log')
print(file_name)
with open(file_name) as f:
lines = f.readlines()
r1, r2 = parse(lines[-1], 'test')
adv_test_b_result[k][l][i][p][j] = r1
adv_test_b_loss[k][l][i][p][j] = r2
indmin_adv = np.argmin(adv_val_loss, axis=-1)
def get_adv_accs(indmin_adv, adv_test_result, total_class):
    """Adversarial-training counterpart of ``get_std_accs``: pick the
    best learning rate per (norm-type, norm-scale, R, seed) cell and
    compute test accuracies.

    Returns (per-class accuracy, per-class correct counts, overall
    accuracy), each indexed [norm_idx, scale_idx, R_idx, seed_idx, ...].
    Relies on the module-level `total_all` array for test-set sizes.
    """
    per_class_acc = np.zeros((2, 3, 4, 5, 2))
    per_class_correct = np.zeros((2, 3, 4, 5, 2), dtype=np.int64)
    overall_acc = np.zeros((2, 3, 4, 5))
    for norm_idx in range(2):
        for scale_idx in range(3):
            for r_idx in range(4):
                for seed_idx in range(5):
                    best = indmin_adv[norm_idx][scale_idx][r_idx][seed_idx]
                    row = adv_test_result[norm_idx][scale_idx][r_idx][seed_idx][best]
                    print(row)
                    print(row[1:-1] / total_class[r_idx])
                    print(row[0] / total_all[r_idx])
                    per_class_correct[norm_idx][scale_idx][r_idx][seed_idx] = row[1:-1]
                    per_class_acc[norm_idx][scale_idx][r_idx][seed_idx] = row[1:-1] / total_class[r_idx]
                    overall_acc[norm_idx][scale_idx][r_idx][seed_idx] = row[0] / total_all[r_idx]
    return per_class_acc, per_class_correct, overall_acc
adv_acc_imb_class, adv_correct_imb_class, adv_acc_imb = get_adv_accs(indmin_adv, adv_test_imb_result, total_imb_class)
adv_acc_b_class, adv_correct_b_class, adv_acc_b = get_adv_accs(indmin_adv, adv_test_b_result, total_b_class)
# 3. compute AD
ad_class = acc_b_class[:,:,0] - acc_b_class[:,:,1]
adv_ad_class = adv_acc_b_class[:,:,:,:,0] - adv_acc_b_class[:,:,:,:,1]
if args.dataset == 'fmnist':
ad_class = -ad_class
adv_ad_class = -adv_ad_class
# diff = np.abs(ad_class - adv_ad_class)
diff = adv_ad_class - ad_class
diff_ave = np.mean(diff, axis=-1)
diff_std = np.std(diff, axis=-1) / np.sqrt(5)
print('diff_ave', diff_ave)
print('diff_std', diff_std)
# 4. compute ACC diff
diff_acc = acc_imb - adv_acc_imb
diff_acc_ave = np.mean(diff_acc, axis=-1)
diff_acc_std = np.std(diff_acc, axis=-1) / np.sqrt(5)
# 4. write to disk
write_dir = f'result{suffix}'
if not osp.exists(write_dir):
os.makedirs(write_dir)
write_path = osp.join(write_dir, 'all_result.pth')
torch.save({
'val_result': val_result,
'val_loss': val_loss,
'test_imb_result': test_imb_result,
'test_b_result': test_b_result,
'test_imb_loss': test_imb_loss,
'test_b_loss': test_b_loss,
'adv_val_result': adv_val_result,
'adv_val_loss': adv_val_loss,
'adv_test_imb_result': adv_test_imb_result,
'adv_test_b_result': adv_test_b_result,
'adv_test_imb_loss': adv_test_imb_loss,
'adv_test_b_loss': adv_test_b_loss,
'correct_imb_class': correct_imb_class,
'correct_b_class': correct_b_class,
'acc_imb_class': acc_imb_class,
'acc_b_class': acc_b_class,
'acc_imb': acc_imb,
'acc_b': acc_b,
'ad_class': ad_class,
'adv_correct_imb_class': adv_correct_imb_class,
'adv_correct_b_class': adv_correct_b_class,
'adv_acc_imb_class': adv_acc_imb_class,
'adv_acc_b_class': adv_acc_b_class,
'adv_acc_imb': adv_acc_imb,
'adv_acc_b': adv_acc_b,
'adv_ad_class': adv_ad_class,
'indmin': indmin,
'indmin_adv': indmin_adv,
'diff': diff,
'diff_ave': diff_ave,
'diff_std': diff_std,
'diff_acc': diff_acc,
'diff_acc_ave': diff_acc_ave,
'diff_acc_std': diff_acc_std,
}, write_path)
| 10,126 | 33.56314 | 186 | py |
AT-on-AD | AT-on-AD-main/test_cifar.py | import argparse
import logging
import numpy as np
import os
import os.path as osp
import torch
import torch.nn as nn
class CifarLR(nn.Module):
    """Two-layer MLP head for flattened CIFAR images:
    Linear -> ReLU -> Linear."""

    def __init__(self, input_size, hidden=200, num_classes=2):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden, num_classes)

    def forward(self, x):
        return self.fc2(self.relu(self.fc1(x)))
class CifarDataset(torch.utils.data.Dataset):
    """Binary CIFAR wrapper: raw label 3 maps to class 0, anything
    else to class 1."""

    def __init__(self, X, y):
        self.X = torch.FloatTensor(X)
        self.y = torch.LongTensor(y)

    def __len__(self):
        return len(self.y)

    def __getitem__(self, index):
        label = 0 if self.y[index] == 3 else 1
        return self.X[index], label
def setup_logger(logger_name, log_file, level=logging.INFO):
    """Configure the named logger to write to *log_file* (truncated)
    and to the console, with a timestamped message format."""
    fmt = logging.Formatter('%(asctime)s : %(message)s')
    file_handler = logging.FileHandler(log_file, mode='w')
    file_handler.setFormatter(fmt)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(fmt)
    target = logging.getLogger(logger_name)
    target.setLevel(level)
    target.addHandler(file_handler)
    target.addHandler(stream_handler)
def parse_args():
    """Command-line options for the CIFAR (MLP) evaluation script."""
    cli = argparse.ArgumentParser()
    # Registration order matches the original so --help output is stable.
    for flag, kwargs in [
        ('--seed', dict(type=int, default=42)),
        ('--R', dict(type=int, default=1)),
        ('--hidden', dict(type=int, default=200)),
        ('--lr', dict(type=float, default=0.001)),
        ('--bs', dict(type=int, default=64)),
        ('--n-epochs', dict(type=int, default=20)),
        ('--adv', dict(action='store_true', default=False)),
        ('--norm-type', dict(type=str, default='linf', choices=['linf', 'l2'])),
        ('--norm-scale', dict(type=float)),
    ]:
        cli.add_argument(flag, **kwargs)
    return cli.parse_args()
args = parse_args()
folder_main = 'record_cifar'
if args.adv:
folder_sub = osp.join(folder_main, f'train-adv_R-{args.R}_lr-{args.lr}_normtype-{args.norm_type}_normscale-{args.norm_scale}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
else:
folder_sub = osp.join(folder_main, f'train-std_R-{args.R}_lr-{args.lr}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
if not osp.exists(folder_sub):
os.makedirs(folder_sub)
task_name = 'test'
setup_logger(task_name, os.path.join(folder_sub, f'test.log'))
logger = logging.getLogger(task_name)
logger.info(f'args = {args}')
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. data loading
data = torch.load(osp.join('data_cifar', f'data_R_1.pth'))
X_test, y_test = data['X_test'], data['y_test']
test_set = CifarDataset(X_test, y_test)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False)
batch_size = 64
input_size = 3072
num_classes = 2
model = CifarLR(input_size, hidden=args.hidden, num_classes=num_classes)
model.load_state_dict(torch.load(osp.join(folder_sub, 'model.pth')))
model.cuda()
model.eval()
criterion = nn.CrossEntropyLoss()
def test(data_loader):
    """Evaluate the module-level ``model`` on *data_loader*.

    Uses the module-level ``criterion`` for the loss and assumes CUDA.

    Returns
    -------
    tuple
        ``(correct, correct_class, total, mean_loss)`` -- overall correct
        count, per-class correct counts ``[class0, class1]``, number of
        examples seen, and the mean per-batch loss.
    """
    eval_losses = []
    correct = 0
    correct_class = [0, 0]
    total = 0
    # Fix: wrap evaluation in no_grad so autograd graphs are not built for
    # every forward pass (saves memory; the returned metrics are unchanged).
    with torch.no_grad():
        for X, y in data_loader:
            X = X.view(-1, 3072).cuda()  # flatten to 3072 = 3*32*32 features
            y = y.cuda()
            output = model(X)
            loss = criterion(output, y)
            _, predicted = torch.max(output.data, 1)
            eval_losses.append(loss.item())
            total += y.size(0)
            correct += (predicted == y).sum().item()
            correct_class[0] += torch.logical_and(predicted == y, y == 0).sum().item()
            correct_class[1] += torch.logical_and(predicted == y, y == 1).sum().item()
    return correct, correct_class, total, np.mean(eval_losses)
# Final evaluation on the held-out test split, logged to test.log.
test_correct, test_correct_class, test_total, test_loss = test(test_loader)
logger.info(f'test_correct = {test_correct}, test_correct_class = {test_correct_class}, test_total = {test_total}, test_loss = {test_loss}')
| 3,914 | 28.43609 | 182 | py |
AT-on-AD | AT-on-AD-main/test_mnist.py | import argparse
import logging
import numpy as np
import os
import os.path as osp
import torch
import torch.nn as nn
class MyLR(nn.Module):
    """Logistic-regression classifier: a single affine map to class logits."""

    def __init__(self, input_size, num_classes):
        super(MyLR, self).__init__()
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, x):
        # One linear layer; softmax is applied by the loss, not here.
        return self.linear(x)
class MnistDataset(torch.utils.data.Dataset):
    """Binary MNIST wrapper: original label 1 maps to class 0, all others to 1."""

    def __init__(self, X, y):
        self.X = torch.FloatTensor(X)
        self.y = torch.LongTensor(y)

    def __len__(self):
        return self.y.shape[0]

    def __getitem__(self, index):
        # Binarize on the fly instead of storing remapped labels.
        label = 0 if self.y[index] == 1 else 1
        return self.X[index], label
def setup_logger(logger_name, log_file, level=logging.INFO):
    """Configure *logger_name* to log to *log_file* (truncated) and to stderr."""
    log = logging.getLogger(logger_name)
    fmt = logging.Formatter('%(asctime)s : %(message)s')
    # One file handler (mode='w' truncates any previous log) plus a
    # StreamHandler (defaults to stderr), both with the same format.
    for handler in (logging.FileHandler(log_file, mode='w'),
                    logging.StreamHandler()):
        handler.setFormatter(fmt)
        log.addHandler(handler)
    log.setLevel(level)
def parse_args(argv=None):
    """Parse command-line options for the MNIST test script.

    Parameters
    ----------
    argv : list[str] | None
        Argument list to parse. Defaults to ``sys.argv[1:]`` when None,
        preserving the original call-site behavior while allowing tests
        to pass an explicit list.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=42)  # RNG seed
    parser.add_argument('--R', type=int, default=1)  # dataset variant index
    parser.add_argument('--lr', type=float, default=0.001)  # learning rate
    parser.add_argument('--bs', type=int, default=64)  # batch size
    parser.add_argument('--n-epochs', type=int, default=20)
    parser.add_argument('--adv', action='store_true', default=False)  # adversarial mode
    parser.add_argument('--norm-type', type=str, default='linf', choices=['linf', 'l2'])
    parser.add_argument('--norm-scale', type=float)  # perturbation budget (eps)
    return parser.parse_args(argv)
args = parse_args()
folder_main = 'record_mnist'
# The run-folder name encodes every hyper-parameter, so this test script
# resolves to the same directory the matching training run wrote into.
if args.adv:
    folder_sub = osp.join(folder_main, f'train-adv_R-{args.R}_lr-{args.lr}_normtype-{args.norm_type}_normscale-{args.norm_scale}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
else:
    folder_sub = osp.join(folder_main, f'train-std_R-{args.R}_lr-{args.lr}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
if not osp.exists(folder_sub):
    os.makedirs(folder_sub)
task_name = 'test'
setup_logger(task_name, os.path.join(folder_sub, f'test.log'))
logger = logging.getLogger(task_name)
logger.info(f'args = {args}')
# Seed both torch and numpy for reproducibility.
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. data loading
# NOTE(review): the test split is loaded from data_R_1.pth regardless of
# args.R -- presumably the test set is shared across R values; confirm.
data = torch.load(osp.join('data_mnist', f'data_R_1.pth'))
X_test, y_test = data['X_test'], data['y_test']
test_set = MnistDataset(X_test, y_test)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False)
batch_size = 64
input_size = 784
num_classes = 2
# Rebuild the training-time architecture and load the saved weights.
model = MyLR(input_size, num_classes)
model.load_state_dict(torch.load(osp.join(folder_sub, 'model.pth')))
model.cuda()
model.eval()
criterion = nn.CrossEntropyLoss()
def test(data_loader):
    """Evaluate the module-level ``model`` on *data_loader*.

    Uses the module-level ``criterion`` for the loss and assumes CUDA.

    Returns
    -------
    tuple
        ``(correct, correct_class, total, mean_loss)`` -- overall correct
        count, per-class correct counts ``[class0, class1]``, number of
        examples seen, and the mean per-batch loss.
    """
    eval_losses = []
    correct = 0
    correct_class = [0, 0]
    total = 0
    # Fix: wrap evaluation in no_grad so autograd graphs are not built for
    # every forward pass (saves memory; the returned metrics are unchanged).
    with torch.no_grad():
        for X, y in data_loader:
            X = X.view(-1, 784).cuda()  # flatten to 784 features per image
            y = y.cuda()
            output = model(X)
            loss = criterion(output, y)
            _, predicted = torch.max(output.data, 1)
            eval_losses.append(loss.item())
            total += y.size(0)
            correct += (predicted == y).sum().item()
            correct_class[0] += torch.logical_and(predicted == y, y == 0).sum().item()
            correct_class[1] += torch.logical_and(predicted == y, y == 1).sum().item()
    return correct, correct_class, total, np.mean(eval_losses)
# Final evaluation on the held-out test split, logged to test.log.
test_correct, test_correct_class, test_total, test_loss = test(test_loader)
logger.info(f'test_correct = {test_correct}, test_correct_class = {test_correct_class}, test_total = {test_total}, test_loss = {test_loss}')
| 3,683 | 28.007874 | 182 | py |
AT-on-AD | AT-on-AD-main/train_fmnist.py | import argparse
import copy
import logging
import numpy as np
import os
import os.path as osp
import time
from tqdm import tqdm
import torch
import torch.nn as nn
from torchvision import transforms
from advertorch.attacks.one_step_gradient import GradientSignAttack, GradientAttack
from advertorch.attacks.iterative_projected_gradient import LinfPGDAttack, L2PGDAttack
class MyLR(nn.Module):
    """Logistic-regression classifier: a single affine map to class logits."""

    def __init__(self, input_size, num_classes):
        super(MyLR, self).__init__()
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, x):
        # One linear layer; softmax is applied by the loss, not here.
        return self.linear(x)
class MnistDataset(torch.utils.data.Dataset):
    """Binary MNIST wrapper: original label 1 maps to class 0, all others to 1."""

    def __init__(self, X, y):
        self.X = torch.FloatTensor(X)
        self.y = torch.LongTensor(y)

    def __len__(self):
        return self.y.shape[0]

    def __getitem__(self, index):
        # Binarize on the fly instead of storing remapped labels.
        label = 0 if self.y[index] == 1 else 1
        return self.X[index], label
def setup_logger(logger_name, log_file, level=logging.INFO):
    """Configure *logger_name* to log to *log_file* (truncated) and to stderr."""
    log = logging.getLogger(logger_name)
    fmt = logging.Formatter('%(asctime)s : %(message)s')
    # One file handler (mode='w' truncates any previous log) plus a
    # StreamHandler (defaults to stderr), both with the same format.
    for handler in (logging.FileHandler(log_file, mode='w'),
                    logging.StreamHandler()):
        handler.setFormatter(fmt)
        log.addHandler(handler)
    log.setLevel(level)
def parse_args(argv=None):
    """Parse command-line options for Fashion-MNIST training.

    Parameters
    ----------
    argv : list[str] | None
        Argument list to parse. Defaults to ``sys.argv[1:]`` when None,
        preserving the original call-site behavior while allowing tests
        to pass an explicit list.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=42)  # RNG seed
    parser.add_argument('--R', type=int, default=1)  # dataset variant index
    parser.add_argument('--lr', type=float, default=0.001)  # learning rate
    parser.add_argument('--bs', type=int, default=64)  # batch size
    parser.add_argument('--n-epochs', type=int, default=20)
    parser.add_argument('--adv', action='store_true', default=False)  # adversarial training
    parser.add_argument('--norm-type', type=str, default='linf', choices=['linf', 'l2'])
    parser.add_argument('--norm-scale', type=float)  # perturbation budget (eps)
    return parser.parse_args(argv)
# TODO:
# 1. change attack to PGD
# 1. set up logging
args = parse_args()
folder_main = 'record_fmnist'
# The run-folder name encodes every hyper-parameter of this run.
if args.adv:
    folder_sub = osp.join(folder_main, f'train-adv_R-{args.R}_lr-{args.lr}_normtype-{args.norm_type}_normscale-{args.norm_scale}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
else:
    folder_sub = osp.join(folder_main, f'train-std_R-{args.R}_lr-{args.lr}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
if not osp.exists(folder_sub):
    os.makedirs(folder_sub)
task_name = 'train'
setup_logger(task_name, os.path.join(folder_sub, f'train.log'))
logger = logging.getLogger(task_name)
logger.info(f'args = {args}')
# Seed both torch and numpy for reproducibility.
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. data loading
data = torch.load(osp.join('data_fmnist', f'data_R_{args.R}.pth'))
X_train, X_val, X_test, y_train, y_val, y_test = data['X_train'], data['X_valid'], data['X_test'], data['y_train'], data['y_valid'], data['y_test']
train_set = MnistDataset(X_train, y_train)
val_set = MnistDataset(X_val, y_val)
test_set = MnistDataset(X_test, y_test)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.bs, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.bs, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False)
batch_size = 64
input_size = 784
num_classes = 2
# Plain logistic-regression model trained with vanilla SGD.
model = MyLR(input_size, num_classes).cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
losses = []
def test(data_loader):
    """Evaluate the module-level ``model`` on *data_loader*.

    Uses the module-level ``criterion`` for the loss and assumes CUDA.

    Returns
    -------
    tuple
        ``(correct, correct_class, total, mean_loss)`` -- overall correct
        count, per-class correct counts ``[class0, class1]``, number of
        examples seen, and the mean per-batch loss.
    """
    eval_losses = []
    correct = 0
    correct_class = [0, 0]
    total = 0
    # Fix: wrap evaluation in no_grad so autograd graphs are not built for
    # every forward pass (saves memory; the returned metrics are unchanged).
    with torch.no_grad():
        for X, y in data_loader:
            X = X.view(-1, 784).cuda()  # flatten to 784 features per image
            y = y.cuda()
            output = model(X)
            loss = criterion(output, y)
            _, predicted = torch.max(output.data, 1)
            eval_losses.append(loss.item())
            total += y.size(0)
            correct += (predicted == y).sum().item()
            correct_class[0] += torch.logical_and(predicted == y, y == 0).sum().item()
            correct_class[1] += torch.logical_and(predicted == y, y == 1).sum().item()
    return correct, correct_class, total, np.mean(eval_losses)
# One-step (fast-gradient) adversary used to craft training-time
# adversarial examples when --adv is set.
if args.adv:
    if args.norm_type == 'linf':
        adversary = GradientSignAttack(model, loss_fn=criterion, eps=args.norm_scale, clip_min=0.0, clip_max=1.0)
    elif args.norm_type == 'l2':
        adversary = GradientAttack(model, loss_fn=criterion, eps=args.norm_scale, clip_min=0.0, clip_max=1.0)
# Alternative multi-step PGD adversary, kept for reference (see TODO above).
# if args.adv:
#     if args.norm_type == 'linf':
#         adversary = LinfPGDAttack(
#             model, loss_fn=criterion, eps=args.norm_scale,
#             nb_iter=40, eps_iter=args.norm_scale/40*4/3, rand_init=True, clip_min=0.0, clip_max=1.0,
#             targeted=False)
#     elif args.norm_type == 'l2':
#         adversary = L2PGDAttack(
#             model, loss_fn=criterion, eps=args.norm_scale,
#             nb_iter=50, eps_iter=args.norm_scale/50*5/4, rand_init=True, clip_min=0.0, clip_max=1.0,
#             targeted=False)
# Early-stopping bookkeeping: track the checkpoint with the lowest
# validation loss seen so far.
best_val_loss = 1e10
best_model = None
best_epoch = -1
t = time.time()
# Main training loop; when --adv is set each batch is replaced by its
# adversarially perturbed version before the forward pass.
for epoch in (pbar := tqdm(range(args.n_epochs))):
    # 1. training
    model.train()
    for i, (X, y) in enumerate(train_loader):
        X = X.view(-1, 784).cuda()
        y = y.cuda()
        optimizer.zero_grad()
        if args.adv:
            # Train on adversarial examples crafted from the current batch.
            X_adv = adversary.perturb(X, y)
            output = model(X_adv)
        else:
            output = model(X)
        loss = criterion(output, y)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
        # if (i+1) % 50 == 0:
        #     print ('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f'
        #            % (epoch+1, args.n_epochs, i+1, len(train_set)//batch_size, loss.item()))
    # 2. validation
    model.eval()
    val_correct, val_correct_class, val_total, val_loss = test(val_loader)
    pbar.set_description(f"val_loss = {val_loss : .4f}, val_acc = {val_correct / val_total : .4f}")
    # Keep a deep copy of the best-so-far model for early stopping.
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        best_model = copy.deepcopy(model)
        best_epoch = epoch
    # Stop after 50 epochs with no validation improvement
    # (only reachable when n_epochs > 50).
    if epoch - best_epoch == 50:
        break
logger.info(f'training finishes using {time.time() - t} seconds!')
# Restore the best checkpoint and report final metrics.
model = copy.deepcopy(best_model)
val_correct, val_correct_class, val_total, val_loss = test(val_loader)
logger.info(f'val_correct = {val_correct}, val_correct_class = {val_correct_class}, val_total = {val_total}, val_loss = {val_loss}')
test_correct, test_correct_class, test_total, test_loss = test(test_loader)
logger.info(f'test_correct = {test_correct}, test_correct_class = {test_correct_class}, test_total = {test_total}, test_loss = {test_loss}')
# Move to CPU before serializing so the checkpoint loads without a GPU.
model.cpu()
model_path = osp.join(folder_sub, 'model.pth')
torch.save(model.state_dict(), model_path)
torch.save(losses, osp.join(folder_sub, 'train_losses.pth'))
logger.info(f'model at epoch {best_epoch} saved to {model_path}') | 6,889 | 30.176471 | 182 | py |
AT-on-AD | AT-on-AD-main/train_cifar_vgg.py | import argparse
import copy
import logging
import numpy as np
import os
import os.path as osp
import time
from tqdm import tqdm
import torch
import torch.nn as nn
from torchvision import transforms
from advertorch.attacks.one_step_gradient import GradientSignAttack, GradientAttack
from advertorch.attacks.iterative_projected_gradient import LinfPGDAttack, L2PGDAttack
# VGG layer specifications: integer entries are conv output channel counts,
# 'M' marks a 2x2 max-pool (spatial downsample by 2).
cfg = {
    'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class CifarNet(nn.Module):
    """VGG-style convolutional feature extractor with a 2-way linear head.

    The architecture is selected by *vgg_name*, a key into the module-level
    ``cfg`` dict of layer specifications.
    """

    def __init__(self, vgg_name):
        super(CifarNet, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Linear(512, 2)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)  # flatten all non-batch dims
        return self.classifier(flat)

    def _make_layers(self, cfg):
        """Build the conv stack from a layer spec list (ints and 'M')."""
        layers = []
        in_channels = 3
        for spec in cfg:
            if spec == 'M':
                # 'M' entries halve the spatial resolution.
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                # Integer entries are conv widths: conv -> batch-norm -> ReLU.
                layers.extend([
                    nn.Conv2d(in_channels, spec, kernel_size=3, padding=1),
                    nn.BatchNorm2d(spec),
                    nn.ReLU(inplace=True),
                ])
                in_channels = spec
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)
class CifarDataset(torch.utils.data.Dataset):
    """Binary CIFAR wrapper: original label 3 maps to class 0, all others to 1."""

    def __init__(self, X, y):
        self.X = torch.FloatTensor(X)
        self.y = torch.LongTensor(y)

    def __len__(self):
        return self.y.shape[0]

    def __getitem__(self, index):
        # Binarize on the fly instead of storing remapped labels.
        label = 0 if self.y[index] == 3 else 1
        return self.X[index], label
def setup_logger(logger_name, log_file, level=logging.INFO):
    """Configure *logger_name* to log to *log_file* (truncated) and to stderr."""
    log = logging.getLogger(logger_name)
    fmt = logging.Formatter('%(asctime)s : %(message)s')
    # One file handler (mode='w' truncates any previous log) plus a
    # StreamHandler (defaults to stderr), both with the same format.
    for handler in (logging.FileHandler(log_file, mode='w'),
                    logging.StreamHandler()):
        handler.setFormatter(fmt)
        log.addHandler(handler)
    log.setLevel(level)
def parse_args(argv=None):
    """Parse command-line options for CIFAR VGG training.

    Parameters
    ----------
    argv : list[str] | None
        Argument list to parse. Defaults to ``sys.argv[1:]`` when None,
        preserving the original call-site behavior while allowing tests
        to pass an explicit list.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=42)  # RNG seed
    parser.add_argument('--R', type=int, default=1)  # dataset variant index
    parser.add_argument('--hidden', type=int, default=200)  # MLP hidden width (unused by VGG)
    parser.add_argument('--lr', type=float, default=0.001)  # learning rate
    parser.add_argument('--bs', type=int, default=64)  # batch size
    parser.add_argument('--n-epochs', type=int, default=20)
    parser.add_argument('--adv', action='store_true', default=False)  # adversarial training
    parser.add_argument('--norm-type', type=str, default='linf', choices=['linf', 'l2'])
    parser.add_argument('--norm-scale', type=float)  # perturbation budget (eps)
    return parser.parse_args(argv)
# TODO:
# 1. change attack to PGD
# 1. set up logging
args = parse_args()
folder_main = 'record_cifar'
# The run-folder name encodes every hyper-parameter; the '_vgg' suffix
# separates these runs from the MLP-based train_cifar.py runs.
if args.adv:
    folder_sub = osp.join(folder_main, f'train-adv_R-{args.R}_lr-{args.lr}_normtype-{args.norm_type}_normscale-{args.norm_scale}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}_vgg')
else:
    folder_sub = osp.join(folder_main, f'train-std_R-{args.R}_lr-{args.lr}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}_vgg')
if not osp.exists(folder_sub):
    os.makedirs(folder_sub)
task_name = 'train'
setup_logger(task_name, os.path.join(folder_sub, f'train.log'))
logger = logging.getLogger(task_name)
logger.info(f'args = {args}')
# Seed both torch and numpy for reproducibility.
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. data loading
data = torch.load(osp.join('data_cifar', f'data_R_{args.R}.pth'))
X_train, X_val, X_test, y_train, y_val, y_test = data['X_train'], data['X_valid'], data['X_test'], data['y_train'], data['y_valid'], data['y_test']
train_set = CifarDataset(X_train, y_train)
val_set = CifarDataset(X_val, y_val)
test_set = CifarDataset(X_test, y_test)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.bs, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.bs, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False)
batch_size = 64
input_size = 3072
num_classes = 2
# VGG11 convolutional model trained with SGD + momentum.
model = CifarNet(vgg_name='VGG11').cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
losses = []
def test(data_loader):
    """Evaluate the module-level ``model`` on *data_loader*.

    Uses the module-level ``criterion`` for the loss and assumes CUDA.
    Inputs are passed through unflattened (the VGG model consumes images).

    Returns
    -------
    tuple
        ``(correct, correct_class, total, mean_loss)`` -- overall correct
        count, per-class correct counts ``[class0, class1]``, number of
        examples seen, and the mean per-batch loss.
    """
    eval_losses = []
    correct = 0
    correct_class = [0, 0]
    total = 0
    # Fix: wrap evaluation in no_grad so autograd graphs are not built for
    # every forward pass (saves memory; the returned metrics are unchanged).
    with torch.no_grad():
        for X, y in data_loader:
            X = X.cuda()
            y = y.cuda()
            output = model(X)
            loss = criterion(output, y)
            _, predicted = torch.max(output.data, 1)
            eval_losses.append(loss.item())
            total += y.size(0)
            correct += (predicted == y).sum().item()
            correct_class[0] += torch.logical_and(predicted == y, y == 0).sum().item()
            correct_class[1] += torch.logical_and(predicted == y, y == 1).sum().item()
    return correct, correct_class, total, np.mean(eval_losses)
# One-step FGSM adversary, superseded by the PGD adversary below
# (kept for reference).
# if args.adv:
#     if args.norm_type == 'linf':
#         adversary = GradientSignAttack(model, loss_fn=criterion, eps=args.norm_scale, clip_min=0.0, clip_max=1.0)
#     elif args.norm_type == 'l2':
#         adversary = GradientAttack(model, loss_fn=criterion, eps=args.norm_scale, clip_min=0.0, clip_max=1.0)
# Multi-step PGD adversary used to craft training-time adversarial
# examples when --adv is set.
if args.adv:
    if args.norm_type == 'linf':
        adversary = LinfPGDAttack(
            model, loss_fn=criterion, eps=args.norm_scale,
            nb_iter=40, eps_iter=args.norm_scale/40*4/3, rand_init=True, clip_min=0.0, clip_max=1.0,
            targeted=False)
    elif args.norm_type == 'l2':
        adversary = L2PGDAttack(
            model, loss_fn=criterion, eps=args.norm_scale,
            nb_iter=50, eps_iter=args.norm_scale/50*2.5, rand_init=True, clip_min=0.0, clip_max=1.0,
            targeted=False)
# Early-stopping bookkeeping: track the checkpoint with the lowest
# validation loss seen so far.
best_val_loss = 1e10
best_model = None
best_epoch = -1
t = time.time()
# Main training loop; when --adv is set each batch is replaced by its
# adversarially perturbed version before the forward pass.
for epoch in (pbar := tqdm(range(args.n_epochs))):
    # 1. training
    model.train()
    for i, (X, y) in enumerate(train_loader):
        X = X.cuda()
        y = y.cuda()
        optimizer.zero_grad()
        if args.adv:
            # Train on adversarial examples crafted from the current batch.
            X_adv = adversary.perturb(X, y)
            output = model(X_adv)
        else:
            output = model(X)
        loss = criterion(output, y)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
        # if (i+1) % 50 == 0:
        #     print ('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f'
        #            % (epoch+1, args.n_epochs, i+1, len(train_set)//batch_size, loss.item()))
    # 2. validation
    model.eval()
    val_correct, val_correct_class, val_total, val_loss = test(val_loader)
    pbar.set_description(f"val_loss = {val_loss : .4f}, val_acc = {val_correct / val_total : .4f}")
    # Keep a deep copy of the best-so-far model for early stopping.
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        best_model = copy.deepcopy(model)
        best_epoch = epoch
    # Stop after 50 epochs with no validation improvement
    # (only reachable when n_epochs > 50).
    if epoch - best_epoch == 50:
        break
logger.info(f'training finishes using {time.time() - t} seconds!')
# Restore the best checkpoint and report final metrics.
model = copy.deepcopy(best_model)
val_correct, val_correct_class, val_total, val_loss = test(val_loader)
logger.info(f'val_correct = {val_correct}, val_correct_class = {val_correct_class}, val_total = {val_total}, val_loss = {val_loss}')
test_correct, test_correct_class, test_total, test_loss = test(test_loader)
logger.info(f'test_correct = {test_correct}, test_correct_class = {test_correct_class}, test_total = {test_total}, test_loss = {test_loss}')
# Move to CPU before serializing so the checkpoint loads without a GPU.
model.cpu()
model_path = osp.join(folder_sub, 'model.pth')
torch.save(model.state_dict(), model_path)
torch.save(losses, osp.join(folder_sub, 'train_losses.pth'))
logger.info(f'model at epoch {best_epoch} saved to {model_path}') | 7,950 | 31.060484 | 186 | py |
AT-on-AD | AT-on-AD-main/train_cifar.py | import argparse
import copy
import logging
import numpy as np
import os
import os.path as osp
import time
from tqdm import tqdm
import torch
import torch.nn as nn
from torchvision import transforms
from advertorch.attacks.one_step_gradient import GradientSignAttack, GradientAttack
from advertorch.attacks.iterative_projected_gradient import LinfPGDAttack, L2PGDAttack
class CifarLR(nn.Module):
    """Two-layer MLP classifier: input -> hidden (ReLU) -> class logits."""

    def __init__(self, input_size, hidden=200, num_classes=2):
        super(CifarLR, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden, num_classes)

    def forward(self, x):
        # Compose the layers directly instead of reassigning x step by step.
        return self.fc2(self.relu(self.fc1(x)))
class CifarDataset(torch.utils.data.Dataset):
    """Binary CIFAR wrapper: original label 3 maps to class 0, all others to 1."""

    def __init__(self, X, y):
        self.X = torch.FloatTensor(X)
        self.y = torch.LongTensor(y)

    def __len__(self):
        return self.y.shape[0]

    def __getitem__(self, index):
        # Binarize on the fly instead of storing remapped labels.
        label = 0 if self.y[index] == 3 else 1
        return self.X[index], label
def setup_logger(logger_name, log_file, level=logging.INFO):
    """Configure *logger_name* to log to *log_file* (truncated) and to stderr."""
    log = logging.getLogger(logger_name)
    fmt = logging.Formatter('%(asctime)s : %(message)s')
    # One file handler (mode='w' truncates any previous log) plus a
    # StreamHandler (defaults to stderr), both with the same format.
    for handler in (logging.FileHandler(log_file, mode='w'),
                    logging.StreamHandler()):
        handler.setFormatter(fmt)
        log.addHandler(handler)
    log.setLevel(level)
def parse_args(argv=None):
    """Parse command-line options for CIFAR MLP training.

    Parameters
    ----------
    argv : list[str] | None
        Argument list to parse. Defaults to ``sys.argv[1:]`` when None,
        preserving the original call-site behavior while allowing tests
        to pass an explicit list.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=42)  # RNG seed
    parser.add_argument('--R', type=int, default=1)  # dataset variant index
    parser.add_argument('--hidden', type=int, default=200)  # MLP hidden width
    parser.add_argument('--lr', type=float, default=0.001)  # learning rate
    parser.add_argument('--bs', type=int, default=64)  # batch size
    parser.add_argument('--n-epochs', type=int, default=20)
    parser.add_argument('--adv', action='store_true', default=False)  # adversarial training
    parser.add_argument('--norm-type', type=str, default='linf', choices=['linf', 'l2'])
    parser.add_argument('--norm-scale', type=float)  # perturbation budget (eps)
    return parser.parse_args(argv)
# TODO:
# 1. change attack to PGD
# 1. set up logging
args = parse_args()
folder_main = 'record_cifar'
# The run-folder name encodes every hyper-parameter of this run.
if args.adv:
    folder_sub = osp.join(folder_main, f'train-adv_R-{args.R}_lr-{args.lr}_normtype-{args.norm_type}_normscale-{args.norm_scale}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
else:
    folder_sub = osp.join(folder_main, f'train-std_R-{args.R}_lr-{args.lr}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
if not osp.exists(folder_sub):
    os.makedirs(folder_sub)
task_name = 'train'
setup_logger(task_name, os.path.join(folder_sub, f'train.log'))
logger = logging.getLogger(task_name)
logger.info(f'args = {args}')
# Seed both torch and numpy for reproducibility.
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. data loading
data = torch.load(osp.join('data_cifar', f'data_R_{args.R}.pth'))
X_train, X_val, X_test, y_train, y_val, y_test = data['X_train'], data['X_valid'], data['X_test'], data['y_train'], data['y_valid'], data['y_test']
train_set = CifarDataset(X_train, y_train)
val_set = CifarDataset(X_val, y_val)
test_set = CifarDataset(X_test, y_test)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.bs, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.bs, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False)
batch_size = 64
input_size = 3072
num_classes = 2
# Two-layer MLP trained with SGD + momentum on flattened CIFAR images.
model = CifarLR(input_size, hidden=args.hidden, num_classes=num_classes).cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
losses = []
def test(data_loader):
    """Evaluate the module-level ``model`` on *data_loader*.

    Uses the module-level ``criterion`` for the loss and assumes CUDA.

    Returns
    -------
    tuple
        ``(correct, correct_class, total, mean_loss)`` -- overall correct
        count, per-class correct counts ``[class0, class1]``, number of
        examples seen, and the mean per-batch loss.
    """
    eval_losses = []
    correct = 0
    correct_class = [0, 0]
    total = 0
    # Fix: wrap evaluation in no_grad so autograd graphs are not built for
    # every forward pass (saves memory; the returned metrics are unchanged).
    with torch.no_grad():
        for X, y in data_loader:
            X = X.view(-1, 3072).cuda()  # flatten to 3072 = 3*32*32 features
            y = y.cuda()
            output = model(X)
            loss = criterion(output, y)
            _, predicted = torch.max(output.data, 1)
            eval_losses.append(loss.item())
            total += y.size(0)
            correct += (predicted == y).sum().item()
            correct_class[0] += torch.logical_and(predicted == y, y == 0).sum().item()
            correct_class[1] += torch.logical_and(predicted == y, y == 1).sum().item()
    return correct, correct_class, total, np.mean(eval_losses)
# One-step FGSM adversary, superseded by the PGD adversary below
# (kept for reference).
# if args.adv:
#     if args.norm_type == 'linf':
#         adversary = GradientSignAttack(model, loss_fn=criterion, eps=args.norm_scale, clip_min=0.0, clip_max=1.0)
#     elif args.norm_type == 'l2':
#         adversary = GradientAttack(model, loss_fn=criterion, eps=args.norm_scale, clip_min=0.0, clip_max=1.0)
# Multi-step PGD adversary used to craft training-time adversarial
# examples when --adv is set.
if args.adv:
    if args.norm_type == 'linf':
        adversary = LinfPGDAttack(
            model, loss_fn=criterion, eps=args.norm_scale,
            nb_iter=40, eps_iter=args.norm_scale/40*4/3, rand_init=True, clip_min=0.0, clip_max=1.0,
            targeted=False)
    elif args.norm_type == 'l2':
        adversary = L2PGDAttack(
            model, loss_fn=criterion, eps=args.norm_scale,
            nb_iter=50, eps_iter=args.norm_scale/50*2.5, rand_init=True, clip_min=0.0, clip_max=1.0,
            targeted=False)
# Early-stopping bookkeeping: track the checkpoint with the lowest
# validation loss seen so far.
best_val_loss = 1e10
best_model = None
best_epoch = -1
t = time.time()
# Main training loop; when --adv is set each batch is replaced by its
# adversarially perturbed version before the forward pass.
for epoch in (pbar := tqdm(range(args.n_epochs))):
    # 1. training
    model.train()
    for i, (X, y) in enumerate(train_loader):
        X = X.view(-1, 3072).cuda()
        y = y.cuda()
        optimizer.zero_grad()
        if args.adv:
            # Train on adversarial examples crafted from the current batch.
            X_adv = adversary.perturb(X, y)
            output = model(X_adv)
        else:
            output = model(X)
        loss = criterion(output, y)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
        # if (i+1) % 50 == 0:
        #     print ('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f'
        #            % (epoch+1, args.n_epochs, i+1, len(train_set)//batch_size, loss.item()))
    # 2. validation
    model.eval()
    val_correct, val_correct_class, val_total, val_loss = test(val_loader)
    pbar.set_description(f"val_loss = {val_loss : .4f}, val_acc = {val_correct / val_total : .4f}")
    # Keep a deep copy of the best-so-far model for early stopping.
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        best_model = copy.deepcopy(model)
        best_epoch = epoch
    # Stop after 50 epochs with no validation improvement
    # (only reachable when n_epochs > 50).
    if epoch - best_epoch == 50:
        break
logger.info(f'training finishes using {time.time() - t} seconds!')
# Restore the best checkpoint and report final metrics.
model = copy.deepcopy(best_model)
val_correct, val_correct_class, val_total, val_loss = test(val_loader)
logger.info(f'val_correct = {val_correct}, val_correct_class = {val_correct_class}, val_total = {val_total}, val_loss = {val_loss}')
test_correct, test_correct_class, test_total, test_loss = test(test_loader)
logger.info(f'test_correct = {test_correct}, test_correct_class = {test_correct_class}, test_total = {test_total}, test_loss = {test_loss}')
# Move to CPU before serializing so the checkpoint loads without a GPU.
model.cpu()
model_path = osp.join(folder_sub, 'model.pth')
torch.save(model.state_dict(), model_path)
torch.save(losses, osp.join(folder_sub, 'train_losses.pth'))
logger.info(f'model at epoch {best_epoch} saved to {model_path}') | 7,120 | 30.50885 | 182 | py |
AT-on-AD | AT-on-AD-main/train_mnist.py | import argparse
import copy
import logging
import numpy as np
import os
import os.path as osp
import time
from tqdm import tqdm
import torch
import torch.nn as nn
from torchvision import transforms
from advertorch.attacks.one_step_gradient import GradientSignAttack, GradientAttack
from advertorch.attacks.iterative_projected_gradient import LinfPGDAttack, L2PGDAttack
class MyLR(nn.Module):
    """Logistic-regression classifier: a single affine map to class logits."""

    def __init__(self, input_size, num_classes):
        super(MyLR, self).__init__()
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, x):
        # One linear layer; softmax is applied by the loss, not here.
        return self.linear(x)
class MnistDataset(torch.utils.data.Dataset):
    """Binary MNIST wrapper: original label 1 maps to class 0, all others to 1."""

    def __init__(self, X, y):
        self.X = torch.FloatTensor(X)
        self.y = torch.LongTensor(y)

    def __len__(self):
        return self.y.shape[0]

    def __getitem__(self, index):
        # Binarize on the fly instead of storing remapped labels.
        label = 0 if self.y[index] == 1 else 1
        return self.X[index], label
def setup_logger(logger_name, log_file, level=logging.INFO):
    """Configure *logger_name* to log to *log_file* (truncated) and to stderr."""
    log = logging.getLogger(logger_name)
    fmt = logging.Formatter('%(asctime)s : %(message)s')
    # One file handler (mode='w' truncates any previous log) plus a
    # StreamHandler (defaults to stderr), both with the same format.
    for handler in (logging.FileHandler(log_file, mode='w'),
                    logging.StreamHandler()):
        handler.setFormatter(fmt)
        log.addHandler(handler)
    log.setLevel(level)
def parse_args(argv=None):
    """Parse command-line options for MNIST training.

    Parameters
    ----------
    argv : list[str] | None
        Argument list to parse. Defaults to ``sys.argv[1:]`` when None,
        preserving the original call-site behavior while allowing tests
        to pass an explicit list.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=42)  # RNG seed
    parser.add_argument('--R', type=int, default=1)  # dataset variant index
    parser.add_argument('--lr', type=float, default=0.001)  # learning rate
    parser.add_argument('--bs', type=int, default=64)  # batch size
    parser.add_argument('--n-epochs', type=int, default=20)
    parser.add_argument('--adv', action='store_true', default=False)  # adversarial training
    parser.add_argument('--norm-type', type=str, default='linf', choices=['linf', 'l2'])
    parser.add_argument('--norm-scale', type=float)  # perturbation budget (eps)
    return parser.parse_args(argv)
# TODO:
# 1. change attack to PGD
# 1. set up logging
args = parse_args()
folder_main = 'record_mnist'
# The run-folder name encodes every hyper-parameter of this run.
if args.adv:
    folder_sub = osp.join(folder_main, f'train-adv_R-{args.R}_lr-{args.lr}_normtype-{args.norm_type}_normscale-{args.norm_scale}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
else:
    folder_sub = osp.join(folder_main, f'train-std_R-{args.R}_lr-{args.lr}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
if not osp.exists(folder_sub):
    os.makedirs(folder_sub)
task_name = 'train'
setup_logger(task_name, os.path.join(folder_sub, f'train.log'))
logger = logging.getLogger(task_name)
logger.info(f'args = {args}')
# Seed both torch and numpy for reproducibility.
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. data loading
data = torch.load(osp.join('data_mnist', f'data_R_{args.R}.pth'))
X_train, X_val, X_test, y_train, y_val, y_test = data['X_train'], data['X_valid'], data['X_test'], data['y_train'], data['y_valid'], data['y_test']
train_set = MnistDataset(X_train, y_train)
val_set = MnistDataset(X_val, y_val)
test_set = MnistDataset(X_test, y_test)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.bs, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.bs, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False)
batch_size = 64
input_size = 784
num_classes = 2
# Plain logistic-regression model trained with vanilla SGD.
model = MyLR(input_size, num_classes).cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
losses = []
def test(data_loader):
    """Evaluate the module-level ``model`` on *data_loader*.

    Uses the module-level ``criterion`` for the loss and assumes CUDA.

    Returns
    -------
    tuple
        ``(correct, correct_class, total, mean_loss)`` -- overall correct
        count, per-class correct counts ``[class0, class1]``, number of
        examples seen, and the mean per-batch loss.
    """
    eval_losses = []
    correct = 0
    correct_class = [0, 0]
    total = 0
    # Fix: wrap evaluation in no_grad so autograd graphs are not built for
    # every forward pass (saves memory; the returned metrics are unchanged).
    with torch.no_grad():
        for X, y in data_loader:
            X = X.view(-1, 784).cuda()  # flatten to 784 features per image
            y = y.cuda()
            output = model(X)
            loss = criterion(output, y)
            _, predicted = torch.max(output.data, 1)
            eval_losses.append(loss.item())
            total += y.size(0)
            correct += (predicted == y).sum().item()
            correct_class[0] += torch.logical_and(predicted == y, y == 0).sum().item()
            correct_class[1] += torch.logical_and(predicted == y, y == 1).sum().item()
    return correct, correct_class, total, np.mean(eval_losses)
# One-step (fast-gradient) adversary used to craft training-time
# adversarial examples when --adv is set.
if args.adv:
    if args.norm_type == 'linf':
        adversary = GradientSignAttack(model, loss_fn=criterion, eps=args.norm_scale, clip_min=0.0, clip_max=1.0)
    elif args.norm_type == 'l2':
        adversary = GradientAttack(model, loss_fn=criterion, eps=args.norm_scale, clip_min=0.0, clip_max=1.0)
# Alternative multi-step PGD adversary, kept for reference (see TODO above).
# if args.adv:
#     if args.norm_type == 'linf':
#         adversary = LinfPGDAttack(
#             model, loss_fn=criterion, eps=args.norm_scale,
#             nb_iter=40, eps_iter=args.norm_scale/40*4/3, rand_init=True, clip_min=0.0, clip_max=1.0,
#             targeted=False)
#     elif args.norm_type == 'l2':
#         adversary = L2PGDAttack(
#             model, loss_fn=criterion, eps=args.norm_scale,
#             nb_iter=50, eps_iter=args.norm_scale/50*5/4, rand_init=True, clip_min=0.0, clip_max=1.0,
#             targeted=False)
# Early-stopping bookkeeping: track the checkpoint with the lowest
# validation loss seen so far.
best_val_loss = 1e10
best_model = None
best_epoch = -1
t = time.time()
# Main training loop; when --adv is set each batch is replaced by its
# adversarially perturbed version before the forward pass.
for epoch in (pbar := tqdm(range(args.n_epochs))):
    # 1. training
    model.train()
    for i, (X, y) in enumerate(train_loader):
        X = X.view(-1, 784).cuda()
        y = y.cuda()
        optimizer.zero_grad()
        if args.adv:
            # Train on adversarial examples crafted from the current batch.
            X_adv = adversary.perturb(X, y)
            output = model(X_adv)
        else:
            output = model(X)
        loss = criterion(output, y)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
        # if (i+1) % 50 == 0:
        #     print ('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f'
        #            % (epoch+1, args.n_epochs, i+1, len(train_set)//batch_size, loss.item()))
    # 2. validation
    model.eval()
    val_correct, val_correct_class, val_total, val_loss = test(val_loader)
    pbar.set_description(f"val_loss = {val_loss : .4f}, val_acc = {val_correct / val_total : .4f}")
    # Keep a deep copy of the best-so-far model for early stopping.
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        best_model = copy.deepcopy(model)
        best_epoch = epoch
    # Stop after 50 epochs with no validation improvement
    # (only reachable when n_epochs > 50).
    if epoch - best_epoch == 50:
        break
logger.info(f'training finishes using {time.time() - t} seconds!')
# Restore the best checkpoint and report final metrics.
model = copy.deepcopy(best_model)
val_correct, val_correct_class, val_total, val_loss = test(val_loader)
logger.info(f'val_correct = {val_correct}, val_correct_class = {val_correct_class}, val_total = {val_total}, val_loss = {val_loss}')
test_correct, test_correct_class, test_total, test_loss = test(test_loader)
logger.info(f'test_correct = {test_correct}, test_correct_class = {test_correct_class}, test_total = {test_total}, test_loss = {test_loss}')
# Move to CPU before serializing so the checkpoint loads without a GPU.
model.cpu()
model_path = osp.join(folder_sub, 'model.pth')
torch.save(model.state_dict(), model_path)
torch.save(losses, osp.join(folder_sub, 'train_losses.pth'))
logger.info(f'model at epoch {best_epoch} saved to {model_path}') | 6,887 | 30.167421 | 182 | py |
aidgn | aidgn-main/domainbed/command_launchers.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
A command launcher launches a list of commands on a cluster; implement your own
launcher to add support for your cluster. We've provided an example launcher
which runs all commands serially on the local machine.
"""
import subprocess
import time
import torch
def local_launcher(commands):
    """Launch commands serially on the local machine."""
    for command in commands:
        # Each command runs to completion before the next one starts.
        subprocess.call(command, shell=True)
def dummy_launcher(commands):
    """
    Doesn't run anything; instead, prints each command.
    Useful for testing.
    """
    for command in commands:
        print(f'Dummy launcher: {command}')
def multi_gpu_launcher(commands, first_gpu=4):
    """
    Launch commands on the local machine, using GPUs ``first_gpu`` onward
    in parallel (one command per GPU at a time).

    The GPU offset was previously hard-coded to 4 (skipping GPUs 0-3);
    it is now a parameter whose default preserves the original behavior.

    Parameters
    ----------
    commands : list[str]
        Shell commands to run; the list is consumed (mutated) in place.
    first_gpu : int
        Index of the first GPU to use; GPUs below this index are skipped.
    """
    print('WARNING: using experimental multi_gpu_launcher.')
    n_gpus = torch.cuda.device_count() - first_gpu
    if n_gpus <= 0 and commands:
        # Fix: without this guard the polling loop below would spin forever
        # when fewer than first_gpu+1 GPUs are visible.
        raise RuntimeError(
            f'multi_gpu_launcher needs at least one GPU at index >= {first_gpu}')
    procs_by_gpu = [None] * n_gpus
    while len(commands) > 0:
        for gpu_idx in range(n_gpus):
            proc = procs_by_gpu[gpu_idx]
            if (proc is None) or (proc.poll() is not None):
                # Nothing is running on this GPU; launch a command.
                cmd = commands.pop(0)
                new_proc = subprocess.Popen(
                    f'CUDA_VISIBLE_DEVICES={gpu_idx + first_gpu} {cmd}', shell=True)
                procs_by_gpu[gpu_idx] = new_proc
                break
        time.sleep(1)  # poll once per second for a free GPU slot
    # Wait for the last few tasks to finish before returning
    for p in procs_by_gpu:
        if p is not None:
            p.wait()
# Maps launcher names (as selected by callers) to launcher implementations.
REGISTRY = {
    'local': local_launcher,
    'dummy': dummy_launcher,
    'multi_gpu': multi_gpu_launcher
}
try:
    # Optionally extend the registry with Facebook-internal launchers.
    from domainbed import facebook
    facebook.register_command_launchers(REGISTRY)
except ImportError:
    # Internal module not present in the open-source release; ignore.
    pass
| 1,792 | 27.919355 | 79 | py |
aidgn | aidgn-main/domainbed/model_selection.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import itertools
import numpy as np
def get_test_records(records):
    """Keep only the records evaluated with exactly one test environment
    (i.e. drop records that held out additional validation envs)."""
    return records.filter(
        lambda record: len(record['args']['test_envs']) == 1)
class SelectionMethod:
    """Abstract class whose subclasses implement strategies for model
    selection across hparams and timesteps."""
    def __init__(self):
        # Pure namespace of classmethods; instantiating it is a usage error.
        raise TypeError
    @classmethod
    def run_acc(self, run_records):
        """
        Given records from a run, return a {val_acc, test_acc} dict representing
        the best val-acc and corresponding test-acc for that run.
        """
        raise NotImplementedError
    @classmethod
    def hparams_accs(self, records):
        """
        Given all records from a single (dataset, algorithm, test env) pair,
        return a list of (run_acc, records) tuples, sorted by val_acc with
        the best first.
        """
        # Group by hparams seed, score each run with run_acc(), drop runs
        # that could not be scored (run_acc returned None), then sort
        # descending by val_acc.
        return (records.group('args.hparams_seed')
            .map(lambda _, run_records:
                (
                    self.run_acc(run_records),
                    run_records
                )
            ).filter(lambda x: x[0] is not None)
            .sorted(key=lambda x: x[0]['val_acc'])[::-1]
        )
    @classmethod
    def sweep_acc(self, records):
        """
        Given all records from a single (dataset, algorithm, test env) pair,
        return the test acc of the single run with the best val acc, or None
        if no run could be scored.
        """
        _hparams_accs = self.hparams_accs(records)
        if len(_hparams_accs):
            return _hparams_accs[0][0]['test_acc']
        else:
            return None
class OracleSelectionMethod(SelectionMethod):
    """Selects by test-domain validation accuracy, evaluated only at the
    last checkpoint of each run (i.e. no early stopping)."""
    name = "test-domain validation set (oracle)"
    @classmethod
    def run_acc(self, run_records):
        # Only runs that held out exactly one test env are usable here.
        usable = run_records.filter(
            lambda r: len(r['args']['test_envs']) == 1)
        if not len(usable):
            return None
        env = usable[0]['args']['test_envs'][0]
        # Pick the final checkpoint of the run, ordered by training step.
        final = usable.sorted(lambda r: r['step'])[-1]
        return {
            'val_acc': final['env{}_out_acc'.format(env)],
            'test_acc': final['env{}_in_acc'.format(env)],
        }
class IIDAccuracySelectionMethod(SelectionMethod):
    """Picks argmax(mean(env_out_acc for env in train_envs))"""
    name = "training-domain validation set"
    @classmethod
    def _step_acc(self, record):
        """Score one record: mean held-out acc over the training envs is the
        val_acc; the in-split acc on the test env is the test_acc."""
        test_env = record['args']['test_envs'][0]
        val_env_keys = []
        env = 0
        # Environments are numbered consecutively from 0; stop at the first
        # missing key.
        while 'env{}_out_acc'.format(env) in record:
            if env != test_env:
                val_env_keys.append('env{}_out_acc'.format(env))
            env += 1
        return {
            'val_acc': np.mean([record[key] for key in val_env_keys]),
            'test_acc': record['env{}_in_acc'.format(test_env)],
        }
    @classmethod
    def run_acc(self, run_records):
        usable = get_test_records(run_records)
        if not len(usable):
            return None
        # Early stopping: take the checkpoint with the best val_acc.
        return usable.map(self._step_acc).argmax('val_acc')
class LeaveOneOutSelectionMethod(SelectionMethod):
    """Picks (hparams, step) by leave-one-out cross validation."""
    name = "leave-one-domain-out cross-validation"
    @classmethod
    def _step_acc(self, records):
        """Return the {val_acc, test_acc} for a group of records corresponding
        to a single step."""
        test_records = get_test_records(records)
        # Exactly one record must have only the real test env held out.
        if len(test_records) != 1:
            return None
        test_env = test_records[0]['args']['test_envs'][0]
        # Count environments by probing env{i}_out_acc keys from 0 upwards.
        n_envs = 0
        for i in itertools.count():
            if f'env{i}_out_acc' not in records[0]:
                break
            n_envs += 1
        # -1 is a sentinel for "no record found for this validation env".
        val_accs = np.zeros(n_envs) - 1
        # Records with two test envs held out {test_env, val_env} give the
        # leave-one-out accuracy on val_env.
        for r in records.filter(lambda r: len(r['args']['test_envs']) == 2):
            val_env = (set(r['args']['test_envs']) - set([test_env])).pop()
            val_accs[val_env] = r['env{}_in_acc'.format(val_env)]
        # Drop the slot of the test env itself; it is never a validation env.
        val_accs = list(val_accs[:test_env]) + list(val_accs[test_env+1:])
        # Incomplete sweeps (a missing val env) cannot be scored.
        if any([v==-1 for v in val_accs]):
            return None
        val_acc = np.sum(val_accs) / (n_envs-1)
        return {
            'val_acc': val_acc,
            'test_acc': test_records[0]['env{}_in_acc'.format(test_env)]
        }
    @classmethod
    def run_acc(self, records):
        # Score every step and keep the one with the best leave-one-out val_acc.
        step_accs = records.group('step').map(lambda step, step_records:
            self._step_acc(step_records)
        ).filter_not_none()
        if len(step_accs):
            return step_accs.argmax('val_acc')
        else:
            return None
| 5,163 | 35.366197 | 80 | py |
aidgn | aidgn-main/domainbed/networks.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models
import math
from domainbed.lib import wide_resnet
import copy
def remove_batch_norm_from_resnet(model):
    """Fuse every Conv+BatchNorm pair of a torchvision ResNet into a single
    conv and replace the BN modules with Identity, in place.

    Relies on torchvision's module naming: the stem (conv1/bn1), the four
    stage containers named "layer1".."layer4", and per-bottleneck attributes
    convN/bnN plus an optional downsample Sequential of (conv, bn).
    Returns the same model object, left in train() mode.
    """
    fuse = torch.nn.utils.fusion.fuse_conv_bn_eval
    # Fusion requires eval() statistics; switched back to train() at the end.
    model.eval()
    model.conv1 = fuse(model.conv1, model.bn1)
    model.bn1 = Identity()
    for name, module in model.named_modules():
        # Matches exactly "layer1".."layer4" (len 6), not their children.
        if name.startswith("layer") and len(name) == 6:
            for b, bottleneck in enumerate(module):
                for name2, module2 in bottleneck.named_modules():
                    if name2.startswith("conv"):
                        # "conv3" pairs with "bn3", etc.
                        bn_name = "bn" + name2[-1]
                        setattr(bottleneck, name2,
                                fuse(module2, getattr(bottleneck, bn_name)))
                        setattr(bottleneck, bn_name, Identity())
                if isinstance(bottleneck.downsample, torch.nn.Sequential):
                    bottleneck.downsample[0] = fuse(bottleneck.downsample[0],
                                                    bottleneck.downsample[1])
                    bottleneck.downsample[1] = Identity()
    model.train()
    return model
class Identity(nn.Module):
    """A pass-through layer: forward(x) returns x unchanged."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x
class MLP(nn.Module):
    """A plain fully-connected net: input layer, (mlp_depth - 2) hidden
    layers, output layer; dropout + ReLU after all but the last layer."""

    def __init__(self, n_inputs, n_outputs, hparams):
        super(MLP, self).__init__()
        width = hparams['mlp_width']
        self.input = nn.Linear(n_inputs, width)
        self.dropout = nn.Dropout(hparams['mlp_dropout'])
        n_hidden = hparams['mlp_depth'] - 2
        self.hiddens = nn.ModuleList(
            [nn.Linear(width, width) for _ in range(n_hidden)])
        self.output = nn.Linear(width, n_outputs)
        self.n_outputs = n_outputs

    def forward(self, x):
        x = F.relu(self.dropout(self.input(x)))
        for layer in self.hiddens:
            x = F.relu(self.dropout(layer(x)))
        return self.output(x)
class ResNet(torch.nn.Module):
    """ResNet-18/50 backbone with the final fc removed and all BatchNorm
    layers frozen in eval mode. Downloads ImageNet-pretrained weights."""
    def __init__(self, input_shape, hparams):
        super(ResNet, self).__init__()
        if hparams['resnet18']:
            self.network = torchvision.models.resnet18(pretrained=True)
            self.n_outputs = 512
        else:
            self.network = torchvision.models.resnet50(pretrained=True)
            self.n_outputs = 2048
        # Adapt the stem conv when the input has a non-RGB channel count:
        # pretrained RGB filters are reused cyclically for extra channels.
        nc = input_shape[0]
        if nc != 3:
            tmp = self.network.conv1.weight.data.clone()
            self.network.conv1 = nn.Conv2d(
                nc, 64, kernel_size=(7, 7),
                stride=(2, 2), padding=(3, 3), bias=False)
            for i in range(nc):
                self.network.conv1.weight.data[:, i, :, :] = tmp[:, i % 3, :, :]
        # Drop the classification head; Identity makes forward() emit features.
        del self.network.fc
        self.network.fc = Identity()
        self.freeze_bn()
        self.hparams = hparams
        self.dropout = nn.Dropout(hparams['resnet_dropout'])
    def forward(self, x):
        """Encode x into a feature vector of size n_outputs."""
        return self.dropout(self.network(x))
    def train(self, mode=True):
        """
        Override the default train() to freeze the BN parameters
        """
        super().train(mode)
        self.freeze_bn()
    def freeze_bn(self):
        # Keep BN running statistics fixed (eval mode) even during training.
        for m in self.network.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
class MNIST_CNN(nn.Module):
    """
    Hand-tuned architecture for MNIST: four conv stages (each conv -> ReLU ->
    GroupNorm) followed by global average pooling to a 128-d feature vector.
    Note from the original authors: adding a linear layer after the mean-pool
    hurts RotatedMNIST-100 generalization severely.
    """
    n_outputs = 128

    def __init__(self, input_shape):
        super(MNIST_CNN, self).__init__()
        self.conv1 = nn.Conv2d(input_shape[0], 64, 3, 1, padding=1)
        self.conv2 = nn.Conv2d(64, 128, 3, stride=2, padding=1)
        self.conv3 = nn.Conv2d(128, 128, 3, 1, padding=1)
        self.conv4 = nn.Conv2d(128, 128, 3, 1, padding=1)
        self.bn0 = nn.GroupNorm(8, 64)
        self.bn1 = nn.GroupNorm(8, 128)
        self.bn2 = nn.GroupNorm(8, 128)
        self.bn3 = nn.GroupNorm(8, 128)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    def forward(self, x):
        # Each stage applies conv -> ReLU -> GroupNorm, in that order.
        stages = ((self.conv1, self.bn0), (self.conv2, self.bn1),
                  (self.conv3, self.bn2), (self.conv4, self.bn3))
        for conv, norm in stages:
            x = norm(F.relu(conv(x)))
        x = self.avgpool(x)
        return x.view(len(x), -1)
class ContextNet(nn.Module):
    """Small conv net producing a single-channel context map with the same
    spatial size as its input."""

    def __init__(self, input_shape):
        super(ContextNet, self).__init__()
        pad = 2  # (kernel 5 - 1) // 2 keeps height and width unchanged
        self.context_net = nn.Sequential(
            nn.Conv2d(input_shape[0], 64, 5, padding=pad),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 64, 5, padding=pad),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 1, 5, padding=pad),
        )

    def forward(self, x):
        return self.context_net(x)
def Featurizer(input_shape, hparams):
    """Auto-select an appropriate featurizer for the given input shape."""
    if len(input_shape) == 1:
        # Flat vectors get an MLP whose output width equals mlp_width.
        return MLP(input_shape[0], hparams["mlp_width"], hparams)
    height_width = input_shape[1:3]
    if height_width == (28, 28):
        return MNIST_CNN(input_shape)
    if height_width == (32, 32):
        return wide_resnet.Wide_ResNet(input_shape, 16, 2, 0.)
    if height_width == (224, 224):
        return ResNet(input_shape, hparams)
    raise NotImplementedError
def Classifier(in_features, out_features, is_nonlinear=False):
    """Classification head: a single Linear layer, or a 3-layer ReLU MLP
    (halving then quartering the width) when is_nonlinear is True."""
    if not is_nonlinear:
        return torch.nn.Linear(in_features, out_features)
    half = in_features // 2
    quarter = in_features // 4
    return torch.nn.Sequential(
        torch.nn.Linear(in_features, half),
        torch.nn.ReLU(),
        torch.nn.Linear(half, quarter),
        torch.nn.ReLU(),
        torch.nn.Linear(quarter, out_features))
class WholeFish(nn.Module):
    """Featurizer plus classifier bundled into one network, with support for
    loading/resetting all weights from a state dict (used by Fish)."""

    def __init__(self, input_shape, num_classes, hparams, weights=None):
        super(WholeFish, self).__init__()
        backbone = Featurizer(input_shape, hparams)
        head = Classifier(
            backbone.n_outputs,
            num_classes,
            hparams['nonlinear_classifier'])
        self.net = nn.Sequential(backbone, head)
        if weights is not None:
            self.load_state_dict(copy.deepcopy(weights))

    def reset_weights(self, weights):
        # Replace all parameters with a deep copy of the given state dict.
        self.load_state_dict(copy.deepcopy(weights))

    def forward(self, x):
        return self.net(x)
class Flatten(nn.Module):
    """Reshape (N, ...) into (N, -1), collapsing all non-batch dimensions."""

    def forward(self, input):
        batch = input.size(0)
        return input.view(batch, -1)
| 7,258 | 30.154506 | 80 | py |
aidgn | aidgn-main/domainbed/datasets.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import torch
from PIL import Image, ImageFile
from torchvision import transforms
import torchvision.datasets.folder
from torch.utils.data import TensorDataset, Subset
from torchvision.datasets import MNIST, ImageFolder
from torchvision.transforms.functional import rotate
from wilds.datasets.camelyon17_dataset import Camelyon17Dataset
from wilds.datasets.fmow_dataset import FMoWDataset
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Names of all dataset classes in this module selectable via
# get_dataset_class(); grouped by input size / source.
DATASETS = [
    # Debug
    "Debug28",
    "Debug224",
    # Small images
    "ColoredMNIST",
    "RotatedMNIST",
    # Big images
    "VLCS",
    "PACS",
    "OfficeHome",
    "TerraIncognita",
    "DomainNet",
    "SVIRO",
    # WILDS datasets
    "WILDSCamelyon",
    "WILDSFMoW"
]
def get_dataset_class(dataset_name):
    """Return the dataset class with the given name."""
    dataset_class = globals().get(dataset_name)
    if dataset_class is None:
        raise NotImplementedError("Dataset not found: {}".format(dataset_name))
    return dataset_class
def num_environments(dataset_name):
    """Number of environments the named dataset defines."""
    dataset_class = get_dataset_class(dataset_name)
    return len(dataset_class.ENVIRONMENTS)
class MultipleDomainDataset:
    """Base class: a sequence of per-environment datasets stored in
    self.datasets; class attributes are defaults subclasses may override."""
    N_STEPS = 5001            # Default training steps
    CHECKPOINT_FREQ = 100     # Default eval/checkpoint interval
    N_WORKERS = 8             # Default dataloader workers
    ENVIRONMENTS = None       # Subclasses should override
    INPUT_SHAPE = None        # Subclasses should override

    def __getitem__(self, index):
        return self.datasets[index]

    def __len__(self):
        return len(self.datasets)
class Debug(MultipleDomainDataset):
    """Three tiny environments of random tensors, for smoke testing."""

    def __init__(self, root, test_envs, hparams):
        super().__init__()
        self.input_shape = self.INPUT_SHAPE
        self.num_classes = 2
        self.datasets = [
            TensorDataset(
                torch.randn(16, *self.INPUT_SHAPE),
                torch.randint(0, self.num_classes, (16,)),
            )
            for _ in range(3)
        ]
class Debug28(Debug):
    """Debug dataset with 3x28x28 inputs."""
    INPUT_SHAPE = (3, 28, 28)
    ENVIRONMENTS = ['0', '1', '2']
class Debug224(Debug):
    """Debug dataset with 3x224x224 inputs."""
    INPUT_SHAPE = (3, 224, 224)
    ENVIRONMENTS = ['0', '1', '2']
class MultipleEnvironmentMNIST(MultipleDomainDataset):
    """Base for MNIST variants: pools train+test MNIST, shuffles once, then
    deals the images round-robin into one transformed dataset per entry of
    `environments` via `dataset_transform(images, labels, env_param)`."""
    def __init__(self, root, environments, dataset_transform, input_shape,
                 num_classes):
        super().__init__()
        if root is None:
            raise ValueError('Data directory not specified!')
        # Combine the official train and test splits into one pool.
        original_dataset_tr = MNIST(root, train=True, download=True)
        original_dataset_te = MNIST(root, train=False, download=True)
        original_images = torch.cat((original_dataset_tr.data,
                                     original_dataset_te.data))
        original_labels = torch.cat((original_dataset_tr.targets,
                                     original_dataset_te.targets))
        # Shuffle once so the round-robin split below is random.
        shuffle = torch.randperm(len(original_images))
        original_images = original_images[shuffle]
        original_labels = original_labels[shuffle]
        self.datasets = []
        # Environment i gets every len(environments)-th sample starting at i.
        for i in range(len(environments)):
            images = original_images[i::len(environments)]
            labels = original_labels[i::len(environments)]
            self.datasets.append(dataset_transform(images, labels, environments[i]))
        self.input_shape = input_shape
        self.num_classes = num_classes
class ColoredMNIST(MultipleEnvironmentMNIST):
    """IRM-style Colored MNIST: binary label (digit < 5) with 25% label
    noise, and a color channel spuriously correlated with the label. The
    per-environment parameter (0.1 / 0.2 / 0.9) is the probability that the
    color is flipped relative to the label."""
    ENVIRONMENTS = ['+90%', '+80%', '-90%']
    def __init__(self, root, test_envs, hparams):
        super(ColoredMNIST, self).__init__(root, [0.1, 0.2, 0.9],
                                         self.color_dataset, (2, 28, 28,), 2)
        self.input_shape = (2, 28, 28,)
        self.num_classes = 2
    def color_dataset(self, images, labels, environment):
        # Assign a binary label based on the digit
        labels = (labels < 5).float()
        # Flip label with probability 0.25
        labels = self.torch_xor_(labels,
                                 self.torch_bernoulli_(0.25, len(labels)))
        # Assign a color based on the label; flip the color with probability e
        colors = self.torch_xor_(labels,
                                 self.torch_bernoulli_(environment,
                                                       len(labels)))
        images = torch.stack([images, images], dim=1)
        # Apply the color to the image by zeroing out the other color channel
        images[torch.tensor(range(len(images))), (
            1 - colors).long(), :, :] *= 0
        x = images.float().div_(255.0)
        y = labels.view(-1).long()
        return TensorDataset(x, y)
    def torch_bernoulli_(self, p, size):
        # 1.0 with probability p, else 0.0.
        return (torch.rand(size) < p).float()
    def torch_xor_(self, a, b):
        # XOR for {0,1}-valued float tensors.
        return (a - b).abs()
class RotatedMNIST(MultipleEnvironmentMNIST):
    """MNIST where each environment rotates every digit by a fixed angle
    (0, 15, 30, 45, 60, or 75 degrees); standard 10-class labels."""
    ENVIRONMENTS = ['0', '15', '30', '45', '60', '75']
    def __init__(self, root, test_envs, hparams):
        super(RotatedMNIST, self).__init__(root, [0, 15, 30, 45, 60, 75],
                                           self.rotate_dataset, (1, 28, 28,), 10)
    def rotate_dataset(self, images, labels, angle):
        """Rotate every image by `angle` degrees (bilinear, zero fill)."""
        rotation = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Lambda(lambda x: rotate(x, angle, fill=(0,),
                interpolation=torchvision.transforms.InterpolationMode.BILINEAR)),
            transforms.ToTensor()])
        x = torch.zeros(len(images), 1, 28, 28)
        for i in range(len(images)):
            x[i] = rotation(images[i])
        y = labels.view(-1)
        return TensorDataset(x, y)
class EImageFolder(ImageFolder):
    """ImageFolder that additionally returns a fixed per-environment domain
    label with every sample (-1 marks a held-out test environment)."""

    def __init__(self, root, transform, domain_label):
        super(EImageFolder, self).__init__(root, transform)
        # Domain index for every sample of this environment.
        self.domain_label = domain_label

    def __getitem__(self, index):
        """Return (image, class_target, domain_label) for the given index."""
        path, target = self.samples[index]
        sample = self.loader(path)
        # Fix: the original applied self.transform unconditionally and raised
        # TypeError when transform=None; guard like the base DatasetFolder.
        if self.transform is not None:
            sample = self.transform(sample)
        return sample, target, self.domain_label
class MultipleEnvironmentImageFolder(MultipleDomainDataset):
    """One EImageFolder per subdirectory of `root` (sorted order). Training
    environments get augmentation (when enabled) and consecutive domain
    labels 0, 1, ...; test environments get the plain transform and -1."""
    def __init__(self, root, test_envs, augment, hparams):
        super().__init__()
        environments = [f.name for f in os.scandir(root) if f.is_dir()]
        environments = sorted(environments)
        # Deterministic eval-style transform with ImageNet normalization.
        transform = transforms.Compose([
            transforms.Resize((224,224)),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        # Stochastic training transform: crop / flip / jitter / grayscale.
        augment_transform = transforms.Compose([
            transforms.RandomResizedCrop(224, scale=(0.7, 1.0)),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(0.3, 0.3, 0.3, 0.3),
            transforms.RandomGrayscale(),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
        self.datasets = []
        domain_index = 0
        for i, environment in enumerate(environments):
            if augment and (i not in test_envs):
                env_transform = augment_transform
            else:
                env_transform = transform
            path = os.path.join(root, environment)
            # Training envs receive consecutive domain labels; test envs -1.
            if i not in test_envs:
                env_dataset = EImageFolder(path, env_transform, domain_index)
                domain_index += 1
            else:
                env_dataset = EImageFolder(path, env_transform, -1)
            self.datasets.append(env_dataset)
        self.input_shape = (3, 224, 224,)
        self.num_classes = len(self.datasets[-1].classes)
class VLCS(MultipleEnvironmentImageFolder):
    """VLCS benchmark; environments are the subdirectories of root/VLCS/."""
    CHECKPOINT_FREQ = 50
    N_STEPS = 5000
    N_WORKERS = 6
    ENVIRONMENTS = ["C", "L", "S", "V"]
    def __init__(self, root, test_envs, hparams):
        self.dir = os.path.join(root, "VLCS/")
        super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)
class PACS(MultipleEnvironmentImageFolder):
    """PACS benchmark; environments are the subdirectories of root/PACS/."""
    CHECKPOINT_FREQ = 50
    N_STEPS = 5000
    N_WORKERS = 6
    ENVIRONMENTS = ["A", "C", "P", "S"]
    def __init__(self, root, test_envs, hparams):
        self.dir = os.path.join(root, "PACS/")
        super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)
class DomainNet(MultipleEnvironmentImageFolder):
    """DomainNet benchmark (six domains); larger step budget than the rest."""
    N_STEPS = 15000
    CHECKPOINT_FREQ = 500
    N_WORKERS = 6
    ENVIRONMENTS = ["clip", "info", "paint", "quick", "real", "sketch"]
    def __init__(self, root, test_envs, hparams):
        self.dir = os.path.join(root, "DomainNet/")
        super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)
class OfficeHome(MultipleEnvironmentImageFolder):
    """OfficeHome benchmark; environments from root/OfficeHome/ subdirs."""
    CHECKPOINT_FREQ = 100
    N_STEPS = 5001
    N_WORKERS = 6
    ENVIRONMENTS = ["A", "C", "P", "R"]
    def __init__(self, root, test_envs, hparams):
        self.dir = os.path.join(root, "OfficeHome/")
        super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)
class TerraIncognita(MultipleEnvironmentImageFolder):
    """Terra Incognita benchmark; environments are camera-trap locations."""
    CHECKPOINT_FREQ = 100
    N_WORKERS = 6
    N_STEPS = 5001
    ENVIRONMENTS = ["L100", "L38", "L43", "L46"]
    def __init__(self, root, test_envs, hparams):
        self.dir = os.path.join(root, "terra_incognita/")
        super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)
class SVIRO(MultipleEnvironmentImageFolder):
    """SVIRO benchmark; each environment is one car model's subdirectory."""
    CHECKPOINT_FREQ = 50
    ENVIRONMENTS = ["aclass", "escape", "hilux", "i3", "lexus", "tesla", "tiguan", "tucson", "x5", "zoe"]
    def __init__(self, root, test_envs, hparams):
        self.dir = os.path.join(root, "sviro/")
        super().__init__(self.dir, test_envs, hparams['data_augmentation'], hparams)
class WILDSEnvironment:
    """View of a WILDS dataset restricted to one value of a metadata field
    (e.g. one hospital); yields (image, label) pairs."""
    def __init__(
            self,
            wilds_dataset,
            metadata_name,
            metadata_value,
            transform=None):
        self.name = metadata_name + "_" + str(metadata_value)
        # Indices of all samples whose metadata matches metadata_value.
        metadata_index = wilds_dataset.metadata_fields.index(metadata_name)
        metadata_array = wilds_dataset.metadata_array
        subset_indices = torch.where(
            metadata_array[:, metadata_index] == metadata_value)[0]
        self.dataset = wilds_dataset
        self.indices = subset_indices
        self.transform = transform
    def __getitem__(self, i):
        x = self.dataset.get_input(self.indices[i])
        # Some WILDS datasets return arrays rather than PIL Images; convert.
        if type(x).__name__ != "Image":
            x = Image.fromarray(x)
        y = self.dataset.y_array[self.indices[i]]
        if self.transform is not None:
            x = self.transform(x)
        return x, y
    def __len__(self):
        return len(self.indices)
class WILDSDataset(MultipleDomainDataset):
    """Splits a WILDS dataset into one environment per distinct value of a
    metadata field; training environments optionally get augmentation."""
    INPUT_SHAPE = (3, 224, 224)
    def __init__(self, dataset, metadata_name, test_envs, augment, hparams):
        super().__init__()
        # Deterministic eval-style transform with ImageNet normalization.
        transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        # Stochastic training transform: crop / flip / jitter / grayscale.
        augment_transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.RandomResizedCrop(224, scale=(0.7, 1.0)),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(0.3, 0.3, 0.3, 0.3),
            transforms.RandomGrayscale(),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
        self.datasets = []
        # One environment per distinct metadata value, in sorted order.
        for i, metadata_value in enumerate(
                self.metadata_values(dataset, metadata_name)):
            if augment and (i not in test_envs):
                env_transform = augment_transform
            else:
                env_transform = transform
            env_dataset = WILDSEnvironment(
                dataset, metadata_name, metadata_value, env_transform)
            self.datasets.append(env_dataset)
        self.input_shape = (3, 224, 224,)
        self.num_classes = dataset.n_classes
    def metadata_values(self, wilds_dataset, metadata_name):
        """Sorted distinct values of the given metadata field."""
        metadata_index = wilds_dataset.metadata_fields.index(metadata_name)
        metadata_vals = wilds_dataset.metadata_array[:, metadata_index]
        return sorted(list(set(metadata_vals.view(-1).tolist())))
class WILDSCamelyon(WILDSDataset):
    """WILDS Camelyon17 split into environments by the 'hospital' field."""
    ENVIRONMENTS = [ "hospital_0", "hospital_1", "hospital_2", "hospital_3",
            "hospital_4"]
    def __init__(self, root, test_envs, hparams):
        dataset = Camelyon17Dataset(root_dir=root)
        super().__init__(
            dataset, "hospital", test_envs, hparams['data_augmentation'], hparams)
class WILDSFMoW(WILDSDataset):
    """WILDS FMoW split into environments by the 'region' field."""
    ENVIRONMENTS = [ "region_0", "region_1", "region_2", "region_3",
            "region_4", "region_5"]
    def __init__(self, root, test_envs, hparams):
        dataset = FMoWDataset(root_dir=root)
        super().__init__(
            dataset, "region", test_envs, hparams['data_augmentation'], hparams)
| 13,608 | 33.805627 | 105 | py |
aidgn | aidgn-main/domainbed/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| 72 | 23.333333 | 70 | py |
aidgn | aidgn-main/domainbed/algorithms.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
from torch.autograd import Variable
import math
import copy
import numpy as np
from collections import defaultdict
from domainbed import networks
from domainbed.lib.misc import random_pairs_of_minibatches, ParamDict
# Names of all algorithm classes selectable via get_algorithm_class().
ALGORITHMS = [
    'ERM',
    'AIDGN',
    'Fish',
    'IRM',
    'GroupDRO',
    'Mixup',
    'MLDG',
    'CORAL',
    'MMD',
    'DANN',
    'CDANN',
    'MTL',
    'SagNet',
    'ARM',
    'VREx',
    'RSC',
    'SD',
    'ANDMask',
    'IGA',
    'SelfReg',
    'MDSDI'
]
def get_algorithm_class(algorithm_name):
    """Return the algorithm class with the given name."""
    algorithm_class = globals().get(algorithm_name)
    if algorithm_class is None:
        raise NotImplementedError("Algorithm not found: {}".format(algorithm_name))
    return algorithm_class
class Algorithm(torch.nn.Module):
    """
    A subclass of Algorithm implements a domain generalization algorithm.
    Subclasses should implement the following:
    - update()
    - predict()
    """
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(Algorithm, self).__init__()
        # Hyperparameter dict; subclasses read their settings from here.
        self.hparams = hparams
    def update(self, step, minibatches, unlabeled=None):
        """
        Perform one update step, given a list of (x, y) tuples for all
        environments.
        Admits an optional list of unlabeled minibatches from the test domains,
        when task is domain_adaptation.
        """
        # NOTE(review): this base signature takes `step`, but several
        # subclasses in this file (Fish, AbstractDANN, IRM) define
        # update(self, minibatches, unlabeled=None) without it — confirm
        # which convention the training loop uses.
        raise NotImplementedError
    def predict(self, x):
        raise NotImplementedError
    def magnitude(self, x):
        # Optional: feature-magnitude hook used by AIDGN; others may omit it.
        raise NotImplementedError
class ERM(Algorithm):
    """
    Empirical Risk Minimization (ERM): minimize the average cross-entropy
    over all training environments pooled into one batch.
    """
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(ERM, self).__init__(input_shape, num_classes, num_domains,
                                  hparams)
        # Feature extractor plus a (non)linear classification head.
        self.featurizer = networks.Featurizer(input_shape, self.hparams)
        self.classifier = networks.Classifier(
            self.featurizer.n_outputs,
            num_classes,
            self.hparams['nonlinear_classifier'])
        self.network = nn.Sequential(self.featurizer, self.classifier)
        self.optimizer = torch.optim.Adam(
            self.network.parameters(),
            lr=self.hparams['lr'],
            weight_decay=self.hparams['weight_decay']
        )

    def update(self, step, minibatches, unlabeled=None):
        """One Adam step on pooled cross-entropy.

        minibatches: list of (x, y, domain_label) triples, one per training
        environment. Domain labels are unused by ERM; the original code
        concatenated them into an unused tensor every step — removed here.
        Returns a dict with the scalar training loss.
        """
        all_x = torch.cat([x for x, y, z in minibatches])
        all_y = torch.cat([y for x, y, z in minibatches])
        loss = F.cross_entropy(self.predict(all_x), all_y)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return {'loss': loss.item()}

    def predict(self, x):
        return self.network(x)
class AIClassifier(torch.nn.Module):
    """Cosine-margin classifier with a feature-magnitude-dependent angular
    perturbation plus a magnitude regularizer (core of AIDGN)."""
    def __init__(self, feat_dim, num_class, kappa=110, l_a=10, u_a=410, gamma=0.001, beta=0.3, eta=15):
        super(AIClassifier, self).__init__()
        # Per-class weight columns; renorm_+mul_ yields unit-ish column norms.
        self.weight = torch.nn.Parameter(torch.Tensor(feat_dim, num_class))
        self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
        self.scale = kappa    # logit scale applied before cross-entropy
        self.l_a = l_a        # lower clamp for feature norms
        self.u_a = u_a        # upper clamp for feature norms
        self.gamma = gamma    # slope of the perturbation vs. feature norm
        self.beta = beta      # weight of the domain-mean term in the perturbation
        self.eta = eta        # weight of the magnitude (loss_kl) regularizer
    def calc_perturb(self, x, domain_mean):
        # Angular margin grows linearly with the (clamped) feature norm and
        # the per-sample domain mean norm; 0.4 rad is the base margin.
        pb = self.gamma * (x - self.l_a + self.beta * domain_mean) + 0.4
        return pb
    def predict(self, feats):
        """Cosine similarity between normalized features and class weights."""
        weight_norm = F.normalize(self.weight, dim=0)
        feats_norm = F.normalize(feats)
        cos_theta = torch.mm(feats_norm, weight_norm)
        return cos_theta
    def forward(self, feats, labels, ratio_h, domain_mean):
        """Return (scaled margin logits, eta * magnitude regularizer)."""
        x_norm = torch.norm(feats, dim=1, keepdim=True).clamp(self.l_a, self.u_a)
        pb = self.calc_perturb(x_norm, domain_mean)
        cos_m, sin_m = torch.cos(pb), torch.sin(pb)
        # NOTE(review): ratio_h is 1-D (N,) while x_norm is (N, 1); this
        # expression broadcasts to (N, N) — confirm an elementwise (N, 1)
        # result was not intended (the caller mean-reduces either way).
        loss_kl = 1 / ((ratio_h * self.u_a) ** 2) * x_norm + 1 / (x_norm)
        weight_norm = F.normalize(self.weight, dim=0)
        feats = F.normalize(feats)
        cos_theta = torch.mm(feats, weight_norm)
        cos_theta = cos_theta.clamp(-1, 1)
        sin_theta = torch.sqrt(1.0 - torch.pow(cos_theta, 2))
        # cos(theta + pb) via the angle-addition identity.
        cos_theta_m = cos_theta * cos_m - sin_theta * sin_m
        # Only apply the margin while theta + pb stays below pi (monotone zone).
        min_cos_theta = torch.cos(math.pi - pb)
        cos_theta_m = torch.where(cos_theta > min_cos_theta, cos_theta_m, cos_theta)
        # One-hot mask selecting each sample's ground-truth class logit.
        index = torch.zeros_like(cos_theta)
        index.scatter_(1, labels.data.view(-1, 1), 1)
        index = index.byte().bool()
        output = cos_theta * 1.0
        # Margin is applied to the target-class logit only.
        output[index] = cos_theta_m[index]
        output *= self.scale
        return output, self.eta * loss_kl
class AIDGN(Algorithm):
    """AIDGN: featurizer + AIClassifier trained with a magnitude-aware
    angular margin; tracks a running mean feature norm per training domain."""
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(AIDGN, self).__init__(input_shape, num_classes, num_domains, hparams)
        self.featurizer = networks.Featurizer(input_shape, self.hparams)
        self.classifier = AIClassifier(
            self.featurizer.n_outputs,
            num_classes,
            self.hparams['kappa'],
            self.hparams['l_a'],
            self.hparams['u_a'],
            self.hparams['gamma'],
            self.hparams['beta'],
            self.hparams['eta']
        )
        self.optimizer = torch.optim.Adam(
            list(self.featurizer.parameters()) + list(self.classifier.parameters()),
            lr=self.hparams['lr'],
            weight_decay=self.hparams['weight_decay']
        )
        # NOTE(review): this scheduler is created but never stepped inside
        # update(); confirm the training loop calls lr_scheduler.step().
        self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            self.optimizer,
            milestones = [6000,12000],
            gamma = 0.5
        )
        self.num_domains = num_domains
        # Per-domain mean feature norms, initialized large; hard CUDA
        # dependency — this class cannot run on CPU-only machines.
        self.mean_mags = 10000 * torch.ones(num_domains).cuda()
        self.batch_size = self.hparams['batch_size']
    def predict(self, x):
        """Cosine logits for input x (no margin at inference time)."""
        f = self.featurizer(x)
        y = self.classifier.predict(f)
        return y
    def magnitude(self, x):
        """L2 norm of the feature vector for each sample of x."""
        f = self.featurizer(x)
        f_norm = torch.norm(f, dim=1)
        return f_norm
    def update(self, step, minibatches, unlabeled=None):
        # minibatches: list of (x, y, domain_label) triples, one per train env;
        # assumes each env contributes exactly batch_size samples in order.
        all_x = torch.cat([x for x,y,z in minibatches])
        all_y = torch.cat([y for x,y,z in minibatches])
        all_z = torch.cat([z for x,y,z in minibatches])
        f = self.featurizer(all_x)
        mags = torch.norm(f, dim=1)
        # Refresh the per-domain mean norms from this batch (slicing relies
        # on the fixed per-env batch_size layout above).
        for i in range(0, self.num_domains):
            mags_i = mags[i * self.batch_size : (i+1) * self.batch_size]
            self.mean_mags[i] = torch.mean(mags_i, dim=0)
        with torch.no_grad():
            domain_mean = self.mean_mags[all_z]
        # ratio_h: per-sample norm over its domain mean, floored at 1 below
        # the threshold; detached so it acts as a fixed weight.
        ratio = mags.detach() / domain_mean
        ones = torch.ones_like(ratio)
        ratio_h = torch.where(ratio>self.hparams['ratiothreshold'], ratio, ones)
        # NOTE(review): this recomputes the same lookup as the block above
        # (mean_mags is not modified in between) — likely redundant.
        with torch.no_grad():
            domain_mean = self.mean_mags[all_z]
            domain_mean = domain_mean.reshape(domain_mean.size(0),1).detach()
        logits, loss_kl = self.classifier.forward(f, all_y, ratio_h, domain_mean)
        loss_kl = torch.mean(loss_kl)
        loss_c = F.cross_entropy(logits, all_y)
        loss = loss_c + loss_kl
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return {'loss': loss.item(), 'loss_c': loss_c.item(), 'loss_kl': loss_kl.item()}
class Fish(Algorithm):
    """
    Implementation of Fish, as seen in Gradient Matching for Domain
    Generalization, Shi et al. 2021.
    """
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(Fish, self).__init__(input_shape, num_classes, num_domains,
                                   hparams)
        self.input_shape = input_shape
        self.num_classes = num_classes
        self.network = networks.WholeFish(input_shape, num_classes, hparams)
        self.optimizer = torch.optim.Adam(
            self.network.parameters(),
            lr=self.hparams["lr"],
            weight_decay=self.hparams['weight_decay']
        )
        # Inner-loop optimizer state, carried across update() calls.
        self.optimizer_inner_state = None
    def create_clone(self, device):
        """Clone the meta network (and restore inner optimizer state) for
        one inner-loop pass."""
        self.network_inner = networks.WholeFish(self.input_shape, self.num_classes, self.hparams,
                                            weights=self.network.state_dict()).to(device)
        self.optimizer_inner = torch.optim.Adam(
            self.network_inner.parameters(),
            lr=self.hparams["lr"],
            weight_decay=self.hparams['weight_decay']
        )
        if self.optimizer_inner_state is not None:
            self.optimizer_inner.load_state_dict(self.optimizer_inner_state)
    def fish(self, meta_weights, inner_weights, lr_meta):
        """Reptile-style meta step: move meta weights toward inner weights."""
        meta_weights = ParamDict(meta_weights)
        inner_weights = ParamDict(inner_weights)
        meta_weights += lr_meta * (inner_weights - meta_weights)
        return meta_weights
    def update(self, minibatches, unlabeled=None):
        # NOTE(review): unlike ERM/AIDGN in this file, this update() takes no
        # `step` argument and unpacks 2-tuples (x, y), while the datasets here
        # yield (x, y, domain_label) triples — confirm Fish is wired to a
        # compatible training loop before use.
        self.create_clone(minibatches[0][0].device)
        # Inner loop: one SGD-style pass over the environments in sequence.
        for x, y in minibatches:
            loss = F.cross_entropy(self.network_inner(x), y)
            self.optimizer_inner.zero_grad()
            loss.backward()
            self.optimizer_inner.step()
        self.optimizer_inner_state = self.optimizer_inner.state_dict()
        meta_weights = self.fish(
            meta_weights=self.network.state_dict(),
            inner_weights=self.network_inner.state_dict(),
            lr_meta=self.hparams["meta_lr"]
        )
        self.network.reset_weights(meta_weights)
        # Reported loss is from the last environment of the inner loop only.
        return {'loss': loss.item()}
    def predict(self, x):
        return self.network(x)
class ARM(ERM):
    """ Adaptive Risk Minimization (ARM): ERM whose input is augmented with
    a context channel computed by a small context network and averaged over
    each support batch. """
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        original_input_shape = input_shape
        # The main network sees one extra (context) channel.
        input_shape = (1 + original_input_shape[0],) + original_input_shape[1:]
        super(ARM, self).__init__(input_shape, num_classes, num_domains,
                                  hparams)
        self.context_net = networks.ContextNet(original_input_shape)
        self.support_size = hparams['batch_size']
    def predict(self, x):
        batch_size, c, h, w = x.shape
        # Split the batch into support groups of support_size when it divides
        # evenly; otherwise treat the whole batch as one support set.
        if batch_size % self.support_size == 0:
            meta_batch_size = batch_size // self.support_size
            support_size = self.support_size
        else:
            meta_batch_size, support_size = 1, batch_size
        context = self.context_net(x)
        # Average the context map within each support group, then broadcast
        # it back to every sample of that group.
        context = context.reshape((meta_batch_size, support_size, 1, h, w))
        context = context.mean(dim=1)
        context = torch.repeat_interleave(context, repeats=support_size, dim=0)
        x = torch.cat([x, context], dim=1)
        return self.network(x)
class AbstractDANN(Algorithm):
"""Domain-Adversarial Neural Networks (abstract class)"""
def __init__(self, input_shape, num_classes, num_domains,
hparams, conditional, class_balance):
super(AbstractDANN, self).__init__(input_shape, num_classes, num_domains,
hparams)
self.register_buffer('update_count', torch.tensor([0]))
self.conditional = conditional
self.class_balance = class_balance
# Algorithms
self.featurizer = networks.Featurizer(input_shape, self.hparams)
self.classifier = networks.Classifier(
self.featurizer.n_outputs,
num_classes,
self.hparams['nonlinear_classifier'])
self.discriminator = networks.MLP(self.featurizer.n_outputs,
num_domains, self.hparams)
self.class_embeddings = nn.Embedding(num_classes,
self.featurizer.n_outputs)
# Optimizers
self.disc_opt = torch.optim.Adam(
(list(self.discriminator.parameters()) +
list(self.class_embeddings.parameters())),
lr=self.hparams["lr_d"],
weight_decay=self.hparams['weight_decay_d'],
betas=(self.hparams['beta1'], 0.9))
self.gen_opt = torch.optim.Adam(
(list(self.featurizer.parameters()) +
list(self.classifier.parameters())),
lr=self.hparams["lr_g"],
weight_decay=self.hparams['weight_decay_g'],
betas=(self.hparams['beta1'], 0.9))
def update(self, minibatches, unlabeled=None):
device = "cuda" if minibatches[0][0].is_cuda else "cpu"
self.update_count += 1
all_x = torch.cat([x for x, y in minibatches])
all_y = torch.cat([y for x, y in minibatches])
all_z = self.featurizer(all_x)
if self.conditional:
disc_input = all_z + self.class_embeddings(all_y)
else:
disc_input = all_z
disc_out = self.discriminator(disc_input)
disc_labels = torch.cat([
torch.full((x.shape[0], ), i, dtype=torch.int64, device=device)
for i, (x, y) in enumerate(minibatches)
])
if self.class_balance:
y_counts = F.one_hot(all_y).sum(dim=0)
weights = 1. / (y_counts[all_y] * y_counts.shape[0]).float()
disc_loss = F.cross_entropy(disc_out, disc_labels, reduction='none')
disc_loss = (weights * disc_loss).sum()
else:
disc_loss = F.cross_entropy(disc_out, disc_labels)
disc_softmax = F.softmax(disc_out, dim=1)
input_grad = autograd.grad(disc_softmax[:, disc_labels].sum(),
[disc_input], create_graph=True)[0]
grad_penalty = (input_grad**2).sum(dim=1).mean(dim=0)
disc_loss += self.hparams['grad_penalty'] * grad_penalty
d_steps_per_g = self.hparams['d_steps_per_g_step']
if (self.update_count.item() % (1+d_steps_per_g) < d_steps_per_g):
self.disc_opt.zero_grad()
disc_loss.backward()
self.disc_opt.step()
return {'disc_loss': disc_loss.item()}
else:
all_preds = self.classifier(all_z)
classifier_loss = F.cross_entropy(all_preds, all_y)
gen_loss = (classifier_loss +
(self.hparams['lambda'] * -disc_loss))
self.disc_opt.zero_grad()
self.gen_opt.zero_grad()
gen_loss.backward()
self.gen_opt.step()
return {'gen_loss': gen_loss.item()}
def predict(self, x):
    """Class logits for inputs `x`: featurize, then apply the classifier head."""
    features = self.featurizer(x)
    return self.classifier(features)
class DANN(AbstractDANN):
    """Unconditional DANN: adversarial domain confusion without class
    conditioning or class balancing."""

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(DANN, self).__init__(
            input_shape, num_classes, num_domains, hparams,
            conditional=False, class_balance=False)
class CDANN(AbstractDANN):
    """Conditional DANN: discriminator is conditioned on class embeddings
    and the domain loss is class-balanced."""

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(CDANN, self).__init__(
            input_shape, num_classes, num_domains, hparams,
            conditional=True, class_balance=True)
class IRM(ERM):
    """Invariant Risk Minimization"""

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(IRM, self).__init__(input_shape, num_classes, num_domains,
                                  hparams)
        # Counts update() calls so the penalty weight can be annealed in.
        self.register_buffer('update_count', torch.tensor([0]))

    @staticmethod
    def _irm_penalty(logits, y):
        """IRMv1 penalty: gradient of the risk w.r.t. a dummy classifier
        scale of 1.0, estimated as the product of the gradients computed on
        two interleaved halves of the batch."""
        device = "cuda" if logits[0][0].is_cuda else "cpu"
        scale = torch.tensor(1.).to(device).requires_grad_()
        loss_1 = F.cross_entropy(logits[::2] * scale, y[::2])
        loss_2 = F.cross_entropy(logits[1::2] * scale, y[1::2])
        grad_1 = autograd.grad(loss_1, [scale], create_graph=True)[0]
        grad_2 = autograd.grad(loss_2, [scale], create_graph=True)[0]
        result = torch.sum(grad_1 * grad_2)
        return result

    def update(self, minibatches, unlabeled=None):
        device = "cuda" if minibatches[0][0].is_cuda else "cpu"
        # Penalty is weak (1.0) until the anneal point, then jumps to
        # irm_lambda.
        penalty_weight = (self.hparams['irm_lambda'] if self.update_count
                          >= self.hparams['irm_penalty_anneal_iters'] else
                          1.0)
        nll = 0.
        penalty = 0.

        all_x = torch.cat([x for x, y in minibatches])
        # Single forward pass over all environments, then slice per minibatch.
        all_logits = self.network(all_x)
        all_logits_idx = 0
        for i, (x, y) in enumerate(minibatches):
            logits = all_logits[all_logits_idx:all_logits_idx + x.shape[0]]
            all_logits_idx += x.shape[0]
            nll += F.cross_entropy(logits, y)
            penalty += self._irm_penalty(logits, y)
        nll /= len(minibatches)
        penalty /= len(minibatches)
        loss = nll + (penalty_weight * penalty)

        if self.update_count == self.hparams['irm_penalty_anneal_iters']:
            # Reset Adam, because it doesn't like the sharp jump in gradient
            # magnitudes that happens at this step.
            self.optimizer = torch.optim.Adam(
                self.network.parameters(),
                lr=self.hparams["lr"],
                weight_decay=self.hparams['weight_decay'])

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        self.update_count += 1
        return {'loss': loss.item(), 'nll': nll.item(),
                'penalty': penalty.item()}
class VREx(ERM):
    """V-REx algorithm from http://arxiv.org/abs/2003.00688"""

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(VREx, self).__init__(input_shape, num_classes, num_domains,
                                   hparams)
        # Counts update() calls so the penalty weight can be annealed in.
        self.register_buffer('update_count', torch.tensor([0]))

    def update(self, minibatches, unlabeled=None):
        # Variance penalty is weak (1.0) until the anneal point.
        if self.update_count >= self.hparams["vrex_penalty_anneal_iters"]:
            penalty_weight = self.hparams["vrex_lambda"]
        else:
            penalty_weight = 1.0

        nll = 0.

        all_x = torch.cat([x for x, y in minibatches])
        all_logits = self.network(all_x)
        all_logits_idx = 0
        losses = torch.zeros(len(minibatches))
        for i, (x, y) in enumerate(minibatches):
            logits = all_logits[all_logits_idx:all_logits_idx + x.shape[0]]
            all_logits_idx += x.shape[0]
            # NOTE(review): nll is overwritten each iteration, so the
            # reported 'nll' below is the last environment's loss only.
            nll = F.cross_entropy(logits, y)
            losses[i] = nll

        mean = losses.mean()
        # Penalize the variance of the per-environment risks.
        penalty = ((losses - mean) ** 2).mean()
        loss = mean + penalty_weight * penalty

        if self.update_count == self.hparams['vrex_penalty_anneal_iters']:
            # Reset Adam (like IRM), because it doesn't like the sharp jump in
            # gradient magnitudes that happens at this step.
            self.optimizer = torch.optim.Adam(
                self.network.parameters(),
                lr=self.hparams["lr"],
                weight_decay=self.hparams['weight_decay'])

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        self.update_count += 1
        return {'loss': loss.item(), 'nll': nll.item(),
                'penalty': penalty.item()}
class Mixup(ERM):
    """
    Mixup of minibatches from different domains
    https://arxiv.org/pdf/2001.00677.pdf
    https://arxiv.org/pdf/1912.01805.pdf
    """

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(Mixup, self).__init__(input_shape, num_classes, num_domains,
                                    hparams)

    def update(self, minibatches, unlabeled=None):
        objective = 0

        for (xi, yi), (xj, yj) in random_pairs_of_minibatches(minibatches):
            # Beta-distributed mixing coefficient for this domain pair.
            lam = np.random.beta(self.hparams["mixup_alpha"],
                                 self.hparams["mixup_alpha"])

            mixed_x = lam * xi + (1 - lam) * xj
            predictions = self.predict(mixed_x)

            # Convex combination of the two domains' losses.
            pair_loss = (lam * F.cross_entropy(predictions, yi)
                         + (1 - lam) * F.cross_entropy(predictions, yj))
            objective = objective + pair_loss

        objective = objective / len(minibatches)

        self.optimizer.zero_grad()
        objective.backward()
        self.optimizer.step()

        return {'loss': objective.item()}
class GroupDRO(ERM):
    """
    Robust ERM minimizes the error at the worst minibatch
    Algorithm 1 from [https://arxiv.org/pdf/1911.08731.pdf]
    """

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(GroupDRO, self).__init__(input_shape, num_classes, num_domains,
                                       hparams)
        # Per-environment weights; filled in lazily on the first update.
        self.register_buffer("q", torch.Tensor())

    def update(self, minibatches, unlabeled=None):
        device = "cuda" if minibatches[0][0].is_cuda else "cpu"

        if not len(self.q):
            # Start from uniform environment weights.
            self.q = torch.ones(len(minibatches)).to(device)

        losses = torch.zeros(len(minibatches)).to(device)

        for idx, (x, y) in enumerate(minibatches):
            losses[idx] = F.cross_entropy(self.predict(x), y)
            # Exponentiated-gradient ascent on the environment weights.
            self.q[idx] *= (self.hparams["groupdro_eta"] * losses[idx].data).exp()

        self.q /= self.q.sum()

        loss = torch.dot(losses, self.q)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        return {'loss': loss.item()}
class MLDG(ERM):
    """
    Model-Agnostic Meta-Learning
    Algorithm 1 / Equation (3) from: https://arxiv.org/pdf/1710.03463.pdf
    Related: https://arxiv.org/pdf/1703.03400.pdf
    Related: https://arxiv.org/pdf/1910.13580.pdf
    """

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(MLDG, self).__init__(input_shape, num_classes, num_domains,
                                   hparams)

    def update(self, minibatches, unlabeled=None):
        """
        Terms being computed:
            * Li = Loss(xi, yi, params)
            * Gi = Grad(Li, params)

            * Lj = Loss(xj, yj, Optimizer(params, grad(Li, params)))
            * Gj = Grad(Lj, params)

            * params = Optimizer(params, Grad(Li + beta * Lj, params))
            *        = Optimizer(params, Gi + beta * Gj)

        That is, when calling .step(), we want grads to be Gi + beta * Gj

        For computational efficiency, we do not compute second derivatives.
        """
        num_mb = len(minibatches)
        objective = 0

        self.optimizer.zero_grad()
        # Make sure every parameter has a .grad buffer to accumulate into.
        for p in self.network.parameters():
            if p.grad is None:
                p.grad = torch.zeros_like(p)

        for (xi, yi), (xj, yj) in random_pairs_of_minibatches(minibatches):
            # fine tune clone-network on task "i"
            inner_net = copy.deepcopy(self.network)

            inner_opt = torch.optim.Adam(
                inner_net.parameters(),
                lr=self.hparams["lr"],
                weight_decay=self.hparams['weight_decay']
            )

            inner_obj = F.cross_entropy(inner_net(xi), yi)

            inner_opt.zero_grad()
            inner_obj.backward()
            inner_opt.step()

            # The network has now accumulated gradients Gi
            # The clone-network has now parameters P - lr * Gi
            for p_tgt, p_src in zip(self.network.parameters(),
                                    inner_net.parameters()):
                if p_src.grad is not None:
                    p_tgt.grad.data.add_(p_src.grad.data / num_mb)

            # `objective` is populated for reporting purposes
            objective += inner_obj.item()

            # this computes Gj on the clone-network
            loss_inner_j = F.cross_entropy(inner_net(xj), yj)
            grad_inner_j = autograd.grad(loss_inner_j, inner_net.parameters(),
                allow_unused=True)

            # `objective` is populated for reporting purposes
            objective += (self.hparams['mldg_beta'] * loss_inner_j).item()

            # First-order MAML: add beta * Gj straight onto the real grads.
            for p, g_j in zip(self.network.parameters(), grad_inner_j):
                if g_j is not None:
                    p.grad.data.add_(
                        self.hparams['mldg_beta'] * g_j.data / num_mb)

            # The network has now accumulated gradients Gi + beta * Gj
            # Repeat for all train-test splits, do .step()

        objective /= len(minibatches)

        self.optimizer.step()

        return {'loss': objective}

    # This commented "update" method back-propagates through the gradients of
    # the inner update, as suggested in the original MAML paper.  However, this
    # is twice as expensive as the uncommented "update" method, which does not
    # compute second-order derivatives, implementing the First-Order MAML
    # method (FOMAML) described in the original MAML paper.
    #
    # def update(self, minibatches, unlabeled=None):
    #     objective = 0
    #     beta = self.hparams["beta"]
    #     inner_iterations = self.hparams["inner_iterations"]
    #
    #     self.optimizer.zero_grad()
    #
    #     with higher.innerloop_ctx(self.network, self.optimizer,
    #             copy_initial_weights=False) as (inner_network, inner_optimizer):
    #         for (xi, yi), (xj, yj) in random_pairs_of_minibatches(minibatches):
    #             for inner_iteration in range(inner_iterations):
    #                 li = F.cross_entropy(inner_network(xi), yi)
    #                 inner_optimizer.step(li)
    #
    #             objective += F.cross_entropy(self.network(xi), yi)
    #             objective += beta * F.cross_entropy(inner_network(xj), yj)
    #
    #         objective /= len(minibatches)
    #         objective.backward()
    #
    #     self.optimizer.step()
    #
    #     return objective
class AbstractMMD(ERM):
    """
    Perform ERM while matching the pair-wise domain feature distributions
    using MMD (abstract class).

    With `gaussian=True` the penalty is a multi-bandwidth Gaussian-kernel
    MMD; otherwise it is the mean + covariance (CORAL-style) difference.
    """

    def __init__(self, input_shape, num_classes, num_domains, hparams, gaussian):
        super(AbstractMMD, self).__init__(input_shape, num_classes, num_domains,
                                          hparams)
        if gaussian:
            self.kernel_type = "gaussian"
        else:
            self.kernel_type = "mean_cov"

    def my_cdist(self, x1, x2):
        """Pairwise squared Euclidean distances, clamped away from zero."""
        x1_norm = x1.pow(2).sum(dim=-1, keepdim=True)
        x2_norm = x2.pow(2).sum(dim=-1, keepdim=True)
        # ||a-b||^2 = ||a||^2 - 2 a.b + ||b||^2, computed as a single addmm.
        res = torch.addmm(x2_norm.transpose(-2, -1),
                          x1,
                          x2.transpose(-2, -1), alpha=-2).add_(x1_norm)
        return res.clamp_min_(1e-30)

    def gaussian_kernel(self, x, y, gamma=(0.001, 0.01, 0.1, 1, 10, 100,
                                           1000)):
        """Sum of RBF kernels over several bandwidths `gamma`.

        Note: the default is a tuple; the previous list default was a
        mutable default argument (never mutated here, but unsafe idiom).
        """
        D = self.my_cdist(x, y)
        K = torch.zeros_like(D)

        for g in gamma:
            K.add_(torch.exp(D.mul(-g)))

        return K

    def mmd(self, x, y):
        """Distribution-difference penalty between feature batches x and y."""
        if self.kernel_type == "gaussian":
            Kxx = self.gaussian_kernel(x, x).mean()
            Kyy = self.gaussian_kernel(y, y).mean()
            Kxy = self.gaussian_kernel(x, y).mean()
            return Kxx + Kyy - 2 * Kxy
        else:
            # CORAL-style: match first and second moments.
            mean_x = x.mean(0, keepdim=True)
            mean_y = y.mean(0, keepdim=True)
            cent_x = x - mean_x
            cent_y = y - mean_y
            cova_x = (cent_x.t() @ cent_x) / (len(x) - 1)
            cova_y = (cent_y.t() @ cent_y) / (len(y) - 1)

            mean_diff = (mean_x - mean_y).pow(2).mean()
            cova_diff = (cova_x - cova_y).pow(2).mean()

            return mean_diff + cova_diff

    def update(self, minibatches, unlabeled=None):
        objective = 0
        penalty = 0
        nmb = len(minibatches)

        features = [self.featurizer(xi) for xi, _ in minibatches]
        classifs = [self.classifier(fi) for fi in features]
        targets = [yi for _, yi in minibatches]

        # Classification loss per environment plus pairwise MMD penalty.
        for i in range(nmb):
            objective += F.cross_entropy(classifs[i], targets[i])
            for j in range(i + 1, nmb):
                penalty += self.mmd(features[i], features[j])

        objective /= nmb
        if nmb > 1:
            # Average over the nmb*(nmb-1)/2 environment pairs.
            penalty /= (nmb * (nmb - 1) / 2)

        self.optimizer.zero_grad()
        (objective + (self.hparams['mmd_gamma']*penalty)).backward()
        self.optimizer.step()

        if torch.is_tensor(penalty):
            penalty = penalty.item()

        return {'loss': objective.item(), 'penalty': penalty}
class MMD(AbstractMMD):
    """AbstractMMD with the Gaussian-kernel MMD penalty."""

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(MMD, self).__init__(
            input_shape, num_classes, num_domains, hparams, gaussian=True)
class CORAL(AbstractMMD):
    """AbstractMMD with the mean + covariance difference penalty."""

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(CORAL, self).__init__(
            input_shape, num_classes, num_domains, hparams, gaussian=False)
class MTL(Algorithm):
    """
    A neural network version of
    Domain Generalization by Marginal Transfer Learning
    (https://arxiv.org/abs/1711.07910)
    """

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(MTL, self).__init__(input_shape, num_classes, num_domains,
                                  hparams)
        self.featurizer = networks.Featurizer(input_shape, self.hparams)
        # Classifier sees [instance features ; per-domain mean embedding].
        self.classifier = networks.Classifier(
            self.featurizer.n_outputs * 2,
            num_classes,
            self.hparams['nonlinear_classifier'])
        self.optimizer = torch.optim.Adam(
            list(self.featurizer.parameters()) +\
            list(self.classifier.parameters()),
            lr=self.hparams["lr"],
            weight_decay=self.hparams['weight_decay']
        )

        # Running (EMA) mean embedding per training domain.
        self.register_buffer('embeddings',
                             torch.zeros(num_domains,
                                         self.featurizer.n_outputs))

        self.ema = self.hparams['mtl_ema']

    def update(self, minibatches, unlabeled=None):
        loss = 0
        for env, (x, y) in enumerate(minibatches):
            loss += F.cross_entropy(self.predict(x, env), y)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        return {'loss': loss.item()}

    def update_embeddings_(self, features, env=None):
        """EMA-update and return the mean embedding for `env`; with
        env=None, just return the batch-mean embedding (no state change)."""
        return_embedding = features.mean(0)

        if env is not None:
            return_embedding = self.ema * return_embedding +\
                               (1 - self.ema) * self.embeddings[env]

            self.embeddings[env] = return_embedding.clone().detach()

        return return_embedding.view(1, -1).repeat(len(features), 1)

    def predict(self, x, env=None):
        features = self.featurizer(x)
        # NOTE(review): .normal_() overwrites the computed embedding with
        # random normal noise before classification.  This matches upstream
        # DomainBed, but looks suspicious -- confirm it is intentional.
        embedding = self.update_embeddings_(features, env).normal_()
        return self.classifier(torch.cat((features, embedding), 1))
class SagNet(Algorithm):
    """
    Style Agnostic Network
    Algorithm 1 from: https://arxiv.org/abs/1910.11645

    Trains a content classifier on style-randomized features, a style
    classifier on content-randomized features, and adversarially pushes the
    featurizer to make style predictions uninformative.
    """

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(SagNet, self).__init__(input_shape, num_classes, num_domains,
                                     hparams)
        # featurizer network
        self.network_f = networks.Featurizer(input_shape, self.hparams)
        # content network
        self.network_c = networks.Classifier(
            self.network_f.n_outputs,
            num_classes,
            self.hparams['nonlinear_classifier'])
        # style network
        self.network_s = networks.Classifier(
            self.network_f.n_outputs,
            num_classes,
            self.hparams['nonlinear_classifier'])

        # # This commented block of code implements something closer to the
        # # original paper, but is specific to ResNet and puts in disadvantage
        # # the other algorithms.
        #
        # resnet_c = networks.Featurizer(input_shape, self.hparams)
        # resnet_s = networks.Featurizer(input_shape, self.hparams)
        # # featurizer network
        # self.network_f = torch.nn.Sequential(
        #         resnet_c.network.conv1,
        #         resnet_c.network.bn1,
        #         resnet_c.network.relu,
        #         resnet_c.network.maxpool,
        #         resnet_c.network.layer1,
        #         resnet_c.network.layer2,
        #         resnet_c.network.layer3)
        # # content network
        # self.network_c = torch.nn.Sequential(
        #         resnet_c.network.layer4,
        #         resnet_c.network.avgpool,
        #         networks.Flatten(),
        #         resnet_c.network.fc)
        # # style network
        # self.network_s = torch.nn.Sequential(
        #         resnet_s.network.layer4,
        #         resnet_s.network.avgpool,
        #         networks.Flatten(),
        #         resnet_s.network.fc)

        def opt(p):
            # Shared optimizer factory for the three sub-networks.
            return torch.optim.Adam(p, lr=hparams["lr"],
                    weight_decay=hparams["weight_decay"])

        self.optimizer_f = opt(self.network_f.parameters())
        self.optimizer_c = opt(self.network_c.parameters())
        self.optimizer_s = opt(self.network_s.parameters())
        self.weight_adv = hparams["sag_w_adv"]

    def forward_c(self, x):
        # learning content network on randomized style
        return self.network_c(self.randomize(self.network_f(x), "style"))

    def forward_s(self, x):
        # learning style network on randomized content
        return self.network_s(self.randomize(self.network_f(x), "content"))

    def randomize(self, x, what="style", eps=1e-5):
        """Normalize x per sample, then either mix the (mean, var)
        statistics with those of a random other sample ("style") or swap
        the normalized content itself ("content")."""
        device = "cuda" if x.is_cuda else "cpu"
        sizes = x.size()
        alpha = torch.rand(sizes[0], 1).to(device)

        if len(sizes) == 4:
            # Conv feature maps: flatten spatial dims for the statistics.
            x = x.view(sizes[0], sizes[1], -1)
            alpha = alpha.unsqueeze(-1)

        mean = x.mean(-1, keepdim=True)
        var = x.var(-1, keepdim=True)

        x = (x - mean) / (var + eps).sqrt()

        idx_swap = torch.randperm(sizes[0])
        if what == "style":
            mean = alpha * mean + (1 - alpha) * mean[idx_swap]
            var = alpha * var + (1 - alpha) * var[idx_swap]
        else:
            x = x[idx_swap].detach()

        x = x * (var + eps).sqrt() + mean
        return x.view(*sizes)

    def update(self, minibatches, unlabeled=None):
        all_x = torch.cat([x for x, y in minibatches])
        all_y = torch.cat([y for x, y in minibatches])

        # learn content
        self.optimizer_f.zero_grad()
        self.optimizer_c.zero_grad()
        loss_c = F.cross_entropy(self.forward_c(all_x), all_y)
        loss_c.backward()
        self.optimizer_f.step()
        self.optimizer_c.step()

        # learn style (featurizer is not stepped for this loss)
        self.optimizer_s.zero_grad()
        loss_s = F.cross_entropy(self.forward_s(all_x), all_y)
        loss_s.backward()
        self.optimizer_s.step()

        # learn adversary: update the featurizer so that style predictions
        # become uninformative (entropy maximization on the style head).
        self.optimizer_f.zero_grad()
        loss_adv = -F.log_softmax(self.forward_s(all_x), dim=1).mean(1).mean()
        loss_adv = loss_adv * self.weight_adv
        loss_adv.backward()
        self.optimizer_f.step()

        return {'loss_c': loss_c.item(), 'loss_s': loss_s.item(),
                'loss_adv': loss_adv.item()}

    def predict(self, x):
        return self.network_c(self.network_f(x))
class RSC(ERM):
    """Representation Self-Challenging: mutes the most predictive feature
    activations (and the examples most affected by the muting) each step, so
    the network is forced to use the remaining evidence.  Equation numbers
    in the comments refer to the RSC paper."""

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(RSC, self).__init__(input_shape, num_classes, num_domains,
                                  hparams)
        # Percentile cutoffs derived from the drop factors (e.g. a drop
        # factor of 1/3 keeps activations below the 66.7th percentile).
        self.drop_f = (1 - hparams['rsc_f_drop_factor']) * 100
        self.drop_b = (1 - hparams['rsc_b_drop_factor']) * 100
        self.num_classes = num_classes

    def update(self, minibatches, unlabeled=None):
        device = "cuda" if minibatches[0][0].is_cuda else "cpu"

        # inputs
        all_x = torch.cat([x for x, y in minibatches])
        # labels
        all_y = torch.cat([y for _, y in minibatches])
        # one-hot labels
        all_o = torch.nn.functional.one_hot(all_y, self.num_classes)
        # features
        all_f = self.featurizer(all_x)
        # predictions
        all_p = self.classifier(all_f)

        # Equation (1): compute gradients with respect to representation
        all_g = autograd.grad((all_p * all_o).sum(), all_f)[0]

        # Equation (2): compute top-gradient-percentile mask
        percentiles = np.percentile(all_g.cpu(), self.drop_f, axis=1)
        percentiles = torch.Tensor(percentiles)
        percentiles = percentiles.unsqueeze(1).repeat(1, all_g.size(1))
        mask_f = all_g.lt(percentiles.to(device)).float()

        # Equation (3): mute top-gradient-percentile activations
        all_f_muted = all_f * mask_f

        # Equation (4): compute muted predictions
        all_p_muted = self.classifier(all_f_muted)

        # Section 3.3: Batch Percentage -- keep unmuted the examples whose
        # true-class confidence dropped the least.
        all_s = F.softmax(all_p, dim=1)
        all_s_muted = F.softmax(all_p_muted, dim=1)
        changes = (all_s * all_o).sum(1) - (all_s_muted * all_o).sum(1)
        percentile = np.percentile(changes.detach().cpu(), self.drop_b)
        mask_b = changes.lt(percentile).float().view(-1, 1)
        mask = torch.logical_or(mask_f, mask_b).float()

        # Equations (3) and (4) again, this time mutting over examples
        all_p_muted_again = self.classifier(all_f * mask)

        # Equation (5): update
        loss = F.cross_entropy(all_p_muted_again, all_y)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        return {'loss': loss.item()}
class SD(ERM):
    """
    Gradient Starvation: A Learning Proclivity in Neural Networks
    Equation 25 from [https://arxiv.org/pdf/2011.09468.pdf]
    """

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(SD, self).__init__(input_shape, num_classes, num_domains,
                                 hparams)
        self.sd_reg = hparams["sd_reg"]

    def update(self, minibatches, unlabeled=None):
        inputs = torch.cat([x for x, _ in minibatches])
        targets = torch.cat([y for _, y in minibatches])
        logits = self.predict(inputs)

        ce_loss = F.cross_entropy(logits, targets)
        # Spectral-decoupling penalty: squared logit magnitude.
        logit_penalty = logits.pow(2).mean()
        total = ce_loss + self.sd_reg * logit_penalty

        self.optimizer.zero_grad()
        total.backward()
        self.optimizer.step()

        return {'loss': ce_loss.item(), 'penalty': logit_penalty.item()}
class ANDMask(ERM):
    """
    Learning Explanations that are Hard to Vary [https://arxiv.org/abs/2009.00329]
    AND-Mask implementation from [https://github.com/gibipara92/learning-explanations-hard-to-vary]
    """

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(ANDMask, self).__init__(input_shape, num_classes, num_domains, hparams)
        # Agreement threshold: a gradient component survives only where the
        # mean gradient sign across environments has magnitude >= tau.
        self.tau = hparams["tau"]

    def update(self, minibatches, unlabeled=None):
        total_loss = 0
        param_gradients = [[] for _ in self.network.parameters()]
        all_x = torch.cat([x for x, y in minibatches])
        all_logits = self.network(all_x)
        all_logits_idx = 0
        for i, (x, y) in enumerate(minibatches):
            logits = all_logits[all_logits_idx:all_logits_idx + x.shape[0]]
            all_logits_idx += x.shape[0]

            env_loss = F.cross_entropy(logits, y)
            total_loss += env_loss

            # Per-environment gradients; retain_graph because the shared
            # forward pass is differentiated once per environment.
            env_grads = autograd.grad(env_loss, self.network.parameters(), retain_graph=True)
            for grads, env_grad in zip(param_gradients, env_grads):
                grads.append(env_grad)

        mean_loss = total_loss / len(minibatches)

        self.optimizer.zero_grad()
        # No backward() call here: mask_grads writes .grad directly.
        self.mask_grads(self.tau, param_gradients, self.network.parameters())
        self.optimizer.step()

        return {'loss': mean_loss.item()}

    def mask_grads(self, tau, gradients, params):
        """Set each param's .grad to the sign-agreement-masked mean
        gradient, rescaled by the fraction of surviving components."""
        for param, grads in zip(params, gradients):
            grads = torch.stack(grads, dim=0)
            grad_signs = torch.sign(grads)
            mask = torch.mean(grad_signs, dim=0).abs() >= self.tau
            mask = mask.to(torch.float32)
            avg_grad = torch.mean(grads, dim=0)

            mask_t = (mask.sum() / mask.numel())
            param.grad = mask * avg_grad
            param.grad *= (1. / (1e-10 + mask_t))

        # Vestigial return value; callers ignore it.
        return 0
class IGA(ERM):
    """
    Inter-environmental Gradient Alignment
    From https://arxiv.org/abs/2008.01883v2
    """

    def __init__(self, in_features, num_classes, num_domains, hparams):
        super(IGA, self).__init__(in_features, num_classes, num_domains, hparams)

    def update(self, minibatches, unlabeled=False):
        all_x = torch.cat([x for x, y in minibatches])
        all_logits = self.network(all_x)

        total_loss = 0
        all_logits_idx = 0
        grads = []
        for i, (x, y) in enumerate(minibatches):
            logits = all_logits[all_logits_idx:all_logits_idx + x.shape[0]]
            all_logits_idx += x.shape[0]
            env_loss = F.cross_entropy(logits, y)
            total_loss += env_loss

            # create_graph so the gradient-alignment penalty below remains
            # differentiable.
            env_grad = autograd.grad(env_loss, self.network.parameters(),
                                        create_graph=True)

            grads.append(env_grad)

        mean_loss = total_loss / len(minibatches)
        mean_grad = autograd.grad(mean_loss, self.network.parameters(),
                                        create_graph=True)

        # compute trace penalty: squared deviation of each environment's
        # gradient from the mean gradient.
        penalty_value = 0
        for grad in grads:
            for g, mean_g in zip(grad, mean_grad):
                penalty_value += (g - mean_g).pow(2).sum()

        objective = mean_loss + self.hparams['penalty'] * penalty_value

        self.optimizer.zero_grad()
        objective.backward()
        self.optimizer.step()

        return {'loss': mean_loss.item(), 'penalty': penalty_value.item()}
class SelfReg(ERM):
    """ERM plus self-supervised in-batch regularization: same-class samples
    are shuffled and mixed, and the model is penalized when their logits
    and projected features disagree."""

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(SelfReg, self).__init__(input_shape, num_classes, num_domains,
                                      hparams)
        self.num_classes = num_classes
        self.MSEloss = nn.MSELoss()
        input_feat_size = self.featurizer.n_outputs
        hidden_size = input_feat_size if input_feat_size==2048 else input_feat_size*2

        # Projection MLP applied to features before the feature losses.
        self.cdpl = nn.Sequential(
                            nn.Linear(input_feat_size, hidden_size),
                            nn.BatchNorm1d(hidden_size),
                            nn.ReLU(inplace=True),
                            nn.Linear(hidden_size, hidden_size),
                            nn.BatchNorm1d(hidden_size),
                            nn.ReLU(inplace=True),
                            nn.Linear(hidden_size, input_feat_size),
                            nn.BatchNorm1d(input_feat_size)
        )

    def update(self, minibatches, unlabeled=None):
        all_x = torch.cat([x for x, y in minibatches])
        all_y = torch.cat([y for _, y in minibatches])

        lam = np.random.beta(0.5, 0.5)

        batch_size = all_y.size()[0]

        # cluster and order features into same-class group
        with torch.no_grad():
            sorted_y, indices = torch.sort(all_y)
            sorted_x = torch.zeros_like(all_x)
            for idx, order in enumerate(indices):
                sorted_x[idx] = all_x[order]
            # `intervals` records the end index of each same-class run in
            # the sorted batch (boundaries where the label changes).
            intervals = []
            ex = 0
            for idx, val in enumerate(sorted_y):
                if ex==val:
                    continue
                intervals.append(idx)
                ex = val
            intervals.append(batch_size)

            all_x = sorted_x
            all_y = sorted_y

        feat = self.featurizer(all_x)
        proj = self.cdpl(feat)
        output = self.classifier(feat)

        # shuffle: build two independent same-class permutations of the
        # logits and projected features.
        output_2 = torch.zeros_like(output)
        feat_2 = torch.zeros_like(proj)
        output_3 = torch.zeros_like(output)
        feat_3 = torch.zeros_like(proj)
        ex = 0
        for end in intervals:
            shuffle_indices = torch.randperm(end-ex)+ex
            shuffle_indices2 = torch.randperm(end-ex)+ex
            for idx in range(end-ex):
                output_2[idx+ex] = output[shuffle_indices[idx]]
                feat_2[idx+ex] = proj[shuffle_indices[idx]]
                output_3[idx+ex] = output[shuffle_indices2[idx]]
                feat_3[idx+ex] = proj[shuffle_indices2[idx]]
            ex = end

        # mixup the two shuffles
        output_3 = lam*output_2 + (1-lam)*output_3
        feat_3 = lam*feat_2 + (1-lam)*feat_3

        # regularization: individual (ind) and heterogeneous (hdl) losses
        L_ind_logit = self.MSEloss(output, output_2)
        L_hdl_logit = self.MSEloss(output, output_3)
        L_ind_feat = 0.3 * self.MSEloss(feat, feat_2)
        L_hdl_feat = 0.3 * self.MSEloss(feat, feat_3)

        cl_loss = F.cross_entropy(output, all_y)
        # Scale the regularizers down once classification loss drops below 1.
        C_scale = min(cl_loss.item(), 1.)
        loss = cl_loss + C_scale*(lam*(L_ind_logit + L_ind_feat)+(1-lam)*(L_hdl_logit + L_hdl_feat))

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        return {'loss': loss.item()}
class GradReverse(torch.autograd.Function):
    """Gradient-reversal layer: identity in the forward pass, sign-flipped
    gradient in the backward pass."""

    @staticmethod
    def forward(ctx, x):
        # Return a view so the forward pass is the identity.
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Negate the incoming gradient.
        return grad_output.neg()
class Domain_Discriminator(nn.Module):
    """MLP that predicts the domain from features passed through a
    gradient-reversal layer (adversarial domain-invariance head)."""

    def __init__(self, feature_dim, domain_classes):
        super(Domain_Discriminator, self).__init__()
        layers = [
            nn.Linear(feature_dim, feature_dim),
            nn.ReLU(),
            nn.Linear(feature_dim, feature_dim),
            nn.ReLU(),
            nn.Linear(feature_dim, domain_classes),
        ]
        self.class_classifier = nn.Sequential(*layers)

    def forward(self, di_z):
        # Reverse gradients so the featurizer is pushed to confuse domains.
        reversed_z = GradReverse.apply(di_z)
        return self.class_classifier(reversed_z)
class Classifier(nn.Module):
    """Linear head over the concatenation of two feature vectors
    (domain-invariant and domain-specific)."""

    def __init__(self, feature_dim, classes):
        super(Classifier, self).__init__()
        # Consumes both feature vectors, hence twice the feature dimension.
        self.classifier = nn.Linear(int(feature_dim * 2), classes)

    def forward(self, di_z, ds_z):
        joint = torch.cat((di_z, ds_z), dim=1)
        return self.classifier(joint)
class ZS_Domain_Classifier(nn.Module):
    """Single linear layer predicting the domain from domain-specific
    features (no gradient reversal)."""

    def __init__(self, feature_dim, domain_classes):
        super(ZS_Domain_Classifier, self).__init__()
        self.class_classifier = nn.Sequential(
            nn.Linear(feature_dim, domain_classes)
        )

    def forward(self, ds_z):
        return self.class_classifier(ds_z)
class MDSDI(Algorithm):
    """Disentangles domain-invariant and domain-specific features.

    Two featurizers are learned: `di_featurizer` (domain-invariant, trained
    adversarially against a gradient-reversal domain discriminator) and
    `ds_featurizer` (domain-specific, trained to predict the domain), with a
    cross-covariance penalty decorrelating them.  The domain-specific branch
    and the classifier are additionally meta-trained MLDG-style.

    Minibatches are (x, y, domain_label) triples.
    """

    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(MDSDI, self).__init__(input_shape, num_classes, num_domains, hparams)
        self.di_featurizer = networks.Featurizer(input_shape, self.hparams)
        self.ds_featurizer = networks.Featurizer(input_shape, self.hparams)
        # Classifier consumes the concatenation of both feature vectors.
        self.classifier = networks.Classifier(
            self.di_featurizer.n_outputs + self.ds_featurizer.n_outputs,
            num_classes,
            self.hparams['nonlinear_classifier'])
        # Adversarial head (gradient reversal) on the invariant features.
        self.domain_discriminator = Domain_Discriminator(
            self.di_featurizer.n_outputs, num_domains)
        # Plain domain classifier on the specific features.
        self.domain_classifier = ZS_Domain_Classifier(
            self.ds_featurizer.n_outputs, num_domains)

        optimizer_params = (list(self.di_featurizer.parameters())
                            + list(self.ds_featurizer.parameters())
                            + list(self.classifier.parameters())
                            + list(self.domain_discriminator.parameters())
                            + list(self.domain_classifier.parameters()))
        self.optimizer = torch.optim.Adam(
            optimizer_params,
            lr=self.hparams["lr"],
            weight_decay=self.hparams['weight_decay']
        )
        # Meta step only updates the domain-specific branch + classifier.
        meta_optimizer_params = (list(self.ds_featurizer.parameters())
                                 + list(self.classifier.parameters()))
        self.meta_optimizer = torch.optim.Adam(
            meta_optimizer_params,
            lr=self.hparams["lr"],
            weight_decay=self.hparams['weight_decay']
        )

    def update(self, minibatches, unlabeled=None):
        all_x = torch.cat([x for x, y, z in minibatches])
        all_y = torch.cat([y for x, y, z in minibatches])
        all_z = torch.cat([z for x, y, z in minibatches])  # domain labels
        di_z, ds_z = self.di_featurizer(all_x), self.ds_featurizer(all_x)

        # Disentangle via the cross-covariance of the two (centered)
        # feature sets: push it toward zero.
        mdi_z = torch.mean(di_z, 0)
        mds_z = torch.mean(ds_z, 0)
        di_z_n = (di_z - mdi_z[None, :])
        ds_z_n = (ds_z - mds_z[None, :])
        C = di_z_n[:, :, None] * ds_z_n[:, None, :]
        # zeros_like keeps the target on C's device/dtype; the previous
        # hard-coded .cuda() broke CPU-only runs.
        target_cr = torch.zeros_like(C)
        disentangle_loss = nn.MSELoss()(C, target_cr)

        # Invariant features: domain loss goes through the gradient-reversal
        # discriminator, removing domain information from di_z.
        di_predicted_domain = self.domain_discriminator(di_z)
        predicted_domain_di_loss = F.cross_entropy(di_predicted_domain, all_z)

        # Specific features: predict the domain directly.
        ds_predicted_classes = self.domain_classifier(ds_z)
        predicted_domain_ds_loss = F.cross_entropy(ds_predicted_classes, all_z)

        z = torch.cat((di_z, ds_z), dim=1)
        predicted_classes = self.classifier(z)
        acc_loss = F.cross_entropy(predicted_classes, all_y)

        total_loss = (acc_loss + predicted_domain_di_loss
                      + disentangle_loss + predicted_domain_ds_loss)
        self.optimizer.zero_grad()
        total_loss.backward()
        self.optimizer.step()

        # MLDG-style meta step on (ds_featurizer, classifier): fine-tune a
        # clone on domain i, then accumulate its first-order gradients from
        # held-out domain j onto the real parameters (see MLDG.update).
        num_mb = len(minibatches)
        meta_objective = 0
        self.meta_optimizer.zero_grad()
        self_param = (list(self.ds_featurizer.parameters())
                      + list(self.classifier.parameters()))
        for p in self_param:
            if p.grad is None:
                p.grad = torch.zeros_like(p)

        for (xi, yi), (xj, yj) in random_pairs_of_minibatches(minibatches):
            inner_zs_model = copy.deepcopy(self.ds_featurizer)
            inner_classifier = copy.deepcopy(self.classifier)
            inner_param = (list(inner_zs_model.parameters())
                           + list(inner_classifier.parameters()))
            inner_opt = torch.optim.Adam(
                inner_param,
                lr=self.hparams["lr"],
                weight_decay=self.hparams['weight_decay']
            )
            di_z, ds_z = self.di_featurizer(xi), inner_zs_model(xi)
            z = torch.cat((di_z, ds_z), dim=1)
            predicted_classes = inner_classifier(z)
            inner_obj = F.cross_entropy(predicted_classes, yi)

            inner_opt.zero_grad()
            inner_obj.backward()
            inner_opt.step()

            # Copy the inner (task-i) gradients back onto the real params.
            for p_tgt, p_src in zip(self_param, inner_param):
                if p_src.grad is not None:
                    p_tgt.grad.data.add_(p_src.grad.data / num_mb)

            meta_objective += inner_obj.item()

            # First-order gradient of the fine-tuned clone on domain j.
            di_z, ds_z = self.di_featurizer(xj), inner_zs_model(xj)
            z = torch.cat((di_z, ds_z), dim=1)
            predicted_classes = inner_classifier(z)
            loss_inner_j = F.cross_entropy(predicted_classes, yj)
            grad_inner_j = autograd.grad(loss_inner_j, inner_param,
                                         allow_unused=True)

            meta_objective += (1.0 * loss_inner_j).item()

            for p, g_j in zip(self_param, grad_inner_j):
                if g_j is not None:
                    p.grad.data.add_(1.0 * g_j.data / num_mb)

        meta_objective /= len(minibatches)
        self.meta_optimizer.step()

        total_loss = total_loss.item() + meta_objective
        return {'loss': total_loss}

    def predict(self, x):
        di_z, ds_z = self.di_featurizer(x), self.ds_featurizer(x)
        z = torch.cat((di_z, ds_z), dim=1)
        return self.classifier(z)
aidgn | aidgn-main/domainbed/hparams_registry.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
from domainbed.lib import misc
def _define_hparam(hparams, hparam_name, default_val, random_val_fn):
hparams[hparam_name] = (hparams, hparam_name, default_val, random_val_fn)
def _hparams(algorithm, dataset, random_seed):
    """
    Global registry of hyperparams. Each entry is a (default, random) tuple.
    New algorithms / networks / etc. should add entries here.
    """
    SMALL_IMAGES = ['Debug28', 'RotatedMNIST', 'ColoredMNIST']

    # Pre-seeded entry: PAC mode is off for both default and random configs.
    hparams = {"PAC": (False, False)}

    def _hparam(name, default_val, random_val_fn):
        """Define a hyperparameter. random_val_fn takes a RandomState and
        returns a random hyperparameter value."""
        assert(name not in hparams)
        # Seed on (search seed, name) so each hyperparameter draws from its
        # own reproducible random stream.
        random_state = np.random.RandomState(
            misc.seed_hash(random_seed, name)
        )
        hparams[name] = (default_val, random_val_fn(random_state))

    # Unconditional hparam definitions.

    _hparam('data_augmentation', True, lambda r: True)
    _hparam('resnet18', False, lambda r: False)
    _hparam('resnet_dropout', 0., lambda r: 0.)
    # _hparam('resnet_dropout', 0., lambda r: r.choice([0., 0.1, 0.5]))
    _hparam('class_balanced', False, lambda r: False)
    # TODO: nonlinear classifiers disabled
    _hparam('nonlinear_classifier', False, lambda r: False)

    # Algorithm-specific hparam definitions. Each block of code below
    # corresponds to exactly one algorithm.

    if algorithm in ['DANN', 'CDANN']:
        _hparam('lambda', 1.0, lambda r: 10**r.uniform(-2, 2))
        _hparam('weight_decay_d', 0., lambda r: 10**r.uniform(-6, -2))
        _hparam('d_steps_per_g_step', 1, lambda r: int(2**r.uniform(0, 3)))
        _hparam('grad_penalty', 0., lambda r: 10**r.uniform(-2, 1))
        _hparam('beta1', 0.5, lambda r: r.choice([0., 0.5]))
        _hparam('mlp_width', 256, lambda r: int(2 ** r.uniform(6, 10)))
        _hparam('mlp_depth', 3, lambda r: int(r.choice([3, 4, 5])))
        _hparam('mlp_dropout', 0., lambda r: r.choice([0., 0.1, 0.5]))

    elif algorithm == 'Fish':
        _hparam('meta_lr', 0.5, lambda r: r.choice([0.05, 0.1, 0.5]))

    elif algorithm == "RSC":
        _hparam('rsc_f_drop_factor', 1/3, lambda r: r.uniform(0, 0.5))
        _hparam('rsc_b_drop_factor', 1/3, lambda r: r.uniform(0, 0.5))

    elif algorithm == "SagNet":
        _hparam('sag_w_adv', 0.1, lambda r: 10**r.uniform(-2, 1))

    elif algorithm == "IRM":
        _hparam('irm_lambda', 1e2, lambda r: 10**r.uniform(-1, 5))
        _hparam('irm_penalty_anneal_iters', 500,
                lambda r: int(10**r.uniform(0, 4)))

    elif algorithm == "Mixup":
        _hparam('mixup_alpha', 0.2, lambda r: 10**r.uniform(-1, -1))

    elif algorithm == "GroupDRO":
        _hparam('groupdro_eta', 1e-2, lambda r: 10**r.uniform(-3, -1))

    elif algorithm == "MMD" or algorithm == "CORAL":
        _hparam('mmd_gamma', 1., lambda r: 10**r.uniform(-1, 1))

    elif algorithm == "MLDG":
        _hparam('mldg_beta', 1., lambda r: 10**r.uniform(-1, 1))

    elif algorithm == "MTL":
        _hparam('mtl_ema', .99, lambda r: r.choice([0.5, 0.9, 0.99, 1.]))

    elif algorithm == "VREx":
        _hparam('vrex_lambda', 1e1, lambda r: 10**r.uniform(-1, 5))
        _hparam('vrex_penalty_anneal_iters', 500,
                lambda r: int(10**r.uniform(0, 4)))

    elif algorithm == "SD":
        _hparam('sd_reg', 0.1, lambda r: 10**r.uniform(-5, -1))

    elif algorithm == "ANDMask":
        _hparam('tau', 1, lambda r: r.uniform(0.5, 1.))

    elif algorithm == "IGA":
        _hparam('penalty', 1000, lambda r: 10**r.uniform(1, 5))

    elif algorithm == "AIDGN":
        _hparam('kappa', 110, lambda r: 110)
        _hparam('gamma', 0.001, lambda r: 0.001)
        _hparam('l_a', 10, lambda r: 10)
        _hparam('u_a', 410, lambda r: 410)
        _hparam('eta', 15.0, lambda r: r.uniform(10.0, 25.0))
        _hparam('beta', 0.3, lambda r: r.uniform(0.05, 0.5))
        _hparam('ratiothreshold', 1.0, lambda r: r.uniform(1.0, 3.0))

    # Dataset-and-algorithm-specific hparam definitions. Each block of code
    # below corresponds to exactly one hparam. Avoid nested conditionals.

    if dataset in SMALL_IMAGES:
        _hparam('lr', 1e-3, lambda r: 10**r.uniform(-4.5, -2.5))
    else:
        _hparam('lr', 1.25e-5, lambda r: 1.25e-5)

    if dataset in SMALL_IMAGES:
        _hparam('weight_decay', 0., lambda r: 0.)
    else:
        _hparam('weight_decay', 1e-6, lambda r: 1e-6)

    if dataset in SMALL_IMAGES:
        _hparam('batch_size', 64, lambda r: int(2**r.uniform(3, 9)))
    elif algorithm == 'ARM':
        _hparam('batch_size', 8, lambda r: 8)
    elif dataset == 'DomainNet':
        _hparam('batch_size', 16, lambda r: 16)
    else:
        _hparam('batch_size', 32, lambda r: 32)

    if algorithm in ['DANN', 'CDANN'] and dataset in SMALL_IMAGES:
        _hparam('lr_g', 1e-3, lambda r: 10**r.uniform(-4.5, -2.5))
    elif algorithm in ['DANN', 'CDANN']:
        _hparam('lr_g', 5e-5, lambda r: 10**r.uniform(-5, -3.5))

    if algorithm in ['DANN', 'CDANN'] and dataset in SMALL_IMAGES:
        _hparam('lr_d', 1e-3, lambda r: 10**r.uniform(-4.5, -2.5))
    elif algorithm in ['DANN', 'CDANN']:
        _hparam('lr_d', 5e-5, lambda r: 10**r.uniform(-5, -3.5))

    if algorithm in ['DANN', 'CDANN'] and dataset in SMALL_IMAGES:
        _hparam('weight_decay_g', 0., lambda r: 0.)
    elif algorithm in ['DANN', 'CDANN']:
        _hparam('weight_decay_g', 0., lambda r: 10**r.uniform(-6, -2))

    return hparams
def default_hparams(algorithm, dataset):
    """Return {hparam_name: default_value} for (algorithm, dataset)."""
    defaults = {}
    for name, (default, random_value) in _hparams(algorithm, dataset, 0).items():
        defaults[name] = default
    return defaults
def random_hparams(algorithm, dataset, seed):
    """Return {hparam_name: randomly_drawn_value} for (algorithm, dataset, seed)."""
    drawn = {}
    for name, (default, random_value) in _hparams(algorithm, dataset, seed).items():
        drawn[name] = random_value
    return drawn
| 5,819 | 36.792208 | 77 | py |
aidgn | aidgn-main/domainbed/test/test_datasets.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""Unit tests."""
import argparse
import itertools
import json
import os
import subprocess
import sys
import time
import unittest
import uuid
import torch
from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed import networks
from parameterized import parameterized
from domainbed.test import helpers
class TestDatasets(unittest.TestCase):
    """Integration tests: every registered dataset must work with ERM."""
    @parameterized.expand(itertools.product(datasets.DATASETS))
    @unittest.skipIf('DATA_DIR' not in os.environ, 'needs DATA_DIR environment '
        'variable')
    def test_dataset_erm(self, dataset_name):
        """
        Test that ERM can complete one step on a given dataset without raising
        an error.
        Also test that num_environments() works correctly.
        """
        batch_size = 8
        hparams = hparams_registry.default_hparams('ERM', dataset_name)
        dataset = datasets.get_dataset_class(dataset_name)(
            os.environ['DATA_DIR'], [], hparams)
        # num_environments (looked up by name) must agree with the number of
        # environments the instantiated dataset actually contains.
        self.assertEqual(datasets.num_environments(dataset_name),
            len(dataset))
        algorithm = algorithms.get_algorithm_class('ERM')(
            dataset.input_shape,
            dataset.num_classes,
            len(dataset),
            hparams).cuda()  # NOTE(review): requires a CUDA-capable GPU
        minibatches = helpers.make_minibatches(dataset, batch_size)
        # One update step; any exception fails the test.
        algorithm.update(minibatches)
| 1,454 | 28.1 | 80 | py |
aidgn | aidgn-main/domainbed/test/test_hparams_registry.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import itertools
from domainbed import hparams_registry
from domainbed import datasets
from domainbed import algorithms
from parameterized import parameterized
class TestHparamsRegistry(unittest.TestCase):
    """Tests for the hparams_registry module."""
    @parameterized.expand(itertools.product(algorithms.ALGORITHMS, datasets.DATASETS))
    def test_random_hparams_deterministic(self, algorithm_name, dataset_name):
        """Test that hparams_registry.random_hparams is deterministic"""
        # The same seed (0) must produce identical hparams on repeated calls.
        a = hparams_registry.random_hparams(algorithm_name, dataset_name, 0)
        b = hparams_registry.random_hparams(algorithm_name, dataset_name, 0)
        self.assertEqual(a.keys(), b.keys())
        for key in a.keys():
            # Pass the key as the assertion message for easier debugging.
            self.assertEqual(a[key], b[key], key)
| 815 | 36.090909 | 86 | py |
aidgn | aidgn-main/domainbed/test/test_networks.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import itertools
import json
import os
import subprocess
import sys
import time
import unittest
import uuid
import torch
from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed import networks
from domainbed.test import helpers
from parameterized import parameterized
class TestNetworks(unittest.TestCase):
    """Tests for the networks module."""
    @parameterized.expand(itertools.product(helpers.DEBUG_DATASETS))
    def test_featurizer(self, dataset_name):
        """Test that Featurizer() returns a module which can take a
        correctly-sized input and return a correctly-sized output."""
        batch_size = 8
        hparams = hparams_registry.default_hparams('ERM', dataset_name)
        # Debug datasets are synthetic, so the data root can be empty.
        dataset = datasets.get_dataset_class(dataset_name)('', [], hparams)
        input_ = helpers.make_minibatches(dataset, batch_size)[0][0]
        input_shape = dataset.input_shape
        algorithm = networks.Featurizer(input_shape, hparams).cuda()
        output = algorithm(input_)
        # The featurizer's output must be (batch, n_outputs).
        self.assertEqual(list(output.shape), [batch_size, algorithm.n_outputs])
| 1,181 | 30.105263 | 79 | py |
aidgn | aidgn-main/domainbed/test/test_models.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""Unit tests."""
import argparse
import itertools
import json
import os
import subprocess
import sys
import time
import unittest
import uuid
import torch
from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed import networks
from domainbed.test import helpers
from parameterized import parameterized
class TestAlgorithms(unittest.TestCase):
    """Smoke tests: every algorithm must init, update and predict."""
    @parameterized.expand(itertools.product(helpers.DEBUG_DATASETS, algorithms.ALGORITHMS))
    def test_init_update_predict(self, dataset_name, algorithm_name):
        """Test that a given algorithm inits, updates and predicts without raising
        errors."""
        batch_size = 8
        hparams = hparams_registry.default_hparams(algorithm_name, dataset_name)
        # Debug datasets are synthetic, so the data root can be empty.
        dataset = datasets.get_dataset_class(dataset_name)('', [], hparams)
        minibatches = helpers.make_minibatches(dataset, batch_size)
        algorithm_class = algorithms.get_algorithm_class(algorithm_name)
        algorithm = algorithm_class(dataset.input_shape, dataset.num_classes, len(dataset),
            hparams).cuda()
        # A few update steps; update() must return a non-None result.
        for _ in range(3):
            self.assertIsNotNone(algorithm.update(minibatches))
        algorithm.eval()
        # Predictions must be shaped (batch, num_classes).
        self.assertEqual(list(algorithm.predict(minibatches[0][0]).shape),
            [batch_size, dataset.num_classes])
| 1,427 | 31.454545 | 91 | py |
aidgn | aidgn-main/domainbed/test/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| 73 | 17.5 | 70 | py |
aidgn | aidgn-main/domainbed/test/helpers.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
DEBUG_DATASETS = ['Debug28', 'Debug224']
def make_minibatches(dataset, batch_size):
    """Test helper to make a minibatches array like train.py"""
    batches = []
    for env in dataset:
        xs = torch.stack([env[idx][0] for idx in range(batch_size)]).cuda()
        ys = torch.stack([torch.as_tensor(env[idx][1])
                          for idx in range(batch_size)]).cuda()
        batches.append((xs, ys))
    return batches
| 509 | 30.875 | 70 | py |
aidgn | aidgn-main/domainbed/test/test_model_selection.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""Unit tests."""
import argparse
import itertools
import json
import os
import subprocess
import sys
import time
import unittest
import uuid
import torch
from domainbed import model_selection
from domainbed.lib.query import Q
from parameterized import parameterized
def make_record(step, hparams_seed, envs):
    """envs is a list of (in_acc, out_acc, is_test_env) tuples"""
    record = {
        'args': {'test_envs': [], 'hparams_seed': hparams_seed},
        'step': step,
    }
    for env_idx, (in_acc, out_acc, is_test) in enumerate(envs):
        if is_test:
            record['args']['test_envs'].append(env_idx)
        record[f'env{env_idx}_in_acc'] = in_acc
        record[f'env{env_idx}_out_acc'] = out_acc
    return record
class TestSelectionMethod(unittest.TestCase):
    """Tests for the SelectionMethod base class via a minimal subclass."""
    class MySelectionMethod(model_selection.SelectionMethod):
        # Trivial selection method: score a run by its first record only.
        @classmethod
        def run_acc(self, run_records):
            return {
                'val_acc': run_records[0]['env0_out_acc'],
                'test_acc': run_records[0]['env0_in_acc']
            }
    def test_sweep_acc(self):
        # Run 0 has the higher val_acc (0.8), so its test_acc (0.7) is chosen.
        sweep_records = Q([
            make_record(0, 0, [(0.7, 0.8, True)]),
            make_record(0, 1, [(0.9, 0.5, True)])
        ])
        self.assertEqual(
            self.MySelectionMethod.sweep_acc(sweep_records),
            0.7
        )
    def test_sweep_acc_empty(self):
        # With no records, sweep_acc returns None rather than raising.
        self.assertEqual(
            self.MySelectionMethod.sweep_acc(Q([])),
            None
        )
class TestOracleSelectionMethod(unittest.TestCase):
    """Tests for oracle (test-domain validation) model selection."""
    def test_run_acc_best_first(self):
        """Test run_acc() when the run has two records and the best one comes
        first"""
        run_records = Q([
            make_record(0, 0, [(0.75, 0.70, True)]),
            make_record(1, 0, [(0.65, 0.60, True)])
        ])
        # The final record's accuracies are expected even though the earlier
        # record scored higher.
        self.assertEqual(
            model_selection.OracleSelectionMethod.run_acc(run_records),
            {'val_acc': 0.60, 'test_acc': 0.65}
        )
    def test_run_acc_best_last(self):
        """Test run_acc() when the run has two records and the best one comes
        last"""
        run_records = Q([
            make_record(0, 0, [(0.75, 0.70, True)]),
            make_record(1, 0, [(0.85, 0.80, True)])
        ])
        # Again the final record's accuracies are expected.
        self.assertEqual(
            model_selection.OracleSelectionMethod.run_acc(run_records),
            {'val_acc': 0.80, 'test_acc': 0.85}
        )
    def test_run_acc_empty(self):
        """Test run_acc() when there are no valid records to choose from."""
        self.assertEqual(
            model_selection.OracleSelectionMethod.run_acc(Q([])),
            None
        )
class TestIIDAccuracySelectionMethod(unittest.TestCase):
    """Tests for training-domain (IID) validation model selection."""
    def test_run_acc(self):
        # Env 0 is the test env; envs 1-2 are training envs.
        run_records = Q([
            make_record(0, 0,
                [(0.1, 0.2, True), (0.5, 0.6, False), (0.6, 0.7, False)]),
            make_record(1, 0,
                [(0.3, 0.4, True), (0.6, 0.7, False), (0.7, 0.8, False)]),
        ])
        # Step 1 wins: mean train-env out_acc is (0.7+0.8)/2 = 0.75, and its
        # test-env in_acc is 0.3.
        self.assertEqual(
            model_selection.IIDAccuracySelectionMethod.run_acc(run_records),
            {'val_acc': 0.75, 'test_acc': 0.3}
        )
    def test_run_acc_empty(self):
        self.assertEqual(
            model_selection.IIDAccuracySelectionMethod.run_acc(Q([])),
            None)
class TestLeaveOneOutSelectionMethod(unittest.TestCase):
    """Tests for leave-one-domain-out validation model selection."""
    def test_run_acc(self):
        # Expected val_acc 0.55 = mean of the held-out in_accs (0.5, 0.6);
        # test_acc 0.1 comes from the record where only env 0 (the true test
        # env) is held out.
        run_records = Q([
            make_record(0, 0,
                [(0.1, 0., True), (0.0, 0., False), (0.0, 0., False)]),
            make_record(0, 0,
                [(0.0, 0., True), (0.5, 0., True), (0., 0., False)]),
            make_record(0, 0,
                [(0.0, 0., True), (0.0, 0., False), (0.6, 0., True)]),
        ])
        self.assertEqual(
            model_selection.LeaveOneOutSelectionMethod.run_acc(run_records),
            {'val_acc': 0.55, 'test_acc': 0.1}
        )
    def test_run_acc_empty(self):
        # Missing the record holding out env 2, so the run cannot be scored.
        run_records = Q([
            make_record(0, 0,
                [(0.1, 0., True), (0.0, 0., False), (0.0, 0., False)]),
            make_record(0, 0,
                [(0.0, 0., True), (0.5, 0., True), (0., 0., False)]),
        ])
        self.assertEqual(
            model_selection.LeaveOneOutSelectionMethod.run_acc(run_records),
            None
        )
| 4,334 | 29.744681 | 77 | py |
aidgn | aidgn-main/domainbed/test/scripts/test_train.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# import argparse
# import itertools
import json
import os
import subprocess
# import sys
# import time
import unittest
import uuid
import torch
# import datasets
# import hparams_registry
# import algorithms
# import networks
# from parameterized import parameterized
# import test.helpers
class TestTrain(unittest.TestCase):
    """End-to-end smoke test for domainbed.scripts.train."""
    @unittest.skipIf('DATA_DIR' not in os.environ, 'needs DATA_DIR environment '
        'variable')
    def test_end_to_end(self):
        """Test that train.py successfully completes one step"""
        output_dir = os.path.join('/tmp', str(uuid.uuid4()))
        os.makedirs(output_dir, exist_ok=True)
        subprocess.run(f'python -m domainbed.scripts.train --dataset RotatedMNIST '
            f'--data_dir={os.environ["DATA_DIR"]} --output_dir={output_dir} '
            f'--steps=501', shell=True)
        # Each line of results.jsonl is one JSON record; strip the newline.
        with open(os.path.join(output_dir, 'results.jsonl')) as f:
            lines = [l[:-1] for l in f]
        last_epoch = json.loads(lines[-1])
        self.assertEqual(last_epoch['step'], 500)
        # Conservative values; anything lower and something's likely wrong.
        self.assertGreater(last_epoch['env0_in_acc'], 0.80)
        self.assertGreater(last_epoch['env1_in_acc'], 0.95)
        self.assertGreater(last_epoch['env2_in_acc'], 0.95)
        # Fixed: env3 was asserted twice (copy-paste duplicate); once suffices.
        self.assertGreater(last_epoch['env3_in_acc'], 0.95)
        with open(os.path.join(output_dir, 'out.txt')) as f:
            text = f.read()
        self.assertTrue('500' in text)
| 1,654 | 32.1 | 83 | py |
aidgn | aidgn-main/domainbed/test/scripts/test_sweep.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import itertools
import json
import os
import subprocess
import sys
import time
import unittest
import uuid
import torch
from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed import networks
from domainbed.test import helpers
from domainbed.scripts import sweep
from parameterized import parameterized
class TestSweep(unittest.TestCase):
    """Tests for the sweep script: Job lifecycle and args enumeration."""
    def test_job(self):
        """Test that a newly-created job has valid
        output_dir, state, and command_str properties."""
        train_args = {'foo': 'bar'}
        sweep_output_dir = f'/tmp/{str(uuid.uuid4())}'
        job = sweep.Job(train_args, sweep_output_dir)
        self.assertTrue(job.output_dir.startswith(sweep_output_dir))
        self.assertEqual(job.state, sweep.Job.NOT_LAUNCHED)
        self.assertEqual(job.command_str,
            f'python -m domainbed.scripts.train --foo bar --output_dir {job.output_dir}')
    def test_job_launch(self):
        """Test that launching a job calls the launcher_fn with appropriate
        arguments, and sets the job to INCOMPLETE state."""
        train_args = {'foo': 'bar'}
        sweep_output_dir = f'/tmp/{str(uuid.uuid4())}'
        job = sweep.Job(train_args, sweep_output_dir)
        launcher_fn_called = False
        def launcher_fn(commands):
            nonlocal launcher_fn_called
            launcher_fn_called = True
            self.assertEqual(len(commands), 1)
            self.assertEqual(commands[0], job.command_str)
        sweep.Job.launch([job], launcher_fn)
        self.assertTrue(launcher_fn_called)
        # Re-creating the Job re-reads state from the filesystem.
        job = sweep.Job(train_args, sweep_output_dir)
        self.assertEqual(job.state, sweep.Job.INCOMPLETE)
    def test_job_delete(self):
        """Test that deleting a launched job returns it to the NOT_LAUNCHED
        state"""
        train_args = {'foo': 'bar'}
        sweep_output_dir = f'/tmp/{str(uuid.uuid4())}'
        job = sweep.Job(train_args, sweep_output_dir)
        sweep.Job.launch([job], (lambda commands: None))
        sweep.Job.delete([job])
        job = sweep.Job(train_args, sweep_output_dir)
        self.assertEqual(job.state, sweep.Job.NOT_LAUNCHED)
    def test_make_args_list(self):
        """Test that, for a typical input, make_job_list returns a list
        of the correct length"""
        args_list = sweep.make_args_list(
            n_trials=2,
            dataset_names=['Debug28'],
            algorithms=['ERM'],
            n_hparams_from=0,
            n_hparams=3,
            steps=123,
            data_dir='/tmp/data',
            task='domain_generalization',
            holdout_fraction=0.2,
            single_test_envs=False,
            hparams=None
        )
        # Debug28 has 3 envs: 3 single + 3 pairs of test envs; 2 trials x
        # 3 hparams seeds x 6 test-env choices.
        assert(len(args_list) == 2*3*(3+3))
    @unittest.skipIf('DATA_DIR' not in os.environ, 'needs DATA_DIR environment '
        'variable')
    def test_end_to_end(self):
        output_dir = os.path.join('/tmp', str(uuid.uuid4()))
        result = subprocess.run(f'python -m domainbed.scripts.sweep launch '
            f'--data_dir={os.environ["DATA_DIR"]} --output_dir={output_dir} '
            f'--algorithms ERM --datasets Debug28 --n_hparams 1 --n_trials 1 '
            f'--command_launcher dummy --skip_confirmation',
            shell=True, capture_output=True)
        stdout_lines = result.stdout.decode('utf8').split("\n")
        dummy_launcher_lines = [l for l in stdout_lines
            if l.startswith('Dummy launcher:')]
        self.assertEqual(len(dummy_launcher_lines), 6)
        # Now run it again and make sure it doesn't try to relaunch those jobs
        result = subprocess.run(f'python -m domainbed.scripts.sweep launch '
            f'--data_dir={os.environ["DATA_DIR"]} --output_dir={output_dir} '
            f'--algorithms ERM --datasets Debug28 --n_hparams 1 --n_trials 1 '
            f'--command_launcher dummy --skip_confirmation',
            shell=True, capture_output=True)
        stdout_lines = result.stdout.decode('utf8').split("\n")
        dummy_launcher_lines = [l for l in stdout_lines
            if l.startswith('Dummy launcher:')]
        self.assertEqual(len(dummy_launcher_lines), 0)
        # Delete the incomplete jobs, try launching again, and make sure they
        # get relaunched.
        subprocess.run(f'python -m domainbed.scripts.sweep delete_incomplete '
            f'--data_dir={os.environ["DATA_DIR"]} --output_dir={output_dir} '
            f'--algorithms ERM --datasets Debug28 --n_hparams 1 --n_trials 1 '
            f'--command_launcher dummy --skip_confirmation',
            shell=True, capture_output=True)
        result = subprocess.run(f'python -m domainbed.scripts.sweep launch '
            f'--data_dir={os.environ["DATA_DIR"]} --output_dir={output_dir} '
            f'--algorithms ERM --datasets Debug28 --n_hparams 1 --n_trials 1 '
            f'--command_launcher dummy --skip_confirmation',
            shell=True, capture_output=True)
        stdout_lines = result.stdout.decode('utf8').split("\n")
        dummy_launcher_lines = [l for l in stdout_lines
            if l.startswith('Dummy launcher:')]
        self.assertEqual(len(dummy_launcher_lines), 6)
| 5,273 | 39.569231 | 89 | py |
aidgn | aidgn-main/domainbed/test/scripts/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| 73 | 17.5 | 70 | py |
aidgn | aidgn-main/domainbed/test/scripts/test_collect_results.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import itertools
import json
import os
import subprocess
import sys
import time
import unittest
import uuid
import torch
from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed import networks
from domainbed.test import helpers
from domainbed.scripts import collect_results
from parameterized import parameterized
import io
import textwrap
class TestCollectResults(unittest.TestCase):
    """Tests for the collect_results reporting script."""
    def test_format_mean(self):
        # format_mean returns (mean, err, formatted); element [2] is the string.
        self.assertEqual(
            collect_results.format_mean([0.1, 0.2, 0.3], False)[2],
            '20.0 +/- 4.7')
        self.assertEqual(
            collect_results.format_mean([0.1, 0.2, 0.3], True)[2],
            '20.0 $\pm$ 4.7')
    def test_print_table_non_latex(self):
        # Capture stdout to compare against the expected plain-text table.
        temp_out = io.StringIO()
        sys.stdout = temp_out
        table = [['1', '2'], ['3', '4']]
        collect_results.print_table(table, 'Header text', ['R1', 'R2'],
            ['C1', 'C2'], colwidth=10, latex=False)
        sys.stdout = sys.__stdout__
        self.assertEqual(
            temp_out.getvalue(),
            textwrap.dedent("""
            -------- Header text
            C1          C2        
            R1          1           2         
            R2          3           4         
            """)
        )
    def test_print_table_latex(self):
        # Capture stdout to compare against the expected LaTeX table.
        temp_out = io.StringIO()
        sys.stdout = temp_out
        table = [['1', '2'], ['3', '4']]
        collect_results.print_table(table, 'Header text', ['R1', 'R2'],
            ['C1', 'C2'], colwidth=10, latex=True)
        sys.stdout = sys.__stdout__
        self.assertEqual(
            temp_out.getvalue(),
            textwrap.dedent(r"""
            \begin{center}
            \adjustbox{max width=\textwidth}{%
            \begin{tabular}{lcc}
            \toprule
            \textbf{C1 & \textbf{C2 \\
            \midrule
            R1 & 1 & 2 \\
            R2 & 3 & 4 \\
            \bottomrule
            \end{tabular}}
            \end{center}
            """)
        )
    def test_get_grouped_records(self):
        pass # TODO
    def test_print_results_tables(self):
        pass # TODO
    def test_load_records(self):
        pass # TODO
    def test_end_to_end(self):
        """
        Test that collect_results.py's output matches a manually-verified
        ground-truth when run on a given directory of test sweep data.
        If you make any changes to the output of collect_results.py, you'll need
        to update the ground-truth and manually verify that it's still
        correct. The command used to update the ground-truth is:
        python -m domainbed.scripts.collect_results --input_dir=domainbed/misc/test_sweep_data \
            | tee domainbed/misc/test_sweep_results.txt
        Furthermore, if you make any changes to the data format, you'll also
        need to rerun the test sweep. The command used to run the test sweep is:
        python -m domainbed.scripts.sweep launch --data_dir=$DATA_DIR \
            --output_dir=domainbed/misc/test_sweep_data --algorithms ERM \
            --datasets VLCS --steps 1001 --n_hparams 2 --n_trials 2 \
            --command_launcher local
        """
        result = subprocess.run('python -m domainbed.scripts.collect_results'
            ' --input_dir=domainbed/misc/test_sweep_data', shell=True,
            stdout=subprocess.PIPE)
        with open('domainbed/misc/test_sweep_results.txt', 'r') as f:
            ground_truth = f.read()
        self.assertEqual(result.stdout.decode('utf8'), ground_truth)
| 3,718 | 31.622807 | 96 | py |
aidgn | aidgn-main/domainbed/test/lib/test_misc.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from domainbed.lib import misc
class TestMisc(unittest.TestCase):
    """Tests for the misc helper module."""
    def test_make_weights_for_balanced_classes(self):
        """Weights sum to 1, are equal within a class, and are inversely
        proportional to class frequency."""
        samples = [('A', 0), ('B', 1), ('C', 0), ('D', 2), ('E', 3), ('F', 0)]
        weights = misc.make_weights_for_balanced_classes(samples)
        # All weights together form a probability distribution.
        self.assertEqual(weights.sum(), 1)
        # Samples of the same class get identical weight.
        self.assertEqual(weights[0], weights[2])
        # Singleton classes 1 and 2 get equal weight.
        self.assertEqual(weights[1], weights[3])
        # Class 0 has 3 samples, so each gets 1/3 the weight of a singleton.
        self.assertEqual(3 * weights[0], weights[1])
| 541 | 35.133333 | 78 | py |
aidgn | aidgn-main/domainbed/test/lib/test_query.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from domainbed.lib.query import Q, make_selector_fn
class TestQuery(unittest.TestCase):
    """Tests for the Q query-list helper and selector functions."""
    def test_everything(self):
        numbers = Q([1, 4, 2])
        people = Q([
            {'name': 'Bob', 'age': 40},
            {'name': 'Alice', 'age': 20},
            {'name': 'Bob', 'age': 10}
        ])
        # select() accepts either a callable or a key-path string.
        self.assertEqual(numbers.select(lambda x: 2*x), [2, 8, 4])
        self.assertEqual(numbers.min(), 1)
        self.assertEqual(numbers.max(), 4)
        self.assertEqual(numbers.mean(), 7/3)
        self.assertEqual(people.select('name'), ['Bob', 'Alice', 'Bob'])
        # Mean age per name: Bob (40+10)/2 = 25, Alice 20.
        self.assertEqual(
            set(people.group('name').map(lambda _,g: g.select('age').mean())),
            set([25, 20])
        )
        self.assertEqual(people.argmax('age'), people[0])
    def test_group_by_unhashable(self):
        # Grouping must work even when group keys (dicts) are unhashable.
        jobs = Q([
            {'hparams': {1:2}, 'score': 3},
            {'hparams': {1:2}, 'score': 4},
            {'hparams': {2:4}, 'score': 5}
        ])
        grouped = jobs.group('hparams')
        self.assertEqual(grouped, [
            ({1:2}, [jobs[0], jobs[1]]),
            ({2:4}, [jobs[2]])
        ])
    def test_comma_selector(self):
        # 'a.b,c' selects the tuple (struct['a']['b'], struct['c']).
        struct = {'a': {'b': 1}, 'c': 2}
        fn = make_selector_fn('a.b,c')
        self.assertEqual(fn(struct), (1, 2))
    def test_unique(self):
        # unique() preserves first-seen order.
        numbers = Q([1,2,1,3,2,1,3,1,2,3])
        self.assertEqual(numbers.unique(), [1,2,3])
| 1,513 | 29.28 | 78 | py |
aidgn | aidgn-main/domainbed/test/lib/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| 73 | 17.5 | 70 | py |
aidgn | aidgn-main/domainbed/scripts/save_images.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Save some representative images from each dataset to disk.
"""
import random
import torch
import argparse
from domainbed import hparams_registry
from domainbed import datasets
import imageio
import os
from tqdm import tqdm
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Domain generalization')
    parser.add_argument('--data_dir', type=str)
    parser.add_argument('--output_dir', type=str)
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    datasets_to_save = ['OfficeHome', 'TerraIncognita', 'DomainNet', 'RotatedMNIST', 'ColoredMNIST', 'SVIRO']
    for dataset_name in tqdm(datasets_to_save):
        hparams = hparams_registry.default_hparams('ERM', dataset_name)
        dataset = datasets.get_dataset_class(dataset_name)(
            args.data_dir,
            list(range(datasets.num_environments(dataset_name))),
            hparams)
        # Save 50 random example images from each environment.
        for env_idx, env in enumerate(tqdm(dataset)):
            for i in tqdm(range(50)):
                idx = random.choice(list(range(len(env))))
                x, y = env[idx]
                # Resample until the label is at most 10 (limits the classes shown).
                while y > 10:
                    idx = random.choice(list(range(len(env))))
                    x, y = env[idx]
                # 2-channel images: pad with zeros to make them 3-channel RGB.
                if x.shape[0] == 2:
                    x = torch.cat([x, torch.zeros_like(x)], dim=0)[:3,:,:]
                # Negative values imply ImageNet normalization; undo it.
                if x.min() < 0:
                    mean = torch.tensor([0.485, 0.456, 0.406])[:,None,None]
                    std = torch.tensor([0.229, 0.224, 0.225])[:,None,None]
                    x = (x * std) + mean
                assert(x.min() >= 0)
                assert(x.max() <= 1)
                # Convert [0, 1] float CHW to uint8 HWC for imageio.
                x = (x * 255.99)
                x = x.numpy().astype('uint8').transpose(1,2,0)
                imageio.imwrite(
                    os.path.join(args.output_dir,
                        f'{dataset_name}_env{env_idx}{dataset.ENVIRONMENTS[env_idx]}_{i}_idx{idx}_class{y}.png'),
                    x)
| 2,029 | 38.803922 | 113 | py |
aidgn | aidgn-main/domainbed/scripts/sweep.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Run sweeps
"""
import argparse
import copy
import getpass
import hashlib
import json
import os
import random
import shutil
import time
import uuid
import numpy as np
import torch
from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed.lib import misc
from domainbed import command_launchers
import tqdm
import shlex
class Job:
    """One training run in a sweep, identified by an MD5 hash of its args.

    The job's state (NOT_LAUNCHED / INCOMPLETE / DONE) is derived from the
    filesystem: a 'done' marker file means DONE, an existing output directory
    without it means INCOMPLETE.
    """
    NOT_LAUNCHED = 'Not launched'
    INCOMPLETE = 'Incomplete'
    DONE = 'Done'
    def __init__(self, train_args, sweep_output_dir):
        # Hash the (sorted) args so identical configs map to the same dir.
        serialized = json.dumps(train_args, sort_keys=True)
        digest = hashlib.md5(serialized.encode('utf-8')).hexdigest()
        self.output_dir = os.path.join(sweep_output_dir, digest)
        self.train_args = copy.deepcopy(train_args)
        self.train_args['output_dir'] = self.output_dir
        # Render the full command line, args in sorted order.
        parts = ['python', '-m', 'domainbed.scripts.train']
        for name, value in sorted(self.train_args.items()):
            if isinstance(value, list):
                rendered = ' '.join(str(item) for item in value)
            elif isinstance(value, str):
                rendered = shlex.quote(value)
            else:
                rendered = value
            parts.append(f'--{name} {rendered}')
        self.command_str = ' '.join(parts)
        if os.path.exists(os.path.join(self.output_dir, 'done')):
            self.state = Job.DONE
        elif os.path.exists(self.output_dir):
            self.state = Job.INCOMPLETE
        else:
            self.state = Job.NOT_LAUNCHED
    def __str__(self):
        summary = (self.train_args['dataset'],
                   self.train_args['algorithm'],
                   self.train_args['test_envs'],
                   self.train_args['hparams_seed'])
        return '{}: {} {}'.format(self.state, self.output_dir, summary)
    @staticmethod
    def launch(jobs, launcher_fn):
        """Create output dirs and hand all command strings to launcher_fn."""
        print('Launching...')
        jobs = jobs.copy()
        np.random.shuffle(jobs)  # randomize launch order
        print('Making job directories:')
        for job in tqdm.tqdm(jobs, leave=False):
            os.makedirs(job.output_dir, exist_ok=True)
        launcher_fn([job.command_str for job in jobs])
        print(f'Launched {len(jobs)} jobs!')
    @staticmethod
    def delete(jobs):
        """Remove each job's output directory (resets it to NOT_LAUNCHED)."""
        print('Deleting...')
        for job in jobs:
            shutil.rmtree(job.output_dir)
        print(f'Deleted {len(jobs)} jobs!')
def all_test_env_combinations(n):
    """
    For a dataset with n >= 3 envs, return all combinations of 1 and 2 test
    envs.
    """
    assert n >= 3
    for first in range(n):
        yield [first]
        for second in range(first + 1, n):
            yield [first, second]
def make_args_list(n_trials, dataset_names, algorithms, n_hparams_from, n_hparams, steps,
    data_dir, task, holdout_fraction, single_test_envs, hparams):
    """Enumerate one train-args dict per (trial, dataset, algorithm,
    test-env choice, hparams seed) combination."""
    all_args = []
    for trial_seed in range(n_trials):
        for dataset in dataset_names:
            for algorithm in algorithms:
                n_envs = datasets.num_environments(dataset)
                if single_test_envs:
                    # One run per individual test environment.
                    env_choices = [[i] for i in range(n_envs)]
                else:
                    # All size-1 and size-2 test-env combinations.
                    env_choices = all_test_env_combinations(n_envs)
                for test_envs in env_choices:
                    for hparams_seed in range(n_hparams_from, n_hparams):
                        train_args = {
                            'dataset': dataset,
                            'algorithm': algorithm,
                            'test_envs': test_envs,
                            'holdout_fraction': holdout_fraction,
                            'hparams_seed': hparams_seed,
                            'data_dir': data_dir,
                            'task': task,
                            'trial_seed': trial_seed,
                            'seed': 0,
                        }
                        if steps is not None:
                            train_args['steps'] = steps
                        if hparams is not None:
                            train_args['hparams'] = hparams
                        all_args.append(train_args)
    return all_args
def ask_for_confirmation():
    """Prompt the user; exit the program unless the reply starts with 'y'."""
    answer = input('Are you sure? (y/n) ')
    if answer.lower().strip()[:1] != "y":
        print('Nevermind!')
        exit(0)
DATASETS = [d for d in datasets.DATASETS if "Debug" not in d]
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Run a sweep')
    parser.add_argument('command', choices=['launch', 'delete_incomplete'])
    parser.add_argument('--datasets', nargs='+', type=str, default=DATASETS)
    parser.add_argument('--algorithms', nargs='+', type=str, default=algorithms.ALGORITHMS)
    parser.add_argument('--task', type=str, default="domain_generalization")
    parser.add_argument('--n_hparams_from', type=int, default=0)
    parser.add_argument('--n_hparams', type=int, default=20)
    parser.add_argument('--output_dir', type=str, required=True)
    parser.add_argument('--data_dir', type=str, required=True)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--n_trials', type=int, default=3)
    parser.add_argument('--command_launcher', type=str, required=True)
    parser.add_argument('--steps', type=int, default=None)
    parser.add_argument('--hparams', type=str, default=None)
    parser.add_argument('--holdout_fraction', type=float, default=0.2)
    parser.add_argument('--single_test_envs', type=bool, default=True)#action='store_true')
    parser.add_argument('--skip_confirmation', type=bool, default=True)#action='store_true')
    args = parser.parse_args()
    # Enumerate the full cross-product of sweep configurations.
    args_list = make_args_list(
        n_trials=args.n_trials,
        dataset_names=args.datasets,
        algorithms=args.algorithms,
        n_hparams_from=args.n_hparams_from,
        n_hparams=args.n_hparams,
        steps=args.steps,
        data_dir=args.data_dir,
        task=args.task,
        holdout_fraction=args.holdout_fraction,
        single_test_envs=args.single_test_envs,
        hparams=args.hparams
    )
    jobs = [Job(train_args, args.output_dir) for train_args in args_list]
    for job in jobs:
        print(job)
    print("{} jobs: {} done, {} incomplete, {} not launched.".format(
        len(jobs),
        len([j for j in jobs if j.state == Job.DONE]),
        len([j for j in jobs if j.state == Job.INCOMPLETE]),
        len([j for j in jobs if j.state == Job.NOT_LAUNCHED]))
    )
    if args.command == 'launch':
        # Only jobs that have never been launched are started.
        to_launch = [j for j in jobs if j.state == Job.NOT_LAUNCHED]
        print(f'About to launch {len(to_launch)} jobs.')
        if not args.skip_confirmation:
            ask_for_confirmation()
        launcher_fn = command_launchers.REGISTRY[args.command_launcher]
        Job.launch(to_launch, launcher_fn)
    elif args.command == 'delete_incomplete':
        # Remove output dirs of jobs that started but never finished.
        to_delete = [j for j in jobs if j.state == Job.INCOMPLETE]
        print(f'About to delete {len(to_delete)} jobs.')
        if not args.skip_confirmation:
            ask_for_confirmation()
        Job.delete(to_delete)
| 7,332 | 36.035354 | 92 | py |
aidgn | aidgn-main/domainbed/scripts/download.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from torchvision.datasets import MNIST
import xml.etree.ElementTree as ET
from zipfile import ZipFile
import argparse
import tarfile
import shutil
import gdown
import uuid
import json
import os
from wilds.datasets.camelyon17_dataset import Camelyon17Dataset
from wilds.datasets.fmow_dataset import FMoWDataset
# utils #######################################################################
def stage_path(data_dir, name):
    """Return data_dir/name, creating the directory if it does not exist."""
    target = os.path.join(data_dir, name)
    if not os.path.exists(target):
        os.makedirs(target)
    return target
def download_and_extract(url, dst, remove=True):
    """Download url to dst with gdown, unpack known archive formats next to
    dst, and optionally delete the archive afterwards."""
    gdown.download(url, dst, quiet=False)
    destination_dir = os.path.dirname(dst)
    # NOTE(review): extractall on untrusted archives can write outside the
    # target dir (path traversal) — these URLs are project-pinned downloads.
    if dst.endswith(".tar.gz"):
        with tarfile.open(dst, "r:gz") as archive:
            archive.extractall(destination_dir)
    if dst.endswith(".tar"):
        with tarfile.open(dst, "r:") as archive:
            archive.extractall(destination_dir)
    if dst.endswith(".zip"):
        with ZipFile(dst, "r") as archive:
            archive.extractall(destination_dir)
    if remove:
        os.remove(dst)
# VLCS ########################################################################
# Slower, but builds dataset from the original sources
#
# def download_vlcs(data_dir):
# full_path = stage_path(data_dir, "VLCS")
#
# tmp_path = os.path.join(full_path, "tmp/")
# if not os.path.exists(tmp_path):
# os.makedirs(tmp_path)
#
# with open("domainbed/misc/vlcs_files.txt", "r") as f:
# lines = f.readlines()
# files = [line.strip().split() for line in lines]
#
# download_and_extract("http://pjreddie.com/media/files/VOCtrainval_06-Nov-2007.tar",
# os.path.join(tmp_path, "voc2007_trainval.tar"))
#
# download_and_extract("https://drive.google.com/uc?id=1I8ydxaAQunz9R_qFFdBFtw6rFTUW9goz",
# os.path.join(tmp_path, "caltech101.tar.gz"))
#
# download_and_extract("http://groups.csail.mit.edu/vision/Hcontext/data/sun09_hcontext.tar",
# os.path.join(tmp_path, "sun09_hcontext.tar"))
#
# tar = tarfile.open(os.path.join(tmp_path, "sun09.tar"), "r:")
# tar.extractall(tmp_path)
# tar.close()
#
# for src, dst in files:
# class_folder = os.path.join(data_dir, dst)
#
# if not os.path.exists(class_folder):
# os.makedirs(class_folder)
#
# dst = os.path.join(class_folder, uuid.uuid4().hex + ".jpg")
#
# if "labelme" in src:
# # download labelme from the web
# gdown.download(src, dst, quiet=False)
# else:
# src = os.path.join(tmp_path, src)
# shutil.copyfile(src, dst)
#
# shutil.rmtree(tmp_path)
def download_vlcs(data_dir):
    """Download and extract the VLCS dataset into data_dir/VLCS."""
    # Original URL: http://www.eecs.qmul.ac.uk/~dl307/project_iccv2017
    # stage_path is called for its side effect (creating the target dir);
    # the returned path was previously bound to an unused local.
    stage_path(data_dir, "VLCS")
    download_and_extract("https://drive.google.com/uc?id=1skwblH1_okBwxWxmRsp9_qi15hyPpxg8",
                         os.path.join(data_dir, "VLCS.tar.gz"))
# MNIST #######################################################################
def download_mnist(data_dir):
    """Download MNIST into data_dir/MNIST via torchvision."""
    # Original URL: http://yann.lecun.com/exdb/mnist/
    target = stage_path(data_dir, "MNIST")
    MNIST(target, download=True)
# PACS ########################################################################
def download_pacs(data_dir):
    """Download and extract the PACS dataset into data_dir/PACS."""
    # Original URL: http://www.eecs.qmul.ac.uk/~dl307/project_iccv2017
    destination = stage_path(data_dir, "PACS")
    download_and_extract("https://drive.google.com/uc?id=0B6x7gtvErXgfbF9CSk53UkRxVzg",
                         os.path.join(data_dir, "PACS.zip"))
    # The archive unpacks into a folder named "kfold"; move it into place.
    os.rename(os.path.join(data_dir, "kfold"), destination)
# Office-Home #################################################################
def download_office_home(data_dir):
    """Download and extract Office-Home into data_dir/office_home."""
    # Original URL: http://hemanthdv.org/OfficeHome-Dataset/
    destination = stage_path(data_dir, "office_home")
    download_and_extract("https://drive.google.com/uc?id=0B81rNlvomiwed0V1YUxQdC1uOTg",
                         os.path.join(data_dir, "office_home.zip"))
    # The archive unpacks with its original folder name; move it into place.
    os.rename(os.path.join(data_dir, "OfficeHomeDataset_10072016"), destination)
# DomainNET ###################################################################
def download_domain_net(data_dir):
    """Download all six DomainNet domains and prune known duplicate files."""
    # Original URL: http://ai.bu.edu/M3SDA/
    target = stage_path(data_dir, "domain_net")
    base = "http://csr.bu.edu/ftp/visda/2019/multi-source/"
    suffixes = [
        "groundtruth/clipart.zip",
        "infograph.zip",
        "groundtruth/painting.zip",
        "quickdraw.zip",
        "real.zip",
        "sketch.zip",
    ]
    for suffix in suffixes:
        url = base + suffix
        download_and_extract(url, os.path.join(target, url.split("/")[-1]))
    # DomainBed ships a list of duplicate images; delete any that exist,
    # silently skipping entries that are already absent.
    with open("domainbed/misc/domain_net_duplicates.txt", "r") as f:
        for line in f:
            try:
                os.remove(os.path.join(target, line.strip()))
            except OSError:
                pass
# TerraIncognita ##############################################################
def download_terra_incognita(data_dir):
    """Download TerraIncognita images + annotations and build the
    location/category folder layout used by DomainBed.

    Only the four benchmark camera locations and ten categories are kept;
    images are copied into ``location_<loc>/<category>/`` folders, then the
    raw image dump and annotation file are removed.
    """
    # Original URL: https://beerys.github.io/CaltechCameraTraps/
    full_path = stage_path(data_dir, "terra_incognita")

    download_and_extract(
        "http://www.vision.caltech.edu/~sbeery/datasets/caltechcameratraps18/eccv_18_all_images_sm.tar.gz",
        os.path.join(full_path, "terra_incognita_images.tar.gz"))

    download_and_extract(
        "http://www.vision.caltech.edu/~sbeery/datasets/caltechcameratraps18/eccv_18_all_annotations.tar.gz",
        os.path.join(full_path, "terra_incognita_annotations.tar.gz"))

    include_locations = [38, 46, 100, 43]

    include_categories = [
        "bird", "bobcat", "cat", "coyote", "dog", "empty", "opossum", "rabbit",
        "raccoon", "squirrel"
    ]

    images_folder = os.path.join(full_path, "eccv_18_all_images_sm/")
    # NOTE(review): assumes the annotations tarball extracts to this exact
    # filename -- confirm against the archive contents.
    annotations_file = os.path.join(full_path, "CaltechCameraTrapsECCV18.json")
    destination_folder = full_path

    stats = {}

    os.makedirs(destination_folder, exist_ok=True)

    with open(annotations_file, "r") as f:
        data = json.load(f)

    category_dict = {item['id']: item['name'] for item in data['categories']}

    # Group annotations by image id once, instead of scanning the full
    # annotation list for every image (was O(images * annotations)).
    annotations_by_image = {}
    for annotation in data['annotations']:
        annotations_by_image.setdefault(
            annotation['image_id'], []).append(annotation)

    for image in data['images']:
        image_location = image['location']

        if image_location not in include_locations:
            continue

        loc_folder = os.path.join(destination_folder,
                                  'location_' + str(image_location) + '/')
        os.makedirs(loc_folder, exist_ok=True)

        image_id = image['id']
        image_fname = image['file_name']

        for annotation in annotations_by_image.get(image_id, []):
            stats.setdefault(image_location, {})
            category = category_dict[annotation['category_id']]

            if category not in include_categories:
                continue

            # Count every matching annotation (the previous version skipped
            # the first occurrence of each category). `stats` is only a
            # bookkeeping tally and is never read elsewhere.
            stats[image_location][category] = \
                stats[image_location].get(category, 0) + 1

            loc_cat_folder = os.path.join(loc_folder, category + '/')
            os.makedirs(loc_cat_folder, exist_ok=True)

            dst_path = os.path.join(loc_cat_folder, image_fname)
            src_path = os.path.join(images_folder, image_fname)
            shutil.copyfile(src_path, dst_path)

    # Drop the raw dump once the curated layout is in place.
    shutil.rmtree(images_folder)
    os.remove(annotations_file)
def download_terra_incognita_domainbed(data_dir):
    """Download TerraIncognita from the current LILA mirrors and build the
    location/category folder layout used by DomainBed.

    Same processing as :func:`download_terra_incognita` but with the updated
    download URLs and string-typed location ids.
    """
    # Original URL: https://beerys.github.io/CaltechCameraTraps/
    # New URL: http://lila.science/datasets/caltech-camera-traps
    full_path = stage_path(data_dir, "terra_incognita")

    download_and_extract(
        "https://lilablobssc.blob.core.windows.net/caltechcameratraps/eccv_18_all_images_sm.tar.gz",
        os.path.join(full_path, "terra_incognita_images.tar.gz"))

    download_and_extract(
        "https://lilablobssc.blob.core.windows.net/caltechcameratraps/labels/caltech_camera_traps.json.zip",
        os.path.join(full_path, "caltech_camera_traps.json.zip"))

    include_locations = ["38", "46", "100", "43"]

    include_categories = [
        "bird", "bobcat", "cat", "coyote", "dog", "empty", "opossum", "rabbit",
        "raccoon", "squirrel"
    ]

    images_folder = os.path.join(full_path, "eccv_18_all_images_sm/")
    # NOTE(review): assumes caltech_camera_traps.json.zip extracts to this
    # exact filename -- confirm against the archive contents.
    annotations_file = os.path.join(full_path, "caltech_images_20210113.json")
    destination_folder = full_path

    stats = {}

    os.makedirs(destination_folder, exist_ok=True)

    with open(annotations_file, "r") as f:
        data = json.load(f)

    category_dict = {item['id']: item['name'] for item in data['categories']}

    # Group annotations by image id once, instead of scanning the full
    # annotation list for every image (was O(images * annotations)).
    annotations_by_image = {}
    for annotation in data['annotations']:
        annotations_by_image.setdefault(
            annotation['image_id'], []).append(annotation)

    for image in data['images']:
        image_location = image['location']

        if image_location not in include_locations:
            continue

        loc_folder = os.path.join(destination_folder,
                                  'location_' + str(image_location) + '/')
        os.makedirs(loc_folder, exist_ok=True)

        image_id = image['id']
        image_fname = image['file_name']

        for annotation in annotations_by_image.get(image_id, []):
            stats.setdefault(image_location, {})
            category = category_dict[annotation['category_id']]

            if category not in include_categories:
                continue

            # Count every matching annotation (the previous version skipped
            # the first occurrence of each category). `stats` is only a
            # bookkeeping tally and is never read elsewhere.
            stats[image_location][category] = \
                stats[image_location].get(category, 0) + 1

            loc_cat_folder = os.path.join(loc_folder, category + '/')
            os.makedirs(loc_cat_folder, exist_ok=True)

            dst_path = os.path.join(loc_cat_folder, image_fname)
            src_path = os.path.join(images_folder, image_fname)
            shutil.copyfile(src_path, dst_path)

    # Drop the raw dump once the curated layout is in place.
    shutil.rmtree(images_folder)
    os.remove(annotations_file)
def process_terra_incognita(data_dir):
    """Extract previously downloaded TerraIncognita archives and build the
    location/category folder layout used by DomainBed.

    Expects ``terra_incognita_images.tar.gz`` and
    ``caltech_camera_traps.json.zip`` to already be in the staged folder.
    """
    # Original URL: https://beerys.github.io/CaltechCameraTraps/
    # New URL: http://lila.science/datasets/caltech-camera-traps
    full_path = stage_path(data_dir, "Terra_incognita")

    # Unpack the image tarball, then delete it to save disk space.
    # NOTE(review): extractall() on an archive from the network -- paths are
    # trusted here; consider the tarfile `filter=` argument on Python 3.12+.
    dst = os.path.join(full_path, "terra_incognita_images.tar.gz")
    with tarfile.open(dst, "r:gz") as tar:
        tar.extractall(os.path.dirname(dst))
    os.remove(dst)

    # Unpack the annotation zip (the zip itself is kept, matching the
    # original behaviour).
    dst = os.path.join(full_path, "caltech_camera_traps.json.zip")
    with ZipFile(dst, "r") as zf:
        zf.extractall(os.path.dirname(dst))

    include_locations = ["38", "46", "100", "43"]

    include_categories = [
        "bird", "bobcat", "cat", "coyote", "dog", "empty", "opossum", "rabbit",
        "raccoon", "squirrel"
    ]

    images_folder = os.path.join(full_path, "eccv_18_all_images_sm/")
    # NOTE(review): assumes the zip extracts to this exact filename --
    # confirm against the archive contents.
    annotations_file = os.path.join(full_path, "caltech_images_20210113.json")
    destination_folder = full_path

    stats = {}

    os.makedirs(destination_folder, exist_ok=True)

    with open(annotations_file, "r") as f:
        data = json.load(f)

    category_dict = {item['id']: item['name'] for item in data['categories']}

    # Group annotations by image id once, instead of scanning the full
    # annotation list for every image (was O(images * annotations)).
    annotations_by_image = {}
    for annotation in data['annotations']:
        annotations_by_image.setdefault(
            annotation['image_id'], []).append(annotation)

    for image in data['images']:
        image_location = image['location']

        if image_location not in include_locations:
            continue

        loc_folder = os.path.join(destination_folder,
                                  'location_' + str(image_location) + '/')
        os.makedirs(loc_folder, exist_ok=True)

        image_id = image['id']
        image_fname = image['file_name']

        for annotation in annotations_by_image.get(image_id, []):
            stats.setdefault(image_location, {})
            category = category_dict[annotation['category_id']]

            if category not in include_categories:
                continue

            # Count every matching annotation (the previous version skipped
            # the first occurrence of each category). `stats` is only a
            # bookkeeping tally and is never read elsewhere.
            stats[image_location][category] = \
                stats[image_location].get(category, 0) + 1

            loc_cat_folder = os.path.join(loc_folder, category + '/')
            os.makedirs(loc_cat_folder, exist_ok=True)

            dst_path = os.path.join(loc_cat_folder, image_fname)
            src_path = os.path.join(images_folder, image_fname)
            shutil.copyfile(src_path, dst_path)

    # Drop the raw dump once the curated layout is in place.
    shutil.rmtree(images_folder)
    os.remove(annotations_file)
# SVIRO #################################################################
def download_sviro(data_dir):
    """Download the SVIRO grayscale-rectangle split and stage it."""
    # Original URL: https://sviro.kl.dfki.de
    target = stage_path(data_dir, "sviro")
    archive = os.path.join(data_dir,
                           "sviro_grayscale_rectangle_classification.zip")
    download_and_extract("https://sviro.kl.dfki.de/?wpdmdl=1731", archive)
    # The archive extracts to "SVIRO_DOMAINBED"; move it to the staged path.
    os.rename(os.path.join(data_dir, "SVIRO_DOMAINBED"), target)
if __name__ == "__main__":
    # CLI entry point: fetches/processes datasets into --data_dir. All but
    # one call are commented out so each invocation handles one dataset.
    parser = argparse.ArgumentParser(description='Download datasets')
    parser.add_argument('--data_dir', type=str, required=True)
    args = parser.parse_args()

    # download_mnist(args.data_dir)
    # download_pacs(args.data_dir)
    # download_office_home(args.data_dir)
    # download_domain_net(args.data_dir)
    # download_vlcs(args.data_dir)
    process_terra_incognita(args.data_dir)
    # download_sviro(args.data_dir)
    # Camelyon17Dataset(root_dir=args.data_dir, download=True)
    #FMoWDataset(root_dir=args.data_dir, download=True)
| 14,359 | 31.860412 | 109 | py |
aidgn | aidgn-main/domainbed/scripts/collect_results.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import collections
import argparse
import functools
import glob
import pickle
import itertools
import json
import os
import random
import sys
import numpy as np
import tqdm
from domainbed import datasets
from domainbed import algorithms
from domainbed.lib import misc, reporting
from domainbed import model_selection
from domainbed.lib.query import Q
import warnings
def format_mean(data, latex):
    """Given a list of datapoints, return a (mean, standard error, string)
    triple, with values scaled to percentages.

    An empty input yields ``(None, None, "X")`` so callers can render a
    missing table cell.
    """
    if len(data) == 0:
        return None, None, "X"
    values = list(data)
    mean = 100 * np.mean(values)
    # Standard error of the mean: std / sqrt(n). The previous form divided
    # *inside* np.std (relying on list/scalar broadcasting), which is
    # numerically equivalent but obscures the intent.
    err = 100 * np.std(values) / np.sqrt(len(values))
    if latex:
        return mean, err, "{:.1f} $\\pm$ {:.1f}".format(mean, err)
    else:
        return mean, err, "{:.1f} +/- {:.1f}".format(mean, err)
def print_table(table, header_text, row_labels, col_labels, colwidth=10,
                latex=True):
    """Pretty-print a 2D array of data as LaTeX (default) or plain text.

    Note: mutates ``table`` in place -- each row gets its label prepended
    and the column-label row is inserted at the top.
    """
    print("")
    if latex:
        # Column count is taken before the label column is prepended.
        num_cols = len(table[0])
        for preamble_line in ("\\begin{center}",
                              "\\adjustbox{max width=\\textwidth}{%",
                              "\\begin{tabular}{l" + "c" * num_cols + "}",
                              "\\toprule"):
            print(preamble_line)
    else:
        print("--------", header_text)

    for label, row in zip(row_labels, table):
        row.insert(0, label)
    if latex:
        col_labels = ["\\textbf{" + str(c).replace("%", "\\%") + "}"
                      for c in col_labels]
    table.insert(0, col_labels)

    for idx, row in enumerate(table):
        misc.print_row(row, colwidth=colwidth, latex=latex)
        if latex and idx == 0:
            print("\\midrule")

    if latex:
        print("\\bottomrule")
        print("\\end{tabular}}")
        print("\\end{center}")
def print_results_tables(records, selection_method, latex):
    """Given all records, print a results table for each dataset."""
    # Attach the sweep accuracy chosen by `selection_method` to every
    # (trial_seed, dataset, algorithm, test_env) group; drop groups for
    # which no accuracy could be selected.
    grouped_records = reporting.get_grouped_records(records).map(lambda group:
        { **group, "sweep_acc": selection_method.sweep_acc(group["records"]) }
    ).filter(lambda g: g["sweep_acc"] is not None)

    # read algorithm names and sort (predefined order)
    alg_names = Q(records).select("args.algorithm").unique()
    alg_names = ([n for n in algorithms.ALGORITHMS if n in alg_names] +
        [n for n in alg_names if n not in algorithms.ALGORITHMS])

    # read dataset names and sort (lexicographic order)
    dataset_names = Q(records).select("args.dataset").unique().sorted()
    dataset_names = [d for d in datasets.DATASETS if d in dataset_names]

    for dataset in dataset_names:
        if latex:
            print()
            print("\\subsubsection{{{}}}".format(dataset))
        test_envs = range(datasets.num_environments(dataset))

        # One row per algorithm; one column per test env plus an average.
        table = [[None for _ in [*test_envs, "Avg"]] for _ in alg_names]
        for i, algorithm in enumerate(alg_names):
            means = []
            for j, test_env in enumerate(test_envs):
                trial_accs = (grouped_records
                    .filter_equals(
                        "dataset, algorithm, test_env",
                        (dataset, algorithm, test_env)
                    ).select("sweep_acc"))
                mean, err, table[i][j] = format_mean(trial_accs, latex)
                means.append(mean)
            if None in means:
                # Incomplete sweep for this algorithm: no meaningful average.
                table[i][-1] = "X"
            else:
                table[i][-1] = "{:.1f}".format(sum(means) / len(means))

        col_labels = [
            "Algorithm",
            *datasets.get_dataset_class(dataset).ENVIRONMENTS,
            "Avg"
        ]
        header_text = (f"Dataset: {dataset}, "
            f"model selection method: {selection_method.name}")
        print_table(table, header_text, alg_names, list(col_labels),
            colwidth=20, latex=latex)

    # Print an "averages" table
    if latex:
        print()
        print("\\subsubsection{Averages}")

    table = [[None for _ in [*dataset_names, "Avg"]] for _ in alg_names]
    for i, algorithm in enumerate(alg_names):
        means = []
        for j, dataset in enumerate(dataset_names):
            # Average sweep accuracy over test envs within each trial seed,
            # then summarise across trial seeds with format_mean.
            trial_averages = (grouped_records
                .filter_equals("algorithm, dataset", (algorithm, dataset))
                .group("trial_seed")
                .map(lambda trial_seed, group:
                    group.select("sweep_acc").mean()
                )
            )
            mean, err, table[i][j] = format_mean(trial_averages, latex)
            means.append(mean)
        if None in means:
            table[i][-1] = "X"
        else:
            table[i][-1] = "{:.1f}".format(sum(means) / len(means))

    col_labels = ["Algorithm", *dataset_names, "Avg"]
    header_text = f"Averages, model selection method: {selection_method.name}"
    print_table(table, header_text, alg_names, col_labels, colwidth=25,
        latex=latex)
if __name__ == "__main__":
    np.set_printoptions(suppress=True)

    parser = argparse.ArgumentParser(
        description="Domain generalization testbed")
    parser.add_argument("--input_dir", type=str, required=True)
    parser.add_argument("--latex", action="store_true")
    args = parser.parse_args()

    # Mirror all stdout into results.tex / results.txt inside the sweep dir.
    results_file = "results.tex" if args.latex else "results.txt"
    sys.stdout = misc.Tee(os.path.join(args.input_dir, results_file), "w")

    records = reporting.load_records(args.input_dir)

    if args.latex:
        print("\\documentclass{article}")
        print("\\usepackage{booktabs}")
        print("\\usepackage{adjustbox}")
        print("\\begin{document}")
        print("\\section{Full DomainBed results}")
        print("% Total records:", len(records))
    else:
        print("Total records:", len(records))

    # One results section per model-selection criterion.
    SELECTION_METHODS = [
        model_selection.IIDAccuracySelectionMethod,
        model_selection.LeaveOneOutSelectionMethod,
        model_selection.OracleSelectionMethod,
    ]
    for selection_method in SELECTION_METHODS:
        if args.latex:
            print()
            print("\\subsection{{Model selection: {}}}".format(
                selection_method.name))
        print_results_tables(records, selection_method, args.latex)

    if args.latex:
        print("\\end{document}")
| 6,272 | 33.092391 | 78 | py |
aidgn | aidgn-main/domainbed/scripts/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| 72 | 23.333333 | 70 | py |
aidgn | aidgn-main/domainbed/scripts/train.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import collections
import json
import os
import random
import sys
import time
import uuid
import numpy as np
import PIL
import torch
import torchvision
import torch.utils.data
from matplotlib import pyplot as plt
from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed import model_selection
from domainbed.lib import misc
from domainbed.lib.fast_data_loader import InfiniteDataLoader, FastDataLoader
from domainbed.lib.query import Q
if __name__ == "__main__":
    # Single-run training entry point: parse CLI args, build dataset splits
    # and loaders, instantiate the algorithm, then run the training loop
    # with periodic evaluation and results logging.
    parser = argparse.ArgumentParser(description='Domain generalization')
    parser.add_argument('--data_dir', type=str)
    parser.add_argument('--dataset', type=str, default="RotatedMNIST")
    parser.add_argument('--algorithm', type=str, default="ERM")
    parser.add_argument('--task', type=str, default="domain_generalization",
        choices=["domain_generalization", "domain_adaptation"])
    parser.add_argument('--hparams', type=str,
        help='JSON-serialized hparams dict')
    parser.add_argument('--hparams_seed', type=int, default=0,
        help='Seed for random hparams (0 means "default hparams")')
    parser.add_argument('--trial_seed', type=int, default=0,
        help='Trial number (used for seeding split_dataset and '
        'random_hparams).')
    parser.add_argument('--seed', type=int, default=0,
        help='Seed for everything else')
    parser.add_argument('--steps', type=int, default=None,
        help='Number of steps. Default is dataset-dependent.')
    parser.add_argument('--checkpoint_freq', type=int, default=None,
        help='Checkpoint every N steps. Default is dataset-dependent.')
    parser.add_argument('--test_envs', type=int, nargs='+', default=[0])
    parser.add_argument('--output_dir', type=str, default="train_output")
    parser.add_argument('--holdout_fraction', type=float, default=0.2)
    parser.add_argument('--uda_holdout_fraction', type=float, default=0,
        help="For domain adaptation, % of test to use unlabeled for training.")
    parser.add_argument('--skip_model_save', action='store_true')
    parser.add_argument('--save_model_every_checkpoint', action='store_true')
    parser.add_argument('--gpu', default='0', type=str)
    parser.add_argument('--select_M', default=1, type=int)
    args = parser.parse_args()

    # If we ever want to implement checkpointing, just persist these values
    # every once in a while, and then load them from disk here.
    start_step = 0
    algorithm_dict = None

    os.makedirs(args.output_dir, exist_ok=True)
    # Mirror stdout/stderr into log files inside the output directory.
    sys.stdout = misc.Tee(os.path.join(args.output_dir, 'out.txt'))
    sys.stderr = misc.Tee(os.path.join(args.output_dir, 'err.txt'))

    print("Environment:")
    print("\tPython: {}".format(sys.version.split(" ")[0]))
    print("\tPyTorch: {}".format(torch.__version__))
    print("\tTorchvision: {}".format(torchvision.__version__))
    print("\tCUDA: {}".format(torch.version.cuda))
    print("\tCUDNN: {}".format(torch.backends.cudnn.version()))
    print("\tNumPy: {}".format(np.__version__))
    print("\tPIL: {}".format(PIL.__version__))

    print('Args:')
    for k, v in sorted(vars(args).items()):
        print('\t{}: {}'.format(k, v))

    # hparams_seed == 0 selects the registry defaults; any other value draws
    # a random configuration keyed on (hparams_seed, trial_seed).
    if args.hparams_seed == 0:
        hparams = hparams_registry.default_hparams(args.algorithm, args.dataset)
    else:
        hparams = hparams_registry.random_hparams(args.algorithm, args.dataset,
            misc.seed_hash(args.hparams_seed, args.trial_seed))
    if args.hparams:
        # Explicit --hparams JSON overrides individual entries.
        hparams.update(json.loads(args.hparams))

    print('HParams:')
    for k, v in sorted(hparams.items()):
        print('\t{}: {}'.format(k, v))

    # Seed every RNG and force deterministic cuDNN behaviour.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"

    # Datasets are looked up by class name in the datasets module.
    if args.dataset in vars(datasets):
        dataset = vars(datasets)[args.dataset](args.data_dir,
            args.test_envs, hparams)
    else:
        raise NotImplementedError

    # Split each env into an 'in-split' and an 'out-split'. We'll train on
    # each in-split except the test envs, and evaluate on all splits.

    # To allow unsupervised domain adaptation experiments, we split each test
    # env into 'in-split', 'uda-split' and 'out-split'. The 'in-split' is used
    # by collect_results.py to compute classification accuracies. The
    # 'out-split' is used by the Oracle model selectino method. The unlabeled
    # samples in 'uda-split' are passed to the algorithm at training time if
    # args.task == "domain_adaptation". If we are interested in comparing
    # domain generalization and domain adaptation results, then domain
    # generalization algorithms should create the same 'uda-splits', which will
    # be discared at training.
    in_splits = []
    out_splits = []
    uda_splits = []
    for env_i, env in enumerate(dataset):
        uda = []

        out, in_ = misc.split_dataset(env,
            int(len(env)*args.holdout_fraction),
            misc.seed_hash(args.trial_seed, env_i))

        if env_i in args.test_envs:
            uda, in_ = misc.split_dataset(in_,
                int(len(in_)*args.uda_holdout_fraction),
                misc.seed_hash(args.trial_seed, env_i))

        if hparams['class_balanced']:
            in_weights = misc.make_weights_for_balanced_classes(in_)
            out_weights = misc.make_weights_for_balanced_classes(out)
            if uda is not None:
                uda_weights = misc.make_weights_for_balanced_classes(uda)
        else:
            in_weights, out_weights, uda_weights = None, None, None
        in_splits.append((in_, in_weights))
        out_splits.append((out, out_weights))
        if len(uda):
            uda_splits.append((uda, uda_weights))

    if args.task == "domain_adaptation" and len(uda_splits) == 0:
        raise ValueError("Not enough unlabeled samples for domain adaptation.")

    # Infinite (resampling) loaders over the training in-splits, excluding
    # the held-out test environments.
    train_loaders = [InfiniteDataLoader(
        dataset=env,
        weights=env_weights,
        batch_size=hparams['batch_size'],
        num_workers=dataset.N_WORKERS)
        for i, (env, env_weights) in enumerate(in_splits)
        if i not in args.test_envs]

    uda_loaders = [InfiniteDataLoader(
        dataset=env,
        weights=env_weights,
        batch_size=hparams['batch_size'],
        num_workers=dataset.N_WORKERS)
        for i, (env, env_weights) in enumerate(uda_splits)
        if i in args.test_envs]

    # Finite loaders for evaluation over every split.
    eval_loaders = [FastDataLoader(
        dataset=env,
        batch_size=64,
        num_workers=dataset.N_WORKERS)
        for env, _ in (in_splits + out_splits + uda_splits)]
    eval_weights = [None for _, weights in (in_splits + out_splits + uda_splits)]
    eval_loader_names = ['env{}_in'.format(i)
        for i in range(len(in_splits))]
    eval_loader_names += ['env{}_out'.format(i)
        for i in range(len(out_splits))]
    eval_loader_names += ['env{}_uda'.format(i)
        for i in range(len(uda_splits))]

    algorithm_class = algorithms.get_algorithm_class(args.algorithm)
    algorithm = algorithm_class(dataset.input_shape, dataset.num_classes,
        len(dataset) - len(args.test_envs), hparams)

    if algorithm_dict is not None:
        algorithm.load_state_dict(algorithm_dict)

    algorithm.to(device)

    # One minibatch per training environment per step.
    train_minibatches_iterator = zip(*train_loaders)
    uda_minibatches_iterator = zip(*uda_loaders)
    checkpoint_vals = collections.defaultdict(lambda: [])

    steps_per_epoch = min([len(env)/hparams['batch_size'] for env,_ in in_splits])

    n_steps = args.steps or dataset.N_STEPS
    checkpoint_freq = args.checkpoint_freq or dataset.CHECKPOINT_FREQ

    def save_checkpoint(filename):
        # Persist everything needed to rebuild the algorithm; no-op when
        # --skip_model_save is given. Note algorithm.cpu() moves the model.
        if args.skip_model_save:
            return
        save_dict = {
            "args": vars(args),
            "model_input_shape": dataset.input_shape,
            "model_num_classes": dataset.num_classes,
            "model_num_domains": len(dataset) - len(args.test_envs),
            "model_hparams": hparams,
            "model_dict": algorithm.cpu().state_dict()
        }
        torch.save(save_dict, os.path.join(args.output_dir, filename))

    # NOTE(review): max_val_acc / training_val_max_model are initialised but
    # never updated in this visible code -- confirm whether validation-based
    # model selection was meant to happen here.
    max_val_acc = 0
    training_val_max_model = algorithm.state_dict()
    last_results_keys = None
    for step in range(start_step, n_steps):
        step_start_time = time.time()
        # NOTE(review): both branches of hparams['PAC'] are identical --
        # presumably one of them was meant to differ; confirm.
        # Each minibatch element is (x, y, z); z is presumably a per-sample
        # domain/auxiliary label -- confirm against the dataset definition.
        if hparams['PAC']:
            minibatches_device = [(x.to(device), y.to(device), z.to(device))
                for x,y,z in next(train_minibatches_iterator)]
        else:
            minibatches_device = [(x.to(device), y.to(device), z.to(device))
                for x,y,z in next(train_minibatches_iterator)]
        if args.task == "domain_adaptation":
            uda_device = [x.to(device)
                for x,_ in next(uda_minibatches_iterator)]
        else:
            uda_device = None
        step_vals = algorithm.update(step, minibatches_device, uda_device)
        algorithm.lr_scheduler.step()
        checkpoint_vals['step_time'].append(time.time() - step_start_time)

        for key, val in step_vals.items():
            checkpoint_vals[key].append(val)

        # Periodic evaluation + results logging.
        if (step % checkpoint_freq == 0) or (step == n_steps - 1):
            results = {
                'step': step,
                'epoch': step / steps_per_epoch,
            }

            # Average the per-step training metrics since the last checkpoint.
            for key, val in checkpoint_vals.items():
                results[key] = np.mean(val)

            evals = zip(eval_loader_names, eval_loaders, eval_weights)
            for name, loader, weights in evals:
                acc = misc.maccuracy(algorithm, loader, weights, device)
                results[name+'_acc'] = acc

            # Re-print the header row whenever the metric set changes.
            results_keys = sorted(results.keys())
            if results_keys != last_results_keys:
                misc.print_row(results_keys, colwidth=12)
                last_results_keys = results_keys
            misc.print_row([results[key] for key in results_keys],
                colwidth=12)

            results.update({
                'hparams': hparams,
                'args': vars(args)
            })

            # Append one JSON record per checkpoint to results.jsonl.
            epochs_path = os.path.join(args.output_dir, 'results.jsonl')
            with open(epochs_path, 'a') as f:
                f.write(json.dumps(results, sort_keys=True) + "\n")

            algorithm_dict = algorithm.state_dict()
            start_step = step + 1
            checkpoint_vals = collections.defaultdict(lambda: [])

            if args.save_model_every_checkpoint:
                save_checkpoint(f'model_step{step}.pkl')

    # Marker file so sweep tooling can tell this run completed.
    with open(os.path.join(args.output_dir, 'done'), 'w') as f:
        f.write('done')
| 10,857 | 37.778571 | 82 | py |
aidgn | aidgn-main/domainbed/scripts/list_top_hparams.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Example usage:
python -u -m domainbed.scripts.list_top_hparams \
--input_dir domainbed/misc/test_sweep_data --algorithm ERM \
--dataset VLCS --test_env 0
"""
import collections
import argparse
import functools
import glob
import pickle
import itertools
import json
import os
import random
import sys
import numpy as np
import tqdm
from domainbed import datasets
from domainbed import algorithms
from domainbed.lib import misc, reporting
from domainbed import model_selection
from domainbed.lib.query import Q
import warnings
def todo_rename(records, selection_method, latex):
    """Print a per-dataset results table plus an averages table.

    NOTE(review): this function appears to be duplicated from
    collect_results.print_results_tables. It calls ``format_mean`` and
    ``print_table``, which are neither defined nor imported in this module,
    so invoking it would raise NameError; it is not called from __main__
    here. Presumably dead code -- confirm before relying on it.
    """
    grouped_records = reporting.get_grouped_records(records).map(lambda group:
        { **group, "sweep_acc": selection_method.sweep_acc(group["records"]) }
    ).filter(lambda g: g["sweep_acc"] is not None)

    # read algorithm names and sort (predefined order)
    alg_names = Q(records).select("args.algorithm").unique()
    alg_names = ([n for n in algorithms.ALGORITHMS if n in alg_names] +
        [n for n in alg_names if n not in algorithms.ALGORITHMS])

    # read dataset names and sort (lexicographic order)
    dataset_names = Q(records).select("args.dataset").unique().sorted()
    dataset_names = [d for d in datasets.DATASETS if d in dataset_names]

    for dataset in dataset_names:
        if latex:
            print()
            print("\\subsubsection{{{}}}".format(dataset))
        test_envs = range(datasets.num_environments(dataset))

        # One row per algorithm; one column per test env plus an average.
        table = [[None for _ in [*test_envs, "Avg"]] for _ in alg_names]
        for i, algorithm in enumerate(alg_names):
            means = []
            for j, test_env in enumerate(test_envs):
                trial_accs = (grouped_records
                    .filter_equals(
                        "dataset, algorithm, test_env",
                        (dataset, algorithm, test_env)
                    ).select("sweep_acc"))
                mean, err, table[i][j] = format_mean(trial_accs, latex)
                means.append(mean)
            if None in means:
                # Incomplete sweep: no meaningful average.
                table[i][-1] = "X"
            else:
                table[i][-1] = "{:.1f}".format(sum(means) / len(means))

        col_labels = [
            "Algorithm",
            *datasets.get_dataset_class(dataset).ENVIRONMENTS,
            "Avg"
        ]
        header_text = (f"Dataset: {dataset}, "
            f"model selection method: {selection_method.name}")
        print_table(table, header_text, alg_names, list(col_labels),
            colwidth=20, latex=latex)

    # Print an "averages" table
    if latex:
        print()
        print("\\subsubsection{Averages}")

    table = [[None for _ in [*dataset_names, "Avg"]] for _ in alg_names]
    for i, algorithm in enumerate(alg_names):
        means = []
        for j, dataset in enumerate(dataset_names):
            # Average sweep accuracy per trial seed, then summarise trials.
            trial_averages = (grouped_records
                .filter_equals("algorithm, dataset", (algorithm, dataset))
                .group("trial_seed")
                .map(lambda trial_seed, group:
                    group.select("sweep_acc").mean()
                )
            )
            mean, err, table[i][j] = format_mean(trial_averages, latex)
            means.append(mean)
        if None in means:
            table[i][-1] = "X"
        else:
            table[i][-1] = "{:.1f}".format(sum(means) / len(means))

    col_labels = ["Algorithm", *dataset_names, "Avg"]
    header_text = f"Averages, model selection method: {selection_method.name}"
    print_table(table, header_text, alg_names, col_labels, colwidth=25,
        latex=latex)
if __name__ == "__main__":
    np.set_printoptions(suppress=True)

    parser = argparse.ArgumentParser(
        description="Domain generalization testbed")
    parser.add_argument("--input_dir", required=True)
    parser.add_argument('--dataset', required=True)
    parser.add_argument('--algorithm', required=True)
    parser.add_argument('--test_env', type=int, required=True)
    args = parser.parse_args()

    records = reporting.load_records(args.input_dir)
    print("Total records:", len(records))

    # Keep only groups matching the requested (dataset, algorithm, test env).
    records = reporting.get_grouped_records(records)
    records = records.filter(
        lambda r:
            r['dataset'] == args.dataset and
            r['algorithm'] == args.algorithm and
            r['test_env'] == args.test_env
    )

    SELECTION_METHODS = [
        model_selection.IIDAccuracySelectionMethod,
        model_selection.LeaveOneOutSelectionMethod,
        model_selection.OracleSelectionMethod,
    ]

    # For each selection criterion, print the best hparams per trial seed
    # along with the runs (output dirs) they came from.
    for selection_method in SELECTION_METHODS:
        print(f'Model selection: {selection_method.name}')

        for group in records:
            print(f"trial_seed: {group['trial_seed']}")
            best_hparams = selection_method.hparams_accs(group['records'])
            for run_acc, hparam_records in best_hparams:
                print(f"\t{run_acc}")
                # All records in this group should share one hparams dict.
                for r in hparam_records:
                    assert(r['hparams'] == hparam_records[0]['hparams'])
                print("\t\thparams:")
                for k, v in sorted(hparam_records[0]['hparams'].items()):
                    print('\t\t\t{}: {}'.format(k, v))
                print("\t\toutput_dirs:")
                output_dirs = hparam_records.select('args.output_dir').unique()
                for output_dir in output_dirs:
                    print(f"\t\t\t{output_dir}")
aidgn | aidgn-main/domainbed/lib/tsne.py | import argparse
import os
import pickle
import numpy as np
import torch
from matplotlib import pyplot as plt
from sklearn.manifold import TSNE
# def plot_TNSE(X_2d_tr, tr_labels, label_target_names, filename):
# colors = ["red", "green", "blue", "black", "brown", "grey", "orange", "yellow", "pink", "cyan", "magenta"]
# fig = plt.figure(figsize=(16, 16))
# ax = plt.axes(projection='3d')
# for i, label in zip(range(len(label_target_names)), label_target_names):
# ax.scatter(X_2d_tr[tr_labels == i, 0], X_2d_tr[tr_labels == i, 1], X_2d_tr[tr_labels == i, 2], c=colors[i], marker=".", label=label)
#
# plt.savefig(filename)
def plot_TNSE_domain(X_2d_tr, tr_labels, label_target_names, filename):
    """Scatter-plot 2-D embeddings coloured by domain label; save to *filename*.

    ``tr_labels`` holds integer domain indices into ``label_target_names``.
    """
    colors = ["black", "green", "blue", "orange", "brown", "grey", "orange",
              "yellow", "pink", "cyan", "magenta"]
    plt.figure(figsize=(16, 16))
    plt.tick_params(labelsize=23)
    # NOTE(review): the original branched on i < 3 with two byte-identical
    # scatter calls; the dead conditional has been collapsed.
    for i, label in enumerate(label_target_names):
        plt.scatter(X_2d_tr[tr_labels == i, 0], X_2d_tr[tr_labels == i, 1],
                    c=colors[i], marker=".", label=label)
    plt.savefig(filename)
def plot_TNSE_class(X_2d_tr, tr_labels, label_target_names, filename):
    """Scatter-plot 2-D embeddings coloured by class label; save to *filename*."""
    colors = ["red", "green", "blue", "brown", "orange", "yellow", "cyan",
              "magenta", "brown", "grey"]
    plt.figure(figsize=(16, 16))
    plt.tick_params(labelsize=23)
    for i, label in enumerate(label_target_names):
        mask = tr_labels == i
        plt.scatter(X_2d_tr[mask, 0], X_2d_tr[mask, 1],
                    c=colors[i], marker=".", label=label)
    plt.savefig(filename)
def unique(list1):
    """Return the distinct elements of *list1* in first-seen order.

    Uses dict.fromkeys (insertion-ordered since Python 3.7) instead of the
    previous O(n^2) membership scan. Elements must be hashable, which holds
    for the integer/string label values used in this module.
    """
    return list(dict.fromkeys(list1))
def tsne_plot(Z_out, labels, domain_labels, dir_name):
    """Project features to 2-D with t-SNE and save class- and
    domain-coloured scatter plots into ``dir_name``.

    NOTE(review): filenames are built by string concatenation, so dir_name
    is assumed to end with a path separator -- confirm at call sites.
    """
    labels = np.asarray(labels)
    domain_labels = np.asarray(domain_labels)
    label_target_names = unique(labels)
    domain_label_target_names = unique(domain_labels)
    # PCA initialisation for t-SNE.
    tsne_model = TSNE(n_components=2, init="pca")
    Z_2d = tsne_model.fit_transform(Z_out)
    plot_TNSE_class(Z_2d, labels, label_target_names, dir_name + "Z_class_tSNE.png")
    plot_TNSE_domain(Z_2d, domain_labels, domain_label_target_names, dir_name + "Z_domain_tSNE.png")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--plotdir", help="Path to configuration file")
    bash_args = parser.parse_args()
    dir_name = bash_args.plotdir

    # Load pickled features (Z), class labels (Y) and domain labels for the
    # source splits and for the held-out test split. pickle.load is only
    # safe because these are locally produced files.
    with open(dir_name + "Z_out.pkl", "rb") as fp:
        Z_out = pickle.load(fp)
    with open(dir_name + "Y_out.pkl", "rb") as fp:
        Y_out = pickle.load(fp)
    with open(dir_name + "Y_domain_out.pkl", "rb") as fp:
        Y_domain_out = pickle.load(fp)
    with open(dir_name + "Z_test.pkl", "rb") as fp:
        Z_test = pickle.load(fp)
    with open(dir_name + "Y_test.pkl", "rb") as fp:
        Y_test = pickle.load(fp)
    with open(dir_name + "Y_domain_test.pkl", "rb") as fp:
        Y_domain_test = pickle.load(fp)

    # Change label of target domain from -1 to #source_domains + 1
    Y_domain_label = len(unique(Y_domain_out))
    for i in range(len(Y_domain_test)):
        Y_domain_test[i] = Y_domain_label

    # Concatenate source and target splits (lists) before plotting.
    Z_out += Z_test
    Y_out += Y_test
    Y_domain_out += Y_domain_test
    tsne_plot(Z_out, Y_out, Y_domain_out, dir_name)
aidgn | aidgn-main/domainbed/lib/reporting.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import collections
import json
import os
import tqdm
from domainbed.lib.query import Q
def load_records(path):
    """Load every ``<path>/<subdir>/results.jsonl`` into a ``Q`` wrapper.

    Each line of each results file is parsed as one JSON record.
    Subdirectories without a readable results file are skipped silently.
    """
    records = []
    # The unused enumerate index has been dropped; tqdm still gets a sized
    # iterable for its progress bar.
    for subdir in tqdm.tqdm(os.listdir(path),
                            ncols=80,
                            leave=False):
        results_path = os.path.join(path, subdir, "results.jsonl")
        try:
            with open(results_path, "r") as f:
                for line in f:
                    # json.loads tolerates trailing whitespace; the previous
                    # line[:-1] slice corrupted the record when the final
                    # line had no trailing newline.
                    records.append(json.loads(line))
        except IOError:
            pass
    return Q(records)
def get_grouped_records(records):
    """Group records by (trial_seed, dataset, algorithm, test_env). Because
    records can have multiple test envs, a given record may appear in more
    than one group."""
    grouped = collections.defaultdict(list)
    for record in records:
        args = record["args"]
        for env in args["test_envs"]:
            key = (args["trial_seed"], args["dataset"], args["algorithm"], env)
            grouped[key].append(record)
    return Q([{"trial_seed": t, "dataset": d, "algorithm": a, "test_env": e,
               "records": Q(group)}
              for (t, d, a, e), group in grouped.items()])
| 1,288 | 30.439024 | 79 | py |
aidgn | aidgn-main/domainbed/lib/misc.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Things that don't belong anywhere else
"""
import hashlib
import json
import os
import sys
from shutil import copyfile
from collections import OrderedDict
from numbers import Number
import operator
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
import pickle
from collections import Counter
def make_weights_for_balanced_classes(dataset):
    """Return one sampling weight per example such that every class has the
    same total probability mass (weight 1 / (class_count * n_classes))."""
    counts = Counter()
    labels = []
    for _, y in dataset:
        label = int(y)
        counts[label] += 1
        labels.append(label)
    n_classes = len(counts)
    per_class = {label: 1 / (count * n_classes)
                 for label, count in counts.items()}
    weights = torch.zeros(len(dataset))
    for idx, label in enumerate(labels):
        weights[idx] = per_class[label]
    return weights
def pdb():
    """Interrupt execution and drop into an interactive PDB session.

    Restores the real stdout first so the debugger prompt stays visible even
    when sys.stdout has been redirected (e.g. by Tee).
    """
    sys.stdout = sys.__stdout__
    import pdb as _pdb
    print("Launching PDB, enter 'n' to step to parent function.")
    _pdb.set_trace()
def seed_hash(*args):
    """
    Derive a deterministic 31-bit integer from the repr of *args*, for use
    as a random seed.
    """
    digest = hashlib.md5(str(args).encode("utf-8")).hexdigest()
    return int(digest, 16) % (2 ** 31)
def print_separator():
    """Print an 80-character horizontal rule."""
    print(80 * "=")
def print_row(row, colwidth=10, latex=False):
    """Print one table row, each cell padded/truncated to `colwidth`.

    In latex mode cells are joined with '&' and the row is terminated with
    a LaTeX line break; floats are rendered with 10 decimal places.
    """
    if latex:
        sep, end_ = " & ", "\\\\"
    else:
        sep, end_ = "  ", ""

    def format_val(x):
        if np.issubdtype(type(x), np.floating):
            x = "{:.10f}".format(x)
        return str(x).ljust(colwidth)[:colwidth]

    print(sep.join(format_val(x) for x in row), end_)
class _SplitDataset(torch.utils.data.Dataset):
    """Used by split_dataset: a read-only view over `underlying_dataset`
    restricted to the index list `keys`."""
    def __init__(self, underlying_dataset, keys):
        super(_SplitDataset, self).__init__()
        self.underlying_dataset = underlying_dataset
        self.keys = keys

    def __getitem__(self, key):
        return self.underlying_dataset[self.keys[key]]

    def __len__(self):
        return len(self.keys)


def split_dataset(dataset, n, seed=0):
    """
    Return a pair of datasets corresponding to a random split of the given
    dataset, with n datapoints in the first dataset and the rest in the last,
    using the given random seed.
    """
    assert n <= len(dataset)
    indices = list(range(len(dataset)))
    np.random.RandomState(seed).shuffle(indices)
    return (_SplitDataset(dataset, indices[:n]),
            _SplitDataset(dataset, indices[n:]))
def random_pairs_of_minibatches(minibatches):
    """Pair each minibatch with its successor under a random permutation
    (cyclically), truncating both members of a pair to the shorter batch."""
    perm = torch.randperm(len(minibatches)).tolist()
    pairs = []
    for idx in range(len(minibatches)):
        p = perm[idx]
        q = perm[(idx + 1) % len(minibatches)]
        xi, yi = minibatches[p][0], minibatches[p][1]
        xj, yj = minibatches[q][0], minibatches[q][1]
        min_n = min(len(xi), len(xj))
        pairs.append(((xi[:min_n], yi[:min_n]), (xj[:min_n], yj[:min_n])))
    return pairs
def accuracy(network, loader, weights, device):
    """Weighted classification accuracy of `network` over `loader`.

    `weights` is either None (uniform) or a 1-D tensor of per-example weights
    consumed sequentially across batches. Single-logit outputs are treated as
    binary predictions (positive => class 1); otherwise argmax is used.
    """
    correct = 0.0
    total = 0.0
    offset = 0
    network.eval()
    with torch.no_grad():
        for x, y in loader:
            x, y = x.to(device), y.to(device)
            p = network.predict(x)
            if weights is None:
                batch_weights = torch.ones(len(x))
            else:
                batch_weights = weights[offset:offset + len(x)]
                offset += len(x)
            batch_weights = batch_weights.to(device)
            if p.size(1) == 1:
                hits = p.gt(0).eq(y).float() * batch_weights.view(-1, 1)
            else:
                hits = p.argmax(1).eq(y).float() * batch_weights
            correct += hits.sum().item()
            total += batch_weights.sum().item()
    network.train()
    return correct / total
class Tee:
    """Duplicate every write to both the original stdout and a log file.

    Assign an instance to sys.stdout to mirror program output into `fname`.
    """
    def __init__(self, fname, mode="a"):
        self.stdout = sys.stdout
        self.file = open(fname, mode)

    def write(self, message):
        for stream in (self.stdout, self.file):
            stream.write(message)
        self.flush()

    def flush(self):
        for stream in (self.stdout, self.file):
            stream.flush()
class ParamDict(OrderedDict):
    """Code adapted from https://github.com/Alok/rl_implementations/tree/master/reptile.
    A dictionary where the values are Tensors, meant to represent weights of
    a model. This subclass lets you perform arithmetic on weights directly."""

    def __init__(self, *args, **kwargs):
        # BUG FIX: was `super().__init__(*args, *kwargs)`, which unpacked the
        # keyword dict as extra *positional* arguments (its keys), so keyword
        # construction such as ParamDict(a=1.0) raised. Forward kwargs
        # properly; positional construction from a dict is unaffected.
        super().__init__(*args, **kwargs)

    def _prototype(self, other, op):
        """Apply `op` elementwise against a scalar or another mapping."""
        if isinstance(other, Number):
            return ParamDict({k: op(v, other) for k, v in self.items()})
        elif isinstance(other, dict):
            return ParamDict({k: op(self[k], other[k]) for k in self})
        else:
            raise NotImplementedError

    def __add__(self, other):
        return self._prototype(other, operator.add)

    def __rmul__(self, other):
        return self._prototype(other, operator.mul)

    __mul__ = __rmul__

    def __neg__(self):
        return ParamDict({k: -v for k, v in self.items()})

    def __rsub__(self, other):
        # a - b := a + (-b)
        # NOTE(review): as reflected subtraction this computes self - other,
        # not other - self; preserved because __sub__ aliases it and callers
        # rely on the forward-subtraction behavior.
        return self.__add__(other.__neg__())

    __sub__ = __rsub__

    def __truediv__(self, other):
        return self._prototype(other, operator.truediv)
def maccuracy(network, loader, weights, device):
    """Like `accuracy`, but for loaders that yield (x, y, extra) triples;
    the third element of each batch is ignored."""
    correct = 0.0
    total = 0.0
    offset = 0
    network.eval()
    with torch.no_grad():
        for x, y, _ in loader:
            x, y = x.to(device), y.to(device)
            p = network.predict(x)
            if weights is None:
                batch_weights = torch.ones(len(x))
            else:
                batch_weights = weights[offset:offset + len(x)]
                offset += len(x)
            batch_weights = batch_weights.to(device)
            if p.size(1) == 1:
                hits = p.gt(0).eq(y).float() * batch_weights.view(-1, 1)
            else:
                hits = p.argmax(1).eq(y).float() * batch_weights
            correct += hits.sum().item()
            total += batch_weights.sum().item()
    network.train()
    return correct / total
def get_feat_label(network, loader, device):
    """Run `network.featurizer` over every batch of `loader` and return the
    concatenated features and labels; (None, None) for an empty loader."""
    feats = []
    labels = []
    network.eval()
    with torch.no_grad():
        for x, y, _ in loader:
            feats.append(network.featurizer(x.to(device)))
            labels.append(y.to(device))
    network.train()
    if not feats:
        return None, None
    return torch.cat(feats, dim=0), torch.cat(labels, dim=0)
def knn_accuracy(network, loader, device, source_feats, source_labels, K=1):
    """K-nearest-neighbour accuracy: classify each featurized sample by a
    majority vote over the K closest `source_feats` (cosine distance)."""
    correct = 0
    total = 0
    source_labels = source_labels.view(-1)
    network.eval()
    with torch.no_grad():
        for x, y, _ in loader:
            x, y = x.to(device), y.to(device)
            f = network.featurizer(x)
            # Indices of the K nearest source samples per query row.
            nearest = cosine_matrix(f, source_feats).argsort()[:, :K]
            for i in range(x.size(0)):
                total += 1
                voted = torch.bincount(source_labels[nearest[i]]).argmax()
                if voted == y[i]:
                    correct += 1
    network.train()
    return correct / total


def cosine_matrix(x, y):
    """Pairwise cosine *distance* (1 - cosine similarity), shape
    (x.size(0), y.size(0))."""
    x = F.normalize(x, dim=1)
    y = F.normalize(y, dim=1)
    similarity = (x.unsqueeze(1) * y.unsqueeze(0)).sum(2)
    return 1 - similarity
| 7,663 | 28.251908 | 91 | py |
aidgn | aidgn-main/domainbed/lib/wide_resnet.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
From https://github.com/meliketoy/wide-resnet.pytorch
"""
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution, padding 1, with bias (spatial size preserved when
    stride is 1)."""
    return nn.Conv2d(in_planes,
                     out_planes,
                     kernel_size=3,
                     stride=stride,
                     padding=1,
                     bias=True)


def conv_init(m):
    """Initializer hook for `Module.apply`: Xavier-uniform for conv layers
    (gain sqrt(2)), weight=1 / bias=0 for batch-norm layers."""
    name = m.__class__.__name__
    if 'Conv' in name:
        init.xavier_uniform_(m.weight, gain=np.sqrt(2))
        init.constant_(m.bias, 0)
    elif 'BatchNorm' in name:
        init.constant_(m.weight, 1)
        init.constant_(m.bias, 0)


class wide_basic(nn.Module):
    """Pre-activation wide residual block: BN-ReLU-Conv-Dropout-BN-ReLU-Conv
    plus a (possibly projected) shortcut."""
    def __init__(self, in_planes, planes, dropout_rate, stride=1):
        super(wide_basic, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(
            in_planes, planes, kernel_size=3, padding=1, bias=True)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(
            planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
        # Identity shortcut unless the shape changes, in which case a
        # strided 1x1 projection matches channels / resolution.
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(
                    in_planes, planes, kernel_size=1, stride=stride,
                    bias=True), )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        out = self.dropout(self.conv1(F.relu(self.bn1(x))))
        out = self.conv2(F.relu(self.bn2(out)))
        return out + self.shortcut(x)


class Wide_ResNet(nn.Module):
    """Wide Resnet with the softmax layer chopped off: a feature extractor
    producing `n_outputs`-dimensional vectors."""
    def __init__(self, input_shape, depth, widen_factor, dropout_rate):
        super(Wide_ResNet, self).__init__()
        self.in_planes = 16
        assert (depth - 4) % 6 == 0, 'Wide-resnet depth should be 6n+4'
        n = (depth - 4) / 6
        k = widen_factor
        widths = [16, 16 * k, 32 * k, 64 * k]
        self.conv1 = conv3x3(input_shape[0], widths[0])
        self.layer1 = self._wide_layer(
            wide_basic, widths[1], n, dropout_rate, stride=1)
        self.layer2 = self._wide_layer(
            wide_basic, widths[2], n, dropout_rate, stride=2)
        self.layer3 = self._wide_layer(
            wide_basic, widths[3], n, dropout_rate, stride=2)
        self.bn1 = nn.BatchNorm2d(widths[3], momentum=0.9)
        self.n_outputs = widths[3]

    def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
        """Stack int(num_blocks) blocks; only the first may be strided."""
        blocks = []
        for s in [stride] + [1] * (int(num_blocks) - 1):
            blocks.append(block(self.in_planes, planes, dropout_rate, s))
            self.in_planes = planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = self.conv1(x)
        out = self.layer3(self.layer2(self.layer1(out)))
        out = F.relu(self.bn1(out))
        # 8x8 average pool leaves a 1x1 map (for 32x32 inputs); take it.
        out = F.avg_pool2d(out, 8)
        return out[:, :, 0, 0]
| 3,242 | 29.885714 | 79 | py |
aidgn | aidgn-main/domainbed/lib/query.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""Small query library."""
import collections
import inspect
import json
import types
import unittest
import warnings
import math
import numpy as np
def make_selector_fn(selector):
    """
    If selector is a function, return selector.
    Otherwise, return a function corresponding to the selector string. Examples
    of valid selector strings and the corresponding functions:
        x       lambda obj: obj['x']
        x.y     lambda obj: obj['x']['y']
        x,y     lambda obj: (obj['x'], obj['y'])
    """
    if isinstance(selector, types.FunctionType):
        return selector
    if not isinstance(selector, str):
        raise TypeError
    if ',' in selector:
        sub_fns = [make_selector_fn(part) for part in selector.split(',')]
        return lambda obj: tuple(fn(obj) for fn in sub_fns)
    if '.' in selector:
        sub_fns = [make_selector_fn(part) for part in selector.split('.')]

        def walk(obj):
            for fn in sub_fns:
                obj = fn(obj)
            return obj
        return walk
    key = selector.strip()
    return lambda obj: obj[key]
def hashable(obj):
    """Return obj itself if it is hashable, otherwise a canonical JSON
    encoding of it (usable as a dict key / set member)."""
    try:
        hash(obj)
    except TypeError:
        return json.dumps({'_': obj}, sort_keys=True)
    return obj
class Q(object):
    """A thin wrapper around a list providing chainable query operations
    (group, map, filter, aggregate, ...)."""

    def __init__(self, list_):
        super(Q, self).__init__()
        self._list = list_

    def __len__(self):
        return len(self._list)

    def __getitem__(self, key):
        return self._list[key]

    def __eq__(self, other):
        # Allow comparison both with another Q and with a plain list.
        if isinstance(other, self.__class__):
            return self._list == other._list
        return self._list == other

    def __str__(self):
        return str(self._list)

    def __repr__(self):
        return repr(self._list)

    def _append(self, item):
        """Unsafe, be careful you know what you're doing."""
        self._list.append(item)

    def group(self, selector):
        """
        Group elements by selector and return a list of (group, group_records)
        tuples, ordered by the (hashable-encoded) group key.
        """
        selector = make_selector_fn(selector)
        buckets = {}
        for item in self._list:
            group = selector(item)
            key = hashable(group)
            if key not in buckets:
                buckets[key] = (group, Q([]))
            buckets[key][1]._append(item)
        return Q([buckets[key] for key in sorted(buckets.keys())])

    def group_map(self, selector, fn):
        """
        Group elements by selector, apply fn to each group, and return a list
        of the results.
        """
        return self.group(selector).map(fn)

    def map(self, fn):
        """
        Map fn over the elements. If fn takes multiple args, each element is
        tuple-unpacked into them.
        """
        if len(inspect.signature(fn).parameters) > 1:
            return Q([fn(*item) for item in self._list])
        return Q([fn(item) for item in self._list])

    def select(self, selector):
        selector = make_selector_fn(selector)
        return Q([selector(item) for item in self._list])

    def min(self):
        return min(self._list)

    def max(self):
        return max(self._list)

    def sum(self):
        return sum(self._list)

    def len(self):
        return len(self._list)

    def mean(self):
        # Suppress numpy's empty-slice warnings; an empty Q yields nan.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return float(np.mean(self._list))

    def std(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return float(np.std(self._list))

    def mean_std(self):
        return (self.mean(), self.std())

    def argmax(self, selector):
        selector = make_selector_fn(selector)
        return max(self._list, key=selector)

    def filter(self, fn):
        return Q([item for item in self._list if fn(item)])

    def filter_equals(self, selector, value):
        """like [x for x in y if x.selector == value]"""
        selector = make_selector_fn(selector)
        return self.filter(lambda r: selector(r) == value)

    def filter_not_none(self):
        return self.filter(lambda r: r is not None)

    def filter_not_nan(self):
        return self.filter(lambda r: not np.isnan(r))

    def flatten(self):
        return Q([item for sub in self._list for item in sub])

    def unique(self):
        """Drop duplicates (by hashable encoding), preserving first-seen
        order."""
        seen = set()
        out = []
        for item in self._list:
            key = hashable(item)
            if key not in seen:
                seen.add(key)
                out.append(item)
        return Q(out)

    def sorted(self, key=None):
        """Sort by key; NaN values compare as -inf so they sort first."""
        if key is None:
            key = lambda x: x

        def nan_safe(x):
            x = key(x)
            if isinstance(x, (np.floating, float)) and np.isnan(x):
                return float('-inf')
            return x
        return Q(sorted(self._list, key=nan_safe))
| 5,134 | 27.060109 | 79 | py |
aidgn | aidgn-main/domainbed/lib/fast_data_loader.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
class _InfiniteSampler(torch.utils.data.Sampler):
"""Wraps another Sampler to yield an infinite stream."""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
for batch in self.sampler:
yield batch
class InfiniteDataLoader:
def __init__(self, dataset, weights, batch_size, num_workers):
super().__init__()
if weights is not None:
sampler = torch.utils.data.WeightedRandomSampler(weights,
replacement=True,
num_samples=batch_size)
else:
sampler = torch.utils.data.RandomSampler(dataset,
replacement=True)
if weights == None:
weights = torch.ones(len(dataset))
batch_sampler = torch.utils.data.BatchSampler(
sampler,
batch_size=batch_size,
drop_last=True)
self._infinite_iterator = iter(torch.utils.data.DataLoader(
dataset,
num_workers=num_workers,
batch_sampler=_InfiniteSampler(batch_sampler)
))
def __iter__(self):
while True:
yield next(self._infinite_iterator)
def __len__(self):
raise ValueError
class FastDataLoader:
"""DataLoader wrapper with slightly improved speed by not respawning worker
processes at every epoch."""
def __init__(self, dataset, batch_size, num_workers):
super().__init__()
batch_sampler = torch.utils.data.BatchSampler(
torch.utils.data.RandomSampler(dataset, replacement=False),
batch_size=batch_size,
drop_last=False
)
self._infinite_iterator = iter(torch.utils.data.DataLoader(
dataset,
num_workers=num_workers,
batch_sampler=_InfiniteSampler(batch_sampler)
))
self._length = len(batch_sampler)
def __iter__(self):
for _ in range(len(self)):
yield next(self._infinite_iterator)
def __len__(self):
return self._length
| 2,156 | 28.148649 | 79 | py |
VP-Net | VP-Net-master/fusion_net.py | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 15:52:20 2021
@author: KunLi
"""
#%%
import tensorflow as tf
import numpy as np
def vp_net( n , M_input, P_input, X_output):
    """Build the n-phase unrolled VP-Net fusion graph (TF1, graph mode).

    Args:
        n: number of unrolled ISTA phases.
        M_input: low-resolution multispectral placeholder, (B, h, w, 4).
        P_input: panchromatic placeholder at 4x resolution, (B, 4h, 4w, 1).
        X_output: ground-truth placeholder (threaded through to ista_block;
            not used to build the prediction itself).

    Returns:
        (layers_predict, layers_symetric, layers_variable_Q): per-phase
        prediction tensors (index 0 is the bilinear initialization),
        per-phase symmetry-constraint residuals, and per-phase Q variables.
    """
    layers_predict = []
    layers_symetric = []
    layers_variable_Q = []
    # Initialize X0 by bilinearly upsampling the MS input 4x (method=0).
    X0 = tf.image.resize_images(M_input, size=[M_input.shape[1]*4,M_input.shape[2]*4], method=0)
    # Q0: band-mean of X0 replicated across the 4 spectral bands.
    X0_av_single = tf.reduce_mean(X0, axis=3)
    X0_av_copy = tf.tile( tf.expand_dims(X0_av_single,axis=-1) , multiples=[1, 1, 1, 4])
    Q0 = X0_av_copy
    layers_predict.append(X0)
    layers_variable_Q.append(Q0)
    for i in range(n):
        # One variable scope per phase so each phase owns its own weights.
        with tf.variable_scope('conv_%d' %i):
            [pred, sym, Q] = ista_block (layers_predict, layers_variable_Q, M_input, P_input, X_output, i)
            layers_predict .append(pred)
            layers_symetric .append(sym)
            layers_variable_Q.append(Q)
    return layers_predict, layers_symetric, layers_variable_Q
def ista_block(pred_layers, Q_layers, M_input, P_input, X_output, layer_no):
    """One unrolled ISTA phase of VP-Net (TF1 graph construction).

    Takes the previous phase's prediction (pred_layers[-1]) and Q variable
    (Q_layers[-1]), performs a gradient-style data-fidelity update followed
    by a learned soft-thresholding in a conv feature space, and returns
    [x_k, symmetry_residual, x7_ista] for this phase.

    NOTE(review): X_output and layer_no are accepted but not used inside
    this block — presumably kept for interface symmetry; confirm upstream.
    """
    filter_size = 3
    conv_size = 40
    # Learnable scalars: gradient step size, proximal weight, soft threshold.
    step_coffi = tf.Variable(0.1, dtype=tf.float32)
    mue_coffi = tf.Variable(0.05, dtype=tf.float32)
    soft_thr = tf.Variable(0.1, dtype=tf.float32)
    # Conv weights shared across phases via AUTO_REUSE in
    # add_con2d_weight_bias (names keyed by the order number).
    [Weights_, bias_] = add_con2d_weight_bias([filter_size, filter_size, 4, conv_size] , [conv_size], 4)
    [Weights0, bias0] = add_con2d_weight_bias([filter_size, filter_size, 1, conv_size] , [conv_size], 0)
    [Weights1, bias1] = add_con2d_weight_bias([filter_size, filter_size, conv_size, conv_size], [conv_size], 1)
    [Weights11, bias11] = add_con2d_weight_bias([filter_size, filter_size, conv_size, conv_size], [conv_size], 11)
    [Weights2, bias2] = add_con2d_weight_bias([filter_size, filter_size, conv_size, conv_size], [conv_size], 2)
    [Weights22, bias22] = add_con2d_weight_bias([filter_size, filter_size, conv_size, conv_size], [conv_size], 22)
    [Weights3, bias3] = add_con2d_weight_bias([filter_size, filter_size, conv_size, 4] , [4] , 3)
    # Data-fidelity step: downsample the current estimate, compare with the
    # observed MS image, upsample the residual and step along it.
    down_input = tf.image.resize_images(pred_layers[-1], size=[(pred_layers[-1].shape[1])//4,(pred_layers[-1].shape[2])//4], method=0)
    down_input_M_input = down_input - M_input
    UP_down_input_M_input = tf.image.resize_images(down_input_M_input, size=[(down_input_M_input.shape[1])*4,(down_input_M_input.shape[2])*4], method=0)
    UP_down_input_M_input = tf.scalar_mul(step_coffi, UP_down_input_M_input )
    r_k = pred_layers[-1] - UP_down_input_M_input
    # Spectral-ratio operators R and R_T built from the upsampled MS image
    # (0.1 added to denominators to avoid division by zero).
    up_M = tf.image.resize_images(M_input, size=[(M_input.shape[1])*4,(M_input.shape[2])*4], method=0)
    up_M_av = tf.reduce_mean(up_M, axis=3)
    up_M_av_copy = tf.tile( tf.expand_dims(up_M_av,axis=-1) , multiples=[1, 1, 1, 4])
    R_T = tf.div(up_M , tf.add(up_M_av_copy, 0.1) )
    R_T_Q = tf.multiply(Q_layers[-1] , R_T)
    # Proximal combination of the gradient step r_k and the Q prior.
    x_k = tf.div( r_k + tf.scalar_mul(mue_coffi, R_T_Q) , (1 + mue_coffi))
    R = tf.div(up_M_av_copy , tf.add(up_M, 0.1) )
    R_X = tf.multiply(x_k , R)
    # % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % %
    # Forward transform F(.) applied to R_X ...
    x3_ista = tf.nn.conv2d(R_X, Weights_, strides=[1, 1, 1, 1], padding='SAME')   # —— Weights_ ——>> R_X to use
    x4_ista = tf.nn.relu(tf.nn.conv2d(x3_ista, Weights1, strides=[1, 1, 1, 1], padding='SAME'))  # —— Weights1
    x44_ista = tf.nn.conv2d(x4_ista, Weights11, strides=[1, 1, 1, 1], padding='SAME')   # —— Weights11 ——>> F(R_X)
    # ... and to the PAN image P.
    xp_ista = tf.nn.conv2d(P_input, Weights0, strides=[1, 1, 1, 1], padding='SAME')    # —— Weights0
    xpp_ista = tf.nn.relu(tf.nn.conv2d(xp_ista, Weights1, strides=[1, 1, 1, 1], padding='SAME'))
    xppp_ista = tf.nn.conv2d(xpp_ista, Weights11, strides=[1, 1, 1, 1], padding='SAME')   # —— Weights11 ——>> F(p)
    Frk_Fp = x44_ista - xppp_ista
    # soft(): soft-threshold the difference F(R_X) - F(p), then add F(p) back.
    x5_ista = tf.multiply(tf.sign(x44_ista), tf.nn.relu(tf.abs(Frk_Fp) - soft_thr))    # ——>> F(rk) - F(p)
    x55_ista = x5_ista + xppp_ista
    # F(x)^hat: inverse transform back to image space, with a residual add.
    x6_ista = tf.nn.relu(tf.nn.conv2d(x55_ista, Weights2, strides=[1, 1, 1, 1], padding='SAME'))
    x66_ista = tf.nn.conv2d(x6_ista, Weights22, strides=[1, 1, 1, 1], padding='SAME')    # —— Weights22 ——>> x_k
    x7_ista = tf.nn.conv2d(x66_ista, Weights3, strides=[1, 1, 1, 1], padding='SAME')    # —— Weights3 ——>> x_k
    x7_ista = x7_ista + R_X
    # Symmetry branch: applying the inverse transform to the forward
    # features should reproduce them; the residual is penalized in the loss.
    x3_ista_sym = tf.nn.relu(tf.nn.conv2d(x3_ista, Weights1, strides=[1, 1, 1, 1], padding='SAME'))
    x4_ista_sym = tf.nn.conv2d(x3_ista_sym, Weights11, strides=[1, 1, 1, 1], padding='SAME')
    x6_ista_sym = tf.nn.relu(tf.nn.conv2d(x4_ista_sym, Weights2, strides=[1, 1, 1, 1], padding='SAME'))
    x7_ista_sym = tf.nn.conv2d(x6_ista_sym, Weights22, strides=[1, 1, 1, 1], padding='SAME')
    # systematic
    x11_ista = x7_ista_sym - x3_ista
    return [x_k, x11_ista, x7_ista]
def add_con2d_weight_bias(w_shape, b_shape, order_no):
    """Create — or fetch, via AUTO_REUSE — a conv weight/bias pair.

    All weights live in the shared scope 'weight_bias_scope' and are keyed
    only by `order_no`, so calls with the same order number (e.g. from
    different unrolled phases) reuse the same Weights variable. Note the
    bias is a plain tf.Variable, so it is NOT reused across calls.
    """
    with tf.variable_scope('weight_bias_scope', reuse=tf.AUTO_REUSE ):
        Weights = tf.get_variable(shape=w_shape, initializer=tf.contrib.layers.xavier_initializer_conv2d(), name='Weights_%d' % order_no)
        biases = tf.Variable(tf.random_normal(b_shape, stddev=0.05), name='biases_%d' % order_no)
    return [Weights, biases]
def compute_cost(Prediction,ListX ,P_input ,X_output, PhaseNumber):
    """Build the two training losses for the unrolled network.

    Args:
        Prediction: per-phase prediction tensors; only the final one is
            compared against the ground truth.
        ListX: per-phase symmetry residuals from ista_block.
        P_input: PAN placeholder (unused here; kept for interface stability).
        X_output: ground-truth placeholder.
        PhaseNumber: number of phases (length of ListX to accumulate).

    Returns:
        [cost, cost_sym]: MSE of the final prediction, and the summed mean
        squared symmetry residual across phases.
    """
    cost_sym = 0
    for k in range(PhaseNumber):
        cost_sym += tf.reduce_mean(tf.square(ListX[k]))
    cost = tf.reduce_mean(tf.square(Prediction[-1] - X_output))
    # FIX: the original return line was corrupted with stray text
    # ("| 6,359 | 41.4 | ...", a data-dump artifact), making it a syntax
    # error; a leftover debug print of type(cost) was also removed.
    return [cost, cost_sym]
VP-Net | VP-Net-master/test.py | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 21 11:43:18 2021
@author: KunLi
"""
import os
import numpy as np
import scipy.io as sio
# =============================================================================
# ms_path = 'E:\datasets\\3_QB-Wuhan\\crop_xj_30_01_smooth_down\mat10'
# ms_file_list = os.listdir(ms_path)
# ms_file_list.sort(key=lambda x:int(x.split('.')[0].split('QB_WH_down_')[1]))
# used_ms = []
# used_pan = []
# used_ref = []
# for file in ms_file_list:
# if not os.path.isdir(file):
# mat_data = sio.loadmat(ms_path+"/"+file)
# mat_ms = mat_data['I_MS_LR']
# used_ms.append(mat_ms)
# mat_pan = mat_data['I_PAN_LR']
# used_pan.append(mat_pan)
# mat_ref = mat_data['Ref']
# used_ref.append(mat_ref)
# =============================================================================
# ms_path = 'E:\\datasets\\4_GF1_GF2\GF2\\crop_xj_smooth_down\\mat161_304_for_test'
# we give 2 test examples (from GF2 satellite)
# Collect the test samples (low-res MS, low-res PAN, reference) from every
# .mat file under ./data, ordered by the numeric suffix in the file name
# (GF2_300_<k>.mat).
ms_path = './data'
ms_file_list = os.listdir(ms_path)
ms_file_list.sort(key=lambda name: int(name.split('.')[0].split('GF2_300_')[1]))
used_ms = []
used_pan = []
used_ref = []
for fname in ms_file_list:
    if os.path.isdir(fname):
        continue
    mat_data = sio.loadmat(ms_path + "/" + fname)
    used_ms.append(mat_data['I_MS_LR'])
    used_pan.append(mat_data['I_PAN_LR'])
    used_ref.append(mat_data['Ref'])
# ===============================================================================================
# ===============================================================================================
# Restore the latest VP-Net checkpoint, fuse every loaded test pair, save the
# fused images/.mat files, and log reference / no-reference quality metrics.
model_path = './checkpoint/'
save_path = 'test_1/'  # folder name (per test dataset) for all saved results
import os
import cv2
import numpy as np
import scipy.io as sio
import tensorflow as tf
from utils import downgrade_images
from fusion_net import vp_net, compute_cost
from metrics import ref_evaluate , no_ref_evaluate
tf.reset_default_graph()
# Number of unrolled phases; must match the trained checkpoint.
PhaseNumber =7
test_label = np.zeros((300, 300, 4), dtype = 'float32')
# Graph inputs: 300x300 fused output / PAN, 75x75 MS (4x upsampling factor).
X_output = tf.placeholder(tf.float32, shape=(1, 300, 300, 4))
P_input = tf.placeholder(tf.float32, shape=(1, 300, 300, 1))
M_input = tf.placeholder(tf.float32, shape=(1, 75, 75, 4))
PredX , ListX ,Q = vp_net( PhaseNumber, M_input, P_input, X_output )
config = tf.ConfigProto(allow_soft_placement=True,log_device_placement=True)
config.gpu_options.allow_growth = True
saver = tf.train.Saver(max_to_keep = 5)
import time
time_all =[]
with tf.Session(config=config) as sess:
    ckpt = tf.train.latest_checkpoint(model_path)
    saver.restore(sess, ckpt)
    # for num in range(5):
    for num in range(len(used_pan)):
        HR_Ref = used_ref[num]  # ground-truth reference; already normalized and downsampled
        LR_ms = used_ms[num]
        LR_pan = used_pan[num]
        LR_pan = np.expand_dims(LR_pan, -1)
        # Add the batch dimension expected by the placeholders.
        LR_ms_test = np.expand_dims(LR_ms, 0)
        LR_pan_test = np.expand_dims(LR_pan, 0)
        batch_M = LR_ms_test
        batch_P = LR_pan_test
        # Run the final-phase prediction and time the forward pass.
        time_start = time.time()
        one , _ = sess.run([PredX[-1], ListX[-1]], feed_dict={M_input: batch_M, P_input: batch_P} )
        time_end = time.time()
        time_c = time_end - time_start
        print('time cost', time_c, 's')
        time_all.append(time_c)
        # Clamp to [0, 1] and drop the batch dimension.
        one = np.clip(one, 0, 1)
        test_label = one[0,:,:,:]
        print('test_label',test_label.shape)
        save_testimage_dir='./test_imgs/' +save_path
        save_test_mat_dir='./test_mats/' +save_path
        if not os.path.exists(save_testimage_dir):
            os.makedirs(save_testimage_dir)
        if not os.path.exists(save_test_mat_dir):
            os.makedirs(save_test_mat_dir)
        # Save 8-bit previews of the fused result and the reference
        # (first three bands only).
        cv2.imwrite (save_testimage_dir +'%d_test.png'%(num+1) ,np.uint8(255*test_label)[:, :, [0,1,2]] )
        cv2.imwrite (save_testimage_dir +'%d_ms.png'%(num+1) , np.uint8(255*HR_Ref) [:, :, [0,1,2]] )
        # save mat
        sio.savemat (save_test_mat_dir +'Variation_%d.mat'%(num+1), { 'ref':np.uint8(255*HR_Ref), 'fusion':np.uint8(255*test_label)} )
        gt = HR_Ref
        ref_results={}
        ref_results.update({'metrics: ':' PSNR, SSIM, SAM, ERGAS, SCC, Q, RMSE'})
        no_ref_results={}
        no_ref_results.update({'metrics: ':' D_lamda, D_s, QNR'})
        # Reference metrics compare against the ground truth; no-reference
        # metrics only use the inputs.
        temp_ref_results = ref_evaluate( np.uint8(255*test_label), np.uint8(255*HR_Ref) )
        temp_no_ref_results = no_ref_evaluate( test_label, LR_pan , LR_ms )
        ref_results .update({'xxx ':temp_ref_results})
        no_ref_results.update({'xxx ':temp_no_ref_results})
        save_testlog_dir='./test_logs/' + save_path
        if not os.path.exists(save_testlog_dir):
            os.makedirs(save_testlog_dir)
        lk_output_file_ref = save_testlog_dir+"ref_test_1.txt"
        lk_output_file_no_ref = save_testlog_dir+"no_ref_test_1.txt"
        print('################## reference #######################')
        # Entry 0 is the header row; subsequent entries hold metric values,
        # which are printed and appended to the per-image log file.
        for index, i in enumerate(ref_results):
            if index == 0:
                print(i, ref_results[i])
            else:
                print(i, [round(j, 4) for j in ref_results[i]])
                list2str= str([ round(j, 4) for j in ref_results[i] ])
                list2str= ('%d '+ list2str+'\n')%(num+1)
                lk_output_file = open(lk_output_file_ref, 'a')
                lk_output_file.write(list2str)
                lk_output_file.close()
        print('################## no reference ####################')
        for index, i in enumerate(no_ref_results):
            if index == 0:
                print(i, no_ref_results[i])
            else:
                print(i, [round(j, 4) for j in no_ref_results[i]])
                list2str= str([ round(j, 4) for j in no_ref_results[i] ])
                list2str=('%d '+ list2str+'\n')%(num+1)
                lk_output_file = open(lk_output_file_no_ref, 'a')
                lk_output_file.write(list2str)
                lk_output_file.close()
        print('#####################################################')
        print('test finished')
| 7,307 | 34.475728 | 134 | py |
VP-Net | VP-Net-master/utils.py | # -*- coding: utf-8 -*-
"""
License: MIT
@author: gaj
E-mail: anjing_guo@hnu.edu.cn
"""
import cv2
import numpy as np
from scipy import ndimage
from scipy import signal
import scipy.misc as misc
def upsample_bilinear(image, ratio):
    """Upsample an (H, W, C) image by an integer `ratio` with bilinear
    interpolation.

    BUG FIX: cv2.resize's third positional parameter is `dst` (an output
    array), not `interpolation`, so the original call passed
    cv2.INTER_LINEAR where the destination buffer belongs. The flag is now
    passed by keyword so bilinear interpolation is actually selected.
    """
    h, w, c = image.shape
    return cv2.resize(image, (w * ratio, h * ratio),
                      interpolation=cv2.INTER_LINEAR)
def upsample_bicubic(image, ratio):
    """Upsample an (H, W, C) image by an integer `ratio` with bicubic
    interpolation.

    BUG FIX: same issue as upsample_bilinear — cv2.resize's third positional
    parameter is `dst`, not `interpolation`; pass the flag by keyword.
    """
    h, w, c = image.shape
    return cv2.resize(image, (w * ratio, h * ratio),
                      interpolation=cv2.INTER_CUBIC)
def upsample_interp23(image, ratio):
    """Upsample an (H, W, C) image by `ratio` (a power of 2) using the
    separable 23-tap CDF interpolation filter, doubling the size at each of
    log2(ratio) stages.

    NOTE(review): np.int was removed in NumPy 1.24 — the cast below needs
    porting to int() on modern NumPy. Also assumes ratio is a power of two.
    """
    image = np.transpose(image, (2, 0, 1))

    b,r,c = image.shape

    # Half of the symmetric 23-tap interpolation kernel (even taps are 0).
    CDF23 = 2*np.array([0.5, 0.305334091185, 0, -0.072698593239, 0, 0.021809577942, 0, -0.005192756653, 0, 0.000807762146, 0, -0.000060081482])
    d = CDF23[::-1]    # reversed copy of the taps
    # Mirror the taps to build the full symmetric kernel.
    CDF23 = np.insert(CDF23, 0, d[:-1])
    BaseCoeff = CDF23

    first = 1
    for z in range(1,np.int(np.log2(ratio))+1):

        I1LRU = np.zeros((b, 2**z*r, 2**z*c))

        # First stage writes samples on the odd grid, later stages on the
        # even grid (matching the MATLAB interp23 convention).
        if first:
            I1LRU[:, 1:I1LRU.shape[1]:2, 1:I1LRU.shape[2]:2]=image
            first = 0
        else:
            I1LRU[:,0:I1LRU.shape[1]:2,0:I1LRU.shape[2]:2]=image

        # Apply the 1-D kernel along rows then columns of each band
        # (separable filtering, wrap-around boundary).
        for ii in range(0,b):
            t = I1LRU[ii,:,:]
            for j in range(0,t.shape[0]):
                t[j,:]=ndimage.correlate(t[j,:],BaseCoeff,mode='wrap')
            for k in range(0,t.shape[1]):
                t[:,k]=ndimage.correlate(t[:,k],BaseCoeff,mode='wrap')
            I1LRU[ii,:,:]=t
        image = I1LRU

    re_image=np.transpose(I1LRU, (1, 2, 0))

    return re_image
def upsample_mat_interp23(image, ratio=4):
    '''2 pixel shift compare with original matlab version'''
    # Place the low-res samples on a grid offset by `shift`, then convolve
    # with the separable 2-D interpolation kernel (wrap-around boundary).
    shift = 2
    h, w, c = image.shape

    # 1-D 23-tap interpolation filter sampled at the upsampled rate.
    basecoeff = np.array([[-4.63495665e-03, -3.63442646e-03, 3.84904063e-18,
        5.76678319e-03, 1.08358664e-02, 1.01980790e-02,
        -9.31747402e-18, -1.75033181e-02, -3.17660068e-02,
        -2.84531643e-02, 1.85181518e-17, 4.42450253e-02,
        7.71733386e-02, 6.70554910e-02, -2.85299239e-17,
        -1.01548683e-01, -1.78708388e-01, -1.60004642e-01,
        3.61741232e-17, 2.87940558e-01, 6.25431459e-01,
        8.97067600e-01, 1.00107877e+00, 8.97067600e-01,
        6.25431459e-01, 2.87940558e-01, 3.61741232e-17,
        -1.60004642e-01, -1.78708388e-01, -1.01548683e-01,
        -2.85299239e-17, 6.70554910e-02, 7.71733386e-02,
        4.42450253e-02, 1.85181518e-17, -2.84531643e-02,
        -3.17660068e-02, -1.75033181e-02, -9.31747402e-18,
        1.01980790e-02, 1.08358664e-02, 5.76678319e-03,
        3.84904063e-18, -3.63442646e-03, -4.63495665e-03]])
    # Separable kernel: outer product of the 1-D filter with itself.
    coeff = np.dot(basecoeff.T, basecoeff)

    upsampled = np.zeros((ratio * h, ratio * w, c))
    upsampled[shift::ratio, shift::ratio, :] = image

    for band in range(c):
        upsampled[:, :, band] = ndimage.convolve(
            upsampled[:, :, band], coeff, mode='wrap')

    return upsampled
def gaussian2d(N, std):
    """Return an N x N separable Gaussian window with standard deviation
    `std`, evaluated on an integer grid centered at 0 (for odd N).

    BUG FIX (Python 2 -> 3): the grid was built with true division
    (`-(N-1)/2`, `(N+2)/2`). Under Python 3 that yields N+1 sample points
    and an asymmetric grid for every N; the original code relied on
    Python 2 integer division. Floor division restores the intended
    N-point grid (e.g. -20..20 for the N=41 used by downgrade_images).
    """
    t = np.arange(-(N - 1) // 2, (N + 2) // 2)
    t1, t2 = np.meshgrid(t, t)
    std = np.double(std)
    w = np.exp(-0.5 * (t1 / std) ** 2) * np.exp(-0.5 * (t2 / std) ** 2)
    return w
def kaiser2d(N, beta):
    """Return an N x N rotationally symmetric Kaiser window: the 1-D
    np.kaiser(N, beta) window interpolated radially, zero outside its
    support.

    BUG FIX (Python 2 -> 3): the grid used true division, producing N+1
    radial sample positions against the N-point 1-D window, so np.interp
    raised ("fp and xp are not of the same length") under Python 3. Floor
    division restores the original Python-2 N-point grid.
    """
    t = np.arange(-(N - 1) // 2, (N + 2) // 2) / np.double(N - 1)
    t1, t2 = np.meshgrid(t, t)
    t12 = np.sqrt(t1 * t1 + t2 * t2)
    w1 = np.kaiser(N, beta)
    w = np.interp(t12, t, w1)
    # Zero everything outside the radial support of the 1-D window.
    w[t12 > t[-1]] = 0
    w[t12 < t[0]] = 0
    return w
def fir_filter_wind(Hd, w):
    """
    compute fir filter with window method
    Hd:     desired frequency response (2D)
    w:      window (2D)

    Returns the windowed impulse response, normalized to unit DC gain
    (may be complex; callers typically take np.real).
    """
    # Undo the fftshift-with-rotation layout, go to the spatial domain,
    # rotate back, then window and normalize.
    hd = np.rot90(np.fft.fftshift(np.rot90(Hd, 2)), 2)
    h = np.rot90(np.fft.fftshift(np.fft.ifft2(hd)), 2) * w
    return h / np.sum(h)
def downgrade_images(I_MS, I_PAN, ratio, sensor=None):
    """
    downgrade MS and PAN by a ratio factor with given sensor's gains

    Args:
        I_MS: multispectral image, (H, W, B).
        I_PAN: panchromatic image, (H*, W*) or with a trailing singleton dim.
        ratio: downsampling factor (e.g. 4).
        sensor: one of 'QB', 'IKONOS', 'GeoEye1', 'WV2', 'WV3' to use that
            sensor's MTF gains; anything else falls back to a plain
            Gaussian low-pass + decimation.

    Returns:
        (I_MS_LR, I_PAN_LR): the downgraded MS image (H/ratio, W/ratio, B)
        and PAN image with a trailing singleton channel dim.

    NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 — the MTF
    (flag_resize_new == 2) PAN branch needs porting before it can run on a
    modern SciPy.
    """
    I_MS=np.double(I_MS)
    I_PAN=np.double(I_PAN)
    # Work band-first internally; transposed back before returning.
    I_MS = np.transpose(I_MS, (2, 0, 1))   #
    I_PAN = np.squeeze(I_PAN)  # drop singleton dimensions
    ratio=np.double(ratio)
    flag_PAN_MTF=0
    # Per-sensor Nyquist gains for the MTF-matched filters.
    if sensor=='QB':
        flag_resize_new = 2
        GNyq = np.asarray([0.34, 0.32, 0.30, 0.22],dtype='float32')    # Band Order: B,G,R,NIR
        GNyqPan = 0.15
    elif sensor=='IKONOS':
        flag_resize_new = 2        #MTF usage
        GNyq = np.asarray([0.26,0.28,0.29,0.28],dtype='float32')    # Band Order: B,G,R,NIR
        GNyqPan = 0.17;
    elif sensor=='GeoEye1':
        flag_resize_new = 2     # MTF usage
        GNyq = np.asarray([0.23,0.23,0.23,0.23],dtype='float32')    # Band Order: B,G,R,NIR
        GNyqPan = 0.16
    elif sensor=='WV2':
        flag_resize_new = 2     # MTF usage
        GNyq = [0.35,0.35,0.35,0.35,0.35,0.35,0.35,0.27]
        GNyqPan = 0.11
    elif sensor=='WV3':
        flag_resize_new = 2     #MTF usage
        GNyq = 0.29 * np.ones(8)
        GNyqPan = 0.15
    else:
        '''the default way'''
        flag_resize_new = 1
    '''the default downgrading method is gaussian'''
    if flag_resize_new == 1:
        # Gaussian low-pass (9x9 kernel matched to the ratio) followed by
        # plain decimation, applied per band and to the PAN image.
#        I_MS_LP = np.zeros((I_MS.shape[0],int(np.round(I_MS.shape[1]/ratio)+ratio),int(np.round(I_MS.shape[2]/ratio)+ratio)))
#
#        for idim in range(I_MS.shape[0]):
#            imslp_pad=np.pad(I_MS[idim,:,:],int(2*ratio),'symmetric')
#            I_MS_LP[idim,:,:]=misc.imresize(imslp_pad,1/ratio,'bicubic',mode='F')
#
#        I_MS_LR = I_MS_LP[:,2:-2,2:-2]
#
#        I_PAN_pad=np.pad(I_PAN,int(2*ratio),'symmetric')
#        I_PAN_LR=misc.imresize(I_PAN_pad,1/ratio,'bicubic',mode='F')
#        I_PAN_LR=I_PAN_LR[2:-2,2:-2]
        sig = (1/(2*(2.772587)/ratio**2))**0.5
        kernel = np.multiply(cv2.getGaussianKernel(9, sig), cv2.getGaussianKernel(9,sig).T)
        t=[]
        for i in range(I_MS.shape[0]):
            temp = signal.convolve2d(I_MS[i, :, :], kernel, mode='same', boundary = 'wrap')
            temp = temp[0::int(ratio), 0::int(ratio)]
            temp = np.expand_dims(temp, 0)
            t.append(temp)
        I_MS_LR = np.concatenate(t, axis=0)
        I_PAN_LR = signal.convolve2d(I_PAN, kernel, mode='same', boundary = 'wrap')
        I_PAN_LR = I_PAN_LR[0::int(ratio), 0::int(ratio)]  #[0::4]  # take every ratio-th sample, starting at index 0
    elif flag_resize_new==2:
        # MTF-matched filtering: per-band Gaussian shaped so its gain at
        # Nyquist equals the sensor's GNyq, windowed by a Kaiser window.
        N=41
        I_MS_LP=np.zeros(I_MS.shape)
        fcut=1/ratio
        for j in range(I_MS.shape[0]):
            #fir filter with window method
            alpha = np.sqrt(((N-1)*(fcut/2))**2/(-2*np.log(GNyq[j])))
            H=gaussian2d(N,alpha)
            Hd=H/np.max(H)
            w=kaiser2d(N,0.5)
            h=fir_filter_wind(Hd,w)
            I_MS_LP[j,:,:] = ndimage.filters.correlate(I_MS[j,:,:],np.real(h),mode='nearest')
        if flag_PAN_MTF==1:
            #fir filter with window method
            alpha = np.sqrt(((N-1)*(fcut/2))**2/(-2*np.log(GNyqPan)))
            H=gaussian2d(N,alpha)
            Hd=H/np.max(H)
            h=fir_filter_wind(Hd,w)
            I_PAN = ndimage.filters.correlate(I_PAN,np.real(h),mode='nearest')
            I_PAN_LR=I_PAN[int(ratio/2):-1:int(ratio),int(ratio/2):-1:int(ratio)]
        else:
            #bicubic resize
            I_PAN_pad=np.pad(I_PAN,int(2*ratio),'symmetric')
            I_PAN_LR=misc.imresize(I_PAN_pad,1/ratio,'bicubic',mode='F')
            I_PAN_LR=I_PAN_LR[2:-2,2:-2]
        # Decimate the filtered MS bands with a half-ratio phase offset.
        I_MS_LR=I_MS_LP[:,int(ratio/2):-1:int(ratio),int(ratio/2):-1:int(ratio)]
    I_MS_LR = np.transpose(I_MS_LR, (1, 2, 0))
    I_PAN_LR = np.expand_dims(I_PAN_LR, -1)
    return I_MS_LR,I_PAN_LR
def downgrade_pan_3d(I_MS , I_PAN, ratio, sensor=None):
    """
    downgrade MS and PAN by a ratio factor with given sensor's gains

    3-D variant: unlike the 2-D version above, the PAN input keeps its channel
    axis, is filtered per channel, and is returned as (H/ratio, W/ratio, C).
    MS and PAN are expected channel-last on input; they are transposed to
    channel-first internally and transposed back before returning.
    For known sensors an MTF-matched Gaussian filter is used (flag 2);
    otherwise a plain Gaussian lowpass + decimation is used (flag 1).
    """
    I_MS=np.double(I_MS)
    I_PAN=np.double(I_PAN)
    I_MS = np.transpose(I_MS, (2, 0, 1)) # channel-last -> channel-first
    #I_PAN = np.squeeze(I_PAN) # (2-D version) removed singleton axes from the PAN shape
    I_PAN = np.transpose(I_PAN, (2, 0, 1)) # PAN keeps its channel axis in this 3-D variant
    ratio=np.double(ratio)
    flag_PAN_MTF=0
    if sensor=='QB':
        flag_resize_new = 2
        GNyq = np.asarray([0.34, 0.32, 0.30, 0.22],dtype='float32')    # Band Order: B,G,R,NIR
        GNyqPan = 0.15
    elif sensor=='IKONOS':
        flag_resize_new = 2         #MTF usage
        GNyq = np.asarray([0.26,0.28,0.29,0.28],dtype='float32')    # Band Order: B,G,R,NIR
        GNyqPan = 0.17;
    elif sensor=='GeoEye1':
        flag_resize_new = 2     # MTF usage
        GNyq = np.asarray([0.23,0.23,0.23,0.23],dtype='float32')    # Band Order: B,G,R,NIR
        GNyqPan = 0.16
    elif sensor=='WV2':
        flag_resize_new = 2     # MTF usage
        GNyq = [0.35,0.35,0.35,0.35,0.35,0.35,0.35,0.27]
        GNyqPan = 0.11
    elif sensor=='WV3':
        flag_resize_new = 2     #MTF usage
        GNyq = 0.29 * np.ones(8)
        GNyqPan = 0.15
    else:
        '''the default way'''
        flag_resize_new = 1
    '''the default downgrading method is gaussian'''
    if flag_resize_new == 1:
        # (old bicubic-resize path kept for reference)
        # I_MS_LP = np.zeros((I_MS.shape[0],int(np.round(I_MS.shape[1]/ratio)+ratio),int(np.round(I_MS.shape[2]/ratio)+ratio)))
        #
        # for idim in range(I_MS.shape[0]):
        #     imslp_pad=np.pad(I_MS[idim,:,:],int(2*ratio),'symmetric')
        #     I_MS_LP[idim,:,:]=misc.imresize(imslp_pad,1/ratio,'bicubic',mode='F')
        #
        # I_MS_LR = I_MS_LP[:,2:-2,2:-2]
        #
        # I_PAN_pad=np.pad(I_PAN,int(2*ratio),'symmetric')
        # I_PAN_LR=misc.imresize(I_PAN_pad,1/ratio,'bicubic',mode='F')
        # I_PAN_LR=I_PAN_LR[2:-2,2:-2]
        # Gaussian lowpass whose std matches a -3 dB cutoff at 1/ratio
        sig = (1/(2*(2.772587)/ratio**2))**0.5
        kernel = np.multiply(cv2.getGaussianKernel(9, sig), cv2.getGaussianKernel(9,sig).T)
        t=[]
        for i in range(I_MS.shape[0]):
            temp = signal.convolve2d(I_MS[i, :, :], kernel, mode='same', boundary = 'wrap')
            temp = temp[0::int(ratio), 0::int(ratio)]  # decimate: keep every ratio-th sample
            temp = np.expand_dims(temp, 0)
            t.append(temp)
        I_MS_LR = np.concatenate(t, axis=0)
        # filter and decimate each PAN channel the same way as the MS bands
        tt=[]
        for i in range(I_PAN.shape[0]):
            temp = signal.convolve2d(I_PAN[i, :, :], kernel, mode='same', boundary = 'wrap')
            temp = temp[0::int(ratio), 0::int(ratio)]
            temp = np.expand_dims(temp, 0)
            tt.append(temp)
        I_PAN_LR = np.concatenate(tt, axis=0)
    elif flag_resize_new==2:
        N=41
        I_MS_LP=np.zeros(I_MS.shape)
        fcut=1/ratio
        for j in range(I_MS.shape[0]):
            #fir filter with window method
            alpha = np.sqrt(((N-1)*(fcut/2))**2/(-2*np.log(GNyq[j])))
            H=gaussian2d(N,alpha)
            Hd=H/np.max(H)
            w=kaiser2d(N,0.5)
            h=fir_filter_wind(Hd,w)
            I_MS_LP[j,:,:] = ndimage.filters.correlate(I_MS[j,:,:],np.real(h),mode='nearest')
        if flag_PAN_MTF==1:
            #fir filter with window method
            # NOTE(review): reuses `w` from the MS loop above; unbound if the MS
            # image has zero bands — confirm inputs always have >= 1 band.
            alpha = np.sqrt(((N-1)*(fcut/2))**2/(-2*np.log(GNyqPan)))
            H=gaussian2d(N,alpha)
            Hd=H/np.max(H)
            h=fir_filter_wind(Hd,w)
            I_PAN = ndimage.filters.correlate(I_PAN,np.real(h),mode='nearest')
            I_PAN_LR=I_PAN[int(ratio/2):-1:int(ratio),int(ratio/2):-1:int(ratio)]
        else:
            #bicubic resize
            # NOTE(review): I_PAN is channel-first 3-D here, but this branch
            # treats it as 2-D (misc.imresize / 2-D slicing); also
            # scipy.misc.imresize was removed in SciPy 1.3 — confirm this
            # branch is ever reached for the 3-D variant.
            I_PAN_pad=np.pad(I_PAN,int(2*ratio),'symmetric')
            I_PAN_LR=misc.imresize(I_PAN_pad,1/ratio,'bicubic',mode='F')
            I_PAN_LR=I_PAN_LR[2:-2,2:-2]
        I_MS_LR=I_MS_LP[:,int(ratio/2):-1:int(ratio),int(ratio/2):-1:int(ratio)]
    # channel-first -> channel-last for both outputs
    I_MS_LR = np.transpose(I_MS_LR, (1, 2, 0))
    I_PAN_LR = np.transpose(I_PAN_LR, (1, 2, 0))
    # I_PAN_LR = np.expand_dims(I_PAN_LR, -1)  # not needed: PAN already has a channel axis
return I_MS_LR,I_PAN_LR | 13,092 | 34.675749 | 296 | py |
VP-Net | VP-Net-master/data_prepare.py | # something about making dataset
def single_mat_64_no_down(used_ref, used_ms, used_pan):
    """Cut one (ref, ms, pan) image triple into aligned training patches.

    The reference and PAN images are tiled with 64x64 windows advanced by a
    stride of 24 pixels; the low-resolution MS image is sampled with the
    matching 16x16 windows (window coordinates divided by the scale factor 4).
    Inputs are assumed to be normalised already.

    Returns three float32 arrays shaped (n, 64, 64, C_ref), (n, 64, 64, C_pan)
    and (n, 16, 16, C_ms).
    """
    patch = 64   # high-resolution window size
    step = 24    # sliding-window stride
    scale = 4    # PAN / MS resolution ratio
    ref_patches, pan_patches, ms_patches = [], [], []
    for row in range(0, used_pan.shape[0] - patch + 1, step):
        for col in range(0, used_pan.shape[1] - patch + 1, step):
            ref_patches.append(used_ref[row:row + patch, col:col + patch, :])
            pan_patches.append(used_pan[row:row + patch, col:col + patch, :])
            # the LR MS window is the HR window scaled down by `scale`
            ms_patches.append(used_ms[row // scale:(row + patch) // scale,
                                      col // scale:(col + patch) // scale, :])
    return (np.array(ref_patches, dtype='float32'),
            np.array(pan_patches, dtype='float32'),
            np.array(ms_patches, dtype='float32'))
# =======================================================================================================
import os
import numpy as np
import scipy.io as sio
# Accumulators for the per-scene MS / PAN / reference arrays loaded below.
used_ms = []
used_pan = []
used_ref = []
path = 'E:\datasets\\4_GF1_GF2\\GF2\\crop_xj_smooth_down\\mat60'
file_list = os.listdir(path)
# Sort scenes numerically by the index embedded in 'GF2_300_<n>.mat'.
file_list.sort(key=lambda x:int(x.split('.')[0].split('GF2_300_')[1]))
for file in file_list:
    if not os.path.isdir(file):
        mat_data = sio.loadmat(path+"/"+file)
        mat_ms = mat_data['I_MS_LR']
        used_ms.append(mat_ms)
        mat_pan = mat_data['I_PAN_LR']
        # PAN is stored 2-D in the .mat file; add a trailing channel axis.
        mat_pan = np.expand_dims(mat_pan,-1)
        used_pan.append(mat_pan)
        mat_ref = mat_data['Ref']
        used_ref.append(mat_ref)
# used_pan = np.vstack((used_pan,mat_pan))
print('used_ms.len'  ,len(used_ms))
print('used_pan.len',len(used_pan))
print('used_ref.len'  ,len(used_ref))
# Crop every scene into aligned 64x64 / 16x16 patches and stack them.
original_used_MS, downgrade_PAN, downgrade_MS = single_mat_64_no_down (used_ref[0],used_ms[0],  used_pan[0] )
for i in range(1, len(used_pan)):
    for_original_used_MS, for_downgrade_PAN, for_downgrade_MS = single_mat_64_no_down (used_ref[i], used_ms[i], used_pan[i] )
    original_used_MS = np.concatenate( (original_used_MS, for_original_used_MS) ,axis = 0)
    downgrade_PAN = np.concatenate( (downgrade_PAN, for_downgrade_PAN)  ,axis = 0)
    downgrade_MS = np.concatenate( (downgrade_MS, for_downgrade_MS)  ,axis = 0)
import random
# Shuffle the patch order so training batches mix scenes.
index = [i for i in range(original_used_MS.shape[0])]
random.shuffle(index)
random.shuffle(index)
train_original_used_MS = original_used_MS [index, :, :, :]
train_downgrade_PAN = downgrade_PAN  [index, :, :, :]
train_downgrade_MS = downgrade_MS   [index, :, :, :]
# Persist the shuffled training set as one .mat with 'ms'/'pan'/'ref' keys.
sio.savemat('E:\GF2_train_from_single_pic_no_down_no_normal_60.mat', dict([('ms', train_downgrade_MS), ('pan', train_downgrade_PAN),('ref',train_original_used_MS)]) )
########################################################################################################################################
########################################################################################################################################
def prepare_input_64(used_ms, used_pan):
    """Normalise a full-scale (MS, PAN) pair, simulate the reduced-resolution
    inputs, crop aligned 64x64 training patches and shuffle them.

    used_ms:  (H, W, C) multispectral image (used as the training target).
    used_pan: (H, W, 1) panchromatic image.

    Returns (target_MS, degraded_PAN, degraded_MS) as float32 arrays shaped
    (n, 64, 64, C), (n, 64, 64, 1) and (n, 16, 16, C).
    """
    # per-band min/max normalisation to [0, 1]
    max_patch, min_patch = np.max(used_ms, axis=(0,1)), np.min(used_ms, axis=(0,1))
    used_ms = np.float32(used_ms-min_patch) / (max_patch - min_patch)
    max_patch, min_patch = np.max(used_pan, axis=(0,1)), np.min(used_pan, axis=(0,1))
    used_pan = np.float32(used_pan-min_patch) / (max_patch - min_patch)
    print('after normalization:')
    print('used_pan.shape' ,used_pan.shape,'used_ms.shape' ,used_ms.shape)
    print('\n')
    ###############################################################################
    # simulate the reduced-resolution inputs (Wald protocol)
    ratio=4
    downgrade_MS ,downgrade_PAN = downgrade_images(used_ms, used_pan, ratio, sensor= None)
    print('after downgrading:')  # fixed: this block reports the downgraded shapes
    print('downgrade_PAN.shape',downgrade_PAN.shape,'downgrade_MS.shape' ,downgrade_MS.shape)
    ###############################################################################
    # crop sliding-window patches
    stride = 8
    training_size = 64
    used_ms = used_ms            # used_hrhs (target)
    downgrade_MS = downgrade_MS  # used_lrhs
    downgrade_PAN = downgrade_PAN  # used_hrms
    train_original_used_MS = []
    train_downgrade_PAN = []
    train_downgrade_MS = []
    # fixed: the upper bound used "+ stride", which lets the last window run
    # past the image edge whenever (dim - training_size) is not a multiple of
    # the stride, producing ragged, unstackable patches; "+ 1" keeps every
    # window in bounds (identical patch set when the dimensions divide evenly,
    # and consistent with single_mat_64_no_down above).
    for j in range(0, downgrade_PAN.shape[0]-training_size + 1, stride):
        for k in range(0, downgrade_PAN.shape[1]-training_size + 1, stride):
            temp_hrhs = used_ms [j:j+training_size, k:k+training_size, :]
            temp_hrms = downgrade_PAN[j:j+training_size, k:k+training_size, :]
            # the LR MS window is the HR window scaled down by the ratio
            temp_lrhs = downgrade_MS [int(j/4):int((j+training_size)/4), int(k/4):int((k+training_size)/4), :]
            train_original_used_MS.append(temp_hrhs)
            train_downgrade_PAN .append(temp_hrms)
            train_downgrade_MS .append(temp_lrhs)
    train_original_used_MS = np.array(train_original_used_MS, dtype='float32')
    train_downgrade_PAN = np.array(train_downgrade_PAN, dtype='float32')
    train_downgrade_MS = np.array(train_downgrade_MS, dtype='float32')
    # shuffle the patch order (in lockstep across the three arrays)
    index = [i for i in range(train_original_used_MS.shape[0])]
    random.shuffle(index)
    random.shuffle(index)
    train_original_used_MS = train_original_used_MS[index, :, :, :]
    train_downgrade_PAN = train_downgrade_PAN[index, :, :, :]
    train_downgrade_MS = train_downgrade_MS[index, :, :, :]
    print('after croping:')
    print(' train_original_used_MS.shape',train_original_used_MS.shape)
    print(' train_downgrade_PAN.shape' ,train_downgrade_PAN.shape)
    print(' train_downgrade_MS.shape' ,train_downgrade_MS.shape)
    print('\n')
    return train_original_used_MS, train_downgrade_PAN, train_downgrade_MS
| 7,721 | 40.294118 | 166 | py |
VP-Net | VP-Net-master/metrics.py | # -*- coding: utf-8 -*-
"""
License: GNU-3.0
Code Reference:https://github.com/wasaCheney/IQA_pansharpening_python
"""
import numpy as np
from scipy import ndimage
import cv2
from scipy.signal import convolve2d
def partial_sums(x, kernel_size=8):
    """Calculate partial sums of array in boxes (kernel_size x kernel_size).
    This corresponds to:
    scipy.signal.convolve2d(x, np.ones((kernel_size, kernel_size)), mode='valid')
    >>> partial_sums(np.arange(12).reshape(3, 4), 2)
    array([[10, 14, 18],
           [26, 30, 34]])
    """
    assert len(x.shape) >= 2 and x.shape[0] >= kernel_size and x.shape[1] >= kernel_size
    # integral image, zero-padded on the top and left edges
    integral = np.pad(x.cumsum(axis=0).cumsum(axis=1), 1)[:-1, :-1]
    k = kernel_size
    # inclusion-exclusion over the four corners of each k x k box
    return (integral[k:, k:] - integral[k:, :-k]
            - integral[:-k, k:] + integral[:-k, :-k])
def universal_image_quality_index(x, y, kernel_size=8):
    """Compute the mean Universal Image Quality Index (UIQI) of x and y.

    Per window: Q = 4*sigma_xy*mx*my / ((sigma_x^2 + sigma_y^2)*(mx^2 + my^2))
    (Wang & Bovik, 2002), expressed below purely in terms of window sums and
    epsilon-stabilised against division by zero on constant windows.
    """
    N = kernel_size ** 2
    # fixed: np.float was removed in NumPy 1.24; use the concrete float64 dtype
    x = x.astype(np.float64)
    y = y.astype(np.float64)
    e = np.finfo(np.float64).eps
    # window sums and auxiliary expressions based on sums
    S_x = partial_sums(x, kernel_size)
    S_y = partial_sums(y, kernel_size)
    PS_xy = S_x * S_y
    SSS_xy = S_x*S_x + S_y*S_y
    # window sums of squares and of the product
    S_xx = partial_sums(x*x, kernel_size)
    S_yy = partial_sums(y*y, kernel_size)
    S_xy = partial_sums(x*y, kernel_size)
    num = 4 * PS_xy * (N * S_xy - PS_xy)
    # fixed: the two denominator factors must be MULTIPLIED, not divided, to
    # match the UIQI definition (and the convolve2d implementation below,
    # which computes num / factor1 / factor2).
    den = (N*(S_xx + S_yy) - SSS_xy) * SSS_xy
    Q_s = num / (den + e)
    return np.mean(Q_s)
def universal_image_quality_index_conv(x, y, kernelsize=8):
    """Compute the Universal Image Quality Index (UIQI) of x and y.
    Not normalized with epsilon, and using scipy.signal.convolve2d —
    so it can divide by zero on constant windows.

    Per window: Q = 4*sigma_xy*mx*my / ((sigma_x^2 + sigma_y^2)*(mx^2 + my^2)).
    Returns the mean Q over all valid windows.
    """
    N = kernelsize ** 2
    kernel = np.ones((kernelsize, kernelsize))
    # fixed: np.float was removed in NumPy 1.24; use the concrete float64 dtype
    x = x.astype(np.float64)
    y = y.astype(np.float64)
    # window sums and auxiliary expressions based on sums
    S_x = convolve2d(x, kernel, mode='valid')
    S_y = convolve2d(y, kernel, mode='valid')
    PS_xy = S_x * S_y
    SSS_xy = S_x*S_x + S_y*S_y
    # window sums of squares and of the product
    S_xx = convolve2d(x*x, kernel, mode='valid')
    S_yy = convolve2d(y*y, kernel, mode='valid')
    S_xy = convolve2d(x*y, kernel, mode='valid')
    Q_s = 4 * PS_xy * (N * S_xy - PS_xy) / (N*(S_xx + S_yy) - SSS_xy) / SSS_xy
    return np.mean(Q_s)
####################################################################################################################################
def rmse(img1, img2, dynamic_range=255):
    """Root-mean-square error over all pixels (and bands) of two images.

    dynamic_range is accepted for signature compatibility with the other
    metrics in this module but is not used by RMSE.
    Raises ValueError if the two shapes differ.
    """
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    # fixed: removed the stray debug prints that spammed stdout on every call
    diff = img1.astype(np.float64) - img2.astype(np.float64)
    return np.sqrt(np.mean(np.square(diff)))
# =================================================================================================================
# ttps://github.com/Berhinj/Pansharpening/blob/10cef8395d0f4a30b663148c24486e59562df656/Pansharpening/quality.py
# Rewrote the RMSE from the reference above into the function below; it produces the same result as the rmse() function above. OK
# =================================================================================================================
def RMSEs(A, B):
    """Overall root-mean-square error between two (H, W, C) image stacks.

    Arguments: A, B: np.ndarray of identical shape (H, W, C)
    Returns: a single scalar RMSE over all pixels and bands
    """
    flat_a = A.astype(np.float64).reshape(A.shape[0]*A.shape[1], A.shape[2])
    flat_b = B.astype(np.float64).reshape(B.shape[0]*B.shape[1], B.shape[2])
    squared_err = (flat_b - flat_a) ** 2
    return np.mean(squared_err) ** 0.5
# =================================================================================================================
def RASE(A, B):
    """Relative Average Spectral Error between two (H, W, C) image stacks.

    A is the reference, B the test image; both must have identical shape.
    Returns a single float; 0 means the images are identical.
    """
    flat_a = A.astype(np.float64).reshape(A.shape[0]*A.shape[1], A.shape[2])
    flat_b = B.astype(np.float64).reshape(B.shape[0]*B.shape[1], B.shape[2])
    # overall RMSE across all pixels and bands
    overall_rmse = np.mean((flat_b - flat_a) ** 2) ** 0.5
    band_means = np.mean(flat_a, axis=0)
    return np.mean(100 * np.sqrt(overall_rmse**2 / flat_a.shape[1]) / band_means)
import math
# =================================================================================================================
# Modelled on the MATLAB version of this metric; its results differ wildly from the Python one — why?
def RASE_(img1, img2):
    """Relative Average Spectral Error (RASE), MATLAB-style formulation.

    img1: reference image (H, W, C); img2: test image of the same shape.
    Returns a single float; 0 means the images are identical.
    """
    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    m, n, p = img2.shape
    # fixed: the original computed `img1[...] - - img2[...]` (a double minus,
    # i.e. a SUM of the two bands), so even identical images scored non-zero —
    # which is why its results never matched the MATLAB reference.
    # Also generalised from the hard-coded 4 bands to any band count p
    # (identical result for the original 4-band inputs) and dropped the
    # debug prints.
    C = 0.0
    for band in range(p):
        C += np.mean(np.square(img1[:, :, band] - img2[:, :, band]))
    mean = np.mean(img1)
    N = math.sqrt(C / p) * 100 / mean
    return N
####################################################################################################################################
def sam(img1, img2):
    """Spectral Angle Mapper in degrees for 3D images, shape (H, W, C), C > 1."""
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    assert img1.ndim == 3 and img1.shape[2] > 1, "image n_channels should be greater than 1"
    a = img1.astype(np.float64)
    b = img2.astype(np.float64)
    dot = np.sum(a * b, axis=2)
    norm_prod = np.sqrt(np.sum(a * a, axis=2)) * np.sqrt(np.sum(b * b, axis=2))
    # epsilon guards zero-norm pixels; clipping keeps arccos in its domain
    cos_angle = np.clip(dot / (norm_prod + np.finfo(np.float64).eps), 0, 1)
    return np.mean(np.arccos(cos_angle)) * 180 / np.pi
# NOTE: the *180/np.pi factor converts radians to degrees — that conversion is where this implementation differs from radian-based SAM variants.
##################################################################################
def psnr(img1, img2, dynamic_range=255):
    """PSNR in dB; dynamic_range is 255 for uint8 data, 2047 for 11-bit uint16."""
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    diff = img1.astype(np.float64) - img2.astype(np.float64)
    mse = np.mean(diff * diff)
    if mse <= 1e-10:
        # (near-)identical images -> infinite PSNR
        return np.inf
    root = np.sqrt(mse)
    return 20 * np.log10(dynamic_range / (root + np.finfo(np.float64).eps))
def scc(img1, img2):
    """Spatial correlation coefficient for 2D (H, W) or 3D (H, W, C) images.

    Returns the Pearson correlation of the flattened images (2D) or its mean
    over bands (3D). Raises ValueError on shape mismatch or other ranks.
    """
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    img1_ = img1.astype(np.float64)
    img2_ = img2.astype(np.float64)
    if img1_.ndim == 2:
        # fixed: the 2D branch called the nonexistent method `rehshape`,
        # so it crashed with AttributeError on every 2D input
        return np.corrcoef(img1_.reshape(1, -1), img2_.reshape(1, -1))[0, 1]
    elif img1_.ndim == 3:
        # per-band correlation, averaged
        ccs = [np.corrcoef(img1_[..., i].reshape(1, -1), img2_[..., i].reshape(1, -1))[0, 1]
               for i in range(img1_.shape[2])]
        return np.mean(ccs)
    else:
        raise ValueError('Wrong input image dimensions.')
def _qindex(img1, img2, block_size=8):
    """Q-index for 2D (one-band) image, shape (H, W); uint or float [0, 1]

    Per block: Q = 4*sigma12*mu1*mu2 / ((mu1^2 + mu2^2)*(sigma1^2 + sigma2^2)),
    computed with a box filter of size block_size and averaged over the valid
    region; degenerate blocks are handled case by case below.
    """
    assert block_size > 1, 'block_size shold be greater than 1!'
    img1_ = img1.astype(np.float64)
    img2_ = img2.astype(np.float64)
    # normalised box filter: each response is the block mean
    window = np.ones((block_size, block_size)) / (block_size**2)
    # trim the filtered maps to the 'valid' region (filter2D pads the borders)
    pad_topleft = int(np.floor(block_size/2))
    pad_bottomright = block_size - 1 - pad_topleft
    mu1 = cv2.filter2D(img1_, -1, window)[pad_topleft:-pad_bottomright, pad_topleft:-pad_bottomright]
    mu2 = cv2.filter2D(img2_, -1, window)[pad_topleft:-pad_bottomright, pad_topleft:-pad_bottomright]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    # block variances / covariance via E[x^2] - E[x]^2
    sigma1_sq = cv2.filter2D(img1_**2, -1, window)[pad_topleft:-pad_bottomright, pad_topleft:-pad_bottomright] - mu1_sq
    sigma2_sq = cv2.filter2D(img2_**2, -1, window)[pad_topleft:-pad_bottomright, pad_topleft:-pad_bottomright] - mu2_sq
    sigma12 = cv2.filter2D(img1_ * img2_, -1, window)[pad_topleft:-pad_bottomright, pad_topleft:-pad_bottomright] - mu1_mu2
    # default Q = 1 covers the fully degenerate case sigma == mu == 0
    qindex_map = np.ones(sigma12.shape)
    # sigma == 0 and mu != 0: only the luminance term survives
    idx = ((sigma1_sq + sigma2_sq) < 1e-8) * ((mu1_sq + mu2_sq) >1e-8)
    qindex_map[idx] = 2 * mu1_mu2[idx] / (mu1_sq + mu2_sq)[idx]
    # sigma != 0 and mu == 0: only the contrast/structure term survives
    idx = ((sigma1_sq + sigma2_sq) >1e-8) * ((mu1_sq + mu2_sq) < 1e-8)
    qindex_map[idx] = 2 * sigma12[idx] / (sigma1_sq + sigma2_sq)[idx]
    # general case: full Q-index formula
    idx = ((sigma1_sq + sigma2_sq) >1e-8) * ((mu1_sq + mu2_sq) >1e-8)
    qindex_map[idx] =((2 * mu1_mu2[idx]) * (2 * sigma12[idx])) / (
        (mu1_sq + mu2_sq)[idx] * (sigma1_sq + sigma2_sq)[idx])
    return np.mean(qindex_map)
def qindex(img1, img2, block_size=8):
    """Mean Q-index for 2D (H, W) or 3D (H, W, C) images; uint or float [0, 1]."""
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    if img1.ndim == 2:
        return _qindex(img1, img2, block_size)
    if img1.ndim == 3:
        # average the per-band Q-indexes
        per_band = [_qindex(img1[..., band], img2[..., band], block_size)
                    for band in range(img1.shape[2])]
        return np.array(per_band).mean()
    raise ValueError('Wrong input image dimensions.')
def _ssim(img1, img2, dynamic_range=255):
    """SSIM for 2D (one-band) image, shape (H, W); uint8 if 225; uint16 if 2047"""
    # stabilising constants from the SSIM paper: (K1*L)^2 and (K2*L)^2
    C1 = (0.01 * dynamic_range)**2
    C2 = (0.03 * dynamic_range)**2
    img1_ = img1.astype(np.float64)
    img2_ = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)  # kernel size 11, sigma 1.5 (standard SSIM window)
    window = np.outer(kernel, kernel.transpose())  # separable -> 2-D Gaussian weights
    # local (Gaussian-weighted) means, trimmed to the 'valid' region
    mu1 = cv2.filter2D(img1_, -1, window)[5:-5, 5:-5]  # valid
    mu2 = cv2.filter2D(img2_, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    # local variances / covariance via E[x^2] - E[x]^2
    sigma1_sq = cv2.filter2D(img1_**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2_**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1_ * img2_, -1, window)[5:-5, 5:-5] - mu1_mu2
    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / (
        (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean()
def ssim(img1, img2, dynamic_range=255):
    """Mean SSIM for 2D (H, W) or 3D (H, W, C) images; uint8 if 225; uint16 if 2047."""
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    if img1.ndim == 2:
        return _ssim(img1, img2, dynamic_range)
    if img1.ndim == 3:
        # average the per-band SSIM scores
        per_band = [_ssim(img1[..., band], img2[..., band], dynamic_range)
                    for band in range(img1.shape[2])]
        return np.array(per_band).mean()
    raise ValueError('Wrong input image dimensions.')
def ergas(img_fake, img_real, scale=4):
    """ERGAS for 2D (H, W) or 3D (H, W, C) image; uint or float [0, 1].
    scale = spatial resolution of PAN / spatial resolution of MUL, default 4."""
    if not img_fake.shape == img_real.shape:
        raise ValueError('Input images must have the same dimensions.')
    fake = img_fake.astype(np.float64)
    real = img_real.astype(np.float64)
    eps = np.finfo(np.float64).eps
    if fake.ndim == 2:
        mse = np.mean((fake - real) ** 2)
        return 100 / scale * np.sqrt(mse / (real.mean() ** 2 + eps))
    if fake.ndim == 3:
        # per-band means and MSEs, then the RMS of their ratio
        band_means = real.reshape(-1, real.shape[2]).mean(axis=0)
        band_mses = ((fake - real) ** 2).reshape(-1, fake.shape[2]).mean(axis=0)
        return 100 / scale * np.sqrt((band_mses / (band_means ** 2 + eps)).mean())
    raise ValueError('Wrong input image dimensions.')
# =================================================================================================================
# ttps://github.com/Berhinj/Pansharpening/blob/10cef8395d0f4a30b663148c24486e59562df656/Pansharpening/quality.py
# Rewrote the RMSE from the reference above into the function below; the two versions do NOT agree.
# =================================================================================================================
def ERGAS_(A, B):
    """ERGAS variant computed from the overall RMSE and the per-band means of B.

    A, B: np.ndarray of identical shape (H, W, C); returns a single float.
    """
    flat_a = A.astype(np.float64).reshape(A.shape[0]*A.shape[1], A.shape[2])
    flat_b = B.astype(np.float64).reshape(B.shape[0]*B.shape[1], B.shape[2])
    overall_rmse = np.mean((flat_b - flat_a) ** 2) ** 0.5
    f_ratio = 1  # resolution ratio factor, fixed to 1 here
    band_means = np.average(flat_b, 0)
    # Relative Dimensionless Global Error in Synthesis
    return 100 * f_ratio**2 * (np.sum((overall_rmse / band_means) ** 2) / flat_a.shape[1]) ** 0.5
####################
# observation model
####################
def gaussian2d(N, std):
    """Separable N x N Gaussian window with the given std; peak value 1 at the centre."""
    coords = np.arange(-(N - 1) // 2, (N + 2) // 2)
    xx, yy = np.meshgrid(coords, coords)
    s = np.double(std)
    return np.exp(-0.5 * (xx / s) ** 2) * np.exp(-0.5 * (yy / s) ** 2)
def kaiser2d(N, beta):
    """Rotationally symmetric N x N Kaiser window; zero outside the unit radius."""
    axis = np.arange(-(N - 1) // 2, (N + 2) // 2) / np.double(N - 1)
    gx, gy = np.meshgrid(axis, axis)
    radius = np.sqrt(gx * gx + gy * gy)
    # interpolate the 1-D Kaiser profile radially
    profile = np.kaiser(N, beta)
    win = np.interp(radius, axis, profile)
    win[radius > axis[-1]] = 0
    win[radius < axis[0]] = 0
    return win
def fir_filter_wind(Hd, w):
    """
    compute fir (finite impulse response) filter with window method
    Hd: desired freqeuncy response (2D)
    w: window (2D)
    Returns the (complex) windowed impulse response, normalised to unit sum.
    """
    # centre the desired response, back-transform, and re-centre the result
    centered = np.rot90(np.fft.fftshift(np.rot90(Hd, 2)), 2)
    impulse = np.rot90(np.fft.fftshift(np.fft.ifft2(centered)), 2)
    impulse = impulse * w
    return impulse / np.sum(impulse)
def GNyq2win(GNyq, scale=4, N=41):
    """Generate a 2D convolutional window from a given GNyq
    GNyq: Nyquist frequency
    scale: spatial size of PAN / spatial size of MS
    Returns the real part of the windowed FIR lowpass kernel (N x N).
    """
    # Gaussian whose value at the cutoff frequency matches GNyq
    cutoff = 1 / scale
    std = np.sqrt(((N - 1) * (cutoff / 2)) ** 2 / (-2 * np.log(GNyq)))
    response = gaussian2d(N, std)
    response = response / np.max(response)
    taper = kaiser2d(N, 0.5)
    return np.real(fir_filter_wind(response, taper))
def mtf_resize(img, satellite='QuickBird', scale=4):
    """Downsample a PAN (2-D) or MS (3-D) image by `scale` after MTF-matched
    lowpass filtering built from the satellite's published Nyquist gains.

    Returns a float64 array of spatial size (H // scale, W // scale).
    Raises NotImplementedError for satellites other than QuickBird/IKONOS.
    """
    # satellite GNyq
    scale = int(scale)
    if satellite == 'QuickBird':
        GNyq = [0.34, 0.32, 0.30, 0.22] # Band Order: B,G,R,NIR
        GNyqPan = 0.15
    elif satellite == 'IKONOS':
        GNyq = [0.26, 0.28, 0.29, 0.28] # Band Order: B,G,R,NIR
        GNyqPan = 0.17
    else:
        raise NotImplementedError('satellite: QuickBird or IKONOS')
    # lowpass
    img_ = img.squeeze()
    img_ = img_.astype(np.float64)
    if img_.ndim == 2: # Pan
        H, W = img_.shape
        lowpass = GNyq2win(GNyqPan, scale, N=41)
    elif img_.ndim == 3: # MS
        # NOTE(review): reads img.shape, not the squeezed img_.shape — the two
        # differ if the input carried singleton axes; confirm against callers.
        H, W, _ = img.shape
        lowpass = [GNyq2win(gnyq, scale, N=41) for gnyq in GNyq]
        lowpass = np.stack(lowpass, axis=-1)  # one kernel per band
    # NOTE(review): if img_.ndim is neither 2 nor 3, H/W/lowpass are unbound and
    # the call below raises NameError. Also ndimage.filters.correlate is the
    # deprecated spelling (SciPy >= 1.10 prefers ndimage.correlate).
    img_ = ndimage.filters.correlate(img_, lowpass, mode='nearest')
    # downsampling
    output_size = (H // scale, W // scale)
    img_ = cv2.resize(img_, dsize=output_size, interpolation=cv2.INTER_NEAREST)
    return img_
##################
# No reference IQA
##################
def D_lambda(img_fake, img_lm, block_size=32, p=1):
    """ Spectral distortion
    img_fake, generated HRMS
    img_lm , LRMS

    Compares the Q-index of every band pair within the fused image against the
    same pair within the low-resolution MS image; 0 means no distortion."""
    assert img_fake.ndim == img_lm.ndim == 3, 'Images must be 3D!'
    H_f, W_f, C_f = img_fake.shape
    H_r, W_r, C_r = img_lm.shape
    assert C_f == C_r, 'Fake and lm should have the same number of bands!'
    q_fake, q_lm = [], []
    for band_a in range(C_f):
        for band_b in range(band_a + 1, C_f):
            q_fake.append(_qindex(img_fake[..., band_a], img_fake[..., band_b],
                                  block_size=block_size))
            q_lm.append(_qindex(img_lm[..., band_a], img_lm[..., band_b],
                                block_size=block_size))
    diff = np.abs(np.array(q_fake) - np.array(q_lm)) ** p
    return diff.mean() ** (1 / p)
def D_s(img_fake, img_lm, pan, satellite='QuickBird', scale=4, block_size=32, q=1):
    """Spatial distortion
    img_fake, generated HRMS
    img_lm, LRMS
    pan, HRPan

    Compares each fused band's Q-index against the HR PAN with each LRMS
    band's Q-index against the MTF-degraded PAN; 0 means no distortion."""
    # fake and lm
    assert img_fake.ndim == img_lm.ndim == 3, 'MS images must be 3D!'
    H_f, W_f, C_f = img_fake.shape
    H_r, W_r, C_r = img_lm.shape
    assert H_f // H_r == W_f // W_r == scale, 'Spatial resolution should be compatible with scale'
    assert C_f == C_r, 'Fake and lm should have the same number of bands!'
    # fake and pan
    assert pan.ndim == 3, 'Panchromatic image must be 3D!'
    H_p, W_p, C_p = pan.shape
    assert C_p == 1, 'size of 3rd dim of Panchromatic image must be 1'
    assert H_f == H_p and W_f == W_p, "Pan's and fake's spatial resolution should be the same"
    # degrade the PAN band to the MS resolution (2-D result)
    pan_lr = mtf_resize(pan, satellite=satellite, scale=scale)
    q_hr, q_lr = [], []
    for band in range(C_f):
        # HR: fused band vs full-resolution PAN (PAN kept 3-D with one channel)
        q_hr.append(_qindex(img_fake[..., band], pan[..., 0], block_size=block_size))
        # LR: original MS band vs degraded PAN (2-D)
        q_lr.append(_qindex(img_lm[..., band], pan_lr, block_size=block_size))
    diff = np.abs(np.array(q_hr) - np.array(q_lr)) ** q
    return diff.mean() ** (1 / q)
def qnr(img_fake, img_lm, pan, satellite='QuickBird', scale=4, block_size=32, p=1, q=1, alpha=1, beta=1):
    """QNR - No reference IQA: (1 - D_lambda)^alpha * (1 - D_s)^beta."""
    spectral = D_lambda(img_fake, img_lm, block_size, p)
    spatial = D_s(img_fake, img_lm, pan, satellite, scale, block_size, q)
    return (1 - spectral) ** alpha * (1 - spatial) ** beta
def ref_evaluate(pred, gt):
    """Full-reference metrics for a fused image against ground truth.

    Returns [PSNR, SSIM, SAM, ERGAS, SCC, Q-index, RMSE].
    """
    return [
        psnr(pred, gt),
        ssim(pred, gt),
        sam(pred, gt),
        ergas(pred, gt),
        scc(pred, gt),
        qindex(pred, gt),
        rmse(pred, gt),
    ]
def no_ref_evaluate(pred, pan, hs):
    """No-reference metrics for a fused image.

    Returns [D_lambda, D_s, QNR].
    """
    return [D_lambda(pred, hs), D_s(pred, hs, pan), qnr(pred, hs, pan)]
| 20,138 | 32.677258 | 132 | py |
VP-Net | VP-Net-master/train.py | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 12 11:58:09 2021
@author: kunLi
"""
# -*- coding: utf-8 -*-
import os
import numpy as np
import scipy.io as sio
# First, you need to make your own training dataset ----
GF2_train_from_single_pic160 = sio.loadmat('E:\datasets\\4_GF1_GF2\\GF2\\crop_xj_smooth_down\\mat160_to_train_set\\GF2_train_from_single_pic_no_down_no_normal.mat')
train_original_used_MS = GF2_train_from_single_pic160['ref']  # ground-truth HR MS patches
train_downgrade_PAN = GF2_train_from_single_pic160['pan']     # degraded PAN patches
train_downgrade_MS = GF2_train_from_single_pic160['ms']       # degraded LR MS patches
#%%
from fusion_net import vp_net, compute_cost
import tensorflow as tf
# fixed: the metric functions used below (peak_signal_noise_ratio, ...) live in
# skimage.metrics, but only skimage.measure was imported, so the eval block
# raised AttributeError on scikit-image versions without lazy submodule loading.
import skimage.metrics
import time
# training hyper-parameters
nrtrain = 16000
EpochNum = 55
batch_size = 40
PhaseNumber = 7          # number of unrolled phases in VP-Net
learning_rate = 0.0001
tf.reset_default_graph()
# placeholders: HR MS target, HR PAN input, LR MS input
X_output = tf.placeholder(tf.float32, shape=(batch_size, 64, 64, 4))
P_input = tf.placeholder(tf.float32, shape=(batch_size, 64, 64, 1))
M_input = tf.placeholder(tf.float32, shape=(batch_size, 16, 16, 4))
PredX , ListX ,Q = vp_net( PhaseNumber, M_input, P_input, X_output )
cost , cost_sym = compute_cost ( PredX, ListX ,P_input ,X_output, PhaseNumber)
cost_all = 10*cost + 0.1*cost_sym
optm_all = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost_all)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)
# fixed: the original wrote `tf.ConfigProto().gpu_options.allow_growth = True`,
# mutating a throw-away object that was never passed to the Session, so GPU
# memory growth was never actually enabled.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
time_start = time.time()
with tf.Session(config=config) as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    print('#'*60," Strart Training... ")
    model_dir = 'VP_Net_Phase%d_epoch%d_GF2_Model' % (PhaseNumber,EpochNum)
    output_file_name = "./train_log/%s_log.txt" % (model_dir)
    lk_output_file_name = "./train_log/batch_%s_log.txt" % (model_dir)
    for epoch_i in range(0, EpochNum):
        print('##############')
        print('Training with %d epoch, learning rate =%.5f'%(epoch_i+1, learning_rate))
        Training_Loss = 0
        randidx_all = np.random.permutation(nrtrain)
        for batch_i in range(nrtrain // batch_size):
            randidx = randidx_all[batch_i*batch_size:(batch_i+1)*batch_size]
            batch_X = train_original_used_MS[randidx, :, :, :]
            batch_P = train_downgrade_PAN [randidx, :, :, :]
            batch_M = train_downgrade_MS [randidx, :, :, :]
            feed_dict = { M_input: batch_M, P_input: batch_P, X_output: batch_X }
            _ , cost_all_value = sess.run([optm_all, cost_all], feed_dict = feed_dict)
            Training_Loss += cost_all_value
            # visual output: evaluate the current batch every 300 batches
            _ ,ifshow = divmod(batch_i+1,300)
            if ifshow ==1:
                P_PredX , P_ListX = sess.run([PredX[-1], ListX[-1]],feed_dict={M_input: batch_M, P_input: batch_P} )
                print('PredX[-1].shape, ListX[-1].shape', P_PredX.shape, P_ListX.shape)
                # eval this batch
                psnr = skimage.metrics.peak_signal_noise_ratio(batch_X, P_PredX )
                ssim = skimage.metrics.structural_similarity (batch_X, P_PredX, multichannel=True)
                nrmse = skimage.metrics.normalized_root_mse (batch_X, P_PredX )
                mse = skimage.metrics.mean_squared_error (batch_X, P_PredX)
                CurLoss = Training_Loss/(batch_i+1)
                print ('In %d epoch %d-th batch , Training_Loss =%.8f, PSNR =%.3f, SSIM =%.4f, NRMSE =%.5f\n' %(epoch_i+1, batch_i+1, CurLoss, psnr, ssim, nrmse))
                write_data = 'In %d epoch i-th batch , Training_Loss =%.8f, PSNR =%.8f, SSIM =%.8f, NRMSE =%.8f\n' %(epoch_i+1, CurLoss, psnr, ssim, nrmse)
                out_file = open(lk_output_file_name, 'a')
                out_file.write(write_data)
                out_file.close()
        enter = open(lk_output_file_name, 'a')
        enter.write('\n')
        enter.close()
        output_data = "[%02d/%02d] cost: %.4f, cost_sym: %.4f \n" % (epoch_i+1, EpochNum, sess.run(cost, feed_dict=feed_dict), sess.run(cost_sym, feed_dict=feed_dict))
        print('##############')
        print(output_data)
        output_file = open(output_file_name, 'a')
        output_file.write(output_data)
        output_file.close()
        # save model: every epoch up to 10, then every 5th epoch
        # if not os.path.exists(model_dir):
        #     os.makedirs(model_dir)
        if epoch_i <= 10:
            saver.save(sess, './train_model/%s/VP_Model_%d.cpkt' % (model_dir, epoch_i), write_meta_graph=True)
        else:
            if epoch_i % 5 == 0:
                saver.save(sess, './train_model/%s/VP_Model_%d.cpkt' % (model_dir, epoch_i), write_meta_graph=False)
    print("Training Finished")
    sess.close()
time_end = time.time()
time_c = time_end - time_start  # total wall-clock training time (seconds)
| 5,434 | 37.006993 | 169 | py |
mff | mff-master/setup.py | from setuptools import find_packages, setup, Extension
import numpy
tricube_cpp_module = Extension(
'mff.interpolation.tricube_cpp._tricube',
sources=["mff/interpolation/tricube_cpp/tricube_module.c", "mff/interpolation/tricube_cpp/_tricube.c"],
include_dirs=[numpy.get_include()]
)
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='mff',
version='1.1.2',
author='Claudio Zeni, Adam Fekete, Aldo Glielmo',
author_email='',
description='Gaussian process regression to extract non-parametric 2-, 3- and many-body force fields.',
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/kcl-tscm/mff",
packages=find_packages(),
ext_modules=[tricube_cpp_module],
package_data={
# If any package contains source code files, include them:
'': ['*.h', '*.c', '*.pyf']
},
python_requires='>=3.6',
install_requires=[
'numpy >= 1.17',
'asap3 >= 3.11',
'ase',
'theano >= 1.0.4',
'scipy'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3',
'License :: OSI Approved',
'Topic :: Scientific/Engineering :: Physics'
],
)
| 1,386 | 29.822222 | 107 | py |
mff | mff-master/tests/test_mff.py | import unittest
import mff
class TestMFFModels(unittest.TestCase):
    """Smoke-test suite: verifies the test harness itself runs."""

    def test_upper(self):
        # Trivial sanity check that unittest discovery and execution work.
        self.assertEqual('FOO', 'foo'.upper())
if __name__ == '__main__':
unittest.main()
| 192 | 13.846154 | 46 | py |
mff | mff-master/tests/__init__.py | from tests.test_mff import TestMFFModels
| 41 | 20 | 40 | py |
mff | mff-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath('../'))
# -- Project information -----------------------------------------------------
project = 'MFF'
copyright = '2018, Claudio Zeni, Aldo Glielmo, Adam Fekete, Alessandro De Vita'
author = 'Claudio Zeni, Aldo Glielmo, Adam Fekete, Alessandro De Vita'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
]
napoleon_google_docstring = True
# napoleon_use_param = False
# napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
add_module_names = True
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
modindex_common_prefix = ['mff', 'models']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# import sphinx_rtd_theme
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
'canonical_url': '',
'analytics_id': '',
'logo_only': False,
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': False,
# Toc options
'collapse_navigation': False,
'sticky_navigation': True,
'navigation_depth': 2,
'includehidden': True,
'titles_only': False
}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/mff_logo_2.svg'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
html_show_copyright = False
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'MFF'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mff.tex', 'mff Documentation',
'Aldo, Claudio, Adam', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mff', 'mff Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mff', 'mff Documentation',
author, 'mff', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration ------------------------------------------------- | 6,686 | 28.588496 | 79 | py |
mff | mff-master/mff/gp.py | # -*- coding: utf-8 -*-
import logging
import numpy as np
from scipy.linalg import cho_solve, cholesky, solve_triangular
from scipy.optimize import fmin_l_bfgs_b
from mff import interpolation, kernels
logger = logging.getLogger(__name__)
class GaussianProcess(object):
""" Gaussian process class
Class of GP regression of QM energies and forces
Args:
kernel (obj): A kernel object (typically a two or three body)
noise (float): The regularising noise level (typically named \sigma_n^2)
optimizer (str): The kind of optimization of marginal likelihood (not implemented yet)
Attributes:
X_train_ (list): The configurations used for training
alpha_ (array): The coefficients obtained during training
L_ (array): The lower triangular matrix from cholesky decomposition of gram matrix
K (array): The kernel gram matrix
"""
# optimizers "fmin_l_bfgs_b"
def __init__(self, kernel=None, noise=1e-10,
optimizer=None, n_restarts_optimizer=0):
self.kernel = kernel
self.noise = noise
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.fitted = [None, None]
def calc_gram_ff(self, X):
"""Calculate the force-force kernel gram matrix
Args:
X (list): list of N training configurations, which are M x 5 matrices
Returns:
K (matrix): The force-force gram matrix, has dimensions 3N x 3N
"""
self.kernel_ = self.kernel
self.X_train_ = X
K = self.kernel_.calc_gram(self.X_train_, self.ncores)
return K
def calc_gram_ee(self, X):
"""Calculate the force-force kernel gram matrix
Args:
X (list): list of N training configurations, which are M x 5 matrices
Returns:
K (matrix): The energy energy gram matrix, has dimensions N x N
"""
self.kernel_ = self.kernel
self.X_train_ = X
K = self.kernel_.calc_gram_e(self.X_train_, self.ncores)
return K
    def fit(self, X, y, ncores=1):
        """Fit a Gaussian process regression model on training forces.

        Builds the force-force gram matrix, regularises its diagonal with
        ``self.noise``, Cholesky-factorises it, and solves for the weight
        vector ``alpha_`` used later by ``predict``/``predict_energy``.

        Args:
            X (list): training configurations
            y (np.ndarray): training forces, shape (N, 3)
            ncores (int): number of CPU workers to use, default is 1

        Returns:
            self, fitted on force data only.
        """
        self.kernel_ = self.kernel
        self.X_train_ = X
        # Forces are flattened to a (3N, 1) column vector to match the
        # 3N x 3N force-force gram matrix.
        self.y_train_ = np.reshape(y, (y.shape[0] * 3, 1))
        # NOTE(review): marginal-likelihood hyperparameter optimization is not
        # implemented for force training; ``self.optimizer`` is ignored here
        # (see log_marginal_likelihood / _constrained_optimization).
        # Precompute quantities required for predictions which are independent
        # of actual query points
        K = self.kernel_.calc_gram(self.X_train_, ncores)
        K[np.diag_indices_from(K)] += self.noise
        try:  # Use Cholesky decomposition to build the lower triangular matrix
            self.L_ = cholesky(K, lower=True)
        except np.linalg.LinAlgError as exc:
            # Prepend a helpful hint while preserving the original error info.
            exc.args = ("The kernel, %s, is not returning a "
                        "positive definite matrix. Try gradually "
                        "increasing the 'noise' parameter of your "
                        "GaussianProcessRegressor estimator."
                        % self.kernel_,) + exc.args
            raise
        # Calculate the alpha weights using the Cholesky method
        self.alpha_ = cho_solve((self.L_, True), self.y_train_)
        self.K = K
        # Energy-side state is cleared: this model is now force-only.
        self.energy_alpha_ = None
        self.energy_K = None
        self.X_glob_train_ = None
        self.fitted[0] = 'force'
        self.n_train = len(self.y_train_) // 3
        return self
def fit_force_and_energy(self, X, y_force, X_glob, y_energy, ncores=1):
"""Fit a Gaussian process regression model using forces and energies
Args:
X (list of arrays): training configurations
y_force (np.ndarray): training forces
X_glob (list of lists of arrays): list of grouped training configurations
y_energy (np.ndarray): training total energies
ncores (int): number of CPU workers to use, default is 1
"""
self.kernel_ = self.kernel
self.X_train_ = X
self.X_glob_train_ = X_glob
self.y_train_ = np.reshape(y_force, (y_force.shape[0] * 3, 1))
self.y_train_energy_ = np.reshape(y_energy, (y_energy.shape[0], 1))
if self.optimizer is not None:
logger.warning(
"Optimizer not yet implemented for force-energy training")
'''
# TODO
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [(self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds))]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = \
self._rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
'''
else:
pass
'''
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)'''
# Precompute quantities required for predictions which are independent
# of actual query points
K_ff = self.kernel_.calc_gram(self.X_train_, ncores)
K_ff[np.diag_indices_from(K_ff)] += self.noise
K_ee = self.kernel_.calc_gram_e(self.X_glob_train_, ncores)
K_ee[np.diag_indices_from(K_ee)] += self.noise
K_ef = self.kernel_.calc_gram_ef(
self.X_train_, self.X_glob_train_, ncores)
K = np.zeros((y_force.shape[0] * 3 + y_energy.shape[0],
y_force.shape[0] * 3 + y_energy.shape[0]))
K[:y_energy.shape[0], :y_energy.shape[0]] = K_ee
K[:y_energy.shape[0], y_energy.shape[0]:] = K_ef
K[y_energy.shape[0]:, :y_energy.shape[0]] = K_ef.T
K[y_energy.shape[0]:, y_energy.shape[0]:] = K_ff
try: # Use Cholesky decomposition to build the lower triangular matrix
self.L_ = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError as exc:
exc.args = ("The kernel, %s, is not returning a "
"positive definite matrix. Try gradually "
"increasing the 'noise' parameter of your "
"GaussianProcessRegressor estimator."
% self.kernel_,) + exc.args
raise
self.y_energy_and_force = np.vstack(
(self.y_train_energy_, self.y_train_))
# Calculate the alpha weights using the Cholesky method
self.alpha_ = cho_solve((self.L_, True), self.y_energy_and_force)
self.energy_alpha_ = None # Used to distinguish pure energy fitting
self.K = K
self.energy_K = None # Used to distinguish pure energy fitting
self.fitted = ['force', 'energy']
self.n_train = len(self.X_train_) + len(self.X_glob_train_)
return self
# Untested, log_marginal_linkelihood not working as for now
def fit_energy(self, X_glob, y, ncores=1):
"""Fit a Gaussian process regression model using local energies.
Args:
X_glob (list of lists of arrays): list of grouped training configurations
y (np.ndarray): training total energies
ncores (int): number of CPU workers to use, default is 1
"""
self.kernel_ = self.kernel
self.X_glob_train_ = X_glob
self.y_train_energy_ = np.reshape(y, (y.shape[0], 1))
if self.optimizer is not None: # TODO Debug
logger.warning("Optimizer not yet implemented for energy training")
'''
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [(self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds))]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = \
self._rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
'''
else:
pass
'''
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
'''
# Precompute quantities required for predictions which are independent
# of actual query points
self.energy_K = self.kernel_.calc_gram_e(self.X_glob_train_, ncores)
self.energy_K[np.diag_indices_from(self.energy_K)] += self.noise
try: # Use Cholesky decomposition to build the lower triangular matrix
self.L_ = cholesky(self.energy_K, lower=True)
except np.linalg.LinAlgError as exc:
exc.args = ("The kernel, %s, is not returning a "
"positive definite matrix. Try gradually "
"increasing the 'noise' parameter of your "
"GaussianProcessRegressor estimator."
% self.kernel_,) + exc.args
raise
# Calculate the alpha weights using the Cholesky method
self.energy_alpha_ = cho_solve((self.L_, True), self.y_train_energy_)
self.K = None
self.alpha_ = None
self.fitted[1] = 'energy'
self.n_train = len(self.y_train_energy_)
self.X_train_ = None
return self
    def predict(self, X, return_std=False, ncores=1):
        """Predict forces using the Gaussian process regression model.

        Falls back to the GP prior (zero mean) when the model has not been
        fitted; otherwise dispatches on ``self.fitted`` to use force data,
        energy data, or both.

        Args:
            X (np.ndarray): Target configuration where the GP is evaluated
            return_std (bool): If True, the standard-deviation of the
                predictive distribution of the target configurations is
                returned along with the mean.
            ncores (int): number of CPU workers to use, default is 1

        Returns:
            y_mean (np.ndarray): Mean of predictive distribution at target
                configurations, reshaped to (N, 3).
            y_std (np.ndarray): Standard deviation of predictive distribution
                at target configurations. Only returned when return_std is True.
        """
        # Unfitted; predict based on GP prior
        if not hasattr(self, "X_glob_train_") and not hasattr(self, "X_train_"):
            kernel = self.kernel
            y_mean = np.zeros(X.shape[0])
            logger.warning("No training data, predicting based on prior")
            if return_std:
                y_var = kernel.calc_diag(X)
                return y_mean, np.sqrt(y_var)
            else:
                return y_mean
        else:  # Predict based on GP posterior
            if self.fitted == ['force', None]:  # Predict using force data
                K_trans = self.kernel_.calc(X, self.X_train_, ncores)
                y_mean = K_trans.dot(self.alpha_[:, 0])
            elif self.fitted == [None, 'energy']:  # Predict using energy data
                K_force_energy = self.kernel_.calc_ef(
                    self.X_glob_train_, X, ncores).T
                y_mean = K_force_energy.dot(self.energy_alpha_[:, 0])
            else:  # Predict using both force and energy data
                K_trans = self.kernel_.calc(X, self.X_train_, ncores)
                K_force_energy = self.kernel_.calc_ef(
                    self.X_glob_train_, X, ncores).T
                K = np.hstack((K_force_energy, K_trans))
                y_mean = K.dot(self.alpha_[:, 0])
            if return_std:  # TODO CHECK FOR ENERGY, FORCE and FORCE +ENERGY FIT
                # NOTE(review): this branch always uses ``K_trans``, which is
                # undefined when the model was fitted on energies only
                # (``self.fitted == [None, 'energy']``) -- requesting the std
                # there raises NameError. Confirm the intended covariance term.
                # compute inverse K_inv of K based on its Cholesky
                # decomposition L and its inverse L_inv
                L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
                K_inv = L_inv.dot(L_inv.T)
                # Compute variance of predictive distribution
                y_var = self.kernel_.calc_diag(X)
                fit = np.einsum("ij,ij->i", np.dot(K_trans, K_inv), K_trans)
                y_var -= fit
                # Check if any of the variances is negative because of
                # numerical issues. If yes: set the variance to 0.
                y_var_negative = y_var < 0
                if np.any(y_var_negative):
                    logger.warning("Predicted variances smaller than 0. "
                                   "Setting those variances to 0.")
                    y_var[y_var_negative] = 0.0
                return np.reshape(y_mean, (int(y_mean.shape[0] / 3), 3)), np.reshape(np.sqrt(y_var),
                                                                                     (int(y_var.shape[0] / 3), 3))
            else:
                return np.reshape(y_mean, (int(y_mean.shape[0] / 3), 3))
def predict_energy(self, X, return_std=False, ncores=1, mapping=False, **kwargs):
"""Predict energies from forces only using the Gaussian process regression model
This function evaluates the GP energies for a set of test configurations.
Args:
X (np.ndarray): Target configurations where the GP is evaluated
return_std (bool): If True, the standard-deviation of the
predictive distribution of the target configurations is
returned along with the mean.
Returns:
y_mean (np.ndarray): Mean of predictive distribution at target configurations.
y_std (np.ndarray): Standard deviation of predictive distribution at target
configurations. Only returned when return_std is True.
"""
# Unfitted; predict based on GP prior
if not hasattr(self, "X_glob_train_") and not hasattr(self, "X_train_"):
kernel = self.kernel
e_mean = np.zeros(len(X))
logger.warning("No training data, predicting based on prior")
if return_std:
y_var = kernel.calc_diag_e(X)
return e_mean, np.sqrt(e_var)
else:
return e_mean
else: # Predict based on GP posterior
if self.fitted == ['force', None]: # Predict using force data
K_trans = self.kernel_.calc_ef(
X, self.X_train_, ncores, mapping, **kwargs)
# Line 4 (y_mean = f_star)
e_mean = K_trans.dot(self.alpha_[:, 0])
elif self.fitted == [None, 'energy']: # Predict using energy data
K_energy = self.kernel_.calc_ee(
X, self.X_glob_train_, ncores, mapping, **kwargs)
e_mean = K_energy.dot(self.energy_alpha_[:, 0])
else: # Predict using both force and energy data
K_energy = self.kernel_.calc_ee(
X, self.X_glob_train_, ncores, mapping, **kwargs)
K_energy_force = self.kernel_.calc_ef(
X, self.X_train_, ncores, mapping, **kwargs)
K = np.hstack((K_energy, K_energy_force))
e_mean = K.dot(self.alpha_[:, 0])
if return_std: # TODO CHECK FOR ENERGY, FORCE and FORCE +ENERGY FIT
# compute inverse K_inv of K based on its Cholesky
# decomposition L and its inverse L_inv
L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
K_inv = L_inv.dot(L_inv.T)
# Compute variance of predictive distribution
if self.fitted == ['force', None]: # Predict using force data
e_var = self.kernel_.calc_diag_e(X)
fit = np.einsum(
"ij,ij->i", np.dot(K_trans, K_inv), K_trans)
e_var -= fit
elif self.fitted == [None, 'energy']: # Predict using force data
e_var = self.kernel_.calc_diag_e(X)
fit = np.einsum(
"ij,ij->i", np.dot(K_energy, K_inv), K_energy)
e_var -= fit
else: # Predict using force data
e_var = self.kernel_.calc_diag_e(X)
fit = np.einsum("ij,ij->i", np.dot(K, K_inv), K)
e_var -= fit
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
e_var_negative = e_var < 0
if np.any(e_var_negative):
logger.warning("Predicted variances smaller than 0. "
"Setting those variances to 0.")
e_var[e_var_negative] = 0.0
return e_mean, np.sqrt(e_var)
else:
return e_mean
# TODO: debug for energy and energy-force fitting
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Args:
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns:
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
# kernel = self.kernel_.clone_with_theta(theta)
kernel = self.kernel
kernel.theta = theta
if eval_gradient:
K, K_gradient = kernel.calc_gram(self.X_train_, eval_gradient=True)
else:
K = kernel.calc_gram(self.X_train_)
K[np.diag_indices_from(K)] += self.noise
try:
L = cholesky(K, lower=True) # Line 2
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) \
if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
alpha = cho_solve((L, True), y_train) # Line 3
# Compute log-likelihood (compare line 7)
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions
if eval_gradient: # compare Equation 5.9 from GPML
tmp = np.einsum("ik,jk->ijk", alpha, alpha) # k: output-dimension
tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis]
# Compute "0.5 * trace(tmp.dot(K_gradient))" without
# constructing the full matrix tmp.dot(K_gradient) since only
# its diagonal is required
log_likelihood_gradient_dims = \
0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient)
log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
def _constrained_optimization(self, obj_func, initial_theta,
bounds): # TODO: debug for energy and energy-force fitting
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
logger.warning("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
# TODO: debug for energy and energy-force fitting
def pseudo_log_likelihood(self):
"""Returns pseudo log-likelihood of the training data.
Args:
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns:
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
# kernel = self.kernel_.clone_with_theta(theta)
kernel = self.kernel
# kernel.theta = theta
K = kernel.calc_gram(self.X_train_)
K[np.diag_indices_from(K)] += self.noise
Km1 = np.linalg.inv(K)
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
alphas = np.dot(Km1, y_train)
pred_means = y_train - alphas / np.diag(Km1)
pred_variances = 1. / np.diag(Km1)
# Compute pseudo log-likelihood (compare Equation 5.10-5.12 of GPML)
log_probabilities = - (pred_means - y_train) ** 2 / (2 * pred_variances) - 0.5 * np.log(
pred_variances) - 0.5 * np.log(2 * np.pi)
pseudo_log_likelihood = np.sum(log_probabilities)
return pseudo_log_likelihood
    def save(self, filename):
        """Dump the current GP model for later use.

        The model state is written with ``np.save`` as a pickled object
        array; the entry order here must be mirrored exactly by ``load``.

        Args:
            filename (str): name of the file where to save the GP
        """
        output = [self.kernel_.kernel_name,
                  self.noise,
                  self.optimizer,
                  self.n_restarts_optimizer,
                  self.fitted,
                  self.alpha_,
                  self.K,
                  self.energy_alpha_,
                  self.energy_K,
                  self.X_train_,
                  self.X_glob_train_,
                  self.L_,
                  self.n_train]
        np.save(filename, output)
    def load(self, filename):
        """Load a saved GP model.

        Restores every attribute written by ``save`` (same order) and points
        ``kernel_`` back at ``kernel``. ``allow_pickle=True`` is required
        because the saved state is an object array.
        NOTE(review): only load files from trusted sources -- unpickling
        arbitrary data can execute code.

        Args:
            filename (str): name of the file where the GP is saved
        """
        self.kernel.kernel_name, \
            self.noise, \
            self.optimizer, \
            self.n_restarts_optimizer, \
            self.fitted, \
            self.alpha_, \
            self.K, \
            self.energy_alpha_, \
            self.energy_K, \
            self.X_train_, \
            self.X_glob_train_, \
            self.L_, \
            self.n_train = np.load(filename, allow_pickle=True)
        self.kernel_ = self.kernel
        print('Loaded GP from file')
class TwoBodySingleSpeciesGP(GaussianProcess):
    """Gaussian process with a two-body kernel for a single chemical species."""

    def __init__(self, theta, noise=1e-10, optimizer=None, n_restarts_optimizer=0):
        kernel = kernels.TwoBodySingleSpeciesKernel(theta=theta)
        super().__init__(
            kernel=kernel, noise=noise, optimizer=optimizer, n_restarts_optimizer=n_restarts_optimizer)

    def build_grid(self, dists, element1):
        # Tabulate the predicted pair energy on the given distances and wrap
        # it in a 1D spline for fast evaluation by a mapped potential.
        num = len(dists)
        confs = np.zeros((num, 1, 5))
        confs[:, 0, 0] = dists  # neighbour placed on the x axis
        confs[:, 0, 3], confs[:, 0, 4] = element1, element1  # single species
        grid_2b = self.predict_energy(confs)
        return interpolation.Spline1D(dists, grid_2b)
class ThreeBodySingleSpeciesGP(GaussianProcess):
    """Gaussian process with a three-body kernel for a single chemical species."""

    def __init__(self, theta, noise=1e-10, optimizer=None, n_restarts_optimizer=0):
        kernel = kernels.ThreeBodySingleSpeciesKernel(theta=theta)
        super().__init__(
            kernel=kernel, noise=noise, optimizer=optimizer, n_restarts_optimizer=n_restarts_optimizer)

    def build_grid(self, dists, element1):
        """Function that builds and predicts energies on a cube of values"""
        num = len(dists)
        inds, r_ij_x, r_ki_x, r_ki_y = self.generate_triplets(dists)
        confs = np.zeros((len(r_ij_x), 2, 5))
        confs[:, 0, 0] = r_ij_x  # Element on the x axis
        confs[:, 1, 0] = r_ki_x  # Reshape into confs shape: this is x2
        confs[:, 1, 1] = r_ki_y  # Reshape into confs shape: this is y2
        # Permutations of elements
        confs[:, :, 3] = element1  # Central element is always element 1
        confs[:, 0, 4] = element1  # Element on the x axis is always element 2
        # Element on the xy plane is always element 3
        confs[:, 1, 4] = element1
        grid_3b = np.zeros((num, num, num))
        grid_3b[inds] = self.predict_energy(confs).flatten()
        # Fill the remaining entries by symmetry: the triplet energy is
        # invariant under any permutation of the three distances.
        for ind_i in range(num):
            for ind_j in range(ind_i + 1):
                for ind_k in range(ind_j + 1):
                    grid_3b[ind_i, ind_k, ind_j] = grid_3b[ind_i, ind_j, ind_k]
                    grid_3b[ind_j, ind_i, ind_k] = grid_3b[ind_i, ind_j, ind_k]
                    grid_3b[ind_j, ind_k, ind_i] = grid_3b[ind_i, ind_j, ind_k]
                    grid_3b[ind_k, ind_i, ind_j] = grid_3b[ind_i, ind_j, ind_k]
                    grid_3b[ind_k, ind_j, ind_i] = grid_3b[ind_i, ind_j, ind_k]
        return interpolation.Spline3D(dists, dists, dists, grid_3b)

    @staticmethod
    def generate_triplets(dists):
        """Enumerate symmetry-inequivalent valid triangles on the distance grid.

        Returns the boolean index into the (num, num, num) cube plus the
        Cartesian placement of the two neighbours (one on the x axis, one in
        the xy plane) for each selected triplet.
        """
        d_ij, d_jk, d_ki = np.meshgrid(
            dists, dists, dists, indexing='ij', sparse=False, copy=True)
        # Valid triangles according to triangle inequality
        inds = np.logical_and(
            d_ij <= d_jk + d_ki, np.logical_and(d_jk <= d_ki + d_ij, d_ki <= d_ij + d_jk))
        # Utilizing permutation invariance
        inds = np.logical_and(np.logical_and(d_ij >= d_jk, d_jk >= d_ki), inds)
        # Element on the x axis
        r_ij_x = d_ij[inds]
        # Element on the xy plane
        r_ki_x = (d_ij[inds] ** 2 - d_jk[inds] ** 2 +
                  d_ki[inds] ** 2) / (2 * d_ij[inds])
        # using abs to avoid numerical error near to 0
        r_ki_y = np.sqrt(np.abs(d_ki[inds] ** 2 - r_ki_x ** 2))
        return inds, r_ij_x, r_ki_x, r_ki_y
| 32,047 | 40.89281 | 114 | py |
mff | mff-master/mff/test.py | import os
from os import listdir
from os.path import isfile, join
import logging
import numpy as np
from ase import Atoms
import mff
from mff import models, calculators, utility
from mff import configurations as cfg
def get_potential(confs):
    """Sum the model pair energies over a group of local configurations.

    Each configuration is an (n, 5) array: columns 0-2 hold relative
    neighbour positions, columns 3-4 the central/neighbour species, which
    scale each bond energy by sqrt(el1) * sqrt(el2).
    """
    total = 0
    for conf in confs:
        distances = np.sum(conf[:, :3] ** 2, axis=1) ** 0.5
        species_weight = conf[:, 3] ** 0.5 * conf[:, 4] ** 0.5
        total += np.sum(species_weight * pot_profile(distances))
    return total
def pot_profile(dist):
    """Model pair-energy profile: a shifted parabola damped by exp(-dist)."""
    shifted_parabola = (dist - 1) ** 2 - 0.5
    return shifted_parabola * np.exp(-dist)
def force_profile(dist):
    """Analytic derivative of ``pot_profile`` via the product rule."""
    poly = (dist - 1) ** 2 - 0.5
    poly_prime = 2 * (dist - 1)
    damp = np.exp(-dist)
    damp_prime = -np.exp(-dist)
    # d/dr [poly(r) * damp(r)]
    return poly * damp_prime + damp * poly_prime
def get_potentials(many_confs):
    """Vector of total energies, one per group of local configurations."""
    return np.array([get_potential(confs) for confs in many_confs])
def get_force(conf):
    """Total force on the central atom of one (n, 5) local configuration.

    Sums, over all neighbours, the bond unit vector scaled by the
    species-weighted radial force profile.
    """
    dist = np.sum(conf[:, :3] ** 2, axis=1) ** 0.5
    unit_vectors = conf[:, :3] / dist[:, None]
    species_weight = conf[:, 3][:, None] ** 0.5 * conf[:, 4][:, None] ** 0.5
    contributions = unit_vectors * (species_weight * force_profile(dist[:, None]))
    return np.sum(contributions, axis=0)
def get_forces(many_confs):
    """Stack the central-atom force of each configuration into an (n, 3) array."""
    forces = np.zeros((len(many_confs), 3))
    for idx, conf in enumerate(many_confs):
        forces[idx] = get_force(conf)
    return forces
def generate_confs(n, elements, r_cut):
    """Generate *n* random 3-atom geometries inside a sphere of radius *r_cut*.

    Each triangle is expressed from the viewpoint of each of its three
    vertices, giving three local configurations per geometry.

    Returns
    -------
    glob_confs: (n, 3, 2, 5) array — the three views of each triangle.
    loc_confs: (3n, 2, 5) array — the same views, flattened.
    """
    # Sample 2n points uniformly inside the sphere (inverse-CDF radius).
    phi = np.random.uniform(0, 2 * np.pi, size=n * 2)
    costheta = np.random.uniform(-1, 1, size=n * 2)
    u = np.random.uniform(0, 1, size=n * 2)
    theta = np.arccos(costheta)
    r = r_cut * u ** (1 / 3)
    x = r * np.sin(theta) * np.cos(phi)
    y = r * np.sin(theta) * np.sin(phi)
    z = r * np.cos(theta)
    xyz = np.vstack((x, y, z)).T

    glob_confs = []
    loc_confs = []
    for i in range(n):
        p1, p2 = xyz[2 * i], xyz[2 * i + 1]
        conf1 = np.zeros((2, 5))
        conf2 = np.zeros((2, 5))
        conf3 = np.zeros((2, 5))
        # The same triangle seen from the origin, from p1 and from p2.
        conf1[0, :3], conf1[1, :3] = p1, p2
        conf2[0, :3], conf2[1, :3] = -p1, p2 - p1
        conf3[0, :3], conf3[1, :3] = p1 - p2, -p2
        if len(elements) == 1:
            for conf in (conf1, conf2, conf3):
                conf[:, 3] = elements
                conf[:, 4] = elements
        elif len(elements) >= 2:
            # One random species per vertex, permuted consistently across views.
            a, b, c = np.random.choice(elements), np.random.choice(
                elements), np.random.choice(elements)
            conf1[:, 3], conf1[0, 4], conf1[1, 4] = a, b, c
            conf2[:, 3], conf2[0, 4], conf2[1, 4] = b, a, c
            conf3[:, 3], conf3[0, 4], conf3[1, 4] = c, b, a
        glob_confs.append(np.array([conf1, conf2, conf3]))
        loc_confs.extend((conf1, conf2, conf3))

    return (np.array(glob_confs), np.array(loc_confs))
def fit_test(m, loc_confs, forces, glob_confs, energies, ntr, ntest, elements, fit_type, r_cut, ncores = 1):
    """Fit model *m* on the first ``ntr`` samples, map it onto the matching
    tabulated ASE calculator, compare GP vs mapped predictions on the last
    ``ntest`` samples, and save the model under ``MODELS/``.

    Parameters
    ----------
    m: an mff model instance; its class name selects the calculator below.
    loc_confs, forces: per-atom local configurations and target forces.
    glob_confs, energies: grouped configurations and target total energies.
    fit_type: 'force', 'energy' or 'force_and_energy'.
    r_cut: generation cutoff; calculators are built with ``2 * r_cut``.
    ncores: workers passed to fit/predict (grids are built with ncores=2).
    """
    if fit_type == 'force':
        m.fit(loc_confs[:ntr], forces[:ntr], ncores=ncores)
    elif fit_type == 'energy':
        m.fit_energy(glob_confs[:ntr], energies[:ntr], ncores=ncores)
    elif fit_type == 'force_and_energy':
        m.fit_force_and_energy(
            loc_confs[:ntr], forces[:ntr], glob_confs[:ntr], energies[:ntr], ncores=ncores)
    # Reference predictions straight from the (unmapped) GP.
    pred_forces = m.predict(loc_confs[-ntest:], ncores=ncores)
    pred_energies = m.predict_energy(glob_confs[-ntest:], ncores=ncores)
    # print("MAEF: %.4f eV/A " %(np.mean(np.sum(forces[-ntest:] - pred_forces, axis = 1)**2)**0.5))
    # print("MAEE: %.4f eV" %( np.mean(abs(energies[-ntest:] - pred_energies))))
    # Bare class name of the model, e.g. 'TwoBodySingleSpeciesModel'.
    mtype = str(type(m)).split('.')[-1].split("'")[0]
    # Build the tabulated grid(s) with 5 points per axis and wrap them in
    # the calculator matching the model flavour.
    if mtype == "TwoBodySingleSpeciesModel" or mtype == "ThreeBodySingleSpeciesModel" or mtype == "TwoBodyManySpeciesModel" or mtype == "ThreeBodyManySpeciesModel":
        m.build_grid(0.0, 5, ncores=2)
        if mtype == "TwoBodySingleSpeciesModel":
            calc = calculators.TwoBodySingleSpecies(r_cut*2, m.grid)
        elif mtype == "ThreeBodySingleSpeciesModel":
            calc = calculators.ThreeBodySingleSpecies(r_cut*2, m.grid)
        elif mtype == "TwoBodyManySpeciesModel":
            calc = calculators.TwoBodyManySpecies(r_cut*2, elements, m.grid)
        elif mtype == "ThreeBodyManySpeciesModel":
            calc = calculators.ThreeBodyManySpecies(r_cut*2, elements, m.grid)
    elif mtype == "CombinedSingleSpeciesModel" or mtype == "CombinedManySpeciesModel":
        m.build_grid(0.0, 5, 5, ncores=2)
        if mtype == "CombinedSingleSpeciesModel":
            calc = calculators.CombinedSingleSpecies(
                r_cut*2, m.grid_2b, m.grid_3b)
        elif mtype == "CombinedManySpeciesModel":
            calc = calculators.CombinedManySpecies(
                r_cut*2, elements, m.grid_2b, m.grid_3b)
    elif mtype == "EamSingleSpeciesModel" or mtype == "EamManySpeciesModel":
        m.build_grid(5, ncores=2)
        if mtype == "EamSingleSpeciesModel":
            calc = calculators.EamSingleSpecies(
                r_cut*2, m.grid, m.gp.kernel.theta[2], m.gp.kernel.theta[3])
        elif mtype == "EamManySpeciesModel":
            calc = calculators.EamManySpecies(
                r_cut*2, elements, m.grid, m.gp.kernel.theta[2], m.gp.kernel.theta[3])
    elif mtype == "TwoThreeEamSingleSpeciesModel" or mtype == "TwoThreeEamManySpeciesModel":
        m.build_grid(0, 5, 5, 5, ncores=2)
        if mtype == "TwoThreeEamSingleSpeciesModel":
            calc = calculators.TwoThreeEamSingleSpecies(
                r_cut*2, m.grid_2b, m.grid_3b, m.grid_eam, m.gp_eam.kernel.theta[2], m.gp_eam.kernel.theta[3])
        elif mtype == "TwoThreeEamManySpeciesModel":
            calc = calculators.TwoThreeEamManySpecies(
                r_cut*2, elements, m.grid_2b, m.grid_3b, m.grid_eam, m.gp_eam.kernel.theta[2], m.gp_eam.kernel.theta[3])
    map_forces = np.zeros((len(pred_forces), 3))
    map_energies = np.zeros_like(pred_energies)
    # Energy check: rebuild each triangle as a 3-atom ASE Atoms object
    # (central atom at the origin plus the two stored neighbour positions,
    # shifted by +20 into a 100 A box) and evaluate the mapped calculator.
    for i in np.arange(ntest):
        coords = np.vstack(([0, 0, 0], glob_confs[-ntest:][i][0, 0:3, 0:3]))
        atoms = Atoms(positions=coords + 20)
        atoms.set_atomic_numbers([glob_confs[-ntest:][i][0, 0, 3],
                                  glob_confs[-ntest:][i][0, 0, 4], glob_confs[-ntest:][i][0, 1, 4]])
        atoms.set_cell([100, 100, 100])
        atoms.set_calculator(calc)
        map_energies[i] = atoms.get_potential_energy()
    # Force check: same construction from the local configurations; only
    # the force on the central atom (row 0) is compared.
    for i in np.arange(ntest):
        coords = np.vstack(([0, 0, 0], loc_confs[-ntest:][i][0:3, 0:3]))
        atoms = Atoms(positions=coords + 20)
        atoms.set_atomic_numbers(
            [loc_confs[-ntest:][i][0, 3], loc_confs[-ntest:][i][0, 4], loc_confs[-ntest:][i][1, 4]])
        atoms.set_cell([100, 100, 100])
        atoms.set_calculator(calc)
        map_forces[i] = atoms.get_forces()[0, :]
    # GP-vs-mapped discrepancies; currently only consumed by the debug
    # print below (kept commented out).
    error_f = np.sum((pred_forces - map_forces)**2, axis=1)**0.5
    error_e = pred_energies - map_energies
    # print("Force Error: %.4f eV/A Energy Error: %.4f eV " %(np.mean(error_f), np.mean(error_e)))
    m.save('MODELS/')
class Tests():
    """Smoke tests for every mff model flavour.

    A shared random dataset is generated once in ``__init__``; each
    ``test_*`` method then builds the corresponding model and runs
    :func:`fit_test` for the three training modes. Failures are reported
    (not raised) so one broken model does not stop the remaining tests.
    """

    def __init__(self, elements, noise, sigma, r_cut, theta, ntr_f,
                 ntr_e, ntest, alpha, r0, ncores):
        self.elements = elements
        self.noise = noise
        self.sigma = sigma
        self.r_cut = r_cut
        self.theta = theta
        self.ntr_f = ntr_f
        self.ntr_e = ntr_e
        self.ntest = ntest
        self.alpha = alpha
        self.r0 = r0
        self.ncores = ncores
        # One dataset large enough for training (forces + energies) and testing.
        self.glob_confs, self.loc_confs = generate_confs(self.ntr_f+self.ntr_e+self.ntest,
                                                         self.elements, self.r_cut)
        self.forces = get_forces(self.loc_confs)
        self.energies = get_potentials(self.glob_confs)

    def _run_fit_tests(self, make_model, label):
        """Fit a fresh model (from *make_model*) for each training mode;
        print failures tagged with *label* together with the exception."""
        for fit_type in ("force", "energy", "force_and_energy"):
            m = make_model()
            try:
                fit_test(m, self.loc_confs, self.forces, self.glob_confs,
                         self.energies, self.ntr_f, self.ntest, self.elements,
                         fit_type, self.r_cut, self.ncores)
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and hid the actual error.
            except Exception as err:
                print("ERROR in %s %s fit: %s" % (label, fit_type, err))

    def test_2_body_single(self):
        self._run_fit_tests(
            lambda: models.TwoBodySingleSpeciesModel(
                element=self.elements, noise=self.noise, sigma=self.sigma,
                r_cut=self.r_cut*2, theta=self.theta, rep_sig=0),
            "2-body Single")

    def test_3_body_single(self):
        self._run_fit_tests(
            lambda: models.ThreeBodySingleSpeciesModel(
                element=self.elements, noise=self.noise, sigma=self.sigma,
                r_cut=self.r_cut*2, theta=self.theta),
            "3-body Single")

    def test_combined_body_single(self):
        self._run_fit_tests(
            lambda: models.CombinedSingleSpeciesModel(
                element=self.elements, noise=self.noise, sigma_2b=self.sigma,
                sigma_3b=self.sigma, r_cut=self.r_cut*2, theta_2b=self.theta,
                theta_3b=self.theta, rep_sig=0),
            "combined Single")

    def test_eam_single(self):
        self._run_fit_tests(
            lambda: models.EamSingleSpeciesModel(
                element=self.elements, noise=self.noise, sigma=self.sigma,
                r_cut=self.r_cut*2, alpha=self.alpha, r0=self.r0),
            "Eam Single")

    def test_23eam_single(self):
        self._run_fit_tests(
            lambda: models.TwoThreeEamSingleSpeciesModel(
                self.elements, self.r_cut*2, self.sigma, self.sigma,
                self.sigma, self.theta, self.theta, self.alpha, self.r0,
                self.noise, 0),
            "23 Eam Single")

    def test_2_body_many(self):
        self._run_fit_tests(
            lambda: models.TwoBodyManySpeciesModel(
                elements=self.elements, noise=self.noise, sigma=self.sigma,
                r_cut=self.r_cut*2, theta=self.theta, rep_sig=0),
            "2-body Many")

    def test_3_body_many(self):
        self._run_fit_tests(
            lambda: models.ThreeBodyManySpeciesModel(
                elements=self.elements, noise=self.noise, sigma=self.sigma,
                r_cut=self.r_cut*2, theta=self.theta),
            "3-body Many")

    def test_combined_body_many(self):
        self._run_fit_tests(
            lambda: models.CombinedManySpeciesModel(
                elements=self.elements, noise=self.noise, sigma_2b=self.sigma,
                sigma_3b=self.sigma, r_cut=self.r_cut*2, theta_2b=self.theta,
                theta_3b=self.theta, rep_sig=0),
            "combined Many")

    def test_eam_many(self):
        self._run_fit_tests(
            lambda: models.EamManySpeciesModel(
                elements=self.elements, noise=self.noise, sigma=self.sigma,
                r_cut=self.r_cut*2, alpha=self.alpha, r0=self.r0),
            "eam Many")

    def test_23eam_many(self):
        self._run_fit_tests(
            lambda: models.TwoThreeEamManySpeciesModel(
                self.elements, self.r_cut*2, self.sigma, self.sigma,
                self.sigma, self.theta, self.theta, self.alpha, self.r0,
                self.noise, 0),
            "23 eam Many")

    def test_load(self):
        """Try to reload every .json model previously saved under MODELS/."""
        onlyfiles = [f for f in listdir("MODELS") if isfile(join("MODELS", f))]
        for file in onlyfiles:
            if file.endswith(".json"):
                try:
                    utility.load_model("MODELS/" + file)
                # BUG FIX: was a bare `except:`; also report the cause.
                except Exception as err:
                    print("ERROR: %s not loaded (%s)" % (file, err))
if __name__ == '__main__':
    # Smoke-test every model flavour against a small random dataset.
    # GP Parameters
    sigma = 1.0  # Angstrom - typical value 0.2-0.6
    noise = .001  # Number - Typical values 0.01 - 0.0001
    theta = 0.1  # Cutoff decay lengthscale in Angstrom - Typical value r_cut/5 - r_cut/10
    r_cut = 3.0
    # Dataset sizes: force-training, energy-training and test samples.
    ntr_f = 10
    ntr_e = 10
    ntest = 10
    elements = [1]  # single species; the *_many tests still run on this list
    ncores = 2
    # EAM descriptor parameters.
    alpha = 1
    r0 = 10
    test = Tests(elements, noise, sigma, r_cut, theta, ntr_f,
                 ntr_e, ntest, alpha, r0, ncores)
    test.test_2_body_single()
    test.test_3_body_single()
    test.test_combined_body_single()
    test.test_eam_single()
    test.test_23eam_single()
    test.test_2_body_many()
    test.test_3_body_many()
    test.test_combined_body_many()
    test.test_eam_many()
    test.test_23eam_many()
    # Finally check that every model saved by fit_test can be reloaded.
    test.test_load()
mff | mff-master/mff/utility.py | import json
import os
import sys
import time
from itertools import combinations_with_replacement
from pathlib import Path
import numpy as np
from scipy.spatial.distance import cdist
from asap3.analysis import FullCNA
from ase.io import read
from mff import configurations, models
from mff.gp import GaussianProcess
# Module-level flag: True means the dataset carried no real energies and a
# zero placeholder is used downstream (see the fallback in train_right_gp).
# BUG FIX: the previous `global energydefault` statement was a no-op at
# module scope; a plain assignment is all that is needed here.
energydefault = False
def find_repulstion_sigma(confs):
    """Choose the repulsion parameter ``rep_sig`` so that the (sig/r)^12
    energy of the shortest bond in the training set equals 0.02 eV.

    Accepts both flat lists of (n, 5) configurations and nested lists of
    configurations (global confs).
    """
    dists = []
    for conf in confs:
        # A 2-D entry is a single configuration; otherwise iterate its members.
        singles = (conf,) if len(conf.shape) == 2 else conf
        for single in singles:
            dists.extend(np.sum(single[:, :3] ** 2, axis=1) ** 0.5)
    shortest = min(dists)
    # Solve (rep_sig / shortest)^12 = 0.02 for rep_sig.
    return shortest * 0.02 ** (1 / 12)
def get_repulsive_forces(confs, sig):
    """LJ-style (sig/r)^12 repulsive force on the central atom of each
    configuration in *confs*, returned as an (n, 3) array."""
    forces = np.zeros((len(confs), 3))
    for idx, conf in enumerate(confs):
        d = np.sum(conf[:, :3] ** 2, axis=1) ** 0.5
        unit = conf[:, :3] / d[:, None]
        # |F| = -d/dr (sig/r)^12 = 12 (sig/r)^12 / r, directed along the bond.
        magnitude = 12 * (sig / d) ** 12 / d
        forces[idx] = np.sum(magnitude[:, None] * unit, axis=0)
    return forces
def get_repulsive_energies(confs, sig, mapping=False):
    """LJ-style (sig/r)^12 repulsive energy of each entry of *confs*.

    With ``mapping=False`` each entry is a group of local configurations
    whose contributions are summed; with ``mapping=True`` each entry is a
    single local configuration.
    """
    def _lj_sum(conf):
        d = np.sum(conf[:, :3] ** 2, axis=1) ** 0.5
        return np.sum((sig / d) ** 12)

    energies = np.zeros(len(confs))
    if mapping:
        for idx, conf in enumerate(confs):
            energies[idx] = _lj_sum(conf)
    else:
        for idx, group in enumerate(confs):
            for conf in group:
                energies[idx] += _lj_sum(conf)
    return energies
def open_data(folder, cutoff):
    """Load previously extracted configuration/force/energy data from *folder*.

    Returns (elements, confs, forces, energies, global_confs) as produced
    by ``configurations.load_and_unpack``.
    """
    unpacked = configurations.load_and_unpack(folder, cutoff)
    elements, confs, forces, energies, global_confs = unpacked
    print("Opened data from %s" % (folder))
    return elements, confs, forces, energies, global_confs
def extract_data(folder, cutoff, filename=None):
    """Extract training data from the trajectory *filename* inside *folder*
    and cache it via ``configurations.generate_and_save``.

    Returns (elements, confs, forces, energies, global_confs).
    """
    trajectory_path = Path(folder + '/' + filename)
    data = configurations.generate_and_save(
        trajectory_path, cutoff, forces_label='forces', energy_label='energy')
    return configurations.unpack(data)
def get_data(folder, cutoff, filename=None):
    """Retrieve training data for *folder*: first try the cached arrays
    (:func:`open_data`), then fall back to parsing trajectory *filename*
    (:func:`extract_data`); exit the program if both fail.

    Returns (elements, confs, forces, energies, global_confs).
    """
    try:
        elements, confs, forces, energies, global_confs = open_data(
            folder, cutoff)
        # NOTE(review): open_data already prints "Opened data from ...",
        # so this path reports the same event twice.
        print("Loaded data from %s" % (folder))
    except FileNotFoundError:
        try:
            elements, confs, forces, energies, global_confs = extract_data(
                folder, cutoff, filename)
            print("Extracted data from %s" % (filename))
        except FileNotFoundError:
            # Neither cached arrays nor a trajectory file: abort the run.
            sys.exit(
                "I did not found either the conf, force and energy files, or the movie.xyz file. Quitting now.")
    return elements, confs, forces, energies, global_confs
def get_manyfolders(folders, cutoff, train_filenames):
    """Load and concatenate training data from several folders.

    Parameters
    ----------
    folders: list of folder paths, zipped pairwise with *train_filenames*.
    cutoff: cutoff radius forwarded to :func:`get_data`.
    train_filenames: trajectory filename for each folder.

    Returns
    -------
    confs: list of local configurations from all folders.
    forces: array of the concatenated forces.
    energies: array of the concatenated energies.
    elements: sorted unique atomic numbers seen in any folder.
    """
    confs, forces, energies, elements = [], [], [], []
    for folder, name in zip(folders, train_filenames):
        # BUG FIX: get_data returns FIVE values (elements, confs, forces,
        # energies, global_confs). The old 4-name unpacking raised
        # ValueError and, positionally, would also have routed each
        # quantity into the wrong list.
        el, cf, fo, en, _global_confs = get_data(folder, cutoff, name)
        confs.extend(cf)
        forces.extend(fo)
        energies.extend(en)
        elements.extend(el)
    forces = np.asarray(forces)
    energies = np.asarray(energies)
    elements = list(np.unique(np.ravel(elements)))
    return confs, forces, energies, elements
def grid_2b_onesp(X, nbins, cutoff):
    """Greedy database selection for the 2-body, single-species kernel.

    Configurations are visited in random order; one is kept whenever its
    histogram of interatomic distances adds counts to a bin that the
    already-selected set does not yet cover at that level.

    Returns the list of selected indices into *X*.
    """
    coverage = np.zeros(nbins)
    selected = []
    visit_order = np.random.choice(
        np.arange(len(X)), size=len(X), replace=False)
    for j in visit_order:
        distances = np.sqrt(np.einsum('id -> i', np.square(X[j][:, :3])))
        # Distances beyond the cutoff become NaN and fall outside every bin.
        distances[np.where(distances > cutoff)] = None
        counts, _edges = np.histogram(distances, nbins, (0.0, cutoff))
        if np.any(coverage < counts):
            selected.append(j)
            coverage += counts
    return selected
def grid_2b_manysp(X, nbins, cutoff, elements):
    """Greedy database selection for the 2-body kernel with several species.

    Same idea as :func:`grid_2b_onesp`, but a separate distance histogram
    is kept for every unordered pair of species so that rare pairs are not
    drowned out by common ones.

    Parameters
    ----------
    X: sequence of (n, 5) local configurations (cols 3/4 = species pair).
    nbins: histogram bins per species pair.
    cutoff: maximum distance considered.
    elements: atomic numbers present in the data.

    Returns
    -------
    List of selected indices into X (an index may appear once per pair
    whose coverage it improved, as in the original implementation).
    """
    element_pairs = list(combinations_with_replacement(elements, 2))
    stored_histogram = np.zeros((nbins, len(element_pairs)))
    index = []
    ind = np.arange(len(X))
    randomarange = np.random.choice(ind, size=len(X), replace=False)
    for j in randomarange:  # for every snapshot of the trajectory file
        distances = np.sqrt(np.einsum('id -> i', np.square(X[j][:, :3])))
        # Out-of-cutoff distances become NaN and fall outside every bin.
        distances[np.where(distances > cutoff)] = None
        for k, (el_a, el_b) in enumerate(element_pairs):
            # BUG FIX: permutation invariance (unordered species match) was
            # applied only when k == 1, which is correct only with exactly
            # two species. Apply it to every heteronuclear pair instead.
            if el_a != el_b:
                this_element_pair = np.union1d(
                    np.intersect1d(np.where(X[j][:, 3] == el_a),
                                   np.where(X[j][:, 4] == el_b)),
                    np.intersect1d(np.where(X[j][:, 3] == el_b),
                                   np.where(X[j][:, 4] == el_a)))
            else:
                this_element_pair = np.intersect1d(
                    np.where(X[j][:, 3] == el_a), np.where(X[j][:, 4] == el_b))
            distances_this = distances[this_element_pair]
            this_snapshot_histogram = np.histogram(
                distances_this, nbins, (0.0, cutoff))
            if (stored_histogram[:, k] - this_snapshot_histogram[0] < 0).any():
                index.append(j)
                stored_histogram[:, k] += this_snapshot_histogram[0]
    return index
def grid_3b_onesp(X, nbins, cutoff):
    """Greedy database selection for the 3-body, single-species kernel.

    Configurations are visited in random order and kept whenever the 3D
    histogram of their triplet distances (central atom + two neighbours)
    adds counts to a bin not yet covered by the selected set.

    Returns the list of selected indices into *X*.
    """
    stored_histogram = np.zeros((nbins, nbins, nbins))
    index = []
    ind = np.arange(len(X))
    randomarange = np.random.choice(ind, size=len(X), replace=False)
    for j in randomarange:  # for every snapshot of the trajectory file
        # Prepend the central atom (origin) and compute all pair distances.
        atoms = np.vstack(([0., 0., 0.], X[j][:, :3]))
        distances = cdist(atoms, atoms)
        # Out-of-cutoff and self distances become NaN so every `> 0`
        # comparison below silently drops them (NaN > 0 is False).
        distances[np.where(distances > cutoff)] = None
        distances[np.where(distances == 0)] = None
        triplets = []
        # Row/column 0 is the central atom; k, l run over its neighbours.
        for k in np.argwhere(distances[:, 0] > 0):
            for l in np.argwhere(distances[0, :] > 0):
                if distances[k, l] > 0:
                    # Store the three cyclic rotations of the distance
                    # triple so the histogram is invariant under
                    # relabelling of the two neighbours.
                    triplets.append(
                        [distances[0, k], distances[0, l], distances[k, l]])
                    triplets.append(
                        [distances[0, l], distances[k, l], distances[0, k]])
                    triplets.append(
                        [distances[k, l], distances[0, k], distances[0, l]])
        triplets = np.reshape(triplets, (len(triplets), 3))
        this_snapshot_histogram = np.histogramdd(triplets, bins=(nbins, nbins, nbins),
                                                 range=((0.0, cutoff), (0.0, cutoff), (0.0, cutoff)))
        if (stored_histogram - this_snapshot_histogram[0] < 0).any():
            index.append(j)
            stored_histogram += this_snapshot_histogram[0]
    return index
def grid_3b_manysp(X, nbins, cutoff, elements):
    """Greedy database selection for the 3-body kernel with several species.

    As :func:`grid_3b_onesp`, but one 3D histogram is kept per unordered
    species triplet so rare compositions are represented.

    Returns the list of selected indices into *X*.
    """
    possible_triplets = list(combinations_with_replacement(elements, 3))
    stored_histogram = np.zeros((nbins, nbins, nbins, len(possible_triplets)))
    index = []
    ind = np.arange(len(X))
    randomarange = np.random.choice(ind, size=len(X), replace=False)
    for j in randomarange:  # for every snapshot of the trajectory file
        # Prepend the central atom (origin); NaN masks cutoff/self pairs.
        atoms = np.vstack(([0., 0., 0.], X[j][:, :3]))
        distances = cdist(atoms, atoms)
        distances[np.where(distances > cutoff)] = None
        distances[np.where(distances == 0)] = None
        triplets = []
        elements_triplets = []
        for k in np.argwhere(distances[:, 0] > 0):
            for l in np.argwhere(distances[0, :] > 0):
                if distances[k, l] > 0:
                    # Sorted species of (central, neighbour k, neighbour l).
                    elements_triplets.append(
                        np.sort([X[j][0, 3], X[j][k-1, 4], X[j][l-1, 4]]))
                    # NOTE(review): ONE species entry is stored per (k, l)
                    # pair but THREE distance rotations, so the row-wise
                    # pairing with `elements_triplets` used below only
                    # lines up for the first rotation — confirm intended.
                    triplets.append(
                        [distances[0, k], distances[0, l], distances[k, l]])
                    triplets.append(
                        [distances[0, l], distances[k, l], distances[0, k]])
                    triplets.append(
                        [distances[k, l], distances[0, k], distances[0, l]])
        elements_triplets = np.reshape(
            elements_triplets, (len(elements_triplets), 3))
        triplets = np.reshape(triplets, (len(triplets), 3))
        # NOTE(review): this histogram is recomputed per species triplet in
        # the loop below before being read — this first assignment is a
        # dead store.
        this_snapshot_histogram = np.histogramdd(triplets, bins=(nbins, nbins, nbins),
                                                 range=((0.0, cutoff), (0.0, cutoff), (0.0, cutoff)))
        for k in np.arange(len(possible_triplets)):
            # Rows of `triplets` whose species signature matches class k.
            valid_triplets = triplets[np.where(
                elements_triplets == possible_triplets[k]), :][0]
            this_snapshot_histogram = np.histogramdd(valid_triplets, bins=(nbins, nbins, nbins),
                                                     range=((0.0, cutoff), (0.0, cutoff), (0.0, cutoff)))
            if (stored_histogram[:, :, :, k] - this_snapshot_histogram[0] < 0).any():
                index.append(j)
                stored_histogram[:, :, :, k] += this_snapshot_histogram[0]
    return index
def sample_oneset(c, f, gc, en, el, method, ntr, ntest, cutoff, nbins=None, f_e_ratio=100, traj=None, cna_cut=None):
    """Split a single database into training and test sets.

    Forces: a random test set of size *ntest* is put aside, then the
    training set is drawn with *method* (see :func:`get_training_set`).
    Energies are always sampled at random, roughly one per *f_e_ratio*
    force samples.

    Returns (X, Y, X_e, Y_e, x, y, x_e, y_e): training confs/forces and
    global-confs/energies, then the test counterparts.
    """
    # For the forces, isolate a test set at random and then apply database selection on the remaining data
    ind = np.arange(len(c))
    ind_test = np.random.choice(ind, size=ntest, replace=False)
    ind_train = np.array(list(set(ind) - set(ind_test)))
    # For the energy we always use random sampling
    ntr_e = ntr//f_e_ratio + 1
    ntest_e = ntest//f_e_ratio+1
    ind_e = np.random.choice(np.arange(len(gc)), ntr_e+ntest_e, replace=False)
    # NOTE(review): these slices look swapped — the "train" slice has size
    # ntest_e and the "test" slice has size ntr_e. Confirm intended sizes.
    ind_train_e = ind_e[ntr_e:]
    ind_test_e = ind_e[:ntr_e]
    if ((en == None).any()):
        en = np.zeros(len(c))
        # NOTE(review): without a `global` declaration this only binds a
        # local name; the module-level `energydefault` flag is NOT updated.
        energydefault = True
    X, Y, X_e, Y_e = c[ind_train], f[ind_train], gc[ind_train_e], en[ind_train_e]
    x, y, x_e, y_e = c[ind_test], f[ind_test], gc[ind_test_e], en[ind_test_e]
    # NOTE(review): X, Y above are immediately overwritten here, and
    # get_training_set samples from the FULL dataset `c`, so training
    # points may overlap the test set drawn above — confirm intended.
    X, Y = get_training_set(c, f, el, ntr, method,
                            cutoff, nbins, traj, cna_cut)
    return X, Y, X_e, Y_e, x, y, x_e, y_e
def sample_twosets(c1, f1, gc1, en1, el1, c2, f2, gc2, en2, el2, method, ntr, ntest, cutoff, nbins=None, f_e_ratio=100, traj=None, cna_cut=None):
    """Draw the training set from database 1 and the test set from database 2.

    Training forces are selected from (c1, f1) with *method*; energies and
    all test quantities are drawn at random (one energy per *f_e_ratio*
    force samples).

    Returns (X, Y, X_e, Y_e, x, y, x_e, y_e) as in :func:`sample_oneset`.
    """
    ind_test = np.random.choice(np.arange(len(c2)), size=ntest, replace=False)
    ind_test_e = np.random.choice(
        np.arange(len(gc2)), size=ntest//f_e_ratio+1, replace=False)
    ind_train_e = np.random.choice(
        np.arange(len(gc1)), size=ntr//f_e_ratio + 1, replace=False)
    # Missing or misaligned energies are replaced by zero placeholders.
    # NOTE(review): the zero arrays are sized by len(c1)/len(c2) but are
    # indexed with global-conf indices below — confirm lengths match.
    if ((en1 == None).any()) or len(c1) != len(en1):
        en1 = np.zeros(len(c1))
    if ((en2 == None).any()) or len(c2) != len(en2):
        en2 = np.zeros(len(c2))
    X_e, Y_e = gc1[ind_train_e], en1[ind_train_e]
    x, y, x_e, y_e = c2[ind_test], f2[ind_test], gc2[ind_test_e], en2[ind_test_e]
    X, Y = get_training_set(c1, f1, el1, ntr, method,
                            cutoff, nbins, traj, cna_cut)
    return X, Y, X_e, Y_e, x, y, x_e, y_e
def get_right_grid(c, el, nbins, cutoff, method):
    """Dispatch to the grid-sampling routine matching *method* ('grid2' or
    'grid3') and the number of distinct species in *el*.

    Returns the list of selected indices, or 0 after printing an error
    when *el* contains no species.
    """
    n_species = len(set(el))
    if method == "grid2":
        if n_species < 1:
            print("Error: Number of elements less than 1.")
            return 0
        ind = (grid_2b_onesp(c, nbins, cutoff) if n_species == 1
               else grid_2b_manysp(c, nbins, cutoff, el))
    elif method == "grid3":
        if n_species < 1:
            print("Error: Number of elements less than 1.")
            return 0
        ind = (grid_3b_onesp(c, nbins, cutoff) if n_species == 1
               else grid_3b_manysp(c, nbins, cutoff, el))
    return ind
def get_right_nbins(c, el, cutoff, method, ntr):
    """Estimate the bin count for which grid sampling returns roughly *ntr*
    points, by a zero-intercept linear fit through two probe runs.

    For 'grid2' the selected count is assumed proportional to nbins; for
    'grid3' to nbins**3 (the histogram is a cube).
    """
    if method == "grid2":
        probes = (1000.0, 3000.0)
        sizes = [len(get_right_grid(c, el, int(p), cutoff, method))
                 for p in probes]
        x = np.array(probes)[:, np.newaxis]
        slope, _, _, _ = np.linalg.lstsq(x, np.array(sizes), rcond=None)
        nbins = int(ntr / slope)
    elif method == "grid3":
        probes = (10, 20)
        sizes = [len(get_right_grid(c, el, p, cutoff, method))
                 for p in probes]
        x = np.array([p ** 3 for p in probes])[:, np.newaxis]
        slope, _, _, _ = np.linalg.lstsq(x, np.array(sizes), rcond=None)
        nbins = int((ntr / slope) ** (1 / 3.0))
    return nbins
def transform_cna(cna, meaningful_cnas):
    """Count occurrences of each listed CNA signature within one atom's CNA.

    Parameters
    ----------
    cna: dict mapping CNA signature tuples to counts for a single atom.
    meaningful_cnas: ordered collection of signature tuples defining the
        layout of the output vector.

    Returns
    -------
    (len(meaningful_cnas),) float array; absent signatures count as 0.
    """
    result = np.zeros(len(meaningful_cnas))
    for i, key in enumerate(meaningful_cnas):
        # dict.get with a default replaces the old try/except KeyError.
        result[i] = cna.get(key, 0)
    return result
def get_atomic_cnas(traj, meaningful_cnas, r_cut):
    """Per-atom CNA signature counts for every snapshot of *traj*.

    Returns an array of shape (len(traj) * natoms, len(meaningful_cnas));
    row ``j * natoms + i`` holds the counts for atom i of snapshot j, laid
    out in the iteration order of *meaningful_cnas*.

    NOTE: assumes every snapshot has len(traj[0]) atoms.
    """
    transformed_cna = np.zeros((len(traj)*len(traj[0]), len(meaningful_cnas)))
    for j, atoms in enumerate(traj):
        cna = FullCNA(atoms, r_cut)
        # NOTE(review): the cell is replaced AFTER FullCNA is constructed —
        # confirm FullCNA reads the cell lazily, otherwise this has no effect.
        atoms.set_cell([[100, 0, 0], [0, 100, 0], [0, 0, 100]])
        snapshot_cna = cna.get_normal_cna()
        for i, atomic_cna in enumerate(snapshot_cna):
            transformed_cna[j*len(traj[0]) +
                            i] = transform_cna(atomic_cna, meaningful_cnas)
    return transformed_cna
def get_all_cnas(traj, cna_cut):
    """Tally every CNA signature appearing anywhere in *traj*.

    Returns a dict mapping CNA signature tuples to their total occurrence
    count across all atoms and snapshots, with keys inserted in ascending
    order of count.
    """
    all_cnas = {}
    for j, atoms in enumerate(traj):
        cna = FullCNA(atoms, cna_cut)
        # NOTE(review): cell replaced after FullCNA construction — see the
        # matching note in get_atomic_cnas.
        atoms.set_cell([[100, 0, 0], [0, 100, 0], [0, 0, 100]])
        snapshot_cna = cna.get_normal_cna()
        for i, atomic_cna in enumerate(snapshot_cna):
            for key in atomic_cna:
                # Accumulate counts across atoms and snapshots.
                try:
                    all_cnas[key] += atomic_cna[key]
                except KeyError:
                    all_cnas[key] = atomic_cna[key]
    # Rebuild the dict so iteration order follows ascending count.
    sorted_cnas = sorted(all_cnas.items(), key=lambda kv: kv[1])
    sorted_cnas_dict = {}
    for t in sorted_cnas:
        sorted_cnas_dict[t[0]] = t[1]
    return sorted_cnas_dict
def extract_cnas(traj, cna_cut):
    """Compute per-atom CNA signature counts plus the trajectory-wide CNA
    occurrence tally for *traj*, using cutoff *cna_cut*.

    Returns (atomic_cnas, all_cnas): a (natoms_total, n_signatures) count
    matrix and the dict of signature occurrence counts defining its columns.
    """
    occurrence_tally = get_all_cnas(traj, cna_cut)
    per_atom_counts = get_atomic_cnas(traj, occurrence_tally, cna_cut)
    return per_atom_counts, occurrence_tally
def sample_uniform_cna(ntr, transformed_cnas):
    """Pick *ntr* atom indices spread as evenly as possible over CNA classes.

    For each class (column of *transformed_cnas*) up to
    ``ntr // n_classes`` not-yet-sampled atoms exhibiting that class are
    drawn; any shortfall is topped up with random unsampled atoms.

    Parameters
    ----------
    ntr: total number of indices to return.
    transformed_cnas: (n_atoms, n_classes) array of per-atom CNA counts.

    Returns
    -------
    (ntr,) array of unique atom indices.
    """
    tr_ind = []
    available = np.ones(len(transformed_cnas), dtype='bool')
    ntr_sampled = 0
    n_classes = transformed_cnas.shape[1]
    for i in range(n_classes):
        # BUG FIX: the old code took np.where over the MASKED subarray
        # (col[available] > 0), whose results are positions inside that
        # subarray, and then used them as full-array indices — so after the
        # first class the wrong atoms were selected and unmasked. Combining
        # the class membership with the availability mask keeps indices in
        # the full array's frame.
        indx_this_class = np.where((transformed_cnas[:, i] > 0) & available)[0]
        ntr_this_class = min(len(indx_this_class), ntr // n_classes)
        sampled_inds = np.random.choice(
            indx_this_class, ntr_this_class, replace=False)
        available[sampled_inds] = False
        tr_ind.extend(sampled_inds)
        ntr_sampled += len(sampled_inds)
    if ntr_sampled < ntr:
        # Top up with random atoms that were not yet selected.
        additional_inds = np.random.choice(
            np.arange(len(transformed_cnas))[available],
            ntr - ntr_sampled, replace=False)
        tr_ind.extend(additional_inds)
    return np.array(tr_ind)
def sample_cna(traj, cna_cut, ntr):
    """Read trajectory file *traj*, compute its CNAs with cutoff *cna_cut*,
    and return *ntr* atom indices spread uniformly over the CNA classes
    found (see :func:`sample_uniform_cna`).
    """
    # Load every snapshot of the trajectory file.
    traj = read(traj, index=':')
    transformed_cnas, all_cnas = extract_cnas(traj, cna_cut)
    print("CNA classes are: \n", all_cnas)
    training_indexes = sample_uniform_cna(ntr, transformed_cnas)
    return training_indexes
def get_training_set(c, f, el, ntr, method, cutoff, nbins=None, traj=None, cna_cut=None):
    """Select a training set of configurations and forces.

    Parameters
    ----------
    c: array of local configurations.
    f: array of forces, aligned with *c*.
    el: list of atomic species (used by the grid samplers).
    ntr: requested number of training points (grid methods may differ).
    method: 'random', 'grid2', 'grid3' or 'cna'; anything else falls back
        to random sampling after printing a warning.
    cutoff: distance cutoff for the grid samplers.
    nbins: bin count for grid sampling; estimated automatically when None.
    traj, cna_cut: trajectory file and CNA cutoff, used only by 'cna'.

    Returns
    -------
    (X, Y): selected configurations and their matching forces.
    """
    def _random_subset():
        # Uniform sampling without replacement; also the fallback path.
        chosen = np.random.choice(np.arange(len(c)), size=ntr, replace=False)
        return c[chosen], f[chosen]

    if method == "random":
        X, Y = _random_subset()
    elif method == "grid2" or method == "grid3":
        # NOTE: the local re-imports of combinations_with_replacement and
        # cdist in the original were dead code — both are already imported
        # at module level.
        if nbins is None:
            nbins = get_right_nbins(c, el, cutoff, method, ntr)
        ind = get_right_grid(c, el, nbins, cutoff, method)
        X, Y = c[ind], f[ind]
    elif method == "cna":
        ind = sample_cna(traj, cna_cut, ntr)
        X, Y = c[ind], f[ind]
    else:
        print("Training method not understood, using random.")
        X, Y = _random_subset()
    return X, Y
def get_model(elements, r_cut, ker, sigma=0.5, theta=0.5, noise=0.001, rep_sig=1, alpha=1, r0=10, sigma_eam=1):
    """Instantiate the mff model matching kernel *ker* and species count.

    Parameters
    ----------
    elements: list of atomic numbers; length 1 selects the single-species
        model classes, longer lists the many-species ones.
    r_cut: cutoff radius passed to the model.
    ker: '2b', '3b', 'combined', 'mb', 'eam' or '23eam'.
    sigma, theta, noise, rep_sig, alpha, r0, sigma_eam: hyperparameters
        forwarded to the model constructor (3-body sigma is ``2 * sigma``
        in the combined kernels, matching the original defaults).

    Raises
    ------
    ValueError: unknown kernel type or empty *elements* (the old code
        printed a message and then crashed with UnboundLocalError at
        ``return m``).
    """
    if len(elements) == 1:
        if ker == '2b':
            m = models.TwoBodySingleSpeciesModel(
                element=elements, r_cut=r_cut, sigma=sigma, noise=noise, theta=theta, rep_sig=rep_sig)
        elif ker == '3b':
            m = models.ThreeBodySingleSpeciesModel(
                element=elements, r_cut=r_cut, sigma=sigma, noise=noise, theta=theta)
        elif ker == 'combined':
            m = models.CombinedSingleSpeciesModel(element=elements, r_cut=r_cut, sigma_2b=sigma, sigma_3b=sigma*2,
                                                  noise=noise, theta_2b=theta, theta_3b=theta, rep_sig=rep_sig)
        elif ker == 'mb':
            m = models.ManyBodySingleSpeciesModel(
                element=elements, r_cut=r_cut, sigma=sigma, noise=noise, theta=theta)
        elif ker == 'eam':
            # BUG FIX: the class is EamSingleSpeciesModel (as used by
            # test.py); the old name 'EamySingleSpeciesModel' raised
            # AttributeError.
            m = models.EamSingleSpeciesModel(
                element=elements, r_cut=r_cut, sigma=sigma, noise=noise, alpha=alpha, r0=r0)
        elif ker == '23eam':
            m = models.TwoThreeEamSingleSpeciesModel(element=elements, r_cut=r_cut, sigma_2b=sigma, sigma_3b=sigma*2, sigma_eam=sigma_eam,
                                                     noise=noise, theta_2b=theta, theta_3b=theta, alpha=alpha, r0=r0, rep_sig=rep_sig)
        else:
            raise ValueError(
                "Kernel Type not understood, available options are 2b, 3b, mb, eam, 23eam or combined.")
    elif len(elements) > 1:
        if ker == '2b':
            m = models.TwoBodyManySpeciesModel(
                elements=elements, r_cut=r_cut, sigma=sigma, noise=noise, theta=theta, rep_sig=rep_sig)
        elif ker == '3b':
            m = models.ThreeBodyManySpeciesModel(
                elements=elements, r_cut=r_cut, sigma=sigma, noise=noise, theta=theta)
        elif ker == 'combined':
            m = models.CombinedManySpeciesModel(elements=elements, r_cut=r_cut, sigma_2b=sigma, sigma_3b=sigma*2,
                                                noise=noise, theta_2b=theta, theta_3b=theta, rep_sig=rep_sig)
        elif ker == 'mb':
            m = models.ManyBodyManySpeciesModel(
                elements=elements, r_cut=r_cut, sigma=sigma, noise=noise, theta=theta)
        elif ker == 'eam':
            # BUG FIX: EamManySpeciesModel, not 'EamyManySpeciesModel'.
            m = models.EamManySpeciesModel(
                elements=elements, r_cut=r_cut, sigma=sigma, noise=noise, alpha=alpha, r0=r0)
        elif ker == '23eam':
            m = models.TwoThreeEamManySpeciesModel(elements=elements, r_cut=r_cut, sigma_2b=sigma, sigma_3b=sigma*2, sigma_eam=sigma_eam,
                                                   noise=noise, theta_2b=theta, theta_3b=theta, alpha=alpha, r0=r0, rep_sig=rep_sig)
        else:
            raise ValueError(
                "Kernel Type not understood, available options are 2b, 3b, mb, eam, 23eam or combined.")
    else:
        raise ValueError(
            "Number of elements less than 1, elements must be an array or list with len >=1.")
    return m
def train_right_gp(X, Y, elements_1, kernel, sigma, noise, cutoff, train_folder, X_e=None, Y_e=None, train_mode="force", ncores=1, rep_sig=1):
    """Build, fit and save a GP model.

    Parameters
    ----------
    X, Y: training local configurations and forces.
    X_e, Y_e: training global configurations and energies; may stay None
        when *train_mode* is 'force'.
    train_mode: 'force', 'energy' or 'force_and_energy'; unknown modes, or
        any mode when the module-level ``energydefault`` flag signals that
        no energies exist, fall back to force training.
    The remaining arguments select and parameterize the model (see
    :func:`get_model`; theta defaults to ``cutoff / 5``).

    Returns the fitted model (also saved via ``save_gp``).
    """
    m = get_model(elements_1, cutoff, kernel,
                  sigma, cutoff/5.0, noise, rep_sig)
    print("Training using %i points on %i cores" % (len(X), ncores))
    tic = time.time()
    if train_mode == "force":
        m.fit(X, Y, ncores=ncores)
    else:
        if not energydefault:
            if train_mode == "energy":
                m.fit_energy(X_e, Y_e, ncores=ncores)
            elif train_mode == "force_and_energy":
                m.fit_force_and_energy(X, Y, X_e, Y_e, ncores=ncores)
            else:
                print("Training mode not understood. Defaulting to force training.")
                m.fit(X, Y, ncores=ncores)
        else:
            print("No energies available. Defaulting to force training.")
            m.fit(X, Y, ncores=ncores)
    toc = time.time()
    print("Seconds for training: %.2f" % (toc-tic))
    # BUG FIX: len(X_e) raised TypeError whenever X_e kept its default of
    # None (e.g. plain force training); only count energies when present.
    n_points = len(X) + (len(X_e) if X_e is not None else 0)
    # Save the GP
    save_gp(m, train_folder, kernel, cutoff, sigma, noise, n_points)
    return m
def get_gp(train_folder, X, Y, elements_1, kernel, sigma, noise, cutoff, training_points, X_e=None, Y_e=None, train_mode="force", ncores=1, rep_sig=1):
    """Return a trained GP model: reload the saved copy from *train_folder*
    when one exists, otherwise train (and save) a fresh one via
    :func:`train_right_gp`.
    """
    folder = train_folder if isinstance(train_folder, Path) else Path(train_folder)
    model_path = folder / "models" / get_model_name(elements_1, kernel,
                                                    training_points)
    try:
        return load_model(model_path)
    except FileNotFoundError:
        return train_right_gp(X, Y, elements_1, kernel, sigma, noise, cutoff,
                              train_folder, X_e, Y_e, train_mode=train_mode,
                              ncores=ncores, rep_sig=rep_sig)
def get_model_name(elements, kernel, ntr):
    """ Build the file name of a saved model.

    Args:
        elements (list): Atomic numbers present in the dataset.
        kernel (str): Kernel type (2b, 3b, combined, eam, 23eam or mb).
        ntr (int): Number of training points.

    Returns:
        str: File name of the form "MODEL_ker_<Kernel><Species>_ntr<N>.json".

    Raises:
        ValueError: If the kernel name is unknown or ``elements`` is empty
            (the original code raised an opaque NameError in both cases).
    """
    kernel_names = {
        "2b": "TwoBody",
        "3b": "ThreeBody",
        "combined": "Combined",
        "eam": "Eam",
        "23eam": "TwoThreeEam",
        "mb": "ManyBody",
    }
    try:
        first_name = kernel_names[kernel]
    except KeyError:
        raise ValueError("Unknown kernel type: %s" % kernel)
    if len(elements) == 0:
        raise ValueError("elements must contain at least one atomic species")
    second_name = "SingleSpecies" if len(elements) == 1 else "ManySpecies"
    return "MODEL_ker_" + first_name + second_name + "_ntr" + str(ntr) + ".json"
def test_forces(m, x, y, plot=False, ncores=1):
""" Test forces and report significant statystics on the errors incurred by the GP.
"""
print("Testing the force prediction on %i configurations" % (len(x)))
# Predict forces on test configurations
y_pred = m.predict(x, ncores=ncores)
y_err = y_pred - y # Calculate error
MAEC = np.mean(abs(y_err)) # Mean average error on force components
# Mean average error on force vector
MAEF = np.mean(np.sqrt(np.sum(np.square(y_err), axis=1)))
# Standard deviation of the average error on force vector
SMAEF = np.std(np.sqrt(np.sum(np.square(y_err), axis=1)))
MF = np.mean(np.linalg.norm(y, axis=1)) # Meean force value
RMSEF = np.sqrt(np.mean((y_err) ** 2)) # Root mean square error on force
print('')
print('RMSE: {:.4} eV/A'.format(RMSEF))
print('MAEC: {:.4} eV/A'.format(MAEC))
print('MAEF : {:.4f} +- {:.4f} eV/A'.format(MAEF, SMAEF))
print('Relative MAEF: {:.4f} +- {:.4f}'.format(MAEF / MF, SMAEF / MF))
if plot:
density_plot(y, y_pred, 'force')
return MAEC, MAEF, SMAEF, MF, RMSEF
def test_energies(m, x_e, y_e, plot=False, ncores=1):
""" Test forces and report significant statystics on the errors incurred by the GP.
"""
print("Testing the energy prediction on %i configurations" % (len(x_e)))
# Predict forces on test configurations
y_pred = m.predict_energy(x_e, ncores=ncores)
y_pred /= len(x_e[0])
y_e /= len(x_e[0])
y_err = y_pred - y_e # Calculate error
MAE = np.mean(abs(y_err)) # Mean average error on energy
# Standard deviation of the average error on energy
SMAE = np.std(abs(y_err))
RMSE_e = np.sqrt(np.mean((y_err) ** 2)) # Root mean square error on energy
print('')
print('Energy RMSE: {:.4} eV/atom'.format(RMSE_e))
print('Energy MAE : {:.4f} +- {:.4f} eV/atom'.format(MAE, SMAE))
if plot:
density_plot(y_e, y_pred, 'energy')
return MAE, SMAE, RMSE_e
def save_gp(m, folder, kernel, cutoff, sigma, noise, ntr):
    """ Save the trained model inside a "models" subfolder, creating it if needed.
    """
    folder = folder if isinstance(folder, Path) else Path(folder)
    models_dir = folder / "models"
    if not os.path.exists(models_dir):
        os.makedirs(models_dir)
    m.save(models_dir)
def save_report(MAEC, MAEF, SMAEF, MF, RMSEF, folder, test_folder, kernel, cutoff, sigma, noise, ntr, sampling, MAE=None, SMAE=None, RMSE_e=None):
    """ Save a .json file containing details about the model and the errors it incurred.

    Args:
        MAEC, MAEF, SMAEF, MF, RMSEF (float): Force error statistics.
        folder (str or Path): Folder where the "results" subfolder is created.
        test_folder (str, Path or None): Folder of the test set, if any.
        kernel (str): Kernel type.
        cutoff, sigma, noise (float): Model hyperparameters.
        ntr (int): Number of training points.
        sampling (str): Sampling strategy used to build the training set.
        MAE, SMAE, RMSE_e (float): Energy error statistics, if available.
    """
    if not isinstance(folder, Path):
        folder = Path(folder)
    if not os.path.exists(folder / "results"):
        os.makedirs(folder / "results")
    # When no separate test set was used, omit it from the file name.
    # The None check must happen BEFORE any Path() conversion: the original
    # code ran Path(test_folder) first, so test_folder=None raised TypeError.
    if test_folder is None or test_folder == "None":
        end_name = "%s_%.2f_%.2f_%.4f_%i.json" % (
            kernel, cutoff, sigma, noise, ntr)
    else:
        if not isinstance(test_folder, Path):
            test_folder = Path(test_folder)
        end_name = "on_%s_%s_%.2f_%.2f_%.4f_%i.json" % (test_folder.stem,
                                                        kernel, cutoff, sigma, noise, ntr)
    filename = folder / "results" / end_name
    errors = {
        "kernel": kernel,
        "sampling": sampling,
        "ntr": ntr,
        "cutoff": cutoff,
        "sigma": sigma,
        "noise": noise,
        "MAE_c": MAEC,
        "MAE_f": MAEF,
        "SMAE_f": SMAEF,
        "M_f": MF,
        "RMSE_f": RMSEF,
        "MAE_e": MAE,
        "SMAE_e": SMAE,
        "RMSE_e": RMSE_e
    }
    with open(filename, 'w') as fp:
        json.dump(errors, fp, indent=4)
    print("Saved report of errors.")
def train_and_test_gp(train_folder, traj_filename, cutoff=5.0, test_folder=None,
                      training_points=100, test_points=100,
                      kernel='2b', sigma=0.5, noise=0.001, sampling="random", nbins=None,
                      ncores=1, train_mode="force", test_mode="force", f_e_ratio=100, plot=True, cna_cut=None):
    """ Wrapper function that handles everything starting from a .xyz file and details on the kernel.
    Extracts data, creates model, trains GP model and then tests it.

    Args:
        train_folder (str): Folder containing the training trajectory file.
        traj_filename (str): Name of the trajectory file inside the folder.
        cutoff (float): Cutoff radius in Angstroms used to carve environments.
        test_folder (str or None): Folder of a separate test set; when None
            (or the string "None") the training set is split instead.
        training_points (int): Number of training points to sample.
        test_points (int): Number of test points to sample.
        kernel (str): Kernel type (e.g. '2b', '3b', 'combined', ...).
        sigma (float): Kernel lengthscale in Angstroms.
        noise (float): GP regularization parameter.
        sampling (str): Sampling strategy name forwarded to the samplers.
        nbins: Number of bins used by binned sampling strategies, if any.
        ncores (int): Number of cores used for training and testing.
        train_mode (str): "force", "energy" or "force_and_energy".
        test_mode (str): "force", "energy" or "force_and_energy".
        f_e_ratio (int): Force-to-energy point ratio forwarded to the samplers.
        plot (bool): If True, show density plots of the predictions.
        cna_cut: CNA cutoff forwarded to the samplers, if used.

    Returns:
        m: The trained (and tested) GP model.
    """
    # Get data, and create training and test sets
    elements_1, confs_1, forces_1, energies_1, global_confs_1 = get_data(
        train_folder, cutoff, traj_filename)
    if test_folder is not None and test_folder != "None":
        print("The test folder is", test_folder)
        # Separate test set: train points come from the first dataset,
        # test points from the second.
        elements_2, confs_2, forces_2, energies_2, global_confs_2 = get_data(
            test_folder, cutoff, traj_filename)
        # NOTE(review): train_folder + '/' + traj_filename assumes string
        # paths — a Path argument would raise here; confirm callers.
        X, Y, X_e, Y_e, x, y, x_e, y_e = sample_twosets(confs_1, forces_1, global_confs_1,
                                                        energies_1, elements_1, confs_2, forces_2, global_confs_2, energies_2,
                                                        elements_2, sampling, training_points, test_points, cutoff, nbins, f_e_ratio, train_folder + '/' + traj_filename, cna_cut)
    else:
        # Single dataset: split it into train and test subsets.
        X, Y, X_e, Y_e, x, y, x_e, y_e = sample_oneset(confs_1, forces_1, global_confs_1,
                                                       energies_1, elements_1, sampling, training_points, test_points, cutoff, nbins, f_e_ratio, train_folder + '/' + traj_filename, cna_cut)
    # See if the GP is aleady there, if not train the Gaussian Process
    m = get_gp(train_folder, X, Y, elements_1, kernel, sigma, noise,
               cutoff, training_points, X_e, Y_e, train_mode, ncores)
    # Test the GP
    MAE_c, MAE_f, SMAE_f, M_f, RMSE_f, MAE_e, SMAE_e, RMSE_e = test_gp(
        m, x, y, x_e, y_e, plot, test_mode, ncores)
    # Save a report of the errors; ntr is the actual number of sampled
    # training points (len(X)), which may differ from training_points.
    save_report(MAE_c, MAE_f, SMAE_f, M_f, RMSE_f, train_folder, test_folder, kernel,
                cutoff, sigma, noise, len(X), sampling, MAE_e, SMAE_e, RMSE_e)
    return m
def test_gp(m, x=None, y=None, x_e=None, y_e=None, plot=False, test_mode="forces", ncores=1):
""" Wrapper function that tests a GP on a test set and returns error metrics.
"""
if test_mode == "force":
MAE_c, MAE_f, SMAE_f, M_f, RMSE_f = test_forces(m, x, y, plot, ncores)
MAE_e, SMAE_e, RMSE_e = None, None, None
elif test_mode == "energy":
MAE_c, MAE_f, SMAE_f, M_f, RMSE_f = None, None, None, None
MAE_e, SMAE_e, RMSE_e = test_energies(m, x_e, y_e, plot, ncores)
elif test_mode == "force_and_energy":
MAE_c, MAE_f, SMAE_f, M_f, RMSE_f = test_forces(m, x, y, plot, ncores)
MAE_e, SMAE_e, RMSE_e = test_energies(m, x_e, y_e, plot, ncores)
else:
print("Test mode not understood, use either force, energy or force_and_energy")
return MAE_c, MAE_f, SMAE_f, M_f, RMSE_f, MAE_e, SMAE_e, RMSE_e
def load_model(filename):
    """ Load a saved GP model, dispatching on the model name stored in its
    json metadata.

    Args:
        filename (str or Path): Path of the saved model .json file.

    Returns:
        The rebuilt model, or 0 if the stored model name is not recognized.
    """
    with open(filename) as json_file:
        metadata = json.load(json_file)
    model = metadata['model']
    # Every supported class in mff.models is named exactly as it is stored in
    # the metadata, so a whitelist plus getattr replaces the original long
    # if/elif chain (which also contained duplicated branches for
    # EamSingleSpeciesModel and TwoThreeEamSingleSpeciesModel).
    known_models = {
        "TwoBodySingleSpeciesModel",
        "ThreeBodySingleSpeciesModel",
        "CombinedSingleSpeciesModel",
        "EamSingleSpeciesModel",
        "TwoThreeEamSingleSpeciesModel",
        "TwoBodyManySpeciesModel",
        "ThreeBodyManySpeciesModel",
        "CombinedManySpeciesModel",
        "EamManySpeciesModel",
        "TwoThreeEamManySpeciesModel",
    }
    if model not in known_models:
        print("Json file does contain unexpected model name")
        return 0
    return getattr(models, model).from_json(filename)
def density_plot(x, y, mode):
    """ Plot a scatter plot where a gaussian kde has been superimposed in order to
    highlight areas where points are more dense.

    Args:
        x (array): True values (forces or energies), flattened before plotting.
        y (array): Predicted values, flattened before plotting.
        mode (str): 'force' or 'energy'; selects the axis labels and title.
    """
    from matplotlib import pyplot as plt
    from scipy.stats import gaussian_kde
    # Calculate the point density
    x = np.ravel(x)
    y = np.ravel(y)
    xy = np.vstack([x, y])
    z = gaussian_kde(xy)(xy)
    # Sort the points by density, so that the densest points are plotted last
    idx = z.argsort()
    x, y, z = x[idx], y[idx], z[idx]
    # NOTE(review): edgecolor='' is rejected by newer matplotlib versions;
    # confirm against the pinned matplotlib release.
    plt.scatter(x, y, c=z, s=50, edgecolor='')
    plt.colorbar()
    # Perfect-prediction reference line y = x
    plt.plot(x, x, 'k-')
    if mode == 'force':
        plt.xlabel(r"True Force [eV/$\AA$]")
        plt.ylabel(r"Predicted Force [eV/$\AA$]")
        plt.title(r"Force Prediction Error")
    elif mode == 'energy':
        plt.xlabel("True Energy [eV/atom]")
        plt.ylabel("Predicted Energy [eV/atom]")
        plt.title("Energy Prediction Error")
    plt.show()
def get_calculator(filepath):
    """ Build an ASE-style calculator from a saved model file.

    Args:
        filepath (str or Path): Path of the saved model .json file.

    Returns:
        calc: An mff calculator wrapping the model's tabulated grids.

    Raises:
        ValueError: If the stored model name has no matching calculator.
    """
    from mff import calculators
    m = load_model(filepath)
    with open(filepath) as f:
        model_json = json.load(f)
    model_name = model_json['model']
    if model_name == 'TwoBodySingleSpeciesModel':
        calc = calculators.TwoBodySingleSpecies(m.r_cut, m.grid)
    elif model_name == 'ThreeBodySingleSpeciesModel':
        calc = calculators.ThreeBodySingleSpecies(m.r_cut, m.grid)
    elif model_name == 'CombinedSingleSpeciesModel':
        calc = calculators.CombinedSingleSpecies(m.r_cut, m.grid_2b, m.grid_3b)
    elif model_name == 'TwoThreeEamSingleSpeciesModel':
        calc = calculators.TwoThreeEamSingleSpecies(m.r_cut, m.grid_2b, m.grid_3b, m.grid_eam,
                                                    m.gp_eam.kernel.theta[2], m.gp_eam.kernel.theta[3])
    elif model_name == 'TwoBodyManySpeciesModel':
        calc = calculators.TwoBodyManySpecies(m.r_cut, m.elements, m.grid)
    elif model_name == 'ThreeBodyManySpeciesModel':
        # Fixed typo: the original called calculators.ThreeBodySManySpecies
        # (extra 'S'), which does not exist.
        calc = calculators.ThreeBodyManySpecies(m.r_cut, m.elements, m.grid)
    elif model_name == 'CombinedManySpeciesModel':
        calc = calculators.CombinedManySpecies(m.r_cut, m.elements, m.grid_2b, m.grid_3b)
    elif model_name == 'TwoThreeEamManySpeciesModel':
        calc = calculators.TwoThreeEamManySpecies(m.r_cut, m.elements, m.grid_2b, m.grid_3b,
                                                  m.grid_eam, m.gp_eam.kernel.theta[2], m.gp_eam.kernel.theta[3])
    else:
        # Raise instead of print+quit() so library users can handle the error.
        raise ValueError("Model type %s not understood when loading" % model_name)
    return calc
| 35,709 | 39.350282 | 189 | py |
mff | mff-master/mff/error_measures.py | import numpy as np
def MAEF(tst_confs, tst_forces, gp):
    """Mean Absolute Error on Force.

    Mean Euclidean distance between predicted and reference force vectors.

    Args:
        tst_confs (list): Configurations in the test set
        tst_forces (array): Forces in the test set
        gp (class): Trained gp class

    Returns:
        float: the Mean Absolute error on force
    """
    predictions = gp.predict(tst_confs, return_std=False)
    residuals = tst_forces - predictions
    return np.mean(np.linalg.norm(residuals, axis=1))
def MAEC(tst_confs, tst_forces, gp):
    """Mean Absolute Error on Components.

    Mean absolute difference between predicted and reference force
    components, averaged over all components of all test points.

    Args:
        tst_confs (list): Configurations in the test set
        tst_forces (array): Forces in the test set
        gp (class): Trained gp class

    Returns:
        float: the Mean Absolute error on force components
    """
    predictions = gp.predict(tst_confs, return_std=False)
    return np.mean(np.abs(tst_forces - predictions))
def RMSE(tst_confs, tst_forces, gp):
    """Root Mean Squared Error.

    Root of the mean squared per-component force error on the test set.

    Args:
        tst_confs (list): Configurations in the test set
        tst_forces (array): Forces in the test set
        gp (class): Trained gp class

    Returns:
        float: the Root Mean Square Deviation of predicted forces
    """
    predictions = gp.predict(tst_confs, return_std=False)
    squared_err = (tst_forces - predictions) ** 2
    return np.sqrt(np.mean(squared_err))
def neg_log_pred(tst_confs, tst_forces, gp):
    """Negative log predictive probability of the data.

    Probability assigned by the trained model to the test set, with the
    predictive covariance matrix approximated as diagonal.

    Args:
        tst_confs (list): Configurations in the test set
        tst_forces (array): Forces in the test set
        gp (class): Trained gp class

    Returns:
        float: negative log predictive probability of force data
    """
    mean_pred, std_pred = gp.predict(tst_confs, return_std=True)
    # Per-point Gaussian negative log likelihood, averaged over the set
    gauss_term = (tst_forces - mean_pred) ** 2 / (2 * std_pred ** 2)
    const_term = 0.5 * np.log(2 * np.pi)
    return np.mean(gauss_term + np.log(std_pred) + const_term)
| 2,356 | 27.059524 | 104 | py |
mff | mff-master/mff/configurations.py | # -*- coding: utf-8 -*-
import json
import logging
from abc import ABCMeta, abstractmethod
from pathlib import Path
from random import shuffle
import numpy as np
from scipy.spatial.distance import cdist
from asap3 import FullNeighborList
from ase.io import read
logger = logging.getLogger(__name__)
class MissingData(Exception):
    """Raised when a trajectory provides neither forces nor energies."""
    pass
def carve_from_snapshot(atoms, r_cut, forces_label=None, energy_label=None, atoms_ind=None):
    """Extract atomic configurations, the forces acting on the central atoms
    of said configurations, and the local energy values associated to a single atoms object.

    Args:
        atoms (ase atoms object): Ase atoms file, opened with ase.io.read
        atoms_ind (list): indexes of the atoms for which a conf is created
        r_cut (float): Cutoff to use when carving out atomic environments
        forces_label (str): Name of the force label in the trajectory file, if None default is "forces"
        energy_label (str): Name of the energy label in the trajectory file, if None default is "energy"

    Returns:
        confs (list of arrays): List of M by 5 numpy arrays, where M is the number of atoms within
            r_cut from the central one. The first 3 components are positions w.r.t
            the central atom in Angstroms, the fourth is the atomic number of the
            central atom, the fifth the atomic number of each atom.
        forces (array): x,y,z components of the force acting on the central atom in eV/Angstrom
        energies (array): value of the local atomic energy in eV
    """
    # Default: carve an environment around every atom of the snapshot.
    if atoms_ind is None:
        atoms_ind = np.arange(len(atoms))
    # Forces: either read a named array from the snapshot, or ask ASE.
    if forces_label:
        forces = atoms.arrays.get(forces_label)
    else:
        try:
            forces = atoms.get_forces()
        except:  # NOTE(review): bare except masks real errors; best-effort read
            forces = None
    # Energy: a custom-named value, or ASE's potential energy.
    if energy_label and energy_label != 'energy':
        energy = atoms.arrays.get(energy_label)
    else:
        energy_label = 'energy'
        try:
            energy = atoms.get_potential_energy()
        except:  # NOTE(review): bare except masks real errors; best-effort read
            energy = None
    if forces is None and energy is None:
        raise MissingData(
            'Cannot find energy or force values in the xyz file, shutting down')
    if forces is not None:
        forces = forces[atoms_ind]
    else:
        logger.info(
            'Forces in the xyz file are not present, or are not called %s' % (forces_label))
    if energy is None:
        logger.info(
            'Energy in the xyz file is not present, or is not called %s' % (energy_label))
    # See if there are forces and energies, get them for the chosen atoms
    # A zero cell carries no PBC information: substitute a large box so the
    # neighbor list below behaves like an isolated system.
    if (atoms.get_cell() == np.zeros((3, 3))).all():
        atoms.set_cell(100.0 * np.identity(3))
        logger.info('No cell values found, setting to a 100 x 100 x 100 cube')
    # Build local configurations for every indexed atom
    nl = FullNeighborList(r_cut, atoms=atoms)
    confs = []
    for i in atoms_ind:
        indices, positions, distances = nl.get_neighbors(i)
        # Columns 3 and 4 carry the species of the central atom and of each
        # neighboring atom respectively.
        atomic_numbers_i = np.ones(
            (len(indices), 1)) * atoms.get_atomic_numbers()[i]
        atomic_numbers_j = atoms.get_atomic_numbers()[indices].reshape(-1, 1)
        confs.append(
            np.hstack([positions, atomic_numbers_i, atomic_numbers_j]))
    return confs, forces, energy
def generate(traj, r_cut, forces_label=None, energy_label=None):
    """Extract atomic configurations, the forces acting on the central atoms
    of said configurations, and the associated local energy values.

    Args:
        traj (ase atoms object): Ase trajectory file, opened with ase.io.read
        r_cut (float): Cutoff to use when carving out atomic environments
        forces_label (str): Name of the force label in the trajectory file, if None default is "forces"
        energy_label (str): Name of the energy label in the trajectory file, if None default is "energy"

    Returns:
        data (dictionary): Structure containing, for each snapshot in the trajectory,
            the forces, energy, and local atomic configurations for that snapshot's atoms
    """
    # Collect every atomic species appearing anywhere in the trajectory
    flat_atom_number = np.concatenate(
        [snapshot.get_atomic_numbers() for snapshot in traj])
    elements = list(np.unique(flat_atom_number, return_counts=False))
    data = {
        'elements': elements,
        'r_cut': r_cut,
        'n_steps': len(traj),
        'data': [],
    }
    # Carve the local environments of every snapshot, one entry per snapshot
    for atoms in traj:
        this_conf, this_force, this_energy = carve_from_snapshot(
            atoms, r_cut, forces_label=forces_label, energy_label=energy_label)
        data['data'].append({
            'confs': this_conf,
            'forces': this_force,
            'energy': this_energy,
        })
    return data
def save(path, r_cut, data):
    """ Save data extracted with ``generate`` to a file named after the cutoff.

    Args:
        path (Path or string): Folder where the data file is written
        r_cut (float): Cutoff used
        data (dict): Structure containing, for each snapshot in the trajectory,
            the forces, energy, and local atomic configurations for that snapshot's atoms.
            Obtained from ``generate``
    """
    path = path if isinstance(path, Path) else Path(path)
    np.save('{}/data_cut={:.2f}.npy'.format(path, r_cut), data)
def generate_and_save(path, r_cut, forces_label=None, energy_label=None, index=':'):
    """ Generate the data dictionary and save it to the same location.

    Args:
        path (Path or string): Name and position of trajectory file
        r_cut (float): Cutoff used
        forces_label (str): Name of the force label in the trajectory file, if None default is "forces"
        energy_label (str): Name of the energy label in the trajectory file, if None default is "energy"
        index (str): Indexes indicating which snapshots to use from the traj file

    Returns:
        data (dict): Structure containing, for each snapshot in the trajectory,
            the forces, energy, and local atomic configurations for that snapshot's atoms.
            Obtained from ``generate``
    """
    if not isinstance(path, Path):
        path = Path(path)
    suffix = path.suffix
    # Path.suffix includes the leading dot; the original compared against
    # "out" (without the dot), so aims-output files never matched and fell
    # through to the generic reader.
    if str(suffix) == ".out":
        traj = read(path, index=index, format='aims-output')
    elif str(suffix) == ".xyz":
        # Get the ASE traj from xyz
        traj = read(path, index=index, format='extxyz')
    else:
        # Let ASE guess the file format
        traj = read(path, index=index)
    data = generate(traj, r_cut, forces_label=forces_label,
                    energy_label=energy_label)
    save(path.parent, r_cut, data)
    return data
def load(path, r_cut):
    """ Load data saved with ``save``.

    Args:
        path (Path or string): Folder containing the data file
        r_cut (float): Cutoff used

    Returns:
        data (dict): Structure containing, for each snapshot in the trajectory,
            the forces, energy, and local atomic configurations for that snapshot's atoms.
            Obtained from ``generate``
    """
    path = path if isinstance(path, Path) else Path(path)
    # The file holds a pickled dict wrapped in a 0-d object array
    raw = np.load('{}/data_cut={:.2f}.npy'.format(path, r_cut),
                  allow_pickle=True)
    return raw.item()
def unpack(data):
    """ From a data dictionary, generate elements, configurations, forces, energies and
    global configurations to be used by the GP module.

    Args:
        data (dict): Structure containing, for each snapshot in the trajectory,
            the forces, energy, and local atomic configurations for that snapshot's atoms.
            Obtained from ``generate``

    Returns:
        elements (list): Atomic numbers of all atomic species present in the dataset
        confs (list of arrays): List of M by 5 numpy arrays, where M is the number of atoms within
            r_cut from the central one. The first 3 components are positions w.r.t
            the central atom in Angstroms, the fourth is the atomic number of the
            central atom, the fifth the atomic number of each atom.
        forces (array): x,y,z components of the force acting on the central atom in eV/Angstrom
        energies (array): value of the total energy in eV
        global_confs (list of lists of arrays): list containing lists of configurations, grouped together
            so that local atomic environments taken from the same snapshot are in the same group.
    """
    elements = data['elements']
    global_confs = []
    forces = []
    energies = []
    for snapshot in data['data']:
        global_confs.append(snapshot['confs'])
        forces.append(snapshot['forces'])
        energies.append(snapshot['energy'])
    try:
        forces = np.array([item for sublist in forces for item in sublist])
    except TypeError:
        # A snapshot without forces stores None, which cannot be iterated.
        # (Narrowed from a bare except, which would also have hidden real bugs.)
        logger.warning("No forces in the data file")
    confs = np.array([item for sublist in global_confs for item in sublist])
    try:
        energies = np.array(energies)
    except TypeError:
        logger.warning("No energies in the data file")
    # NOTE(review): np.array on a ragged list of per-snapshot conf lists
    # relies on implicit object-array creation, which newer numpy versions
    # reject; confirm snapshot sizes are uniform or pass dtype=object.
    global_confs = np.array(global_confs)
    return elements, confs, forces, energies, global_confs
def load_and_unpack(path, r_cut):
    """ Load data saved with ``save`` and unpack it with ``unpack``.

    Args:
        path (Path or string): Folder containing the data file
        r_cut (float): Cutoff used

    Returns:
        elements (list): Atomic numbers of all atomic species present in the dataset
        confs (list of arrays): List of M by 5 numpy arrays, where M is the number of atoms within
            r_cut from the central one. The first 3 components are positions w.r.t
            the central atom in Angstroms, the fourth is the atomic number of the
            central atom, the fifth the atomic number of each atom.
        forces (array): x,y,z components of the force acting on the central atom in eV/Angstrom
        energies (array): value of the total energy in eV
        global_confs (list of lists of arrays): list containing lists of configurations, grouped together
            so that local atomic environments taken from the same snapshot are in the same group.
    """
    return unpack(load(path, r_cut))
| 10,423 | 35.704225 | 105 | py |
mff | mff-master/mff/advanced_sampling.py | from mff.gp import GaussianProcess
from itertools import product, combinations_with_replacement
from scipy.spatial.distance import cdist
from mff import kernels
from mff.configurations import carve_from_snapshot
from mff.models import TwoBodyTwoSpeciesModel, CombinedTwoSpeciesModel
from mff.models import TwoBodySingleSpeciesModel, CombinedSingleSpeciesModel
from pathlib import Path
import logging
import numpy as np
import time
import sys
import random
sys.path.insert(0, '../')
try:
from skbayes.rvm_ard_models import RVR
from sklearn.metrics import mean_squared_error
except:
print("No skbayes module found, rvm sampling cannot be used")
logger = logging.getLogger(__name__)
class Sampling(object):
""" Sampling methods class
Class containing sampling methods to optimize the trainng database selection.
The class is currently set in order to work with local atomic energies,
and is therefore made to be used in confined systems (nanoclusters, molecules).
Some of the mothods used can be applied to force training too (ivm, random),
or are independent to the training outputs (grid).
These methods can be used on systems with PBCs where a local energy is not well defined.
The class also initializes two GP objects to use in some of its methods.
Args:
confs (list of arrays): List of the configurations as M*5 arrays
energies (array): Local atomic energies, one per configuration
forces (array): Forces acting on the central atoms of confs, one per configuration
sigma_2b (float): Lengthscale parameter of the 2-body kernels in Amstrongs
sigma_3b (float): Lengthscale parameter of the 3-body kernels in Amstrongs
sigma_mb (float): Lengthscale parameter of the many-body kernel in Amstrongs
noise (float): Regularization parameter of the Gaussian process
r_cut (float): Cutoff function for the Gaussian process
theta (float): Decay lengthscale of the cutoff function for the Gaussian process
Attributes:
elements (list): List of the atomic number of the atoms present in the system
natoms (int): Number of atoms in the system, used for nanoclusters
K2 (array): Gram matrix for the energy-energy 2-body kernel using the full reduced dataset
K3 (array): Gram matrix for the energy-energy 3-body kernel using the full reduced dataset
"""
    def __init__(self, confs=None, energies=None,
                 forces=None, sigma_2b=0.05, sigma_3b=0.1, sigma_mb=0.2, noise=0.001, r_cut=8.5, theta=0.5):
        # Store the raw dataset; see the class docstring for argument meanings.
        self.confs = confs
        self.energies = energies
        self.forces = forces
        # NOTE(review): confs=None (the default) crashes on the next line;
        # a non-empty list of configurations is effectively required — confirm.
        natoms = len(confs[0]) + 1
        # Unique atomic species, read from columns 3-4 of the first conf
        self.elements = list(
            np.sort(list(set(confs[0][:, 3:].flatten().tolist()))))
        self.natoms = natoms
        # Gram matrices, filled lazily by the sampling methods
        self.K2 = None
        self.K3 = None
        self.sigma_2b, self.sigma_3b, self.sigma_mb, self.noise, self.r_cut, self.theta = (
            sigma_2b, sigma_3b, sigma_mb, noise, r_cut, theta)
        # Build the 2- and 3-body GPs (self.gp2 / self.gp3) used below
        self.get_the_right_kernel('2b')
        self.get_the_right_kernel('3b')
# def read_xyz(self, filename, r_cut, randomized = True, shuffling = True, forces_label=None, energy_label=None):
# from ase.io import read
# traj = read(filename, index=slice(None), format='extxyz')
# confs, forces, energies = [], [], []
# for i in np.arange(len(traj)):
# if randomized:
# rand = np.random.randint(0, len(traj[i]), 1)
# else:
# rand = 0
# co, fe, en = carve_from_snapshot(traj[i], rand, r_cut, forces_label=forces_label, energy_label=energy_label)
# if len(co[0]) == self.natoms - 1:
# confs.append(co[0])
# forces.append(fe)
# energies.append(en)
# confs = np.reshape(confs, (len(confs), self.natoms-1, 5))
# forces = np.reshape(forces, (len(forces), 3))
# # Bring energies to zero mean
# energies = np.reshape(energies, len(energies))
# energies -= np.mean(energies)
# if shuffling:
# shuffled_order = np.arange(len(energies))
# random.shuffle(shuffled_order)
# energies, forces, confs = energies[shuffled_order], forces[shuffled_order], confs[shuffled_order]
# self.reduced_energies = energies
# self.reduced_forces = forces
# self.reduced_confs = confs
# del confs, energies, forces, shuffled_order, traj
def sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def clean_dataset(self, randomized=True, shuffling=True):
'''
Function used to subsample from a complete trajectory only one atomic environment
per snapshot. This is necessary when training on energies of nanoclusters in order to assign
an unique energy value to every configuration and to avoid using redundant information
in the form of local atomic environments centered around different atoms in the same snapshot.
Args:
randomized (bool): If True, an atom at random is chosen every snapshot, if false always the first
atom in the configurations will be chosen to represent said snapshot.
shuffling (bool): if True, once the dataset is created, it is shuffled randomly in order to
avoid any bias during incremental training set optimization methods (e.g. rvm, cur, ivm).
'''
confs, energies, forces = self.confs, self.energies, self.forces
natoms = self.natoms
# Bring energies to zero mean
energies = np.reshape(energies, len(energies))
energies -= np.mean(energies)
# Transform confs into a numpy array
arrayed_confs = np.zeros((len(forces), natoms-1, 5))
for i in np.arange(len(confs)):
try:
arrayed_confs[i] = confs[i][:natoms-1, :]
except:
print("Number of atoms in the configurations is not the expected one")
arrayed_confs[i] = np.zeros((natoms-1, 5))
energies[i] = 0
forces[i] = np.zeros(3)
# Extract one conf, energy and force per snapshot
# The particular atom can be chosen at random (random = True)
# or be always the same (random = False).
reduced_energies = np.zeros(len(confs)//natoms)
reduced_confs = np.zeros((len(confs)//natoms, natoms-1, 5))
reduced_forces = np.zeros((len(confs)//natoms, 3))
for i in np.arange(len(confs)//natoms):
if randomized:
rand = np.random.randint(0, natoms, 1)
else:
rand = 0
reduced_confs[i] = arrayed_confs[i*natoms+rand]
reduced_energies[i] = energies[i*natoms+rand]
reduced_forces[i] = forces[i*natoms+rand]
if shuffling:
shuffled_order = np.arange(len(reduced_energies))
random.shuffle(shuffled_order)
reduced_energies, reduced_forces, reduced_confs = (
reduced_energies[shuffled_order], reduced_forces[shuffled_order], reduced_confs[shuffled_order])
# Strip the data of every possible configuration which was discarded because it had the wrong number of atoms
reduced_energies = reduced_energies[np.where(reduced_energies != 0)]
reduced_forces = reduced_forces[np.where(reduced_energies != 0)]
reduced_confs = reduced_confs[np.where(reduced_energies != 0)]
self.reduced_energies = reduced_energies
self.reduced_forces = reduced_forces
self.reduced_confs = reduced_confs
del confs, energies, forces, natoms, reduced_confs, reduced_forces, reduced_energies, shuffled_order
def get_the_right_model(self, ker):
if len(self.elements) == 1:
if ker == '2b':
return TwoBodySingleSpeciesModel(self.elements, self.r_cut, self.sigma_2b, self.theta, self.noise)
elif ker == '3b':
return CombinedSingleSpeciesModel(element=self.elements, noise=self.noise, sigma_2b=self.sigma_2b, sigma_3b=self.sigma_3b, theta_3b=self.theta, r_cut=self.r_cut, theta_2b=self.theta)
else:
print('Kernel type not understood, shutting down')
return 0
else:
if ker == '2b':
return TwoBodyTwoSpeciesModel(self.elements, self.r_cut, self.sigma_2b, self.theta, self.noise)
elif ker == '3b':
return CombinedTwoSpeciesModel(elements=self.elements, noise=self.noise, sigma_2b=self.sigma_2b, sigma_3b=self.sigma_3b, theta_3b=self.theta, r_cut=self.r_cut, theta_2b=self.theta)
else:
print('Kernel type not understood, shutting down')
return 0
def get_the_right_kernel(self, ker):
if len(self.elements) == 1:
if ker == '2b':
self.gp2 = GaussianProcess(kernel=kernels.TwoBodySingleSpeciesKernel(
theta=[self.sigma_2b, self.theta, self.r_cut]), noise=self.noise)
self.gp2.ncores = 1
elif ker == '3b':
self.gp3 = GaussianProcess(kernel=kernels.ThreeBodySingleSpeciesKernel(
theta=[self.sigma_3b, self.theta, self.r_cut]), noise=self.noise)
self.gp3.ncores = 1
else:
print('Kernel type not understood, shutting down')
return 0
else:
if ker == '2b':
self.gp2 = GaussianProcess(kernel=kernels.TwoBodyTwoSpeciesKernel(
theta=[self.sigma_2b, self.theta, self.r_cut]), noise=self.noise)
self.gp2.ncores = 1
elif ker == '3b':
self.gp3 = GaussianProcess(kernel=kernels.ThreeBodyTwoSpeciesKernel(
theta=[self.sigma_3b, self.theta, self.r_cut]), noise=self.noise)
self.gp3.ncores = 1
else:
print('Kernel type not understood, shutting down')
return 0
def train_test_split(self, confs, forces=None, energies=None, ntest=10):
'''
Function used to subsample a training and a test set: the test set is extracted at random
and the remaining dataset is trated as a training set (from which we then subsample using the various methods).
Args:
confs (array or list): List of the configurations as M*5 arrays
energies (array): Local atomic energies, one per configuration
forces (array): Forces acting on the central atoms of confs, one per configuration
ntest (int): Number of test points, if None, every point that is not a training point will be used
as a test point
'''
if forces is None:
forces = np.zeros((len(confs), 3))
print('No forces in the input')
if energies is None:
energies = np.zeros(len(confs))
print('No energies in the input')
ind = np.arange(len(confs))
ind_test = np.random.choice(ind, size=ntest, replace=False)
ind_train = np.array(list(set(ind) - set(ind_test)))
self.X, self.Y, self.Y_force = confs[ind_train], energies[ind_train], forces[ind_train]
self.x, self.y, self.y_force = confs[ind_test], energies[ind_test], forces[ind_test]
del ind, ind_test, ind_train, confs, energies, forces
try:
del self.reduced_energies, self.reduced_confs, self.reduced_forces
except:
pass
def ker_2b(self, X1, X2):
X1, X2 = np.reshape(X1, (self.natoms - 1, 5)
), np.reshape(X2, (self.natoms - 1, 5))
ker = self.gp2.kernel.k2_ee(
X1, X2, sig=self.sigma_2b, rc=self.r_cut, theta=self.theta)
del X1, X2
return ker
def ker_3b(self, X1, X2):
X1, X2 = np.reshape(X1, (self.natoms - 1, 5)
), np.reshape(X2, (self.natoms - 1, 5))
ker = self.gp3.kernel.k3_ee(
X1, X2, sig=self.sigma_3b, rc=self.r_cut, theta=self.theta)
del X1, X2
return ker
def normalized_3b(self, X1, X2):
X1, X2 = np.reshape(X1, (self.natoms - 1, 5)
), np.reshape(X2, (self.natoms - 1, 5))
ker = self.gp3.kernel.k3_ee(
X1, X2, sig=self.sigma_3b, rc=self.r_cut, theta=self.theta)
ker_11 = self.gp3.kernel.k3_ee(
X1, X1, sig=self.sigma_3b, rc=self.r_cut, theta=self.theta)
ker_22 = self.gp3.kernel.k3_ee(
X2, X2, sig=self.sigma_3b, rc=self.r_cut, theta=self.theta)
ker2 = np.square(ker/np.sqrt(ker_11*ker_22))
del ker_11, ker_22, X1, X2, ker
return ker2
def ker_mb(self, X1, X2):
X1, X2 = np.reshape(X1, (self.natoms - 1, 5)
), np.reshape(X2, (self.natoms - 1, 5))
X1, X2 = X1[:, :3], X2[:, :3]
outer = X1[:, None, :] - X2[None, :, :]
ker = np.exp(-(np.sum(np.square(outer)/(2.0*self.sigma_mb**2), axis=2)))
ker = np.einsum('ij -> ', ker)
del outer, X1, X2
return ker
def rvm(self, method='2b', batchsize=1000):
'''
Relevance vector machine sampling. This method trains a 2-, 3- or many-body kernel on the energies of the
partitioned training dataset. The algortihm starts from a dataset containing a batchsize number of training
configurations extracted from the whole dataset at random. Subsequently, a rvm method is called and a
variable number of configurations is selected. These are then included in the next batch, and the operation
is repeated until every point in the training dataset was included at least once.
The function then returns the indexes of the points returned by the last call of the rvm method.
Args:
method (str): 2b or 3b, speciefies which energy kernel to use to calculate the gram matrix
batchsize (int): number of training points to include in each iteration of the gram matrix calculation
Returns:
MAE (float): Mean absolute error made by the final iteration of the method on the test set
SMAE (float):Standard deviation of the absolute error made by the final iteration of the method on the test set
RMSE (float): Root mean squared error made by the final iteration of the method on the test set
index (list): List containing the indexes of all the selected training points
total_time (float): Excecution time in seconds
'''
t0 = time.time()
if method == '2b':
rvm = RVR(kernel=self.ker_2b)
if method == '3b':
rvm = RVR(kernel=self.ker_3b)
if method == 'mb':
rvm = RVR(kernel=self.ker_mb)
if method == 'normalized_3b':
rvm = RVR(kernel=self.normalized_3b)
split = len(self.X)//batchsize + 1 # Decide the number of batches
# Create a number of evenly sized batches
batches = np.array_split(range(len(self.X)), split)
reshaped_X, reshaped_x = np.reshape(self.X, (len(
self.X), 5*(self.natoms-1))), np.reshape(self.x, (len(self.x), 5*(self.natoms-1)))
index = []
for s in np.arange(len(batches)):
batch_index = list(set(index).union(batches[s]))
rvm.fit(reshaped_X[batch_index], self.Y[batch_index])
index = np.asarray(batch_index)[rvm.active_]
y_hat, var = rvm.predict_dist(reshaped_x)
error = y_hat - self.y
MAE = np.mean(np.abs(error))
SMAE = np.std(np.abs(error))
RMSE = np.sqrt(np.mean((error) ** 2))
del var, rvm, split, batches, batch_index, reshaped_X, reshaped_x, y_hat, error
tf = time.time()
total_time = tf-t0
index = list(index)
return MAE, SMAE, RMSE, index, total_time
    def ivm_e(self, method='2b', ntrain=500, batchsize=1000, use_pred_error=True, error_metric='energy'):
        '''
        Importance vector machine sampling for energies. This method uses a 2- or 3-body energy kernel and trains
        it on the energies of the partitioned training dataset. The algorithm starts from two configurations chosen at
        random. At each iteration, the predicted variance or the observed error calculated on batchsize configurations
        from the training set is calculated, and the configuration with the highest value is included in the final set.
        The method finishes when ntrain configurations are included in the final set.
        Args:
            method (str): 2b or 3b, specifies which energy kernel to use
            ntrain (int): Number of training points to extract from the training dataset
            batchsize (int): number of training points to use in each iteration of the error prediction
            use_pred_error (bool): if true, the predicted variance is used as a metric of the ivm, if false the
                observed error is used instead
            error_metric (str): specifies whether the final error is calculated on energies or on forces
        Returns:
            MAE (float): Mean absolute error made by the final iteration of the method on the test set
            SMAE (float): Standard deviation of the absolute error made by the final iteration of the method on the test set
            RMSE (float): Root mean squared error made by the final iteration of the method on the test set
            index (list): List containing the indexes of all the selected training points
            total_time (float): Execution time in seconds
        '''
        t0 = time.time()
        m = self.get_the_right_model(method)
        ndata = len(self.Y)
        # Boolean mask of points not yet added to the training set.
        mask = np.ones(ndata).astype(bool)
        # Seed the model with two configurations chosen at random.
        randints = random.sample(range(ndata), 2)
        m.fit_energy(self.X[randints], self.Y[randints])
        mask[randints] = False
        for i in np.arange(min(ntrain-2, ndata-2)):
            # Shrink the batch once fewer unselected points remain.
            if batchsize > ndata-i-2:
                batchsize = ndata-i-2
            # Candidate positions within the still-unselected subset.
            rand_test = random.sample(range(ndata-2-i), batchsize)
            if use_pred_error:
                pred, pred_var = m.predict_energy(
                    self.X[mask][rand_test], return_std=True)
                worst_thing = np.argmax(pred_var)  # L1 norm
            else:
                pred = m.predict_energy(self.X[mask][rand_test])
                worst_thing = np.argmax(
                    abs(pred - self.Y[mask][rand_test]))  # L1 norm
            # NOTE(review): worst_thing indexes the rand_test batch, yet below it
            # indexes self.X[mask] directly while rand_test[worst_thing] indexes
            # the full-length mask -- these index spaces differ; confirm intended.
            m.update_energy(self.X[mask][worst_thing],
                            self.Y[mask][worst_thing])
            mask[rand_test[worst_thing]] = False
        if error_metric == 'force':
            y_hat = m.predict(self.x)
            error = y_hat - self.y_force
            MAE = np.mean(np.sqrt(np.sum(np.square(error), axis=1)))
            SMAE = np.std(np.sqrt(np.sum(np.square(error), axis=1)))
            RMSE = np.sqrt(np.mean((error) ** 2))
        else:
            y_hat = m.predict_energy(self.x)
            error = y_hat - self.y
            MAE = np.mean(np.abs(error))
            SMAE = np.std(np.abs(error))
            RMSE = np.sqrt(np.mean((error) ** 2))
        # Selected points are those whose mask entry was cleared above.
        index = np.arange(len(self.X))[~mask]
        del mask, worst_thing, pred, rand_test, m, ndata, randints, y_hat
        tf = time.time()
        index = list(index)
        total_time = tf-t0
        return MAE, SMAE, RMSE, index, total_time
    def ivm_f(self, method='2b', ntrain=500, batchsize=1000, use_pred_error=True, error_metric='energy'):
        '''
        Importance vector machine sampling for forces. This method uses a 2- or 3-body kernel and trains
        it on the forces of the partitioned training dataset. The algorithm starts from two configurations chosen at
        random. At each iteration, the predicted variance or the observed error calculated on batchsize configurations
        from the training set is calculated, and the configuration with the highest value is included in the final set.
        The method finishes when ntrain configurations are included in the final set.
        Args:
            method (str): 2b or 3b, specifies which kernel to use
            ntrain (int): Number of training points to extract from the training dataset
            batchsize (int): number of training points to use in each iteration of the error prediction
            use_pred_error (bool): if true, the predicted variance is used as a metric of the ivm, if false the
                observed error is used instead
            error_metric (str): specifies whether the final error is calculated on energies or on forces
        Returns:
            MAE (float): Mean absolute error made by the final iteration of the method on the test set
            SMAE (float): Standard deviation of the absolute error made by the final iteration of the method on the test set
            RMSE (float): Root mean squared error made by the final iteration of the method on the test set
            index (list): List containing the indexes of all the selected training points
            total_time (float): Execution time in seconds
        '''
        t0 = time.time()
        m = self.get_the_right_model(method)
        ndata = len(self.Y_force)
        # Boolean mask of points not yet added to the training set.
        mask = np.ones(ndata).astype(bool)
        # Seed the model with two configurations chosen at random.
        randints = random.sample(range(ndata), 2)
        m.fit(self.X[randints], self.Y_force[randints])
        mask[randints] = False
        for i in np.arange(min(ntrain-2, ndata-2)):
            # Shrink the batch once fewer unselected points remain.
            if batchsize > ndata-i-2:
                batchsize = ndata-i-2
            # Candidate positions within the still-unselected subset.
            rand_test = random.sample(range(ndata-2-i), batchsize)
            if use_pred_error:
                pred, pred_var = m.predict(
                    self.X[mask][rand_test], return_std=True)
                worst_thing = np.argmax(np.sum(np.abs(pred_var), axis=1))
                # L1 norm
            else:
                pred = m.predict(self.X[mask][rand_test])
                worst_thing = np.argmax(
                    np.sum(abs(pred - self.Y_force[mask][rand_test]), axis=1))  # L1 norm
            # NOTE(review): worst_thing indexes the rand_test batch, yet below it
            # indexes self.X[mask] directly while rand_test[worst_thing] indexes
            # the full-length mask -- these index spaces differ; confirm intended.
            m.update_force(self.X[mask][worst_thing],
                           self.Y_force[mask][worst_thing])
            mask[rand_test[worst_thing]] = False
        if error_metric == 'force':
            y_hat = m.predict(self.x)
            error = y_hat - self.y_force
            MAE = np.mean(np.sqrt(np.sum(np.square(error), axis=1)))
            SMAE = np.std(np.sqrt(np.sum(np.square(error), axis=1)))
            RMSE = np.sqrt(np.mean((error) ** 2))
        else:
            y_hat = m.predict_energy(self.x)
            error = y_hat - self.y
            MAE = np.mean(np.abs(error))
            SMAE = np.std(np.abs(error))
            RMSE = np.sqrt(np.mean((error) ** 2))
        # Selected points are those whose mask entry was cleared above.
        index = list(np.arange(len(self.X))[~mask])
        del mask, worst_thing, pred, rand_test, m, ndata, randints, error
        tf = time.time()
        total_time = tf-t0
        return MAE, SMAE, RMSE, index, total_time
    def grid(self, method='2b', nbins=100, error_metric='energy', return_error=True):
        '''
        Grid sampling, based either on interatomic distances (2b) or on triplets of interatomic distances (3b).
        Training configurations are shuffled and are then included in the final database only if they contain a
        distance value (or a triplet of distance values) which is not yet present in the binned histogram of
        distance values (or triplets of distance values) of the final database. This method is very fast since it
        does not evaluate kernel functions nor gram matrices.
        Args:
            method (str): 2b or 3b, specifies whether single distances or triplets of distances are binned
            nbins (int): Number of bins to use when building an histogram of interatomic distances.
                If method is 2b, this will specify the value only for distances from the central atom, if
                method is 3b, this will specify the value for triplets of distances.
            error_metric (str): specifies whether the final error is calculated on energies or on forces
            return_error (bool): if true, error on test set using sampled database is returned
        Returns:
            MAE (float): Mean absolute error made by the final iteration of the method on the test set
            SMAE (float): Standard deviation of the absolute error made by the final iteration of the method on the test set
            RMSE (float): Root mean squared error made by the final iteration of the method on the test set
            index (list): List containing the indexes of all the selected training points
            total_time (float): Execution time in seconds
        '''
        t0 = time.time()
        if method == '2b':
            if len(self.elements) == 1:
                stored_histogram = np.zeros(nbins)
                index = []
                ind = np.arange(len(self.X))
                # Visit the snapshots in a random order.
                randomarange = np.random.choice(
                    ind, size=len(self.X), replace=False)
                for j in randomarange:  # for every snapshot of the trajectory file
                    # Neighbour distances from the central atom.
                    distances = np.sqrt(
                        np.einsum('id -> i', np.square(self.X[j][:, :3])))
                    distances[np.where(distances > self.r_cut)] = None
                    this_snapshot_histogram = np.histogram(
                        distances, nbins, (0.0, self.r_cut))
                    # Keep the snapshot if it would fill any previously empty bin.
                    if (stored_histogram - this_snapshot_histogram[0] < 0).any():
                        index.append(j)
                        stored_histogram += this_snapshot_histogram[0]
                m = TwoBodySingleSpeciesModel(
                    self.elements, self.r_cut, self.sigma_2b, self.theta, self.noise)
            if len(self.elements) == 2:
                # One histogram column per unordered element pair.
                stored_histogram = np.zeros((nbins, 3))
                index = []
                ind = np.arange(len(self.X))
                randomarange = np.random.choice(
                    ind, size=len(self.X), replace=False)
                for j in randomarange:  # for every snapshot of the trajectory file
                    distances = np.sqrt(
                        np.einsum('id -> i', np.square(self.X[j][:, :3])))
                    distances[np.where(distances > self.r_cut)] = None
                    element_pairs = list(
                        combinations_with_replacement(self.elements, 2))
                    for k in range(3):
                        if k == 1:  # In the case of two different elements, we have to account for permutation invariance
                            this_element_pair = np.union1d(
                                np.intersect1d(
                                    np.where(self.X[j][:, 3] == element_pairs[k][0]), np.where(self.X[j][:, 4] == element_pairs[k][1])),
                                np.intersect1d(
                                    np.where(self.X[j][:, 3] == element_pairs[k][1]), np.where(self.X[j][:, 4] == element_pairs[k][0])))
                        else:
                            this_element_pair = np.intersect1d(
                                np.where(self.X[j][:, 3] == element_pairs[k][0]), np.where(self.X[j][:, 4] == element_pairs[k][1]))
                        distances_this = distances[this_element_pair]
                        this_snapshot_histogram = np.histogram(
                            distances_this, nbins, (0.0, self.r_cut))
                        if (stored_histogram[:, k] - this_snapshot_histogram[0] < 0).any():
                            index.append(j)
                            stored_histogram[:,
                                             k] += this_snapshot_histogram[0]
                m = TwoBodyTwoSpeciesModel(
                    self.elements, self.r_cut, self.sigma_2b, self.theta, self.noise)
        elif method == '3b':
            if len(self.elements) == 1:
                stored_histogram = np.zeros((nbins, nbins, nbins))
                index = []
                ind = np.arange(len(self.X))
                randomarange = np.random.choice(
                    ind, size=len(self.X), replace=False)
                for j in randomarange:  # for every snapshot of the trajectory file
                    # Prepend the central atom (at the origin) to its neighbours.
                    atoms = np.vstack(([0., 0., 0.], self.X[j][:, :3]))
                    distances = cdist(atoms, atoms)
                    distances[np.where(distances > self.r_cut)] = None
                    distances[np.where(distances == 0)] = None
                    triplets = []
                    for k in np.argwhere(distances[:, 0] > 0):
                        for l in np.argwhere(distances[0, :] > 0):
                            if distances[k, l] > 0:
                                # Store all three cyclic permutations of the triplet.
                                triplets.append(
                                    [distances[0, k], distances[0, l], distances[k, l]])
                                triplets.append(
                                    [distances[0, l], distances[k, l], distances[0, k]])
                                triplets.append(
                                    [distances[k, l], distances[0, k], distances[0, l]])
                    triplets = np.reshape(triplets, (len(triplets), 3))
                    this_snapshot_histogram = np.histogramdd(triplets, bins=(nbins, nbins, nbins),
                                                             range=((0.0, self.r_cut), (0.0, self.r_cut), (0.0, self.r_cut)))
                    if (stored_histogram - this_snapshot_histogram[0] < 0).any():
                        index.append(j)
                        stored_histogram += this_snapshot_histogram[0]
                m = CombinedSingleSpeciesModel(element=self.elements, noise=self.noise, sigma_2b=self.sigma_2b,
                                               sigma_3b=self.sigma_3b, theta_3b=self.theta, r_cut=self.r_cut, theta_2b=self.theta)
            elif len(self.elements) == 2:
                # One 3D histogram per possible element triplet (4 of them).
                stored_histogram = np.zeros((nbins, nbins, nbins, 4))
                index = []
                ind = np.arange(len(self.X))
                randomarange = np.random.choice(
                    ind, size=len(self.X), replace=False)
                for j in randomarange:  # for every snapshot of the trajectory file
                    atoms = np.vstack(([0., 0., 0.], self.X[j][:, :3]))
                    distances = cdist(atoms, atoms)
                    distances[np.where(distances > self.r_cut)] = None
                    distances[np.where(distances == 0)] = None
                    possible_triplets = list(
                        combinations_with_replacement(self.elements, 3))
                    triplets = []
                    elements_triplets = []
                    for k in np.argwhere(distances[:, 0] > 0):
                        for l in np.argwhere(distances[0, :] > 0):
                            if distances[k, l] > 0:
                                elements_triplets.append(
                                    np.sort([self.X[j][0, 3], self.X[j][k-1, 4], self.X[j][l-1, 4]]))
                                triplets.append(
                                    [distances[0, k], distances[0, l], distances[k, l]])
                                triplets.append(
                                    [distances[0, l], distances[k, l], distances[0, k]])
                                triplets.append(
                                    [distances[k, l], distances[0, k], distances[0, l]])
                    elements_triplets = np.reshape(
                        elements_triplets, (len(elements_triplets), 3))
                    triplets = np.reshape(triplets, (len(triplets), 3))
                    this_snapshot_histogram = np.histogramdd(triplets, bins=(nbins, nbins, nbins),
                                                             range=((0.0, self.r_cut), (0.0, self.r_cut), (0.0, self.r_cut)))
                    for k in np.arange(4):
                        # NOTE(review): elements_triplets has one row per triplet while
                        # triplets has three (the permutations); confirm this masking
                        # lines the two arrays up as intended.
                        valid_triplets = triplets[np.where(
                            elements_triplets == possible_triplets[k]), :][0]
                        this_snapshot_histogram = np.histogramdd(valid_triplets, bins=(nbins, nbins, nbins),
                                                                 range=((0.0, self.r_cut), (0.0, self.r_cut), (0.0, self.r_cut)))
                        if (stored_histogram[:, :, :, k] - this_snapshot_histogram[0] < 0).any():
                            index.append(j)
                            stored_histogram[:, :, :,
                                             k] += this_snapshot_histogram[0]
                m = CombinedTwoSpeciesModel(elements=self.elements, noise=self.noise, sigma_2b=self.sigma_2b,
                                            sigma_3b=self.sigma_3b, theta_3b=self.theta, r_cut=self.r_cut, theta_2b=self.theta)
        else:
            print('Method must be either 2b or 3b')
            return 0
        if return_error:
            if error_metric == 'force':
                m.fit(self.X[index], self.Y_force[index])
                y_hat = m.predict(self.x)
                error = y_hat - self.y_force
                MAE = np.mean(np.sqrt(np.sum(np.square(error), axis=1)))
                SMAE = np.std(np.sqrt(np.sum(np.square(error), axis=1)))
                RMSE = np.sqrt(np.mean((error) ** 2))
            else:
                m.fit_energy(self.X[index], self.Y[index])
                y_hat = m.predict_energy(self.x)
                error = y_hat - self.y
                MAE = np.mean(np.abs(error))
                SMAE = np.std(np.abs(error))
                RMSE = np.sqrt(np.mean((error) ** 2))
            del m, distances, this_snapshot_histogram, randomarange, stored_histogram, y_hat, error
            tf = time.time()
            total_time = tf-t0
            index = list(set(index))
            return MAE, SMAE, RMSE, index, total_time
        else:
            index = list(set(index))
            return index
    def cur(self, method='2b', ntrain=1000, batchsize=1000, error_metric='energy'):
        '''
        Sampling using the CUR decomposition technique.
        The complete dataset is first divided into batches, then the energy-energy Gram matrix is
        calculated for each batch. An svd decomposition is subsequently applied to each gram matrix,
        and a number of entries (columns) is selected based on their importance score.
        The method is calibrated so that the final number of training points selected is roughly equal
        to the input parameter ntrain.
        Args:
            method (str): 2b or 3b, specifies which energy kernel to use to calculate the gram matrix
            ntrain (int): Number of training points to be selected from the whole dataset
            batchsize (int): Number of data points to be used for each calculation of the gram matrix.
                Lower values make the computation faster but the error might be higher.
            error_metric (str): specifies whether the final error is calculated on energies or on forces
        Returns:
            MAE (float): Mean absolute error made by the final iteration of the method on the test set
            SMAE (float): Standard deviation of the absolute error made by the final iteration of the method on the test set
            RMSE (float): Root mean squared error made by the final iteration of the method on the test set
            index (list): List containing the indexes of all the selected training points
            total_time (float): Execution time in seconds
        '''
        t0 = time.time()
        split = len(self.X)//batchsize + 1  # Decide the number of batches
        # Create a number of evenly sized batches
        batches = np.array_split(range(len(self.X)), split)
        index = []
        # Target number of selections per batch.
        ntr_per_batch = ntrain//split
        for s in np.arange(split):
            #batches[s] = list(batches[s])
            batch_confs = self.X[batches[s]]
            if method == '2b':
                gram = self.gp2.calc_gram_ee(batch_confs)
            elif method == '3b':
                gram = self.gp3.calc_gram_ee(batch_confs)
            else:
                print('Method must be either 2b or 3b')
                return 0
            u, p, v = np.linalg.svd(gram)
            # Importance score: mean squared component over the top singular vectors.
            v2 = np.square(v)
            score = np.sum(v2[:ntr_per_batch], axis=0)/ntr_per_batch
            # Calculate the score value of the nth percentile of the score distribution. This is used when randomly selecting the columns
            median = np.percentile(score, int(
                100 - 100*(ntr_per_batch/len(score))))
            std = np.std(score)
            # Choose randomly the columns with probability proportional to a sigmoid function applied to the score, centred on median
            prob = np.minimum(np.ones(len(score)), self.sigmoid(
                (score-median)/std/(ntr_per_batch/len(score))))
            rand = np.random.uniform(size=len(score))
            # Accept a column when its acceptance probability beats the draw.
            accepted = np.sign(prob - rand)
            accepted = ((accepted+1)//2).astype(bool)
            watever = batches[s]
            these_ones = watever[accepted]
            index.append(these_ones)
            del score, accepted, prob, median, std, gram, u, s, v
        index = np.concatenate(index).ravel().tolist()
        m = self.get_the_right_model(method)
        if error_metric == 'energy':
            m.fit_energy(self.X[index], self.Y[index])
            y_hat = m.predict_energy(self.x)
            error = y_hat - self.y
            MAE = np.mean(np.abs(error))
            SMAE = np.std(np.abs(error))
            RMSE = np.sqrt(np.mean((error) ** 2))
        else:
            m.fit(self.X[index], self.Y_force[index])
            y_hat = m.predict(self.x)
            error = y_hat - self.y_force
            MAE = np.mean(np.sqrt(np.sum(np.square(error), axis=1)))
            SMAE = np.std(np.sqrt(np.sum(np.square(error), axis=1)))
            RMSE = np.sqrt(np.mean((error) ** 2))
        index_return = np.arange(len(self.X))[index]
        del error, y_hat, m
        tf = time.time()
        return MAE, SMAE, RMSE, list(index_return), tf-t0
def random(self, method='2b', ntrain=500, error_metric='energy', return_error=True):
'''
Random subsampling of training points from the larger training dataset.
Args:
method (str): 2b or 3b, speciefies which energy kernel to use to calculate the gram matrix
ntrain (int): Number of points to include in the final dataset.
errror_metric (str): specifies whether the final error is calculated on energies or on forces
return_error (bool): if True, train a GP and run a test
Returns:
MAE (float): Mean absolute error made by the final iteration of the method on the test set
SMAE (float):Standard deviation of the absolute error made by the final iteration of the method on the test set
RMSE (float): Root mean squared error made by the final iteration of the method on the test set
index (list): List containing the indexes of all the selected training points
total_time (float): Excecution time in seconds
'''
ind = np.arange(len(self.X))
ind_train = np.random.choice(ind, size=ntrain, replace=False)
if return_error:
t0 = time.time()
train_confs = self.X[ind_train]
train_energy = self.Y[ind_train]
train_forces = self.Y_force[ind_train]
m = self.get_the_right_model(method)
if error_metric == 'force':
m.fit(train_confs, train_forces)
y_hat = m.predict(self.x)
error = y_hat - self.y_force
MAE = np.mean(np.sqrt(np.sum(np.square(error), axis=1)))
SMAE = np.std(np.sqrt(np.sum(np.square(error), axis=1)))
RMSE = np.sqrt(np.mean((error) ** 2))
else:
m.fit_energy(train_confs, train_energy)
y_hat = m.predict_energy(self.x)
error = y_hat - self.y
MAE = np.mean(np.abs(error))
SMAE = np.std(np.abs(error))
RMSE = np.sqrt(np.mean((error) ** 2))
del m, train_confs, train_energy, train_forces, error, y_hat
tf = time.time()
index = list(ind_train)
total_time = tf-t0
return MAE, SMAE, RMSE, index, total_time
else:
return list(ind_train)
def test_forces(self, index, method='2b', sig_2b=0.2, sig_3b=0.8, noise=0.001):
'''
Random subsampling of training points from the larger training dataset.
Args:
method (str): 2b or 3b, speciefies which energy kernel to use to calculate the gram matrix
ntrain (int): Number of points to include in the final dataset.
errror_metric (str): specifies whether the final error is calculated on energies or on forces
Returns:
MAE (float): Mean absolute error made by the final iteration of the method on the test set
SMAE (float):Standard deviation of the absolute error made by the final iteration of the method on the test set
RMSE (float): Root mean squared error made by the final iteration of the method on the test set
index (list): List containing the indexes of all the selected training points
total_time (float): Excecution time in seconds
'''
self.sigma_2b, self.sigma_3b, self.noise = sig_2b, sig_3b, noise
m = self.get_the_right_model(method)
m.fit(self.X[index], self.Y_force[index])
y_hat = m.predict(self.x)
error = self.y_force - y_hat
MAEF = np.mean(np.sqrt(np.sum(np.square(error), axis=1)))
SMAEF = np.std(np.sqrt(np.sum(np.square(error), axis=1)))
RMSE = np.sqrt(np.mean((error) ** 2))
print("MAEF: %.4f SMAEF: %.4f RMSE: %.4f" % (MAEF, SMAEF, RMSE))
del m, error, y_hat, index
return MAEF, SMAEF, RMSE
| 42,252 | 50.030193 | 198 | py |
mff | mff-master/mff/__init__.py | import os
from .gp import GaussianProcess
Mffpath = __path__[0] + "/cache/"
if not os.path.exists(Mffpath):
os.mkdir(Mffpath)
__all__ = [GaussianProcess]
| 162 | 13.818182 | 33 | py |
mff | mff-master/mff/calculators.py | # -*- coding: utf-8 -*-
import logging
from abc import ABCMeta, abstractmethod
from itertools import combinations_with_replacement, islice
from pathlib import Path
import numpy as np
from asap3 import FullNeighborList
from ase.calculators.calculator import Calculator, all_changes
logger = logging.getLogger(__name__)
def eam_descriptor(dist, norm, rc, alpha, r0):
    """ Function used to return the eam descriptor for an atom given
    the descriptor's hyperparameters and the set of distances of neighbours.
    Args:
        dist (array): Array of distances of neighbours
        norm (array): Array of versors of neighbours
        rc (float): cutoff radius
        alpha (float): exponent prefactor of the descriptor
        r0 (float): radius in the descriptor
    Returns:
        q (float): Eam descriptor value
        dqdr (array): Derivative of the descriptor w.r.t. each neighbour versor
    """
    # Smooth cutoff function and exponential pair contribution.
    q2 = 0.5*(1 + np.cos(np.pi*dist/rc))
    q1 = np.exp(-2*alpha*(dist/r0 - 1))
    q = -sum(q1*q2)**0.5
    dq1 = -2*alpha/r0 * q1
    dq2 = - np.pi/(2*rc) * np.sin(np.pi*dist/rc)
    # Guard the division explicitly: with numpy inputs 1/(2*q) at q == 0 yields
    # inf (no ZeroDivisionError is raised), so the old try/except never fired.
    # An atom with no neighbours has q == 0 and a zero gradient.
    if q == 0:
        dqdrij = np.zeros(len(q1))
    else:
        dqdrij = -1/(2*q) * (dq1*q2 + q1*dq2)
    dqdr = -dqdrij[:, None]*norm
    return q, dqdr
class MappedPotential(Calculator, metaclass=ABCMeta):
    """Abstract base class for single-species mapped-potential ase calculators.
    Holds the cutoff radius and lazily-built neighbour list; concrete
    subclasses add the interpolated grids and extend ``calculate``.
    """
    # 'Properties calculator can handle (energy, forces, ...)'
    implemented_properties = ['energy', 'forces']
    # 'Default parameters'
    default_parameters = {}
    @abstractmethod
    def __init__(self, r_cut, **kwargs):
        """
        Args:
            r_cut (float): cutoff radius used to build the neighbour list
        """
        super().__init__(**kwargs)
        self.r_cut = r_cut
        # Neighbour list; built by initialize() on the first calculate() call.
        self.nl = None
    def calculate(self, atoms=None, properties=('energy', 'forces'), system_changes=all_changes):
        """Do the calculation.
        properties: list of str
            List of what needs to be calculated.  Can be any combination
            of 'energy', 'forces', 'stress', 'dipole', 'charges', 'magmom'
            and 'magmoms'.
        system_changes: list of str
            List of what has changed since last calculation.  Can be
            any combination of these six: 'positions', 'numbers', 'cell',
            'pbc', 'initial_charges' and 'initial_magmoms'.
        Subclasses need to implement this, but can ignore properties
        and system_changes if they want.  Calculated properties should
        be inserted into results dictionary like shown in this dummy
        example::
            self.results = {'energy': 0.0,
                            'forces': np.zeros((len(atoms), 3)),
                            'stress': np.zeros(6),
                            'dipole': np.zeros(3),
                            'charges': np.zeros(len(atoms)),
                            'magmom': 0.0,
                            'magmoms': np.zeros(len(atoms))}
        The subclass implementation should first call this
        implementation to set the atoms attribute.
        """
        super().calculate(atoms, properties, system_changes)
        # Rebuild the neighbour list when the atom identities changed.
        if 'numbers' in system_changes:
            logger.info('numbers is in system_changes')
            self.initialize(self.atoms)
        self.nl.check_and_update(self.atoms)
        # Zeroed accumulators; subclasses add their contributions on top.
        self.results = {'energy': 0.0,
                        'forces': np.zeros((len(atoms), 3))}
    def initialize(self, atoms):
        """Build the full neighbour list for the given atoms object."""
        logger.info('initialize')
        self.nl = FullNeighborList(self.r_cut, atoms=atoms, driftfactor=0.)
    def set(self, **kwargs):
        """Set parameters, resetting cached results if anything changed."""
        changed_parameters = super().set(**kwargs)
        if changed_parameters:
            self.reset()
class ManySpeciesMappedPotential(Calculator, metaclass=ABCMeta):
    """Abstract base class for multi-species mapped-potential ase calculators.
    Like MappedPotential but additionally stores the ordered list of
    atomic numbers the mapped grids were built for.
    """
    # 'Properties calculator can handle (energy, forces, ...)'
    implemented_properties = ['energy', 'forces']
    # 'Default parameters'
    default_parameters = {}
    @abstractmethod
    def __init__(self, r_cut, elements, **kwargs):
        """
        Args:
            r_cut (float): cutoff radius used to build the neighbour list
            elements (list): ordered atomic numbers of the mapped species
        """
        super().__init__(**kwargs)
        self.elements = elements
        self.r_cut = r_cut
        # Neighbour list; built by initialize() on the first calculate() call.
        self.nl = None
    def calculate(self, atoms=None, properties=('energy', 'forces'), system_changes=all_changes):
        """Do the calculation.
        properties: list of str
            List of what needs to be calculated.  Can be any combination
            of 'energy', 'forces', 'stress', 'dipole', 'charges', 'magmom'
            and 'magmoms'.
        system_changes: list of str
            List of what has changed since last calculation.  Can be
            any combination of these six: 'positions', 'numbers', 'cell',
            'pbc', 'initial_charges' and 'initial_magmoms'.
        Subclasses need to implement this, but can ignore properties
        and system_changes if they want.  Calculated properties should
        be inserted into results dictionary like shown in this dummy
        example::
            self.results = {'energy': 0.0,
                            'forces': np.zeros((len(atoms), 3)),
                            'stress': np.zeros(6),
                            'dipole': np.zeros(3),
                            'charges': np.zeros(len(atoms)),
                            'magmom': 0.0,
                            'magmoms': np.zeros(len(atoms))}
        The subclass implementation should first call this
        implementation to set the atoms attribute.
        """
        super().calculate(atoms, properties, system_changes)
        # Rebuild the neighbour list when the atom identities changed.
        if 'numbers' in system_changes:
            logger.info('numbers is in system_changes')
            self.initialize(self.atoms)
        self.nl.check_and_update(self.atoms)
        # Zeroed accumulators; subclasses add their contributions on top.
        self.results = {'energy': 0.0,
                        'forces': np.zeros((len(atoms), 3))}
    def initialize(self, atoms):
        """Build the full neighbour list for the given atoms object."""
        logger.info('initialize')
        self.nl = FullNeighborList(self.r_cut, atoms=atoms, driftfactor=0.)
    def set(self, **kwargs):
        """Set parameters, resetting cached results if anything changed."""
        changed_parameters = super().set(**kwargs)
        if changed_parameters:
            self.reset()
class TwoBodySingleSpecies(MappedPotential):
    """A mapped 2-body calculator for ase

    Attributes:
        grid_2b (object): 1D Spline interpolator for the 2-body mapped grid
        results(dict): energy and forces calculated on the atoms object
    """

    def __init__(self, r_cut, grid_2b, **kwargs):
        """
        Args:
            grid_2b (object): 1D Spline interpolator for the 2-body mapped grid
            r_cut (float): cutoff radius
        """
        super().__init__(r_cut, **kwargs)
        self.grid_2b = grid_2b

    def calculate(self, atoms=None, properties=('energy', 'forces'), system_changes=all_changes):
        """Do the calculation.
        """
        super().calculate(atoms, properties, system_changes)
        n_atoms = len(self.atoms)
        total_forces = np.zeros((n_atoms, 3))
        energies = np.zeros((n_atoms, 1))
        for atom_idx in range(n_atoms):
            _, rel_pos, sq_dists = self.nl.get_neighbors(atom_idx)
            r = np.sqrt(sq_dists)
            versors = rel_pos / r.reshape(-1, 1)
            # Half of each pair energy is attributed to this atom.
            energies[atom_idx] = np.sum(0.5 * self.grid_2b(r, nu=0), axis=0)
            pair_forces = self.grid_2b(r, nu=1)
            total_forces[atom_idx] = np.sum(
                versors * pair_forces.reshape(-1, 1), axis=0)
        self.results['energy'] += np.sum(energies)
        self.results['forces'] += total_forces
class ThreeBodySingleSpecies(MappedPotential):
    """A mapped 3-body calculator for ase
    Attributes:
        grid_3b (object): 3D Spline interpolator for the 3-body mapped grid
        results(dict): energy and forces calculated on the atoms object
    """
    def __init__(self, r_cut, grid_3b, **kwargs):
        """
        Args:
            grid_3b (object): 3D Spline interpolator for the 3-body mapped grid
            r_cut (float): cutoff radius
        """
        super().__init__(r_cut, **kwargs)
        self.grid_3b = grid_3b
    def calculate(self, atoms=None, properties=('energy', 'forces'), system_changes=all_changes):
        """Do the calculation.
        """
        super().calculate(atoms, properties, system_changes)
        forces = np.zeros((len(self.atoms), 3))
        potential_energies = np.zeros((len(self.atoms), 1))
        indices, distances, positions = self.find_triplets()
        d_ij, d_jk, d_ki = np.hsplit(distances, 3)
        # Evaluate energy and the three partial derivatives for every triplet.
        mapped = self.grid_3b.ev_all(d_ij, d_jk, d_ki)
        for (i, j, k), energy, dE_ij, dE_jk, dE_ki in zip(indices, mapped[0], mapped[1], mapped[2], mapped[3]):
            forces[i] += positions[(i, j)] * dE_ij + \
                positions[(i, k)] * dE_ki  # F = - dE/dx
            forces[j] += positions[(j, k)] * dE_jk + \
                positions[(j, i)] * dE_ij  # F = - dE/dx
            forces[k] += positions[(k, i)] * dE_ki + \
                positions[(k, j)] * dE_jk  # F = - dE/dx
            potential_energies[
                [i, j, k]] += energy  # Energy of an atom is the sum of 1/3 of every triplet it is in
        self.results['energy'] += np.sum(potential_energies)
        self.results['forces'] += forces
    def find_triplets(self):
        '''Function that efficiently finds all of the valid triplets of atoms in the atoms object.
        Returns:
            indices (array): array containing the indices of atoms belonging to any valid triplet.
                Has shape T by 3 where T is the number of valid triplets in the atoms object
            distances (array): array containing the relative distances of every triplet of atoms.
                Has shape T by 3 where T is the number of valid triplets in the atoms object
            positions (dictionary): versor of position w.r.t. the central atom of every atom indexed in indices.
                Has shape T by 3 where T is the number of valid triplets in the atoms object
        '''
        atoms, nl = self.atoms, self.nl
        # atomic_numbers = self.atoms.get_array('numbers', copy=False)
        indices, distances, positions = [], [], dict()
        for i in range(len(atoms)):
            inds, pos, dists2 = nl.get_neighbors(i)
            # # Limitation
            # assert len(inds) is len(np.unique(inds)
            #                         ), "There are repetitive indices!\n{}".format(inds)
            # ignoring already visited atoms
            inds, pos, dists2 = inds[inds >
                                     i], pos[inds > i, :], dists2[inds > i]
            dists = np.sqrt(dists2)
            for local_ind, (j, pos_ij, dist_ij) in enumerate(zip(inds, pos, dists)):
                # Caching local displacement vectors
                positions[(i, j)], positions[(j, i)] = pos_ij / \
                    dist_ij, -pos_ij / dist_ij
                for k, dist_ik in islice(zip(inds, dists), local_ind + 1, None):
                    try:
                        # k must also be a neighbour of j for a closed triplet.
                        jk_ind = list(nl[j]).index(k)
                    except ValueError:
                        continue  # no valid triplet
                    _, _, dists_j = nl.get_neighbors(j)
                    indices.append([i, j, k])
                    distances.append(
                        [dist_ij, np.sqrt(dists_j[jk_ind]), dist_ik])
        return np.array(indices), np.array(distances), positions
class EamSingleSpecies(MappedPotential):
    """A mapped Eam calculator for ase

    Attributes:
        grid_eam (object): 1D Spline interpolator for the eam mapped grid
        results(dict): energy and forces calculated on the atoms object
    """

    def __init__(self, r_cut, grid_eam, alpha, r0, **kwargs):
        """
        Args:
            grid_eam (object): 1D Spline interpolator for the eam mapped grid
            r_cut (float): cutoff radius
            alpha (float): Exponential prefactor of the eam Descriptor
            r0 (float): Radius in the exponent of the eam Descriptor
        """
        super().__init__(r_cut, **kwargs)
        self.grid_eam = grid_eam
        self.alpha = alpha
        self.r0 = r0

    def calculate(self, atoms=None, properties=('energy', 'forces'), system_changes=all_changes):
        """Do the calculation.
        """
        super().calculate(atoms, properties, system_changes)
        n_atoms = len(self.atoms)
        total_forces = np.zeros((n_atoms, 3))
        energies = np.zeros((n_atoms, 1))
        for atom_idx in range(n_atoms):
            _, rel_pos, sq_dists = self.nl.get_neighbors(atom_idx)
            r = np.sqrt(sq_dists)
            versors = rel_pos / r.reshape(-1, 1)
            # Descriptor value and its derivative w.r.t. neighbour positions.
            q, dq_dr = eam_descriptor(
                r, versors, self.r_cut, self.alpha, self.r0)
            energies[atom_idx] = np.sum(self.grid_eam(q, nu=0), axis=0)
            dE_dq = self.grid_eam(q, nu=1)
            total_forces[atom_idx] = np.sum(
                dq_dr * dE_dq.reshape(-1, 1), axis=0)
        self.results['energy'] += np.sum(energies)
        self.results['forces'] += total_forces
class CombinedSingleSpecies(TwoBodySingleSpecies, ThreeBodySingleSpecies):
    """Combined 2- and 3-body single-species mapped calculator for ase.

    Relies on cooperative multiple inheritance: each parent class consumes
    its own grid keyword argument and forwards the rest via **kwargs.
    """

    def __init__(self, r_cut, grid_2b, grid_3b, **kwargs):
        super().__init__(r_cut, grid_2b=grid_2b, grid_3b=grid_3b, **kwargs)
class TwoThreeEamSingleSpecies(TwoBodySingleSpecies, ThreeBodySingleSpecies, EamSingleSpecies):
    """Combined 2-body, 3-body and EAM single-species mapped calculator.

    Relies on cooperative multiple inheritance: each parent class consumes
    its own keyword arguments and forwards the rest via **kwargs.
    """

    def __init__(self, r_cut, grid_2b, grid_3b, grid_eam, alpha, r0, **kwargs):
        super().__init__(r_cut, grid_2b=grid_2b, grid_3b=grid_3b,
                         grid_eam=grid_eam, alpha=alpha, r0=r0, **kwargs)
class TwoBodyManySpecies(ManySpeciesMappedPotential):
    """A mapped 2-body many-species calculator for ase.

    Attributes:
        elements (list): sorted atomic numbers of the mapped species
        grids_2b (dict): 1D spline interpolators of the 2-body mapped grids,
            keyed by the pair of atomic numbers (element_i, element_j) with
            element_i <= element_j
        results (dict): energy and forces calculated on the atoms object
    """

    def __init__(self, r_cut, elements, grids_2b, **kwargs):
        """
        Args:
            r_cut (float): cutoff radius
            elements (list): atomic numbers of the mapped species
            grids_2b (dict): 1D spline interpolators of the 2-body mapped
                grids, keyed by the sorted pair of atomic numbers they
                describe (e.g. element0-element0, element0-element1, ...)
        """
        super().__init__(r_cut, elements, **kwargs)
        self.elements = list(np.sort(elements))
        self.grids_2b = grids_2b

    def calculate(self, atoms=None, properties=('energy', 'forces'), system_changes=all_changes):
        """Do the calculation.
        """
        super().calculate(atoms, properties, system_changes)
        forces = np.zeros((len(self.atoms), 3))
        potential_energies = np.zeros((len(self.atoms), 1))

        for i, atom in enumerate(self.atoms):
            inds, pos, dists2 = self.nl.get_neighbors(i)
            dist = np.sqrt(dists2)
            norm = pos / dist.reshape(-1, 1)
            energy_local = np.zeros_like(dist)
            fs_scalars = np.zeros_like(dist)
            atom_element_index = atom.number
            for element in self.elements:
                # Read from self.atoms (set by super().calculate) so the
                # method also works when ``atoms`` is None, as ase allows;
                # the original read the possibly-None parameter instead.
                local_inds = np.argwhere(self.atoms.numbers[inds] == element)
                if len(local_inds) > 0:
                    # Grid keys store the element pair in increasing order.
                    ellist = tuple(sorted([atom_element_index, element]))
                    local_grid = self.grids_2b[ellist]
                    energy_local[local_inds] = local_grid(
                        dist[local_inds], nu=0)
                    fs_scalars[local_inds] = local_grid(dist[local_inds], nu=1)
            potential_energies[i] = np.sum(energy_local, axis=0)
            forces[i] = np.sum(norm * fs_scalars.reshape(-1, 1), axis=0)

        self.results['energy'] += np.sum(potential_energies)
        self.results['forces'] += forces
class ThreeBodyManySpecies(ManySpeciesMappedPotential):
    """A mapped 3-body many-species calculator for ase.

    Attributes:
        elements (list): sorted atomic numbers of the mapped species
        grids_3b (dict): 3D spline interpolators of the 3-body mapped grids,
            keyed by the sorted triplet of atomic numbers they describe
        results (dict): energy and forces calculated on the atoms object
    """

    def __init__(self, r_cut, elements, grids_3b, **kwargs):
        """
        Args:
            r_cut (float): cutoff radius
            elements (list): atomic numbers of the mapped species
            grids_3b (dict): 3D spline interpolators of the 3-body mapped
                grids, keyed by sorted element triplets (element0-element0-element0,
                element0-element0-element1, ...)
        """
        super().__init__(r_cut, elements, **kwargs)
        self.elements = list(np.sort(elements))
        self.grids_3b = grids_3b

    def calculate(self, atoms=None, properties=('energy', 'forces'), system_changes=all_changes):
        """Do the calculation.
        """
        super().calculate(atoms, properties, system_changes)
        forces = np.zeros((len(self.atoms), 3))
        potential_energies = np.zeros((len(self.atoms), 1))
        indices, distances, positions = self.find_triplets(atoms)

        # Sorted element triplet of every geometric triplet found.
        el_mapping = atoms.get_atomic_numbers()
        el_indices = np.sort(el_mapping[indices], axis=1)

        d_ij, d_jk, d_ki = np.hsplit(distances, 3)

        # Partition the triplets by element composition so that each group
        # can be evaluated on its own mapped grid in one vectorized call.
        list_triplets, list_grids = [], []
        for trip in combinations_with_replacement(self.elements, 3):
            is_this_the_right_triplet = np.sum(
                el_indices == [trip], axis=1) == 3
            list_triplets.append(is_this_the_right_triplet)
            list_grids.append(trip)

        for r in np.arange(len(list_triplets)):
            mapped = self.grids_3b[list_grids[r]].ev_all(
                d_ij[list_triplets[r]], d_jk[list_triplets[r]],
                d_ki[list_triplets[r]])
            for (i, j, k), energy, dE_ij, dE_jk, dE_ki in zip(
                    indices[list_triplets[r]], mapped[0], mapped[1],
                    mapped[2], mapped[3]):
                forces[i] += positions[(i, j)] * dE_ij + \
                    positions[(i, k)] * dE_ki  # F = - dE/dx
                forces[j] += positions[(j, k)] * dE_jk + \
                    positions[(j, i)] * dE_ij  # F = - dE/dx
                forces[k] += positions[(k, i)] * dE_ki + \
                    positions[(k, j)] * dE_jk  # F = - dE/dx
                # NOTE(review): the full triplet energy is added to each of
                # the three atoms; presumably ev_all already returns the
                # per-atom third — confirm against the single-species code.
                potential_energies[[i, j, k]] += energy

        self.results['energy'] += np.sum(potential_energies)
        self.results['forces'] += forces

    def find_triplets(self, atoms):
        '''Function that efficiently finds all of the valid triplets of atoms in the atoms object.
        Returns:
            indices (array): array containing the indices of atoms belonging to any valid triplet.
                Has shape T by 3 where T is the number of valid triplets in the atoms object
            distances (array): array containing the relative distances of every triplet of atoms.
                Has shape T by 3 where T is the number of valid triplets in the atoms object
            positions (dictionary): versor of position w.r.t. the central atom of every atom indexed in indices.
                Has shape T by 3 where T is the number of valid triplets in the atoms object
        '''
        nl = self.nl
        indices, distances, positions = [], [], dict()

        for i in range(len(atoms)):
            inds, pos, dists2 = nl.get_neighbors(i)
            # Bugfix: compare the lengths with ==, not ``is`` — identity of
            # two ints is an implementation detail (small-int caching).
            assert len(inds) == len(np.unique(inds)
                                    ), "There are repetitive indices!\n{}".format(inds)

            # ignoring already visited atoms (keep neighbours with j > i)
            keep = inds > i
            inds, pos, dists2 = inds[keep], pos[keep, :], dists2[keep]
            dists = np.sqrt(dists2)

            for local_ind, (j, pos_ij, dist_ij) in enumerate(zip(inds, pos, dists)):
                # Caching local displacement versors for both directions
                positions[(i, j)], positions[(j, i)] = pos_ij / \
                    dist_ij, -pos_ij / dist_ij

                # Hoisted out of the k-loop: depends only on j.
                _, _, dists_j = nl.get_neighbors(j)

                for k, dist_ik in islice(zip(inds, dists), local_ind + 1, None):
                    try:
                        jk_ind = list(nl[j]).index(k)
                    except ValueError:
                        continue  # j and k are not neighbours: no valid triplet
                    indices.append([i, j, k])
                    distances.append(
                        [dist_ij, np.sqrt(dists_j[jk_ind]), dist_ik])

        return np.array(indices), np.array(distances), positions
class EamManySpecies(ManySpeciesMappedPotential):
    """A mapped EAM many-species calculator for ase.

    Attributes:
        grids_eam (dict): 1D spline interpolators, one per element,
            keyed by atomic number
        results (dict): energy and forces calculated on the atoms object
    """

    def __init__(self, r_cut, elements, grids_eam, alpha, r0, **kwargs):
        """
        Args:
            elements (list): atomic numbers of the mapped species
            grids_eam (dict): 1D spline interpolators, one per element
            r_cut (float): cutoff radius
            alpha (float): exponential prefactor of the EAM descriptor
            r0 (float): radius in the exponent of the EAM descriptor
        """
        super().__init__(r_cut, elements, **kwargs)
        self.elements = list(np.sort(elements))
        self.r_cut = r_cut
        self.grids_eam = grids_eam
        self.alpha = alpha
        self.r0 = r0

    def calculate(self, atoms=None, properties=('energy', 'forces'), system_changes=all_changes):
        """Do the calculation.
        """
        super().calculate(atoms, properties, system_changes)

        n_atoms = len(self.atoms)
        forces = np.zeros((n_atoms, 3))
        potential_energies = np.zeros((n_atoms, 1))
        numbers = atoms.get_atomic_numbers()

        for idx in range(n_atoms):
            neigh_inds, neigh_pos, neigh_d2 = self.nl.get_neighbors(idx)
            neigh_dist = np.sqrt(neigh_d2)
            versors = neigh_pos / neigh_dist.reshape(-1, 1)

            # Descriptor of the local environment and its derivative.
            desc, desc_der = eam_descriptor(
                neigh_dist, versors, self.r_cut, self.alpha, self.r0)

            # The grid used depends on the element of the central atom.
            grid = self.grids_eam[numbers[idx]]
            potential_energies[idx] = np.sum(grid(desc, nu=0), axis=0)
            forces[idx] = np.sum(
                desc_der * grid(desc, nu=1).reshape(-1, 1), axis=0)

        self.results['energy'] += np.sum(potential_energies)
        self.results['forces'] += forces
class CombinedManySpecies(TwoBodyManySpecies, ThreeBodyManySpecies):
    """Combined 2- and 3-body many-species mapped calculator for ase.

    Relies on cooperative multiple inheritance: each parent class consumes
    its own grids keyword argument and forwards the rest via **kwargs.
    """

    def __init__(self, r_cut, elements, grids_2b, grids_3b, **kwargs):
        super().__init__(r_cut, elements, grids_2b=grids_2b, grids_3b=grids_3b, **kwargs)
class TwoThreeEamManySpecies(TwoBodyManySpecies, ThreeBodyManySpecies, EamManySpecies):
    """Combined 2-body, 3-body and EAM many-species mapped calculator.

    Relies on cooperative multiple inheritance: each parent class consumes
    its own keyword arguments and forwards the rest via **kwargs.
    """

    def __init__(self, r_cut, elements, grids_2b, grids_3b, grids_eam, alpha, r0, **kwargs):
        super().__init__(r_cut=r_cut, elements=elements, grids_2b=grids_2b,
                         grids_3b=grids_3b, grids_eam=grids_eam, alpha=alpha,
                         r0=r0, **kwargs)
if __name__ == '__main__':
    from ase.io import read

    logging.basicConfig(level=logging.INFO)

    # Smoke test: load every frame of the Fe vacancy test trajectory.
    directory = Path('../test/data/Fe_vac')
    filename = directory / 'movie.xyz'
    traj = read(str(filename), index=slice(None))
| 25,108 | 37.275915 | 112 | py |
mff | mff-master/mff/interpolation/spline1d.py | import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
class Spline1D(InterpolatedUnivariateSpline):
    """Cubic interpolating spline with constant extrapolation (ext=3)."""

    def __init__(self, x_range, f):
        """Build a cubic (k=3) interpolating spline through (x_range, f).

        Args:
            x_range: 1-dimensional array of abscissae.
            f: 1-dimensional array of function values.
        """
        super().__init__(x_range, f, k=3, ext=3)

    def ev_all(self, x):
        """Return the (value, derivative) pair evaluated at x."""
        return self.ev_energy(x), self.ev_forces(x)

    def ev_forces(self, x):
        """First derivative of the spline at x."""
        return super().__call__(x, nu=1)

    def ev_energy(self, x):
        """Value of the spline at x."""
        return super().__call__(x, nu=0)

    @classmethod
    def load(cls, filename):
        """Rebuild a spline from an .npz archive produced by save()."""
        archive = np.load(filename)
        return cls(archive['x'], archive['f'])

    def save(self, filename):
        """Persist knots and values; relies on the private FITPACK
        ``_data`` layout (x at index 0, f at index 1)."""
        np.savez_compressed(filename, x=self._data[0], f=self._data[1])
if __name__ == '__main__':
    from pathlib import Path

    # Round-trip smoke test: save a random spline and reload it.
    npts = 101
    xs = np.linspace(0, 10, npts)
    ys = np.random.rand(npts)

    original = Spline1D(xs, ys)
    out_file = Path('grid.npz')
    original.save(out_file)
    reloaded = Spline1D.load(out_file)

    print(original.ev_all(3))
    print(reloaded.ev_all(3))
| 1,211 | 21.444444 | 71 | py |
mff | mff-master/mff/interpolation/__init__.py | from .spline1d import Spline1D
from .tricube_cpp import Spline3D
__all__ = [Spline1D, Spline3D]
| 97 | 18.6 | 33 | py |
mff | mff-master/mff/interpolation/tricube_fortran/__init__.py | import numpy as np
# noinspection PyUnresolvedReferences
from mff.interpolation.tricube_fortran import _tricube
class Spline3D(object):
    """Tricubic interpolation of f sampled on a regular (x, y, z) grid.

    Evaluation is delegated to the compiled ``_tricube`` Fortran extension;
    the grid is padded by one ghost point per side (edge padding of f)
    before being handed to the backend.
    """

    def __init__(self, x, y, z, f):
        assert f.shape == (x.size, y.size, z.size), 'dimensions do not match f'
        assert np.all(np.diff(x) > 0) & np.all(np.diff(y) > 0) & np.all(np.diff(z) > 0), \
            'x, y or z is not monotonically increasing'

        # Valid query range; queries outside it raise in _check_bounds.
        self._xlim = np.array([x.min(), x.max()])
        self._ylim = np.array([y.min(), y.max()])
        self._zlim = np.array([z.min(), z.max()])

        # Extend each axis by one linearly-extrapolated ghost point per side.
        def _pad_axis(a):
            return np.pad(a, pad_width=1, mode='constant',
                          constant_values=(2 * a[0] - a[1], 2 * a[-1] - a[-2]))

        self._x = _pad_axis(x)
        self._y = _pad_axis(y)
        self._z = _pad_axis(z)

        # boundary = 'clamped': ghost values of f repeat the edge values.
        self._f = np.pad(f, pad_width=1, mode='edge')

    def _check_bounds(self, x_new, y_new, z_new):
        """Check the inputs for being in the bounds of the interpolated data.
        Args:
            x_new (float array):
            y_new (float array):
        Returns:
            out_of_bounds (Boolean array): The mask on x_new and y_new of
            values that are NOT of bounds.
        """
        axes = (('x', x_new, self._xlim),
                ('y', y_new, self._ylim),
                ('z', z_new, self._zlim))
        for name, values, limits in axes:
            if np.any(values < limits[0]):
                raise ValueError(
                    'A value in %s is below the interpolation range.' % name)
            if np.any(values > limits[1]):
                raise ValueError(
                    'A value in %s is above the interpolation range.' % name)

    def ev_energy_fast(self, x, y, z):
        # Note the reversed axis order expected by the Fortran routine.
        val = _tricube.reg_ev_energy(
            z, y, x, self._f, self._z, self._y, self._x)
        return val[:, np.newaxis]

    def ev_energy(self, xi, yi, zi):
        """Bounds-checked spline evaluation at (xi, yi, zi)."""
        x, y, z = np.atleast_1d(xi), np.atleast_1d(yi), np.atleast_1d(zi)
        self._check_bounds(x, y, z)
        return self.ev_energy_fast(x, y, z)

    def ev_forces_fast(self, x, y, z):
        # The backend returns derivatives in reversed axis order.
        val_dx2, val_dx1, val_dx0 = _tricube.reg_ev_forces(
            z, y, x, self._f, self._z, self._y, self._x)
        return val_dx0[:, np.newaxis], val_dx1[:, np.newaxis], val_dx2[:, np.newaxis]

    def ev_forces(self, xi, yi, zi):
        """Bounds-checked spline gradient at (xi, yi, zi)."""
        x, y, z = np.atleast_1d(xi), np.atleast_1d(yi), np.atleast_1d(zi)
        self._check_bounds(x, y, z)
        return self.ev_forces_fast(x, y, z)

    def ev_all_fast(self, x, y, z):
        val, val_dx2, val_dx1, val_dx0 = _tricube.reg_ev_all(
            z, y, x, self._f, self._z, self._y, self._x)
        return val[:, np.newaxis], val_dx0[:, np.newaxis], val_dx1[:, np.newaxis], val_dx2[:, np.newaxis]

    def ev_all(self, xi, yi, zi):
        """Bounds-checked value and gradient at (xi, yi, zi)."""
        x, y, z = np.atleast_1d(xi), np.atleast_1d(yi), np.atleast_1d(zi)
        self._check_bounds(x, y, z)
        return self.ev_all_fast(x, y, z)
if __name__ == '__main__':
    # Smoke test: interpolate a 3x3x3 ramp and probe a few points.
    grid_x = np.linspace(0, 2, 3)
    grid_y = np.linspace(0, 2, 3)
    grid_z = np.linspace(0, 2, 3)
    samples = np.linspace(0, 3 ** 3 - 1, 3 ** 3).reshape(3, 3, 3)

    print(grid_x, grid_y, grid_z)
    print(samples)

    interp = Spline3D(grid_x, grid_y, grid_z, samples)

    print(samples[0, 0, 0], interp.ev_energy(0, 0, 0))
    print(samples[0, 0, 1], interp.ev_energy(0, 0, 1))
    print(interp.ev_energy(0, 0, 0.5))
| 4,225 | 31.75969 | 105 | py |
mff | mff-master/mff/interpolation/tricube_cpp/__init__.py | import numpy as np
from mff.interpolation.tricube_cpp import _tricube
class Spline3D(object):
    """Tricubic interpolation of f sampled on a regular (x, y, z) grid.

    Evaluation is delegated to the compiled ``_tricube`` extension module.
    The sample grid is padded with one ghost point on every side; the ghost
    values are filled according to the boundary rule selected in __init__
    ('natural' extrapolation in this implementation).
    """

    def __init__(self, x, y, z, f):
        # f must be sampled on the full cartesian product of the three axes,
        # each of which must be strictly increasing.
        assert f.shape == (x.size, y.size, z.size), "dimensions do not match f"
        assert np.all(np.diff(x) > 0) & np.all(np.diff(y) > 0) & np.all(np.diff(z) > 0), \
            "x, y or z is not monotonically increasing"

        # Valid query range; queries outside it raise in _check_bounds.
        self._xlim = np.array([x.min(), x.max()])
        self._ylim = np.array([y.min(), y.max()])
        self._zlim = np.array([z.min(), z.max()])

        # Each axis gains one linearly-extrapolated ghost point per side.
        self._x = np.pad(x, pad_width=1, mode='constant',
                         constant_values=(2 * x[0] - x[1], 2 * x[-1] - x[-2]))
        self._y = np.pad(y, pad_width=1, mode='constant',
                         constant_values=(2 * y[0] - y[1], 2 * y[-1] - y[-2]))
        self._z = np.pad(z, pad_width=1, mode='constant',
                         constant_values=(2 * z[0] - z[1], 2 * z[-1] - z[-2]))

        boundary = 'natural'
        # self._f = np.pad(f, pad_width=1, mode='edge')
        self._f = np.zeros(np.array(f.shape) + (2, 2, 2))
        # place f in center, so that it is padded by unfilled values on all sides
        self._f[1:-1, 1:-1, 1:-1] = f

        if boundary == 'clamped':
            # 'clamped': ghost values repeat the nearest edge value of f.
            # faces
            self._f[(0, -1), 1:-1, 1:-1] = f[(0, -1), :, :]
            self._f[1:-1, (0, -1), 1:-1] = f[:, (0, -1), :]
            self._f[1:-1, 1:-1, (0, -1)] = f[:, :, (0, -1)]
            # verticies
            self._f[(0, 0, -1, -1), (0, -1, 0, -1), 1:-
                    1] = f[(0, 0, -1, -1), (0, -1, 0, -1), :]
            self._f[(0, 0, -1, -1), 1:-1, (0, -1, 0, -1)
                    ] = f[(0, 0, -1, -1), :, (0, -1, 0, -1)]
            self._f[1:-1, (0, 0, -1, -1), (0, -1, 0, -1)] = f[:,
                                                              (0, 0, -1, -1), (0, -1, 0, -1)]
            # corners
            self._f[(0, 0, 0, 0, -1, -1, -1, -1), (0, 0, -1, -1, 0, 0, -1, -1), (0, -1, 0, -1, 0, -1, 0, -1)] = \
                f[(0, 0, 0, 0, -1, -1, -1, -1), (0, 0, -1, -1,
                                                 0, 0, -1, -1), (0, -1, 0, -1, 0, -1, 0, -1)]
        elif boundary == 'natural':
            # 'natural': ghost values are linearly extrapolated from the two
            # outermost samples along each padded direction (combined for
            # edges and corners).
            # faces
            self._f[(0, -1), 1:-1, 1:-1] = 2 * \
                f[(0, -1), :, :] - f[(1, -2), :, :]
            self._f[1:-1, (0, -1), 1:-1] = 2 * \
                f[:, (0, -1), :] - f[:, (1, -2), :]
            self._f[1:-1, 1:-1, (0, -1)] = 2 * \
                f[:, :, (0, -1)] - f[:, :, (1, -2)]
            # verticies
            self._f[(0, 0, -1, -1), (0, -1, 0, -1), 1:-1] = \
                4 * f[(0, 0, -1, -1), (0, -1, 0, -1), :] - \
                f[(1, 1, -2, -2), (0, -1, 0, -1), :] - \
                f[(0, 0, -1, -1), (1, -2, 1, -2), :] - \
                f[(1, 1, -2, -2), (1, -2, 1, -2), :]
            self._f[(0, 0, -1, -1), 1:-1, (0, -1, 0, -1)] = \
                4 * f[(0, 0, -1, -1), :, (0, -1, 0, -1)] - \
                f[(1, 1, -2, -2), :, (0, -1, 0, -1)] - \
                f[(0, 0, -1, -1), :, (1, -2, 1, -2)] - \
                f[(1, 1, -2, -2), :, (1, -2, 1, -2)]
            self._f[1:-1, (0, 0, -1, -1), (0, -1, 0, -1)] = \
                4 * f[:, (0, 0, -1, -1), (0, -1, 0, -1)] - \
                f[:, (1, 1, -2, -2), (0, -1, 0, -1)] - \
                f[:, (0, 0, -1, -1), (1, -2, 1, -2)] - \
                f[:, (1, 1, -2, -2), (1, -2, 1, -2)]
            # corners
            # NOTE(review): corner formula combines the extrapolations of the
            # three adjacent faces/edges — presumably derived from the face
            # rule applied in all three directions; not re-derived here.
            self._f[(0, 0, 0, 0, -1, -1, -1, -1), (0, 0, -1, -1, 0, 0, -1, -1), (0, -1, 0, -1, 0, -1, 0, -1)] = \
                8 * f[(0, 0, 0, 0, -1, -1, -1, -1), (0, 0, -1, -1, 0, 0, -1, -1), (0, -1, 0, -1, 0, -1, 0, -1)] - \
                f[(1, 1, 1, 1, -2, -2, -2, -2), (0, 0, -1, -1, 0, 0, -1, -1), (0, -1, 0, -1, 0, -1, 0, -1)] - \
                f[(0, 0, 0, 0, -1, -1, -1, -1), (1, 1, -2, -2, 1, 1, -2, -2), (0, -1, 0, -1, 0, -1, 0, -1)] - \
                f[(0, 0, 0, 0, -1, -1, -1, -1), (0, 0, -1, -1, 0, 0, -1, -1), (1, -2, 1, -2, 1, -2, 1, -2)] - \
                f[(1, 1, 1, 1, -2, -2, -2, -2), (1, 1, -2, -2, 1, 1, -2, -2), (0, -1, 0, -1, 0, -1, 0, -1)] - \
                f[(0, 0, 0, 0, -1, -1, -1, -1), (1, 1, -2, -2, 1, 1, -2, -2), (1, -2, 1, -2, 1, -2, 1, -2)] - \
                f[(1, 1, 1, 1, -2, -2, -2, -2), (0, 0, -1, -1, 0, 0, -1, -1), (1, -2, 1, -2, 1, -2, 1, -2)] - \
                f[(1, 1, 1, 1, -2, -2, -2, -2), (1, 1, -2, -2,
                                                 1, 1, -2, -2), (1, -2, 1, -2, 1, -2, 1, -2)]

    @property
    def data(self):
        # The original (unpadded) sample values.
        return self._f[1:-1, 1:-1, 1:-1]

    def _check_bounds(self, x_new, y_new, z_new):
        """Check the inputs for being in the bounds of the interpolated data.
        Args:
            x_new (float array):
            y_new (float array):
        Returns:
            out_of_bounds (Boolean array): The mask on x_new and y_new of
            values that are NOT of bounds.
        """
        below_bounds_x = x_new < self._xlim[0]
        above_bounds_x = x_new > self._xlim[1]
        below_bounds_y = y_new < self._ylim[0]
        above_bounds_y = y_new > self._ylim[1]
        below_bounds_z = z_new < self._zlim[0]
        above_bounds_z = z_new > self._zlim[1]
        # !! Could provide more information about which values are out of bounds
        if np.any(below_bounds_x):
            raise ValueError('A value in x is below the interpolation range.')
        if np.any(above_bounds_x):
            raise ValueError('A value in x is above the interpolation range.')
        if np.any(below_bounds_y):
            raise ValueError('A value in y is below the interpolation range.')
        if np.any(above_bounds_y):
            raise ValueError('A value in y is above the interpolation range.')
        if np.any(below_bounds_z):
            raise ValueError('A value in z is below the interpolation range.')
        if np.any(above_bounds_z):
            raise ValueError('A value in z is above the interpolation range.')

    def ev_energy_fast(self, x, y, z):
        # Unchecked evaluation: inputs are assumed in-bounds 1D arrays.
        val = _tricube.reg_ev_energy(
            x, y, z, self._f, self._x, self._y, self._z)
        return val[:, np.newaxis]

    def ev_energy(self, xi, yi, zi):
        # Bounds-checked value of the spline at (xi, yi, zi).
        x = np.atleast_1d(xi)
        y = np.atleast_1d(yi)
        z = np.atleast_1d(zi)  # This will not modify x1,y1,z1.
        self._check_bounds(x, y, z)
        return self.ev_energy_fast(x, y, z)

    def ev_forces_fast(self, x, y, z):
        # Unchecked partial derivatives along x, y, z.
        val_dx0, val_dx1, val_dx2 = _tricube.reg_ev_forces(
            x, y, z, self._f, self._x, self._y, self._z)
        return val_dx0[:, np.newaxis], val_dx1[:, np.newaxis], val_dx2[:, np.newaxis]

    def ev_forces(self, xi, yi, zi):
        # Bounds-checked gradient of the spline at (xi, yi, zi).
        x = np.atleast_1d(xi)
        y = np.atleast_1d(yi)
        z = np.atleast_1d(zi)  # This will not modify x1,y1,z1.
        self._check_bounds(x, y, z)
        return self.ev_forces_fast(x, y, z)

    def ev_all_fast(self, x, y, z):
        # Unchecked value and gradient in a single backend call.
        val, val_dx0, val_dx1, val_dx2 = _tricube.reg_ev_all(
            x, y, z, self._f, self._x, self._y, self._z)
        return val[:, np.newaxis], val_dx0[:, np.newaxis], val_dx1[:, np.newaxis], val_dx2[:, np.newaxis]

    def ev_all(self, xi, yi, zi):
        # Bounds-checked value and gradient at (xi, yi, zi).
        x = np.atleast_1d(xi)
        y = np.atleast_1d(yi)
        z = np.atleast_1d(zi)  # This will not modify x1,y1,z1.
        self._check_bounds(x, y, z)
        return self.ev_all_fast(x, y, z)

    def __call__(self, xi, yi, zi, nu=0):
        # Spline1D-compatible interface: nu=0 -> value, nu=1 -> gradient.
        if nu == 0:
            return self.ev_energy(xi, yi, zi)
        elif nu == 1:
            return self.ev_forces(xi, yi, zi)
        else:
            raise NotImplementedError()

    @classmethod
    def load(cls, filename):
        # Rebuild a spline from an .npz archive produced by save().
        data = np.load(filename)
        x, y, z, energies = data['x'], data['y'], data['z'], data['f']
        return cls(x, y, z, energies)

    def save(self, filename):
        # Persist the unpadded axes and sample values.
        np.savez_compressed(
            filename, x=self._x[1:-1], y=self._y[1:-1], z=self._z[1:-1], f=self._f[1:-1, 1:-1, 1:-1])
if __name__ == '__main__':
    from pathlib import Path

    # Save/load round trip on a random grid; both objects must agree.
    npts = 10
    axis_x = np.linspace(0, 10, npts)
    axis_y = np.linspace(0, 10, npts)
    axis_z = np.linspace(0, 10, npts)
    values = np.random.rand(npts, npts, npts)

    spline = Spline3D(axis_x, axis_y, axis_z, values)
    grid_file = Path('grid.npz')
    spline.save(grid_file)
    restored = Spline3D.load(grid_file)

    print(spline.ev_all(3., 4.5, 6.))
    print(restored.ev_all(3., 4.5, 6.))
| 9,856 | 37.205426 | 115 | py |
mff | mff-master/mff/models/base.py | # -*- coding: utf-8 -*-
from abc import ABCMeta
from pathlib import Path
class Model(metaclass=ABCMeta):
    """Abstract base class for all mapped-potential models."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Tabulated (mapped) grids, filled in by concrete subclasses.
        self.grid = {}

    def save(self, path: Path):
        """Persist the model to disk; concrete models override this."""
        pass

    @classmethod
    def load(cls):
        """Restore a model from disk; concrete models override this."""
        pass

    @property
    def parameters(self):
        """Model hyperparameters; empty in the base class."""
        return {}
| 379 | 14.833333 | 41 | py |
mff | mff-master/mff/models/twothreeeam.py | # -*- coding: utf-8 -*-
import json
import logging
import warnings
from itertools import combinations_with_replacement
from pathlib import Path
import numpy as np
from mff import gp, interpolation, kernels, utility, models
from .base import Model
logger = logging.getLogger(__name__)
class NpEncoder(json.JSONEncoder):
    """JSON encoder that serializes NumPy scalars and arrays."""

    def default(self, obj):
        # Convert NumPy types to their plain-Python equivalents;
        # defer everything else to the base encoder (which raises).
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)
def get_max_eam(X, rc, r0):
    """Return the most negative EAM descriptor value over configurations X.

    Args:
        X: iterable of M x 5 configuration arrays; the first three columns
            are the relative Cartesian coordinates of the neighbours.
        rc (float): cutoff radius of the descriptor.
        r0 (float): radius appearing in the descriptor exponent.

    Returns:
        Most negative descriptor value found, or 0 if X is empty (or no
        configuration yields a negative value).
    """
    lowest = 0
    for conf in X:
        r = np.sum(conf[:, :3] ** 2, axis=1) ** 0.5
        cutoff = 0.5 * (1 + np.cos(np.pi * r / rc))
        weight = np.exp(-(r / r0 - 1))
        value = -sum(cutoff * weight) ** 0.5
        if value < lowest:
            lowest = value
    return lowest
def get_max_eam_energy(X_glob, rc, r0):
    """Most negative EAM descriptor over a list of snapshots.

    Args:
        X_glob: iterable of snapshots, each a list of configuration arrays.
        rc (float): cutoff radius of the descriptor.
        r0 (float): radius appearing in the descriptor exponent.

    Returns:
        Most negative per-snapshot descriptor value, or 0 for empty input.
    """
    lowest = 0
    for snapshot in X_glob:
        candidate = get_max_eam(snapshot, rc, r0)
        if candidate < lowest:
            lowest = candidate
    return lowest
class TwoThreeEamSingleSpeciesModel(Model):
""" 2-, 3-body and eam single species model class
Class managing the Gaussian processes and their mapped counterparts
Args:
element (int): The atomic number of the element considered
r_cut (float): The cutoff radius used to carve the atomic environments
sigma_2b (float): Lengthscale parameter of the 2-body Gaussian process
sigma_3b (float): Lengthscale parameter of the 3-body Gaussian process
        sigma_eam (float): Lengthscale parameter of the EAM Gaussian process
theta_2b (float): decay ratio of the cutoff function in the 2-body Gaussian Process
theta_3b (float): decay ratio of the cutoff function in the 3-body Gaussian Process
r0 (float): distance parameter of the EAM kernel
noise (float): noise value associated with the training output data
max_grid_eam (float): Maximum (negative) value of the EAM descriptor
Attributes:
gp_2b (method): The 2-body single species Gaussian Process
gp_3b (method): The 3-body single species Gaussian Process
gp_eam (method): The eam single species Gaussian Process
grid_2b (method): The 2-body single species tabulated potential
grid_3b (method): The 3-body single species tabulated potential
grid_eam (method): The eam single species tabulated potential
grid_start (float): Minimum atomic distance for which the grids are defined (cannot be 0.0)
grid_num (int): Number of points per side used to create the 2- and 3-body grid. The 3-body
grid is 3-dimensional, therefore its total number of grid points will be grid_num^3
grid_start_eam (float): Start of the tabulated potential for EAM descriptor
grid_end_eam (float): End of the tabulated potential for EAM descriptor
grid_num_eam (float): Number of points per of the tabulated potential for EAM descriptor
"""
def __init__(self, element, r_cut, sigma_2b, sigma_3b, sigma_eam, theta_2b, theta_3b,
r0, noise, max_grid_eam = 0, rep_sig=1, **kwargs):
super().__init__()
self.element = element
self.r_cut = r_cut
self.rep_sig = rep_sig
self.max_grid_eam = max_grid_eam
kernel_2b = kernels.TwoBodySingleSpeciesKernel(
theta=[sigma_2b, theta_2b, r_cut])
self.gp_2b = gp.GaussianProcess(
kernel=kernel_2b, noise=noise, **kwargs)
kernel_3b = kernels.ThreeBodySingleSpeciesKernel(
theta=[sigma_3b, theta_3b, r_cut])
self.gp_3b = gp.GaussianProcess(
kernel=kernel_3b, noise=noise, **kwargs)
kernel_eam = kernels.EamSingleSpeciesKernel(
theta=[sigma_eam, r_cut, r0])
self.gp_eam = gp.GaussianProcess(
kernel=kernel_eam, noise=noise, **kwargs)
self.grid_2b, self.grid_3b, self.grid_eam, self.grid_start, self.grid_num = None, None, None, None, None
self.grid_start_eam, self.grid_end_eam, self.grid_num_eam = None, None, None
def fit(self, confs, forces, ncores=1):
""" Fit the GP to a set of training forces using a 2- and
3-body single species force-force kernel functions. The 2-body Gaussian
process is first fitted, then the 3-body GP is fitted to the difference
between the training forces and the 2-body predictions of force on the
training configurations
Args:
confs (list): List of M x 5 arrays containing coordinates and
atomic numbers of atoms within a cutoff from the central one
forces (array) : Array containing the vector forces on
the central atoms of the training configurations
ncores (int): number of CPUs to use for the gram matrix evaluation
"""
hypotetical_model_name = ("models/MODEL_combined_ntr_%i.json" %(len(forces)))
try:
model_comb = models.CombinedSingleSpeciesModel.from_json(hypotetical_model_name)
self.rep_sig = model_comb.rep_sig
self.gp_2b = model_comb.gp_2b
self.gp_3b = model_comb.gp_3b
if self.rep_sig:
self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
forces -= self.rep_forces
print("Loaded combined model to bootstart training")
combined_forces = model_comb.predict(confs, ncores=ncores)
self.gp_eam.fit(confs, forces - combined_forces, ncores=ncores)
except:
if self.rep_sig:
self.rep_sig = utility.find_repulstion_sigma(confs)
self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
forces -= self.rep_forces
self.gp_2b.fit(confs, forces, ncores=ncores)
two_body_forces = self.gp_2b.predict(confs, ncores=ncores)
self.gp_3b.fit(confs, forces - two_body_forces, ncores=ncores)
three_body_forces = self.gp_3b.predict(confs, ncores=ncores)
self.gp_eam.fit(confs, forces - two_body_forces -
three_body_forces, ncores=ncores)
self.max_grid_eam = get_max_eam(self.gp_eam.X_train_, self.r_cut,
self.gp_eam.kernel.theta[2])
def fit_energy(self, glob_confs, energies, ncores=1):
""" Fit the GP to a set of training energies using a 2- and
3-body single species energy-energy kernel functions. The 2-body Gaussian
process is first fitted, then the 3-body GP is fitted to the difference
between the training energies and the 2-body predictions of energies on the
training configurations.
Args:
glob_confs (list of lists): List of configurations arranged so that
grouped configurations belong to the same snapshot
energies (array) : Array containing the total energy of each snapshot
ncores (int): number of CPUs to use for the gram matrix evaluation
"""
hypotetical_model_name = ("models/MODEL_combined_ntr_%i.json" %(len(energies)))
try:
model_comb = models.CombinedSingleSpeciesModel.from_json(hypotetical_model_name)
self.rep_sig = model_comb.rep_sig
self.gp_2b = model_comb.gp_2b
self.gp_3b = model_comb.gp_3b
if self.rep_sig:
self.rep_energies = utility.get_repulsive_energies(glob_confs, self.rep_sig)
energies -= self.rep_energies
print("Loaded combined model to bootstart training")
combined_energies = model_comb.predict_energy(glob_confs, ncores=ncores)
self.gp_eam.fit_energy(glob_confs, energies - combined_energies, ncores=ncores)
except:
if self.rep_sig:
self.rep_sig = utility.find_repulstion_sigma(glob_confs)
self.rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
energies -= self.rep_energies
self.gp_2b.fit_energy(glob_confs, energies, ncores=ncores)
two_body_energies = self.gp_2b.predict_energy(
glob_confs, ncores=ncores)
self.gp_3b.fit_energy(glob_confs, energies -
two_body_energies, ncores=ncores)
three_body_energies = self.gp_3b.predict_energy(
glob_confs, ncores=ncores)
self.gp_eam.fit_energy(glob_confs, energies - two_body_energies -
three_body_energies, ncores=ncores)
self.max_grid_eam = get_max_eam_energy(self.gp_eam.X_glob_train_, self.r_cut,
self.gp_eam.kernel.theta[2])
def fit_force_and_energy(self, confs, forces, glob_confs, energies, ncores=1):
""" Fit the GP to a set of training energies using a 2- and
3-body single species force-force, energy-energy, and energy-forces kernel
functions. The 2-body Gaussian process is first fitted, then the 3-body GP
is fitted to the difference between the training energies (and forces) and
the 2-body predictions of energies (and forces) on the training configurations.
Args:
confs (list): List of M x 5 arrays containing coordinates and
atomic numbers of atoms within a cutoff from the central one
forces (array) : Array containing the vector forces on
the central atoms of the training configurations
glob_confs (list of lists): List of configurations arranged so that
grouped configurations belong to the same snapshot
energies (array) : Array containing the total energy of each snapshot
ncores (int): number of CPUs to use for the gram matrix evaluation
"""
hypotetical_model_name = ("models/MODEL_combined_ntr_%i.json" %(len(energies)+len(forces)))
try:
model_comb = models.CombinedSingleSpeciesModel.from_json(hypotetical_model_name)
self.rep_sig = model_comb.rep_sig
self.gp_2b = model_comb.gp_2b
self.gp_3b = model_comb.gp_3b
if self.rep_sig:
self.rep_energies = utility.get_repulsive_energies(glob_confs, self.rep_sig)
energies -= self.rep_energies
self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
forces -= self.rep_forces
print("Loaded combined model to bootstart training")
combined_forces = model_comb.predict(confs, ncores=ncores)
combined_energies = model_comb.predict_energy(glob_confs, ncores=ncores)
self.gp_eam.fit_force_and_energy(confs, forces - combined_forces,
glob_confs, energies - combined_energies, ncores=ncores)
except:
if self.rep_sig:
self.rep_sig = utility.find_repulstion_sigma(confs)
self.rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
energies -= self.rep_energies
self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
forces -= self.rep_forces
self.gp_2b.fit_force_and_energy(
confs, forces, glob_confs, energies, ncores=ncores)
two_body_forces = self.gp_2b.predict(confs, ncores=ncores)
two_body_energies = self.gp_2b.predict_energy(
glob_confs, ncores=ncores)
self.gp_3b.fit_force_and_energy(
confs, forces - two_body_forces, glob_confs, energies - two_body_energies, ncores=ncores)
three_body_forces = self.gp_3b.predict(confs, ncores=ncores)
three_body_energies = self.gp_3b.predict_energy(
glob_confs, ncores=ncores)
self.gp_eam.fit_force_and_energy(confs, forces - two_body_forces - three_body_forces,
glob_confs, energies - two_body_energies - three_body_energies, ncores=ncores)
self.max_grid_eam = get_max_eam(self.gp_eam.X_train_, self.r_cut,
self.gp_eam.kernel.theta[2])
def predict(self, confs, return_std=False, ncores=1):
""" Predict the forces acting on the central atoms of confs using the
2- and 3-body GPs. The total force is the sum of the two predictions.
Args:
confs (list): List of M x 5 arrays containing coordinates and
atomic numbers of atoms within a cutoff from the central one
return_std (bool): if True, returns the standard deviation
associated to predictions according to the GP framework
Returns:
forces (array): array of force vectors predicted by the GPs
forces_errors (array): errors associated to the force predictions,
returned only if return_std is True
"""
if return_std:
if self.rep_sig:
rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
force_2b, std_2b = self.gp_2b.predict(confs, return_std)
force_2b += rep_forces
else:
force_2b, std_2b = self.gp_2b.predict(
confs, return_std, ncores=ncores)
force_3b, std_3b = self.gp_3b.predict(
confs, return_std, ncores=ncores)
force_eam, std_eam = self.gp_eam.predict(
confs, return_std, ncores=ncores)
return force_2b + force_3b + force_eam, std_2b + std_3b + std_eam
else:
if self.rep_sig:
rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
return self.gp_2b.predict(confs, return_std, ncores=ncores) + rep_forces + \
self.gp_3b.predict(confs, return_std, ncores=ncores) + \
self.gp_eam.predict(confs, return_std, ncores=ncores)
else:
return self.gp_2b.predict(confs, return_std, ncores=ncores) + \
self.gp_3b.predict(confs, return_std, ncores=ncores) + \
self.gp_eam.predict(confs, return_std, ncores=ncores)
def predict_energy(self, glob_confs, return_std=False, ncores=1):
""" Predict the local energies of the central atoms of confs using the
2- and 3-body GPs. The total force is the sum of the two predictions.
Args:
glob_confs (list of lists): List of configurations arranged so that
grouped configurations belong to the same snapshot
return_std (bool): if True, returns the standard deviation
associated to predictions according to the GP framework
Returns:
energies (array) : Array containing the total energy of each snapshot
energies_errors (array): errors associated to the energies predictions,
returned only if return_std is True
"""
if return_std:
if self.rep_sig:
rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
energy_2b, std_2b = self.gp_2b.predict_energy(
glob_confs, return_std, ncores=ncores)
energy_2b += rep_energies
else:
energy_2b, std_2b = self.gp_2b.predict_energy(
glob_confs, return_std, ncores=ncores)
energy_3b, std_3b = self.gp_2b.predict_energy(
glob_confs, return_std, ncores=ncores)
energy_eam, std_eam = self.gp_eam.predict_energy(
glob_confs, return_std, ncores=ncores)
return energy_2b + energy_3b + energy_eam, std_2b + std_3b + std_eam
else:
if self.rep_sig:
rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
return self.gp_2b.predict_energy(glob_confs, return_std) + rep_energies +\
self.gp_3b.predict_energy(
glob_confs, return_std, ncores=ncores) +\
self.gp_eam.predict_energy(
glob_confs, return_std, ncores=ncores)
else:
return self.gp_2b.predict_energy(glob_confs, return_std, ncores=ncores) + \
self.gp_3b.predict_energy(
glob_confs, return_std, ncores=ncores) +\
self.gp_eam.predict_energy(
glob_confs, return_std, ncores=ncores)
    def build_grid(self, start, num_2b, num_3b, num_eam, ncores=1):
        """ Build the mapped 2-body, 3-body and EAM potentials.
        Calculates the energy predicted by the GP for two and three atoms at all possible combination
        of num distances ranging from start to r_cut. The energy for the 3-body mapped grid is
        calculated only for ``valid`` triplets of atoms, i.e. sets of three distances which
        form a triangle (this is checked via the triangle inequality).
        The grid building exploits all the permutation invariances to reduce the number of energy
        calculations needed to fill the grid.
        The computed 2-body energies are stored in an array of values, and a 1D spline interpolation is created.
        The computed 3-body energies are stored in a 3D cube of values, and a 3D spline interpolation is
        created.
        The total force or local energy can then be calculated for any atom by summing the pairwise and
        triplet contributions of every valid couple and triplet of atoms of which one is always the central one.
        The prediction is done by the ``calculator`` module, which is built to work within
        the ase python package.

        Args:
            start (float): smallest interatomic distance for which the energy is predicted
                by the GP and stored in the 2- and 3-body mapped potentials
            num_2b (int): number of points to use in the grid of the 2-body mapped potential
            num_3b (int): number of points per dimension to use to generate the triplets
                of distances for the 3-body mapped potential
            num_eam (int): number of points to use in the grid of the eam mapped potential
            ncores (int): number of CPUs to use to calculate the energy predictions
        """
        # --- 2-body grid: one pair of atoms at each of num_2b distances ---
        dists_2b = np.linspace(start, self.r_cut, num_2b)
        confs = np.zeros((num_2b, 1, 5))
        confs[:, 0, 0] = dists_2b
        # Columns 3 and 4 hold the atomic numbers of the central atom and the
        # neighbour; single species, so both are self.element.
        confs[:, 0, 3], confs[:, 0, 4] = self.element, self.element
        grid_data = self.gp_2b.predict_energy(
            confs, ncores=ncores, mapping=True)
        if self.rep_sig:
            # Add back the analytic repulsive baseline that was subtracted
            # from the training targets during fitting.
            grid_data += utility.get_repulsive_energies(
                confs, self.rep_sig, mapping=True)
        grid_2b = interpolation.Spline1D(dists_2b, grid_data)
        # Mapping 3 body part
        dists_3b = np.linspace(start, self.r_cut, num_3b)
        # Only the representative (sorted) triplets are predicted; the rest of
        # the cube is filled by symmetry below.
        inds, r_ij_x, r_ki_x, r_ki_y = self.generate_triplets(dists_3b)
        confs = np.zeros((len(r_ij_x), 2, 5))
        confs[:, 0, 0] = r_ij_x # Element on the x axis
        confs[:, 1, 0] = r_ki_x # Reshape into confs shape: this is x2
        confs[:, 1, 1] = r_ki_y # Reshape into confs shape: this is y2
        # Permutations of elements
        confs[:, :, 3] = self.element # Central element is always element 1
        # Element on the x axis is always element 2
        confs[:, 0, 4] = self.element
        # Element on the xy plane is always element 3
        confs[:, 1, 4] = self.element
        grid_3b = np.zeros((num_3b, num_3b, num_3b))
        grid_3b[inds] = self.gp_3b.predict_energy(
            confs, ncores=ncores, mapping=True).flatten()
        # Copy each computed entry into all six permutations of the three
        # distance indices (permutation invariance of the triplet energy).
        for ind_i in range(num_3b):
            for ind_j in range(ind_i + 1):
                for ind_k in range(ind_j + 1):
                    grid_3b[ind_i, ind_k, ind_j] = grid_3b[ind_i, ind_j, ind_k]
                    grid_3b[ind_j, ind_i, ind_k] = grid_3b[ind_i, ind_j, ind_k]
                    grid_3b[ind_j, ind_k, ind_i] = grid_3b[ind_i, ind_j, ind_k]
                    grid_3b[ind_k, ind_i, ind_j] = grid_3b[ind_i, ind_j, ind_k]
                    grid_3b[ind_k, ind_j, ind_i] = grid_3b[ind_i, ind_j, ind_k]
        grid_3b = interpolation.Spline3D(dists_3b, dists_3b, dists_3b, grid_3b)
        # --- EAM grid: tabulated from 3 * max_grid_eam down to 0.
        # NOTE(review): max_grid_eam appears to be negative (grid runs toward
        # 0), so the start is the most negative descriptor value — confirm.
        self.grid_start_eam = 3.0 * self.max_grid_eam
        self.grid_end_eam = 0
        self.grid_num_eam = num_eam
        dists = list(np.linspace(self.grid_start_eam,
                                 self.grid_end_eam, self.grid_num_eam))
        grid_data = self.gp_eam.predict_energy(
            dists, ncores=ncores, mapping=True)
        grid_eam = interpolation.Spline1D(dists, grid_data)
        # Store the splines and grid metadata on the model for later use by
        # the calculator and by ``save``.
        self.grid_2b = grid_2b
        self.grid_3b = grid_3b
        self.grid_eam = grid_eam
        self.grid_num_2b = num_2b
        self.grid_num_3b = num_3b
        self.grid_start = start
    def save(self, path):
        """ Save the model.
        This creates a .json file containing the parameters of the model and the
        paths to the GP objects and the mapped potentials, which are saved as
        separate .gpy and .gpz files, respectively.

        Args:
            path (str): path to the destination directory
                (NOTE(review): ``path`` is used as a directory below — the
                individual file names are joined onto it — confirm callers
                pass a directory, not a file name)
        """
        if not isinstance(path, Path):
            path = Path(path)
        ### SAVE THE 2B MODEL ###
        # Serializable summary of the model. The 'filename' entries of each
        # sub-dict are placeholders that get filled in once the actual file
        # names (which embed kernel name and training-set size) are built.
        params = {
            'model': self.__class__.__name__,
            'element': self.element,
            'r_cut': self.r_cut,
            'rep_sig': self.rep_sig,
            'fitted': self.gp_2b.fitted,
            'gp_2b': {
                'kernel': self.gp_2b.kernel.kernel_name,
                'n_train': self.gp_2b.n_train,
                'sigma': self.gp_2b.kernel.theta[0],
                'theta': self.gp_2b.kernel.theta[1],
                'noise': self.gp_2b.noise
            },
            'gp_3b': {
                'kernel': self.gp_3b.kernel.kernel_name,
                'n_train': self.gp_3b.n_train,
                'sigma': self.gp_3b.kernel.theta[0],
                'theta': self.gp_3b.kernel.theta[1],
                'noise': self.gp_3b.noise
            },
            'gp_eam': {
                'kernel': self.gp_eam.kernel.kernel_name,
                'n_train': self.gp_eam.n_train,
                'sigma': self.gp_eam.kernel.theta[0],
                'r0': self.gp_eam.kernel.theta[2],
                'max_eam': self.max_grid_eam,
                'noise': self.gp_eam.noise
            },
            # Grid sections are written only if the corresponding mapped
            # potential has been built (empty dict otherwise).
            'grid_2b': {
                'r_min': self.grid_start,
                'r_num': self.grid_num_2b,
                'filename': {}
            } if self.grid_2b else {},
            'grid_3b': {
                'r_min': self.grid_start,
                'r_num': self.grid_num_3b,
                'filename': {}
            } if self.grid_3b else {},
            'grid_eam': {
                'r_min': self.grid_start_eam,
                'r_max': self.grid_end_eam,
                'r_num': self.grid_num_eam,
                'filename': {}
            } if self.grid_eam else {}
        }
        gp_filename_2b = "GP_ker_{p[gp_2b][kernel]}_ntr_{p[gp_2b][n_train]}.npy".format(
            p=params)
        params['gp_2b']['filename'] = gp_filename_2b
        self.gp_2b.save(path / gp_filename_2b)
        if self.grid_2b:
            grid_filename_2b = "GRID_ker_{p[gp_2b][kernel]}_ntr_{p[gp_2b][n_train]}.npz".format(
                p=params)
            print("Saved 2-body grid under name %s" % (grid_filename_2b))
            params['grid_2b']['filename'] = grid_filename_2b
            self.grid_2b.save(path / grid_filename_2b)
        ### SAVE THE 3B MODEL ###
        gp_filename_3b = "GP_ker_{p[gp_3b][kernel]}_ntr_{p[gp_3b][n_train]}.npy".format(
            p=params)
        params['gp_3b']['filename'] = gp_filename_3b
        self.gp_3b.save(path / gp_filename_3b)
        if self.grid_3b:
            grid_filename_3b = "GRID_ker_{p[gp_3b][kernel]}_ntr_{p[gp_3b][n_train]}.npz".format(
                p=params)
            print("Saved 3-body grid under name %s" % (grid_filename_3b))
            params['grid_3b']['filename'] = grid_filename_3b
            self.grid_3b.save(path / grid_filename_3b)
        ### SAVE THE EAM MODEL ###
        gp_filename_eam = "GP_ker_{p[gp_eam][kernel]}_ntr_{p[gp_eam][n_train]}.npy".format(
            p=params)
        params['gp_eam']['filename'] = gp_filename_eam
        self.gp_eam.save(path / gp_filename_eam)
        if self.grid_eam:
            grid_filename_eam = 'GRID_ker_{p[gp_eam][kernel]}_ntr_{p[gp_eam][n_train]}.npz'.format(
                p=params)
            print("Saved eam grid under name %s" % (grid_filename_eam))
            params['grid_eam']['filename'] = grid_filename_eam
            self.grid_eam.save(path / grid_filename_eam)
        # Finally write the JSON manifest; NpEncoder handles numpy scalars.
        with open(path / "MODEL_23eam_ntr_{p[gp_2b][n_train]}.json".format(p=params), 'w') as fp:
            json.dump(params, fp, indent=4, cls=NpEncoder)
        print("Saved model with name: MODEL_23eam_ntr_{p[gp_2b][n_train]}.json".format(
            p=params))
@classmethod
def from_json(cls, path):
""" Load the model.
Loads the model, the associated GPs and the mapped potentials, if available.
Args:
path (str): path to the .json model file
Return:
model (obj): the model object
"""
if not isinstance(path, Path):
path = Path(path)
directory, prefix = path.parent, path.stem
with open(path) as fp:
params = json.load(fp)
model = cls(params['element'],
params['r_cut'],
params['gp_2b']['sigma'],
params['gp_3b']['sigma'],
params['gp_eam']['sigma'],
params['gp_2b']['theta'],
params['gp_3b']['theta'],
params['gp_eam']['r0'],
params['gp_2b']['noise'],
params['gp_eam']['max_eam'],
params['rep_sig'])
gp_filename_2b = params['gp_2b']['filename']
gp_filename_3b = params['gp_3b']['filename']
gp_filename_eam = params['gp_eam']['filename']
try:
model.gp_2b.load(directory / gp_filename_2b)
except:
warnings.warn("The 2-body GP file is missing")
pass
try:
model.gp_3b.load(directory / gp_filename_3b)
except:
warnings.warn("The 3-body GP file is missing")
pass
try:
model.gp_eam.load(directory / gp_filename_eam)
except:
warnings.warn("The EAM GP file is missing")
pass
if params['grid_2b']:
grid_filename_2b = params['grid_2b']['filename']
model.grid_2b = interpolation.Spline1D.load(
directory / grid_filename_2b)
grid_filename_3b = params['grid_3b']['filename']
model.grid_3b = interpolation.Spline3D.load(
directory / grid_filename_3b)
grid_filename_eam = params['grid_eam']['filename']
model.grid_eam = interpolation.Spline1D.load(
directory / grid_filename_eam)
model.grid_start = params['grid_2b']['r_min']
model.grid_num_2b = params['grid_2b']['r_num']
model.grid_num_3b = params['grid_3b']['r_num']
model.grid_start_eam = params['grid_eam']['r_min']
model.grid_end_eam = params['grid_eam']['r_max']
model.grid_num_eam = params['grid_eam']['r_num']
return model
def save_gp(self, filename_2b, filename_3b, filename_eam):
""" Saves the GP objects, now obsolete
"""
warnings.warn('use save and load function', DeprecationWarning)
self.gp_2b.save(filename_2b)
self.gp_3b.save(filename_3b)
self.gp_eam.save(filename_eam)
def load_gp(self, filename_2b, filename_3b, filename_eam):
""" Loads the GP objects, now obsolete
"""
warnings.warn('use save and load function', DeprecationWarning)
self.gp_2b.load(filename_2b)
self.gp_3b.load(filename_3b)
self.gp_eam.load(filename_eam)
@staticmethod
def generate_triplets(dists):
""" Generate a list of all valid triplets using perutational invariance.
Calculates the energy predicted by the GP for three atoms at all possible combination
of num distances ranging from start to r_cut. The energy is calculated only for ``valid``
triplets of atoms, i.e. sets of three distances which form a triangle (this is checked via
the triangle inequality). The grid building exploits all the permutation invariances to
reduce the number of energy calculations needed to fill the grid.
The computed energies are stored in a 3D cube of values, and a 3D spline interpolation is
created, which can be used to predict the energy and, through its analytic derivative,
the force associated to any triplet of atoms.
The total force or local energy can then be calculated for any atom by summing the
triplet contributions of every valid triplet of atoms of which one is always the central one.
The prediction is done by the ``calculator`` module which is built to work within
the ase python package.
Args:
dists (array): array of floats containing all of the distances which can be used to
build triplets of atoms. This array is created by calling np.linspace(start, r_cut, num)
Returns:
inds (array): array of booleans indicating which triplets (three distance values) need to be
evaluated to fill the 3D grid of energy values.
r_ij_x (array): array containing the x coordinate of the second atom j w.r.t. the central atom i
r_ki_x (array): array containing the x coordinate of the third atom k w.r.t. the central atom i
r_ki_y (array): array containing the y coordinate of the third atom k w.r.t. the central atom i
"""
d_ij, d_jk, d_ki = np.meshgrid(
dists, dists, dists, indexing='ij', sparse=False, copy=True)
# Valid triangles according to triangle inequality
inds = np.logical_and(
d_ij <= d_jk + d_ki, np.logical_and(d_jk <= d_ki + d_ij, d_ki <= d_ij + d_jk))
# Utilizing permutation invariance
inds = np.logical_and(np.logical_and(d_ij >= d_jk, d_jk >= d_ki), inds)
# Element on the x axis
r_ij_x = d_ij[inds]
# Element on the xy plane
r_ki_x = (d_ij[inds] ** 2 - d_jk[inds] ** 2 +
d_ki[inds] ** 2) / (2 * d_ij[inds])
# using abs to avoid numerical error near to 0
r_ki_y = np.sqrt(np.abs(d_ki[inds] ** 2 - r_ki_x ** 2))
return inds, r_ij_x, r_ki_x, r_ki_y
class TwoThreeEamManySpeciesModel(Model):
""" 2-, 3-body and EAM many species model class
Class managing the Gaussian processes and their mapped counterparts
Args:
elements (int): The atomic numbers of the element considered
r_cut (float): The cutoff radius used to carve the atomic environments
sigma_2b (float): Lengthscale parameter of the 2-body Gaussian process
        sigma_3b (float): Lengthscale parameter of the 3-body Gaussian process
sigma_eam (float): Lengthscale parameter of the EAM Gaussian process
theta_2b (float): decay ratio of the cutoff function in the 2-body Gaussian Process
theta_3b (float): decay ratio of the cutoff function in the 3-body Gaussian Process
max_grid_eam (float): Maximum (negative) value of the EAM descriptor
r0 (float): distance parameter of the EAM kernel
noise (float): noise value associated with the training output data
Attributes:
gp_2b (method): The 2-body many species Gaussian Process
gp_3b (method): The 3-body many species Gaussian Process
gp_eam (method): The eam many species Gaussian Process
grid_2b (method): The 2-body many species tabulated potentials
grid_3b (method): The 3-body many species tabulated potentials
grid_eam (method): The eam many species tabulated potentials
grid_start (float): Minimum atomic distance for which the grids are defined (cannot be 0.0)
grid_num (int): Number of points per side used to create the 2- and 3-body grid. The 3-body
grid is 3-dimensional, therefore its total number of grid points will be grid_num^3
grid_start_eam (float): Start of the tabulated potential for EAM descriptor
grid_end_eam (float): End of the tabulated potential for EAM descriptor
grid_num_eam (float): Number of points per of the tabulated potential for EAM descriptor
"""
def __init__(self, elements, r_cut, sigma_2b, sigma_3b, sigma_eam, theta_2b, theta_3b,
r0, noise, max_grid_eam = 0, rep_sig = 1, **kwargs):
super().__init__()
self.elements = list(np.sort(elements))
self.r_cut = r_cut
self.rep_sig = rep_sig
self.max_grid_eam = max_grid_eam
kernel_2b = kernels.TwoBodyManySpeciesKernel(
theta=[sigma_2b, theta_2b, r_cut])
self.gp_2b = gp.GaussianProcess(
kernel=kernel_2b, noise=noise, **kwargs)
kernel_3b = kernels.ThreeBodyManySpeciesKernel(
theta=[sigma_3b, theta_3b, r_cut])
self.gp_3b = gp.GaussianProcess(
kernel=kernel_3b, noise=noise, **kwargs)
kernel_eam = kernels.EamManySpeciesKernel(
theta=[sigma_eam, r_cut, r0])
self.gp_eam = gp.GaussianProcess(
kernel=kernel_eam, noise=noise, **kwargs)
self.grid_2b, self.grid_3b, self.grid_eam, self.grid_start, self.grid_start_eam = {}, {}, {}, None, None
self.grid_num_2b, self.grid_num_3b, self.grid_end_eam, self.grid_num_eam = None, None, None, None
def fit(self, confs, forces, ncores=1):
""" Fit the GP to a set of training forces using a 2- and
3-body single species force-force kernel functions. The 2-body Gaussian
process is first fitted, then the 3-body GP is fitted to the difference
between the training forces and the 2-body predictions of force on the
training configurations
Args:
confs (list): List of M x 5 arrays containing coordinates and
atomic numbers of atoms within a cutoff from the central one
forces (array) : Array containing the vector forces on
the central atoms of the training configurations
ncores (int): number of CPUs to use for the gram matrix evaluation
"""
hypotetical_model_name = ("models/MODEL_combined_ntr_%i.json" %(len(forces)))
try:
model_comb = models.CombinedManySpeciesModel.from_json(hypotetical_model_name)
self.rep_sig = model_comb.rep_sig
self.gp_2b = model_comb.gp_2b
self.gp_3b = model_comb.gp_3b
if self.rep_sig:
self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
forces -= self.rep_forces
print("Loaded combined model to bootstart training")
combined_forces = model_comb.predict(confs, ncores=ncores)
self.gp_eam.fit(confs, forces - combined_forces, ncores=ncores)
except:
if self.rep_sig:
self.rep_sig = utility.find_repulstion_sigma(confs)
self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
forces -= self.rep_forces
self.gp_2b.fit(confs, forces, ncores=ncores)
two_body_forces = self.gp_2b.predict(confs, ncores=ncores)
self.gp_3b.fit(confs, forces - two_body_forces, ncores=ncores)
three_body_forces = self.gp_3b.predict(confs, ncores=ncores)
self.gp_eam.fit(confs, forces - two_body_forces -
three_body_forces, ncores=ncores)
self.max_grid_eam = get_max_eam(self.gp_eam.X_train_, self.r_cut,
self.gp_eam.kernel.theta[2])
def fit_energy(self, glob_confs, energies, ncores=1):
""" Fit the GP to a set of training energies using a 2- and
3-body single species energy-energy kernel functions. The 2-body Gaussian
process is first fitted, then the 3-body GP is fitted to the difference
between the training energies and the 2-body predictions of energies on the
training configurations.
Args:
glob_confs (list of lists): List of configurations arranged so that
grouped configurations belong to the same snapshot
energies (array) : Array containing the total energy of each snapshot
ncores (int): number of CPUs to use for the gram matrix evaluation
"""
hypotetical_model_name = ("models/MODEL_combined_ntr_%i.json" %(len(energies)))
try:
model_comb = models.CombinedManySpeciesModel.from_json(hypotetical_model_name)
self.rep_sig = model_comb.rep_sig
self.gp_2b = model_comb.gp_2b
self.gp_3b = model_comb.gp_3b
if self.rep_sig:
self.rep_energies = utility.get_repulsive_energies(glob_confs, self.rep_sig)
energies -= self.rep_energies
print("Loaded combined model to bootstart training")
combined_energies = model_comb.predict_energy(glob_confs, ncores=ncores)
self.gp_eam.fit_energy(glob_confs, energies - combined_energies, ncores=ncores)
except:
if self.rep_sig:
self.rep_sig = utility.find_repulstion_sigma(glob_confs)
self.rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
energies -= self.rep_energies
self.gp_2b.fit_energy(glob_confs, energies, ncores=ncores)
two_body_energies = self.gp_2b.predict_energy(
glob_confs, ncores=ncores)
self.gp_3b.fit_energy(glob_confs, energies -
two_body_energies, ncores=ncores)
three_body_energies = self.gp_3b.predict_energy(
glob_confs, ncores=ncores)
self.gp_eam.fit_energy(glob_confs, energies - two_body_energies -
three_body_energies, ncores=ncores)
self.max_grid_eam = get_max_eam_energy(self.gp_eam.X_glob_train_, self.r_cut,
self.gp_eam.kernel.theta[2])
def fit_force_and_energy(self, confs, forces, glob_confs, energies, ncores=1):
""" Fit the GP to a set of training energies using a 2- and
3-body single species force-force, energy-energy, and energy-forces kernel
functions. The 2-body Gaussian process is first fitted, then the 3-body GP
is fitted to the difference between the training energies (and forces) and
the 2-body predictions of energies (and forces) on the training configurations.
Args:
confs (list): List of M x 5 arrays containing coordinates and
atomic numbers of atoms within a cutoff from the central one
forces (array) : Array containing the vector forces on
the central atoms of the training configurations
glob_confs (list of lists): List of configurations arranged so that
grouped configurations belong to the same snapshot
energies (array) : Array containing the total energy of each snapshot
ncores (int): number of CPUs to use for the gram matrix evaluation
"""
hypotetical_model_name = ("models/MODEL_combined_ntr_%i.json" %(len(energies)+len(forces)))
try:
model_comb = models.CombinedManySpeciesModel.from_json(hypotetical_model_name)
self.rep_sig = model_comb.rep_sig
self.gp_2b = model_comb.gp_2b
self.gp_3b = model_comb.gp_3b
if self.rep_sig:
self.rep_energies = utility.get_repulsive_energies(glob_confs, self.rep_sig)
energies -= self.rep_energies
self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
forces -= self.rep_forces
print("Loaded combined model to bootstart training")
combined_forces = model_comb.predict(confs, ncores=ncores)
combined_energies = model_comb.predict_energy(glob_confs, ncores=ncores)
self.gp_eam.fit_force_and_energy(confs, forces - combined_forces,
glob_confs, energies - combined_energies, ncores=ncores)
except:
if self.rep_sig:
self.rep_sig = utility.find_repulstion_sigma(confs)
self.rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
energies -= self.rep_energies
self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
forces -= self.rep_forces
self.gp_2b.fit_force_and_energy(
confs, forces, glob_confs, energies, ncores=ncores)
two_body_forces = self.gp_2b.predict(confs, ncores=ncores)
two_body_energies = self.gp_2b.predict_energy(
glob_confs, ncores=ncores)
self.gp_3b.fit_force_and_energy(
confs, forces - two_body_forces, glob_confs, energies - two_body_energies, ncores=ncores)
three_body_forces = self.gp_3b.predict(confs, ncores=ncores)
three_body_energies = self.gp_3b.predict_energy(
glob_confs, ncores=ncores)
self.gp_eam.fit_force_and_energy(confs, forces - two_body_forces - three_body_forces,
glob_confs, energies - two_body_energies - three_body_energies, ncores=ncores)
self.max_grid_eam = get_max_eam(self.gp_eam.X_train_, self.r_cut,
self.gp_eam.kernel.theta[2])
def predict(self, confs, return_std=False, ncores=1):
""" Predict the forces acting on the central atoms of confs using the
2- and 3-body GPs. The total force is the sum of the two predictions.
Args:
confs (list): List of M x 5 arrays containing coordinates and
atomic numbers of atoms within a cutoff from the central one
return_std (bool): if True, returns the standard deviation
associated to predictions according to the GP framework
Returns:
forces (array): array of force vectors predicted by the GPs
forces_errors (array): errors associated to the force predictions,
returned only if return_std is True
"""
if return_std:
if self.rep_sig:
rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
force_2b, std_2b = self.gp_2b.predict(confs, return_std)
force_2b += rep_forces
else:
force_2b, std_2b = self.gp_2b.predict(
confs, return_std, ncores=ncores)
force_3b, std_3b = self.gp_3b.predict(
confs, return_std, ncores=ncores)
force_eam, std_eam = self.gp_eam.predict(
confs, return_std, ncores=ncores)
return force_2b + force_3b + force_eam, std_2b + std_3b + std_eam
else:
if self.rep_sig:
rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
return self.gp_2b.predict(confs, return_std, ncores=ncores) + rep_forces + \
self.gp_3b.predict(confs, return_std, ncores=ncores) + \
self.gp_eam.predict(confs, return_std, ncores=ncores)
else:
return self.gp_2b.predict(confs, return_std, ncores=ncores) + \
self.gp_3b.predict(confs, return_std, ncores=ncores) + \
self.gp_eam.predict(confs, return_std, ncores=ncores)
def predict_energy(self, glob_confs, return_std=False, ncores=1):
""" Predict the local energies of the central atoms of confs using the
2- and 3-body GPs. The total force is the sum of the two predictions.
Args:
glob_confs (list of lists): List of configurations arranged so that
grouped configurations belong to the same snapshot
return_std (bool): if True, returns the standard deviation
associated to predictions according to the GP framework
Returns:
energies (array) : Array containing the total energy of each snapshot
energies_errors (array): errors associated to the energies predictions,
returned only if return_std is True
"""
if return_std:
if self.rep_sig:
rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
energy_2b, std_2b = self.gp_2b.predict_energy(
glob_confs, return_std, ncores=ncores)
energy_2b += rep_energies
else:
energy_2b, std_2b = self.gp_2b.predict_energy(
glob_confs, return_std, ncores=ncores)
energy_3b, std_3b = self.gp_2b.predict_energy(
glob_confs, return_std, ncores=ncores)
energy_eam, std_eam = self.gp_eam.predict_energy(
glob_confs, return_std, ncores=ncores)
return energy_2b + energy_3b + energy_eam, std_2b + std_3b + std_eam
else:
if self.rep_sig:
rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
return self.gp_2b.predict_energy(glob_confs, return_std) + rep_energies +\
self.gp_3b.predict_energy(
glob_confs, return_std, ncores=ncores) +\
self.gp_eam.predict_energy(
glob_confs, return_std, ncores=ncores)
else:
return self.gp_2b.predict_energy(glob_confs, return_std, ncores=ncores) + \
self.gp_3b.predict_energy(
glob_confs, return_std, ncores=ncores) +\
self.gp_eam.predict_energy(
glob_confs, return_std, ncores=ncores)
def build_grid(self, start, num_2b, num_3b, num_eam, ncores=1):
"""Function used to create the three different 2-body energy grids for
atoms of elements 0-0, 0-1, and 1-1, and the four different 3-body energy grids for
atoms of elements 0-0-0, 0-0-1, 0-1-1, and 1-1-1. The function calls the
``build_grid_3b`` function for each of the 3-body grids to build.
Args:
start (float): smallest interatomic distance for which the energy is predicted
by the GP and stored inn the 3-body mapped potential
nnum_2bum (int): number of points to use in the grid of the 2-body mapped potentials
num_3b (int): number of points to use to generate the list of distances used to
generate the triplets of atoms for the 3-body mapped potentials
num_eam (int): number of points to use in the grid of the eam mapped potentials
ncores (int): number of CPUs to use to calculate the energy predictions
"""
self.grid_start = start
self.grid_num_2b = num_2b
self.grid_num_3b = num_2b
self.grid_num_eam = num_eam
self.grid_start_eam = 3.0 * self.max_grid_eam
self.grid_end = 0
perm_list_2b = list(combinations_with_replacement(self.elements, 2))
perm_list_3b = list(combinations_with_replacement(self.elements, 3))
dists_2b = np.linspace(start, self.r_cut, num_2b)
confs_2b = np.zeros((num_2b, 1, 5))
confs_2b[:, 0, 0] = dists_2b
for pair in perm_list_2b: # in this for loop, predicting then save for each individual one
confs_2b[:, 0, 3], confs_2b[:, 0,
4] = pair[0], pair[1]
mapped_energies = self.gp_2b.predict_energy(
confs_2b, ncores=ncores, mapping=True)
if self.rep_sig:
mapped_energies += utility.get_repulsive_energies(
confs_2b, self.rep_sig, mapping=True)
self.grid_2b[pair] = interpolation.Spline1D(
dists_2b, mapped_energies)
dists_3b = np.linspace(start, self.r_cut, num_3b)
for trip in perm_list_3b:
self.grid_3b[trip] = self.build_grid_3b(
dists_3b, trip[0], trip[1], trip[2], ncores=ncores)
dists_eam = list(np.linspace(self.grid_start_eam,
self.grid_end, self.grid_num_eam))
for el in self.elements:
grid_data = self.gp_eam.predict_energy(
dists_eam, ncores=ncores, mapping=True, alpha_1_descr=el)
self.grid_eam[(el)] = interpolation.Spline1D(dists_eam, grid_data)
def build_grid_3b(self, dists, element_k, element_i, element_j, ncores=1):
    """ Build a mapped 3-body potential.

    Calculates the energy predicted by the GP for three atoms at every
    ``valid`` combination of the distances in ``dists``, i.e. sets of three
    distances that satisfy the triangle inequality, as enumerated by
    ``generate_triplets_all``. The computed energies are stored in a 3D cube
    of values (invalid entries stay zero) and a 3D spline interpolation is
    created, which can be used to predict the energy and, through its
    analytic derivative, the force associated to any triplet of atoms.
    The prediction is done by the ``calculator`` module which is built to
    work within the ase python package.

    NOTE(review): the signature order is (element_k, element_i, element_j)
    while callers pass trip[0], trip[1], trip[2]; the roles documented below
    follow the body of this method — confirm the intended mapping.

    Args:
        dists (array): array of floats containing all of the distances which
            can be used to build triplets of atoms, typically created by
            calling np.linspace(start, r_cut, num)
        element_k (int): atomic number of the atom placed in the xy plane
        element_i (int): atomic number of the central atom of the triplet
        element_j (int): atomic number of the atom placed on the x axis
        ncores (int): number of CPUs to use when computing the triplet
            local energies

    Returns:
        spline3D (obj): a 3D spline object that can be used to predict the
            energy and the force associated to the central atom of a triplet.
    """
    num = len(dists)
    inds, r_ij_x, r_ki_x, r_ki_y = self.generate_triplets_all(dists)
    confs = np.zeros((len(r_ij_x), 2, 5))
    confs[:, 0, 0] = r_ij_x  # x coordinate of atom j (on the x axis)
    confs[:, 1, 0] = r_ki_x  # x coordinate of atom k (in the xy plane)
    confs[:, 1, 1] = r_ki_y  # y coordinate of atom k (in the xy plane)
    # Element assignment for every configuration in the batch
    confs[:, :, 3] = element_i  # Central element is always element_i
    confs[:, 0, 4] = element_j  # Element on the x axis is always element_j
    # Element on the xy plane is always element_k
    confs[:, 1, 4] = element_k
    grid_3b = np.zeros((num, num, num))
    # Only triangle-inequality-valid entries are evaluated; the rest stay 0
    grid_3b[inds] = self.gp_3b.predict_energy(
        confs, ncores=ncores, mapping=True).flatten()
    return interpolation.Spline3D(dists, dists, dists, grid_3b)
def save(self, path):
    """ Save the model.

    This creates a .json file containing the parameters of the model and the
    paths to the GP objects and the mapped potentials, which are saved as
    separate .gpy and .gpz files, respectively.

    NOTE(review): despite the docstring below, ``path`` is used as a
    directory — every filename is joined onto it — so callers should pass
    the destination folder; confirm against call sites.

    Args:
        path (str): path to the file
    """
    if not isinstance(path, Path):
        path = Path(path)
    ### SAVE THE MODEL ###
    # Grid sections collapse to {} when the corresponding mapped grid has
    # not been built; from_json relies on that truthiness.
    params = {
        'model': self.__class__.__name__,
        'elements': self.elements,
        'r_cut': self.r_cut,
        'rep_sig': self.rep_sig,
        'fitted': self.gp_2b.fitted,
        'gp_2b': {
            'kernel': self.gp_2b.kernel.kernel_name,
            'n_train': self.gp_2b.n_train,
            'sigma': self.gp_2b.kernel.theta[0],
            'theta': self.gp_2b.kernel.theta[1],
            'noise': self.gp_2b.noise
        },
        'gp_3b': {
            'kernel': self.gp_3b.kernel.kernel_name,
            'n_train': self.gp_3b.n_train,
            'sigma': self.gp_3b.kernel.theta[0],
            'theta': self.gp_3b.kernel.theta[1],
            'noise': self.gp_3b.noise
        },
        'gp_eam': {
            'kernel': self.gp_eam.kernel.kernel_name,
            'n_train': self.gp_eam.n_train,
            'sigma': self.gp_eam.kernel.theta[0],
            'r0': self.gp_eam.kernel.theta[2],
            'max_eam': self.max_grid_eam,
            'noise': self.gp_eam.noise
        },
        'grid_2b': {
            'r_min': self.grid_start,
            'r_num': self.grid_num_2b,
            'filename': {}
        } if self.grid_2b else {},
        'grid_3b': {
            'r_min': self.grid_start,
            'r_num': self.grid_num_3b,
            'filename': {}
        } if self.grid_3b else {},
        'grid_eam': {
            'r_min': self.grid_start_eam,
            'r_max': self.grid_end,
            'r_num': self.grid_num_eam,
            'filename': {}
        } if self.grid_eam else {}
    }
    # Save the 2-body GP and its per-pair mapped grids
    gp_filename_2b = "GP_ker_{p[gp_2b][kernel]}_ntr_{p[gp_2b][n_train]}.npy".format(
        p=params)
    params['gp_2b']['filename'] = gp_filename_2b
    self.gp_2b.save(path / gp_filename_2b)
    for k, grid in self.grid_2b.items():
        # Grid keys are element tuples, e.g. (26, 26) -> "26_26"
        key = '_'.join(str(element) for element in k)
        grid_filename_2b = "GRID_{}_ker_{p[gp_2b][kernel]}_ntr_{p[gp_2b][n_train]}.npz".format(
            key, p=params)
        print("Saved 2-body grid under name %s" % (grid_filename_2b))
        params['grid_2b']['filename'][key] = grid_filename_2b
        grid.save(path / grid_filename_2b)
    ### SAVE THE 3B MODEL ###
    gp_filename_3b = "GP_ker_{p[gp_3b][kernel]}_ntr_{p[gp_3b][n_train]}.npy".format(
        p=params)
    params['gp_3b']['filename'] = gp_filename_3b
    self.gp_3b.save(path / gp_filename_3b)
    ### SAVE THE EAM MODEL ###
    gp_filename_eam = "GP_ker_{p[gp_eam][kernel]}_ntr_{p[gp_eam][n_train]}.npy".format(
        p=params)
    params['gp_eam']['filename'] = gp_filename_eam
    self.gp_eam.save(path / gp_filename_eam)
    for k, grid in self.grid_3b.items():
        # Keys are element triplets, e.g. (26, 26, 8) -> "26_26_8"
        key = '_'.join(str(element) for element in k)
        grid_filename_3b = "GRID_{}_ker_{p[gp_3b][kernel]}_ntr_{p[gp_3b][n_train]}.npz".format(
            key, p=params)
        print("Saved 3-body grid under name %s" % (grid_filename_3b))
        params['grid_3b']['filename'][key] = grid_filename_3b
        grid.save(path / grid_filename_3b)
    for k, grid in self.grid_eam.items():
        # EAM grids are keyed by a single element number
        key = str(k)
        grid_filename_eam = 'GRID_{}_ker_{p[gp_eam][kernel]}_ntr_{p[gp_eam][n_train]}.npz'.format(
            key, p=params)
        print("Saved eam grid under name %s" % (grid_filename_eam))
        params['grid_eam']['filename'][key] = grid_filename_eam
        grid.save(path / grid_filename_eam)
    # NpEncoder converts NumPy scalars/arrays to JSON-serializable values
    with open(path / "MODEL_23eam_ntr_{p[gp_2b][n_train]}.json".format(p=params), 'w') as fp:
        json.dump(params, fp, indent=4, cls=NpEncoder)
    print("Saved model with name: MODEL_23eam_ntr_{p[gp_2b][n_train]}.json".format(p=params))
@classmethod
def from_json(cls, path):
    """ Load the model.

    Loads the model, the associated GPs and the mapped potentials, if
    available. Missing GP files are tolerated (the model may still be
    usable through its mapped grids) but reported via a warning.

    Args:
        path (str): path to the .json model file

    Return:
        model (obj): the model object
    """
    if not isinstance(path, Path):
        path = Path(path)
    directory = path.parent
    with open(path) as fp:
        params = json.load(fp)
    model = cls(params['elements'],
                params['r_cut'],
                params['gp_2b']['sigma'],
                params['gp_3b']['sigma'],
                params['gp_eam']['sigma'],
                params['gp_2b']['theta'],
                params['gp_3b']['theta'],
                params['gp_eam']['r0'],
                params['gp_2b']['noise'],
                params['gp_eam']['max_eam'],
                params['rep_sig'])
    # `except Exception` (not a bare except) so KeyboardInterrupt and
    # SystemExit are not swallowed while a missing file is still tolerated.
    try:
        model.gp_2b.load(directory / params['gp_2b']['filename'])
    except Exception:
        warnings.warn("The 2-body GP file is missing")
    try:
        model.gp_3b.load(directory / params['gp_3b']['filename'])
    except Exception:
        warnings.warn("The 3-body GP file is missing")
    try:
        model.gp_eam.load(directory / params['gp_eam']['filename'])
    except Exception:
        warnings.warn("The EAM GP file is missing")
    # Each grid section is optional and restored independently. The
    # original code only loaded the 3-body and EAM grids when a 2-body
    # grid was present, and raised KeyError on an empty 'grid_eam' entry.
    if params['grid_2b']:
        for key, grid_filename_2b in params['grid_2b']['filename'].items():
            k = tuple(int(ind) for ind in key.split('_'))
            model.grid_2b[k] = interpolation.Spline1D.load(
                directory / grid_filename_2b)
        model.grid_start = params['grid_2b']['r_min']
        model.grid_num_2b = params['grid_2b']['r_num']
    if params['grid_3b']:
        for key, grid_filename_3b in params['grid_3b']['filename'].items():
            k = tuple(int(ind) for ind in key.split('_'))
            model.grid_3b[k] = interpolation.Spline3D.load(
                directory / grid_filename_3b)
        model.grid_num_3b = params['grid_3b']['r_num']
    if params['grid_eam']:
        for key, grid_filename in params['grid_eam']['filename'].items():
            model.grid_eam[int(key)] = interpolation.Spline1D.load(
                directory / grid_filename)
        model.grid_start_eam = params['grid_eam']['r_min']
        # NOTE(review): `save` writes 'r_max' from `self.grid_end`, but it
        # is restored here under the name `grid_end_eam` (as in the
        # original code) — confirm which attribute downstream code reads.
        model.grid_end_eam = params['grid_eam']['r_max']
        model.grid_num_eam = params['grid_eam']['r_num']
    return model
def save_gp(self, filename_2b, filename_3b):
    """ Saves the 2- and 3-body GP objects, now obsolete.

    Deprecated: use ``save`` / ``from_json`` instead. The warning below
    matches the behavior of the equivalent methods in the other model
    classes of this package.

    Args:
        filename_2b (str): destination for the 2-body GP
        filename_3b (str): destination for the 3-body GP
    """
    warnings.warn('use save and load function', DeprecationWarning)
    self.gp_2b.save(filename_2b)
    self.gp_3b.save(filename_3b)
def load_gp(self, filename_2b, filename_3b):
    """ Loads the 2- and 3-body GP objects, now obsolete.

    Deprecated: use ``from_json`` instead. The warning below matches the
    behavior of the equivalent methods in the other model classes of this
    package.

    Args:
        filename_2b (str): source file for the 2-body GP
        filename_3b (str): source file for the 3-body GP
    """
    warnings.warn('use save and load function', DeprecationWarning)
    self.gp_2b.load(filename_2b)
    self.gp_3b.load(filename_3b)
@staticmethod
def generate_triplets_all(dists):
""" Generate a list of all valid triplets.
Calculates the energy predicted by the GP for three atoms at all possible combination
of num distances ranging from start to r_cut. The energy is calculated only for ``valid``
triplets of atoms, i.e. sets of three distances which form a triangle (this is checked via
the triangle inequality).
The computed energies are stored in a 3D cube of values, and a 3D spline interpolation is
created, which can be used to predict the energy and, through its analytic derivative,
the force associated to any triplet of atoms.
The total force or local energy can then be calculated for any atom by summing the
triplet contributions of every valid triplet of atoms of which one is always the central one.
The prediction is done by the ``calculator`` module which is built to work within
the ase python package.
Args:
dists (array): array of floats containing all of the distances which can be used to
build triplets of atoms. This array is created by calling np.linspace(start, r_cut, num)
Returns:
inds (array): array of booleans indicating which triplets (three distance values) need to be
evaluated to fill the 3D grid of energy values.
r_ij_x (array): array containing the x coordinate of the second atom j w.r.t. the central atom i
r_ki_x (array): array containing the x coordinate of the third atom k w.r.t. the central atom i
r_ki_y (array): array containing the y coordinate of the third atom k w.r.t. the central atom i
"""
d_ij, d_jk, d_ki = np.meshgrid(
dists, dists, dists, indexing='ij', sparse=False, copy=True)
# Valid triangles according to triangle inequality
inds = np.logical_and(
d_ij <= d_jk + d_ki, np.logical_and(d_jk <= d_ki + d_ij, d_ki <= d_ij + d_jk))
# Element on the x axis
r_ij_x = d_ij[inds]
# Element on the xy plane
r_ki_x = (d_ij[inds] ** 2 - d_jk[inds] ** 2 +
d_ki[inds] ** 2) / (2 * d_ij[inds])
# using abs to avoid numerical error near to 0
r_ki_y = np.sqrt(np.abs(d_ki[inds] ** 2 - r_ki_x ** 2))
return inds, r_ij_x, r_ki_x, r_ki_y
| 62,212 | 44.477339 | 126 | py |
mff | mff-master/mff/models/twobody.py | # -*- coding: utf-8 -*-
import json
import warnings
from itertools import combinations_with_replacement
from pathlib import Path
import numpy as np
from mff import gp, interpolation, kernels, utility
from mff.models.base import Model
class NpEncoder(json.JSONEncoder):
    """JSON encoder that converts NumPy scalars and arrays to their plain
    Python equivalents before serialization."""

    def default(self, obj):
        # Guard-clause style: convert known NumPy types, otherwise defer
        # to the base encoder (which raises TypeError).
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)
class TwoBodySingleSpeciesModel(Model):
    """ 2-body single species model class

    Class managing the Gaussian process and its mapped counterpart.

    Args:
        element (int): The atomic number of the element considered
        r_cut (float): The cutoff radius used to carve the atomic environments
        sigma (float): Lengthscale parameter of the Gaussian process
        theta (float): decay ratio of the cutoff function in the Gaussian Process
        noise (float): noise value associated with the training output data
        rep_sig (float): if truthy, a repulsive baseline is fitted and removed
            from the training data before GP training

    Attributes:
        gp (method): The 2-body single species Gaussian Process
        grid (method): The 2-body single species tabulated potential
        grid_start (float): Minimum atomic distance for which the grid is defined (cannot be 0.0)
        grid_num (int): number of points used to create the 2-body grid
    """

    def __init__(self, element, r_cut, sigma, theta, noise, rep_sig=1, **kwargs):
        super().__init__()
        self.element = element
        self.r_cut = r_cut
        self.rep_sig = rep_sig
        kernel = kernels.TwoBodySingleSpeciesKernel(
            theta=[sigma, theta, r_cut])
        self.gp = gp.GaussianProcess(kernel=kernel, noise=noise, **kwargs)
        self.grid, self.grid_start, self.grid_num = None, None, None

    def fit(self, confs, forces, ncores=1):
        """ Fit the GP to a set of training forces using a
        2-body single species force-force kernel.

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            forces (array) : Array containing the vector forces on
                the central atoms of the training configurations
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        if self.rep_sig:
            self.rep_sig = utility.find_repulstion_sigma(confs)
            self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
            # Subtract the repulsive baseline WITHOUT mutating the caller's
            # array (the previous in-place `-=` silently altered user data).
            forces = forces - self.rep_forces
        self.gp.fit(confs, forces, ncores=ncores)

    def fit_energy(self, glob_confs, energies, ncores=1):
        """ Fit the GP to a set of training energies using a
        2-body single species energy-energy kernel.

        Args:
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            energies (array) : Array containing the total energy of each snapshot
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        if self.rep_sig:
            self.rep_sig = utility.find_repulstion_sigma(glob_confs)
            self.rep_energies = utility.get_repulsive_energies(
                glob_confs, self.rep_sig)
            # Non-mutating subtraction: keep the caller's array intact.
            energies = energies - self.rep_energies
        self.gp.fit_energy(glob_confs, energies, ncores=ncores)

    def fit_force_and_energy(self, confs, forces, glob_confs, energies, ncores=1):
        """ Fit the GP to a set of training forces and energies using
        2-body single species force-force, energy-force and energy-energy kernels.

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            forces (array) : Array containing the vector forces on
                the central atoms of the training configurations
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            energies (array) : Array containing the total energy of each snapshot
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        if self.rep_sig:
            self.rep_sig = utility.find_repulstion_sigma(confs)
            self.rep_energies = utility.get_repulsive_energies(
                glob_confs, self.rep_sig)
            self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
            # Non-mutating subtraction: keep the caller's arrays intact.
            energies = energies - self.rep_energies
            forces = forces - self.rep_forces
        self.gp.fit_force_and_energy(
            confs, forces, glob_confs, energies, ncores=ncores)

    def predict(self, confs, return_std=False, ncores=1):
        """ Predict the forces acting on the central atoms of confs using a GP.

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            return_std (bool): if True, returns the standard deviation
                associated to predictions according to the GP framework

        Returns:
            forces (array): array of force vectors predicted by the GP
            forces_errors (array): errors associated to the force predictions,
                returned only if return_std is True
        """
        if self.rep_sig:
            rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
            return self.gp.predict(confs, return_std, ncores=ncores) + rep_forces
        else:
            return self.gp.predict(confs, return_std, ncores=ncores)

    def predict_energy(self, glob_confs, return_std=False, ncores=1):
        """ Predict the global energies of the central atoms of confs using a GP.

        Args:
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            return_std (bool): if True, returns the standard deviation
                associated to predictions according to the GP framework

        Returns:
            energies (array) : Array containing the total energy of each snapshot
            energies_errors (array): errors associated to the energies predictions,
                returned only if return_std is True
        """
        if self.rep_sig:
            rep_energies = utility.get_repulsive_energies(
                glob_confs, self.rep_sig)
            return self.gp.predict_energy(glob_confs, return_std, ncores=ncores) + rep_energies
        else:
            return self.gp.predict_energy(glob_confs, return_std, ncores=ncores)

    def save_gp(self, filename):
        """ Saves the GP object, now obsolete.
        """
        warnings.warn('use save and load function', DeprecationWarning)
        self.gp.save(filename)

    def load_gp(self, filename):
        """ Loads the GP object, now obsolete.
        """
        warnings.warn('use save and load function', DeprecationWarning)
        self.gp.load(filename)

    def build_grid(self, start, num, ncores=1):
        """ Build the mapped 2-body potential.

        Calculates the energy predicted by the GP for two atoms at distances
        that range from start to r_cut, for a total of num points. These
        energies are stored and a 1D spline interpolation is created, which
        can be used to predict the energy and, through its analytic
        derivative, the force associated to any couple of atoms.
        The prediction is done by the ``calculator`` module which is built to
        work within the ase python package.

        Args:
            start (float): smallest interatomic distance for which the energy
                is predicted by the GP and stored in the 2-body mapped potential
            num (int): number of points to use in the grid of the mapped potential
        """
        self.grid_start = start
        self.grid_num = num
        dists = np.linspace(start, self.r_cut, num)
        confs = np.zeros((num, 1, 5))
        confs[:, 0, 0] = dists
        confs[:, 0, 3], confs[:, 0, 4] = self.element, self.element
        confs = list(confs)
        grid_data = self.gp.predict_energy(confs, ncores=ncores, mapping=True)
        if self.rep_sig:
            grid_data += utility.get_repulsive_energies(
                confs, self.rep_sig, mapping=True)
        self.grid = interpolation.Spline1D(dists, grid_data)

    def save(self, path):
        """ Save the model.

        This creates a .json file containing the parameters of the model and
        the paths to the GP objects and the mapped potential, which are saved
        as separate .gpy and .gpz files, respectively.

        Args:
            path (str): path to the file
        """
        if not isinstance(path, Path):
            path = Path(path)
        params = {
            'model': self.__class__.__name__,
            'element': self.element,
            'r_cut': self.r_cut,
            'rep_sig': self.rep_sig,
            'fitted': self.gp.fitted,
            'gp': {
                'kernel': self.gp.kernel.kernel_name,
                'n_train': self.gp.n_train,
                'sigma': self.gp.kernel.theta[0],
                'theta': self.gp.kernel.theta[1],
                'noise': self.gp.noise
            },
            'grid': {
                'r_min': self.grid_start,
                'r_num': self.grid_num,
                'filename': None
            } if self.grid else {}
        }
        gp_filename = "GP_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npy".format(
            p=params)
        params['gp']['filename'] = gp_filename
        self.gp.save(path / gp_filename)
        if self.grid:
            grid_filename = 'GRID_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npz'.format(
                p=params)
            params['grid']['filename'] = grid_filename
            self.grid.save(path / grid_filename)
        with open(path / 'MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json'.format(p=params), 'w') as fp:
            json.dump(params, fp, indent=4, cls=NpEncoder)
        print("Saved model with name: MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json".format(p=params))

    @classmethod
    def from_json(cls, path):
        """ Load the model.

        Loads the model, the associated GP and the mapped potential,
        if available.

        Args:
            path (str): path to the .json model file

        Return:
            model (obj): the model object
        """
        if not isinstance(path, Path):
            path = Path(path)
        directory, prefix = path.parent, path.stem
        with open(path) as fp:
            params = json.load(fp)
        model = cls(params['element'],
                    params['r_cut'],
                    params['gp']['sigma'],
                    params['gp']['theta'],
                    params['gp']['noise'],
                    params['rep_sig'])
        gp_filename = params['gp']['filename']
        model.gp.load(directory / gp_filename)
        if params['grid']:
            grid_filename = params['grid']['filename']
            model.grid = interpolation.Spline1D.load(directory / grid_filename)
            model.grid_start = params['grid']['r_min']
            model.grid_num = params['grid']['r_num']
        return model
class TwoBodyManySpeciesModel(Model):
    """ 2-body many species model class

    Class managing the Gaussian process and its mapped counterpart.

    Args:
        elements (list): List containing the atomic numbers in increasing order
        r_cut (float): The cutoff radius used to carve the atomic environments
        sigma (float): Lengthscale parameter of the Gaussian process
        theta (float): decay ratio of the cutoff function in the Gaussian Process
        noise (float): noise value associated with the training output data
        rep_sig (float): if truthy, a repulsive baseline is fitted and removed
            from the training data before GP training

    Attributes:
        gp (class): The 2-body two species Gaussian Process
        grid (list): Contains the three 2-body two species tabulated potentials, accounting for
            interactions between two atoms of types 0-0, 0-1, and 1-1.
        grid_start (float): Minimum atomic distance for which the grid is defined (cannot be 0)
        grid_num (int): number of points used to create the 2-body grids
    """

    def __init__(self, elements, r_cut, sigma, theta, noise, rep_sig=1, **kwargs):
        super().__init__()
        self.elements = list(np.sort(elements))
        self.r_cut = r_cut
        self.rep_sig = rep_sig
        kernel = kernels.TwoBodyManySpeciesKernel(theta=[sigma, theta, r_cut])
        self.gp = gp.GaussianProcess(kernel=kernel, noise=noise, **kwargs)
        self.grid, self.grid_start, self.grid_num = {}, None, None

    def fit(self, confs, forces, ncores=1):
        """ Fit the GP to a set of training forces using a
        2-body single species force-force kernel.

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            forces (array) : Array containing the vector forces on
                the central atoms of the training configurations
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        if self.rep_sig:
            self.rep_sig = utility.find_repulstion_sigma(confs)
            self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
            # Subtract the repulsive baseline WITHOUT mutating the caller's
            # array (the previous in-place `-=` silently altered user data).
            forces = forces - self.rep_forces
        self.gp.fit(confs, forces, ncores=ncores)

    def fit_energy(self, glob_confs, energies, ncores=1):
        """ Fit the GP to a set of training energies using a
        2-body single species energy-energy kernel.

        Args:
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            energies (array) : Array containing the total energy of each snapshot
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        if self.rep_sig:
            self.rep_sig = utility.find_repulstion_sigma(glob_confs)
            self.rep_energies = utility.get_repulsive_energies(
                glob_confs, self.rep_sig)
            # Non-mutating subtraction: keep the caller's array intact.
            energies = energies - self.rep_energies
        self.gp.fit_energy(glob_confs, energies, ncores=ncores)

    def fit_force_and_energy(self, confs, forces, glob_confs, energies, ncores=1):
        """ Fit the GP to a set of training forces and energies using
        2-body single species force-force, energy-force and energy-energy kernels.

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            forces (array) : Array containing the vector forces on
                the central atoms of the training configurations
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            energies (array) : Array containing the total energy of each snapshot
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        if self.rep_sig:
            self.rep_sig = utility.find_repulstion_sigma(confs)
            self.rep_energies = utility.get_repulsive_energies(
                glob_confs, self.rep_sig)
            self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
            # Non-mutating subtraction: keep the caller's arrays intact.
            energies = energies - self.rep_energies
            forces = forces - self.rep_forces
        self.gp.fit_force_and_energy(
            confs, forces, glob_confs, energies, ncores=ncores)

    def predict(self, confs, return_std=False, ncores=1):
        """ Predict the forces acting on the central atoms of confs using a GP.

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            return_std (bool): if True, returns the standard deviation
                associated to predictions according to the GP framework

        Returns:
            forces (array): array of force vectors predicted by the GP
            forces_errors (array): errors associated to the force predictions,
                returned only if return_std is True
        """
        if self.rep_sig:
            rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
            return self.gp.predict(confs, return_std, ncores=ncores) + rep_forces
        else:
            return self.gp.predict(confs, return_std, ncores=ncores)

    def predict_energy(self, glob_confs, return_std=False, ncores=1):
        """ Predict the global energies of the central atoms of confs using a GP.

        Args:
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            return_std (bool): if True, returns the standard deviation
                associated to predictions according to the GP framework

        Returns:
            energies (array) : Array containing the total energy of each snapshot
            energies_errors (array): errors associated to the energies predictions,
                returned only if return_std is True
        """
        if self.rep_sig:
            rep_energies = utility.get_repulsive_energies(
                glob_confs, self.rep_sig)
            return self.gp.predict_energy(glob_confs, return_std, ncores=ncores) + rep_energies
        else:
            return self.gp.predict_energy(glob_confs, return_std, ncores=ncores)

    def save_gp(self, filename):
        """ Saves the GP object, now obsolete.
        """
        warnings.warn('use save and load function', DeprecationWarning)
        self.gp.save(filename)

    def load_gp(self, filename):
        """ Loads the GP object, now obsolete.
        """
        warnings.warn('use save and load function', DeprecationWarning)
        self.gp.load(filename)

    def build_grid(self, start, num, ncores=1):
        """ Build the mapped 2-body potential.

        Calculates the energy predicted by the GP for two atoms at distances
        that range from start to r_cut, for a total of num points. These
        energies are stored and a 1D spline interpolation is created, which
        can be used to predict the energy and, through its analytic
        derivative, the force associated to any couple of atoms.
        A distinct potential is built for every unordered pair of element
        types (e.g. 0-0, 0-1 and 1-1 for two species).
        The prediction is done by the ``calculator`` module which is built to
        work within the ase python package.

        Args:
            start (float): smallest interatomic distance for which the energy
                is predicted by the GP and stored in the 2-body mapped potential
            num (int): number of points to use in the grid of the mapped potential
        """
        self.grid_start = start
        self.grid_num = num
        dists = np.linspace(start, self.r_cut, num)
        confs = np.zeros((num, 1, 5))
        confs[:, 0, 0] = dists
        perm_list = list(combinations_with_replacement(self.elements, 2))
        for pair in perm_list:  # predict and store a grid per element pair
            confs[:, 0, 3], confs[:, 0,
                                  4] = pair[0], pair[1]
            grid_data = self.gp.predict_energy(
                list(confs), ncores=ncores, mapping=True)
            if self.rep_sig:
                grid_data += utility.get_repulsive_energies(
                    confs, self.rep_sig, mapping=True)
            self.grid[pair] = interpolation.Spline1D(dists, grid_data)

    def save(self, path):
        """ Save the model.

        This creates a .json file containing the parameters of the model and
        the paths to the GP objects and the mapped potentials, which are
        saved as separate .gpy and .gpz files, respectively.

        Args:
            path (str): path to the file
        """
        if not isinstance(path, Path):
            path = Path(path)
        params = {
            'model': self.__class__.__name__,
            'elements': self.elements,
            'r_cut': self.r_cut,
            'rep_sig': self.rep_sig,
            'fitted': self.gp.fitted,
            'gp': {
                'kernel': self.gp.kernel.kernel_name,
                'n_train': self.gp.n_train,
                'sigma': self.gp.kernel.theta[0],
                'theta': self.gp.kernel.theta[1],
                'noise': self.gp.noise
            },
            'grid': {
                'r_min': self.grid_start,
                'r_num': self.grid_num,
                'filename': {}
            } if self.grid else {}
        }
        gp_filename = "GP_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npy".format(
            p=params)
        params['gp']['filename'] = gp_filename
        self.gp.save(path / gp_filename)
        for k, grid in self.grid.items():
            key = '_'.join(str(element) for element in k)
            grid_filename = "GRID_{}_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npz".format(
                key, p=params)
            params['grid']['filename'][key] = grid_filename
            grid.save(path / grid_filename)
        with open(path / "MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json".format(p=params), 'w') as fp:
            json.dump(params, fp, indent=4, cls=NpEncoder)
        print("Saved model with name: MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json".format(p=params))

    @classmethod
    def from_json(cls, path):
        """ Load the models.

        Loads the model, the associated GP and the mapped potential,
        if available.

        Args:
            path (str): path to the .json model file

        Return:
            model (obj): the model object
        """
        if not isinstance(path, Path):
            path = Path(path)
        directory, prefix = path.parent, path.stem
        with open(path) as fp:
            params = json.load(fp)
        model = cls(params['elements'],
                    params['r_cut'],
                    params['gp']['sigma'],
                    params['gp']['theta'],
                    params['gp']['noise'],
                    params['rep_sig'])
        gp_filename = params['gp']['filename']
        try:
            model.gp.load(directory / gp_filename)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; a missing GP file is tolerated but reported.
            warnings.warn("The 2-body GP file is missing")
        if params['grid']:
            model.grid_start = params['grid']['r_min']
            model.grid_num = params['grid']['r_num']
            for key, grid_filename in params['grid']['filename'].items():
                k = tuple(int(ind) for ind in key.split('_'))
                model.grid[k] = interpolation.Spline1D.load(
                    directory / grid_filename)
        return model
if __name__ == '__main__':
    def test_two_body_single_species_model():
        """Smoke test: fit, map, save and reload the single-species model."""
        # Keep confs as a plain list of (M, 5) arrays: np.array() over
        # ragged sub-arrays raises ValueError on modern NumPy (NEP 34).
        confs = [
            np.hstack([np.random.randn(4, 3), 26 * np.ones((4, 2))]),
            np.hstack([np.random.randn(5, 3), 26 * np.ones((5, 2))])
        ]
        forces = np.random.randn(2, 3)
        element, r_cut, sigma, theta, noise = 26, 2., 3., 4., 5.
        filename = Path('test_model.json')
        m = TwoBodySingleSpeciesModel(element, r_cut, sigma, theta, noise)
        print(m)
        print(m.parameters)
        m.fit(confs, forces)
        print(m)
        print(m.parameters)
        m.build_grid(1., 10)
        print(m)
        print(m.parameters)
        m.save(filename)
        # Round-trip: reloading must not raise
        m2 = TwoBodySingleSpeciesModel.from_json(filename)

    def test_two_body_two_species_model():
        """Smoke test: fit, map, save and reload the two-species model."""
        elements = [2, 4]
        # Plain list instead of a ragged np.array (see note above)
        confs = [
            np.hstack([np.random.randn(4, 3),
                       np.random.choice(elements, size=(4, 2))]),
            np.hstack([np.random.randn(5, 3),
                       np.random.choice(elements, size=(5, 2))])
        ]
        forces = np.random.randn(2, 3)
        r_cut, sigma, theta, noise = 2., 3., 4., 5.
        filename = Path('test_model.json')
        m = TwoBodyManySpeciesModel(elements, r_cut, sigma, theta, noise)
        print(m)
        m.fit(confs, forces)
        print(m)
        m.build_grid(1., 10)
        print(m)
        m.save(filename)
        # Round-trip: reloading must not raise
        m2 = TwoBodyManySpeciesModel.from_json(filename)

    # test_two_body_single_species_model()
    test_two_body_two_species_model()
| 25,131 | 36.849398 | 108 | py |
mff | mff-master/mff/models/manybody.py | # -*- coding: utf-8 -*-
import json
import warnings
from pathlib import Path
import numpy as np
from mff import gp, interpolation, kernels
from mff.models.base import Model
class NpEncoder(json.JSONEncoder):
    """JSON encoder that converts NumPy scalars and arrays to their plain
    Python equivalents before serialization."""

    def default(self, obj):
        # Guard-clause style: convert known NumPy types, otherwise defer
        # to the base encoder (which raises TypeError).
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)
class ManyBodySingleSpeciesModel(Model):
""" many-body single species model class
Class managing the Gaussian process and its mapped counterpart
Args:
element (int): The atomic number of the element considered
r_cut (foat): The cutoff radius used to carve the atomic environments
sigma (foat): Lengthscale parameter of the Gaussian process
theta (float): decay ratio of the cutoff function in the Gaussian Process
noise (float): noise value associated with the training output data
Attributes:
gp (method): The many-body single species Gaussian Process
grid (method): The many-body single species tabulated potential
grid_start (float): Minimum atomic distance for which the grid is defined (cannot be 0.0)
grid_num (int): number of points used to create the many-body grid
"""
def __init__(self, element, r_cut, sigma, theta, noise, **kwargs):
    """Set up the many-body GP and an (initially empty) mapped grid.

    Args:
        element (int): atomic number of the species handled by the model
        r_cut (float): cutoff radius used to carve atomic environments
        sigma (float): lengthscale parameter of the Gaussian process
        theta (float): decay ratio of the GP cutoff function
        noise (float): noise associated with the training output data
    """
    super().__init__()
    self.element = element
    self.r_cut = r_cut
    mb_kernel = kernels.ManyBodySingleSpeciesKernel(
        theta=[sigma, theta, r_cut])
    self.gp = gp.GaussianProcess(kernel=mb_kernel, noise=noise, **kwargs)
    # The tabulated potential is only created later by build_grid()
    self.grid = None
    self.grid_start = None
    self.grid_num = None
def fit(self, confs, forces, ncores=1):
    """Train the GP on forces via the many-body single-species
    force-force kernel.

    Args:
        confs (list): list of M x 5 arrays holding coordinates and atomic
            numbers of the atoms within a cutoff of the central one
        forces (array): vector forces acting on the central atoms of the
            training configurations
        ncores (int): number of CPUs used for the gram matrix evaluation
    """
    self.gp.fit(confs, forces, ncores=ncores)
def fit_energy(self, glob_confs, energies, ncores=1):
""" Fit the GP to a set of training energies using a
many-body single species energy-energy kernel
Args:
glob_confs (list of lists): List of configurations arranged so that
grouped configurations belong to the same snapshot
energies (array) : Array containing the total energy of each snapshot
ncores (int): number of CPUs to use for the gram matrix evaluation
"""
self.gp.fit_energy(glob_confs, energies, ncores=ncores)
def fit_force_and_energy(self, confs, forces, glob_confs, energies, ncores=1):
""" Fit the GP to a set of training forces and energies using
many-body single species force-force, energy-force and energy-energy kernels
Args:
confs (list): List of M x 5 arrays containing coordinates and
atomic numbers of atoms within a cutoff from the central one
forces (array) : Array containing the vector forces on
the central atoms of the training configurations
glob_confs (list of lists): List of configurations arranged so that
grouped configurations belong to the same snapshot
energies (array) : Array containing the total energy of each snapshot
ncores (int): number of CPUs to use for the gram matrix evaluation
"""
self.gp.fit_force_and_energy(
confs, forces, glob_confs, energies, ncores=ncores)
def predict(self, confs, return_std=False, ncores=1):
""" Predict the forces acting on the central atoms of confs using a GP
Args:
confs (list): List of M x 5 arrays containing coordinates and
atomic numbers of atoms within a cutoff from the central one
return_std (bool): if True, returns the standard deviation
associated to predictions according to the GP framework
Returns:
forces (array): array of force vectors predicted by the GP
forces_errors (array): errors associated to the force predictions,
returned only if return_std is True
"""
return self.gp.predict(confs, return_std, ncores=ncores)
def predict_energy(self, glob_confs, return_std=False, ncores=1):
""" Predict the global energies of the central atoms of confs using a GP
Args:
glob_confs (list of lists): List of configurations arranged so that
grouped configurations belong to the same snapshot
return_std (bool): if True, returns the standard deviation
associated to predictions according to the GP framework
Returns:
energies (array) : Array containing the total energy of each snapshot
energies_errors (array): errors associated to the energies predictions,
returned only if return_std is True
"""
return self.gp.predict_energy(glob_confs, return_std, ncores=ncores)
def save_gp(self, filename):
""" Saves the GP object, now obsolete
"""
warnings.warn('use save and load function', DeprecationWarning)
self.gp.save(filename)
def load_gp(self, filename):
""" Loads the GP object, now obsolete
"""
warnings.warn('use save and load function', DeprecationWarning)
self.gp.load(filename)
def save(self, path):
""" Save the model.
This creates a .json file containing the parameters of the model and the
paths to the GP objects and the mapped potential, which are saved as
separate .gpy and .gpz files, respectively.
Args:
path (str): path to the file
"""
if not isinstance(path, Path):
path = Path(path)
directory, prefix = path.parent, path.stem
params = {
'model': self.__class__.__name__,
'element': self.element,
'r_cut': self.r_cut,
'fitted': self.gp.fitted,
'gp': {
'kernel': self.gp.kernel.kernel_name,
'n_train': self.gp.n_train,
'sigma': self.gp.kernel.theta[0],
'theta': self.gp.kernel.theta[1],
'noise': self.gp.noise
},
'grid': {}
}
gp_filename = "GP_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npy".format(
p=params)
params['gp']['filename'] = gp_filename
self.gp.save(path / gp_filename)
with open(path / 'MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json'.format(p=params), 'w') as fp:
json.dump(params, fp, indent=4, cls=NpEncoder)
print("Saved model with name: MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json".format(p=params))
@classmethod
def from_json(cls, path):
""" Load the model.
Loads the model, the associated GP and the mapped potential, if available.
Args:
path (str): path to the .json model file
Return:
model (obj): the model object
"""
if not isinstance(path, Path):
path = Path(path)
directory, prefix = path.parent, path.stem
with open(path) as fp:
params = json.load(fp)
model = cls(params['element'],
params['r_cut'],
params['gp']['sigma'],
params['gp']['theta'],
params['gp']['noise'])
gp_filename = params['gp']['filename']
try:
model.gp.load(directory / gp_filename)
except:
warnings.warn("The many-body GP file is missing")
pass
return model
class ManyBodyManySpeciesModel(Model):
    """ many-body many species model class

    Class managing the Gaussian process; there is no mapping method for this
    kernel, therefore the ``grid`` dictionary stays empty.

    Args:
        elements (list): List containing the atomic numbers in increasing order
        r_cut (float): The cutoff radius used to carve the atomic environments
        sigma (float): Lengthscale parameter of the Gaussian process
        theta (float): decay ratio of the cutoff function in the Gaussian Process
        noise (float): noise value associated with the training output data

    Attributes:
        gp (class): The many-body many species Gaussian Process
        grid (dict): always empty, no mapped potential exists for this kernel
        grid_start (float): None
        grid_num (int): None
    """

    def __init__(self, elements, r_cut, sigma, theta, noise, **kwargs):
        super().__init__()
        self.elements = elements
        self.r_cut = r_cut

        kernel = kernels.ManyBodyManySpeciesKernel(theta=[sigma, theta, r_cut])
        self.gp = gp.GaussianProcess(kernel=kernel, noise=noise, **kwargs)

        # No tabulated potential exists for the many-body kernel.
        self.grid, self.grid_start, self.grid_num = {}, None, None

    def fit(self, confs, forces, ncores=1):
        """ Fit the GP to a set of training forces using a
        many-body many species force-force kernel

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            forces (array) : Array containing the vector forces on
                the central atoms of the training configurations
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        self.gp.fit(confs, forces, ncores=ncores)

    def fit_energy(self, glob_confs, energy, ncores=1):
        """ Fit the GP to a set of training energies using a
        many-body many species energy-energy kernel

        Args:
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            energy (array) : Array containing the total energy of each snapshot
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        self.gp.fit_energy(glob_confs, energy, ncores=ncores)

    def fit_force_and_energy(self, confs, forces, glob_confs, energy, ncores=1):
        """ Fit the GP to a set of training forces and energies using
        many-body many species force-force, energy-force and energy-energy kernels

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            forces (array) : Array containing the vector forces on
                the central atoms of the training configurations
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            energy (array) : Array containing the total energy of each snapshot
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        self.gp.fit_force_and_energy(
            confs, forces, glob_confs, energy, ncores=ncores)

    def predict(self, confs, return_std=False, ncores=1):
        """ Predict the forces acting on the central atoms of confs using a GP

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            return_std (bool): if True, returns the standard deviation
                associated to predictions according to the GP framework

        Returns:
            forces (array): array of force vectors predicted by the GP
            forces_errors (array): errors associated to the force predictions,
                returned only if return_std is True
        """
        return self.gp.predict(confs, return_std, ncores=ncores)

    def predict_energy(self, glob_confs, return_std=False, ncores=1):
        """ Predict the global energies of the central atoms of confs using a GP

        Args:
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            return_std (bool): if True, returns the standard deviation
                associated to predictions according to the GP framework

        Returns:
            energies (array) : Array containing the total energy of each snapshot
            energies_errors (array): errors associated to the energies predictions,
                returned only if return_std is True
        """
        return self.gp.predict_energy(glob_confs, return_std, ncores=ncores)

    def save_gp(self, filename):
        """ Saves the GP object, now obsolete
        """
        warnings.warn('use save and load function', DeprecationWarning)
        self.gp.save(filename)

    def load_gp(self, filename):
        """ Loads the GP object, now obsolete
        """
        warnings.warn('use save and load function', DeprecationWarning)
        self.gp.load(filename)

    def save(self, path):
        """ Save the model.
        This creates a .json file containing the parameters of the model and the
        path to the GP object, which is saved as a separate .npy file.

        Args:
            path (str): path to the directory where the files are written
        """
        if not isinstance(path, Path):
            path = Path(path)

        params = {
            'model': self.__class__.__name__,
            'elements': self.elements,
            'r_cut': self.r_cut,
            'fitted': self.gp.fitted,
            'gp': {
                'kernel': self.gp.kernel.kernel_name,
                'n_train': self.gp.n_train,
                'sigma': self.gp.kernel.theta[0],
                'theta': self.gp.kernel.theta[1],
                'noise': self.gp.noise
            },
            'grid': {}
        }

        gp_filename = "GP_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npy".format(
            p=params)
        params['gp']['filename'] = gp_filename
        self.gp.save(path / gp_filename)

        model_filename = 'MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json'.format(
            p=params)
        with open(path / model_filename, 'w') as fp:
            json.dump(params, fp, indent=4, cls=NpEncoder)

        print("Saved model with name: {}".format(model_filename))

    @classmethod
    def from_json(cls, path):
        """ Load the model.
        Loads the model and the associated GP, if available.

        Args:
            path (str): path to the .json model file

        Return:
            model (obj): the model object
        """
        if not isinstance(path, Path):
            path = Path(path)

        directory = path.parent

        with open(path) as fp:
            params = json.load(fp)

        model = cls(params['elements'],
                    params['r_cut'],
                    params['gp']['sigma'],
                    params['gp']['theta'],
                    params['gp']['noise'])

        gp_filename = params['gp']['filename']
        try:
            model.gp.load(directory / gp_filename)
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            warnings.warn("The many-body GP file is missing")

        return model
| 15,695 | 35.165899 | 108 | py |
mff | mff-master/mff/models/eam.py | # -*- coding: utf-8 -*-
import json
import warnings
from itertools import combinations_with_replacement
from pathlib import Path
import numpy as np
from mff import gp, interpolation, kernels, utility
from mff.models.base import Model
class NpEncoder(json.JSONEncoder):
    """JSON encoder that serializes NumPy scalars and arrays.

    Falls back to the base encoder for every other unsupported type.
    """

    def default(self, obj):
        # Arrays become nested lists; NumPy scalars become the closest
        # built-in Python number via ``.item()``.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, (np.integer, np.floating)):
            return obj.item()
        return super().default(obj)
def get_max_eam(X, rc, r0):
    """Return the most negative EAM descriptor value over the configurations in X.

    For each configuration the descriptor is ``-sqrt(sum(cutoff * decay))``,
    where the cosine cutoff uses radius ``rc`` and the exponential decay uses
    scale ``r0``. Despite the name, the descriptor is non-positive, so the
    extremum tracked here is the minimum (starting from 0 for empty input).

    Args:
        X (list): configurations; each is an M x 5 array whose first three
            columns are the coordinates of the neighbouring atoms
        rc (float): cutoff radius of the cosine cutoff function
        r0 (float): radius in the exponent of the eam descriptor

    Returns:
        float: the most negative descriptor value found (0 if X is empty)
    """
    lowest = 0
    for conf in X:
        radii = np.linalg.norm(conf[:, :3], axis=1)
        cutoff = 0.5 * (1 + np.cos(np.pi * radii / rc))
        decay = np.exp(1 - radii / r0)
        descriptor = -np.sum(cutoff * decay) ** 0.5
        lowest = min(lowest, descriptor)
    return lowest
def get_max_eam_energy(X_glob, rc, r0):
    """Return the most negative EAM descriptor across all snapshots.

    Applies :func:`get_max_eam` to every snapshot in ``X_glob`` and keeps
    the overall minimum (0 if ``X_glob`` is empty).

    Args:
        X_glob (list of lists): configurations grouped per snapshot
        rc (float): cutoff radius of the cosine cutoff function
        r0 (float): radius in the exponent of the eam descriptor

    Returns:
        float: the most negative descriptor value found (0 if empty)
    """
    lowest = 0
    for snapshot in X_glob:
        lowest = min(lowest, get_max_eam(snapshot, rc, r0))
    return lowest
class EamSingleSpeciesModel(Model):
    """ Eam single species model class

    Class managing the Gaussian process and its mapped counterpart

    Args:
        element (int): The atomic number of the element considered
        r_cut (float): The cutoff radius used to carve the atomic environments
        sigma (float): Lengthscale parameter of the Gaussian process
        r0 (float): radius in the exponent of the eam descriptor
        noise (float): noise value associated with the training output data

    Attributes:
        gp (method): The eam single species Gaussian Process
        grid (method): The eam single species tabulated potential
        grid_start (float): Minimum descriptor value for which the grid is defined
        grid_end (float): Maximum descriptor value for which the grid is defined
        grid_num (int): number of points used to create the eam grid
    """

    def __init__(self, element, r_cut, sigma, r0, noise, **kwargs):
        super().__init__()
        self.element = element
        self.r_cut = r_cut

        # Kernel hyperparameters are ordered [sigma, r_cut, r0]; the same
        # ordering is assumed when reading theta back in save/from_json.
        kernel = kernels.EamSingleSpeciesKernel(
            theta=[sigma, r_cut, r0])
        self.gp = gp.GaussianProcess(kernel=kernel, noise=noise, **kwargs)

        self.grid, self.grid_start, self.grid_end, self.grid_num = None, None, None, None

    def fit(self, confs, forces, ncores=1):
        """ Fit the GP to a set of training forces using a
        eam single species force-force kernel

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            forces (array) : Array containing the vector forces on
                the central atoms of the training configurations
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        self.gp.fit(confs, forces, ncores=ncores)

    def fit_energy(self, glob_confs, energies, ncores=1):
        """ Fit the GP to a set of training energies using a
        eam single species energy-energy kernel

        Args:
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            energies (array) : Array containing the total energy of each snapshot
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        self.gp.fit_energy(glob_confs, energies, ncores=ncores)

    def fit_force_and_energy(self, confs, forces, glob_confs, energies, ncores=1):
        """ Fit the GP to a set of training forces and energies using
        eam single species force-force, energy-force and energy-energy kernels

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            forces (array) : Array containing the vector forces on
                the central atoms of the training configurations
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            energies (array) : Array containing the total energy of each snapshot
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        self.gp.fit_force_and_energy(
            confs, forces, glob_confs, energies, ncores=ncores)

    def predict(self, confs, return_std=False, ncores=1):
        """ Predict the forces acting on the central atoms of confs using a GP

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            return_std (bool): if True, returns the standard deviation
                associated to predictions according to the GP framework

        Returns:
            forces (array): array of force vectors predicted by the GP
            forces_errors (array): errors associated to the force predictions,
                returned only if return_std is True
        """
        return self.gp.predict(confs, return_std, ncores=ncores)

    def predict_energy(self, glob_confs, return_std=False, ncores=1):
        """ Predict the global energies of the central atoms of confs using a GP

        Args:
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            return_std (bool): if True, returns the standard deviation
                associated to predictions according to the GP framework

        Returns:
            energies (array) : Array containing the total energy of each snapshot
            energies_errors (array): errors associated to the energies predictions,
                returned only if return_std is True
        """
        return self.gp.predict_energy(glob_confs, return_std, ncores=ncores)

    def save_gp(self, filename):
        """ Saves the GP object, now obsolete
        """
        warnings.warn('use save and load function', DeprecationWarning)
        self.gp.save(filename)

    def load_gp(self, filename):
        """ Loads the GP object, now obsolete
        """
        warnings.warn('use save and load function', DeprecationWarning)
        self.gp.load(filename)

    def build_grid(self, num, ncores=1):
        """ Build the mapped eam potential.
        Calculates the energy predicted by the GP for a configuration which eam descriptor
        is evalued between start and end. These energies are stored and a 1D spline
        interpolation is created, which can be used to predict the energy and, through its
        analytic derivative, the force associated to any embedded atom.
        The prediction is done by the ``calculator`` module which is built to work within
        the ase python package.

        Args:
            num (int): number of points to use in the grid of the mapped potential
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        # The descriptor is non-positive, so the grid runs from the most
        # negative training value (scaled by 3 as a safety margin) up to 0.
        if 'force' in self.gp.fitted:
            self.grid_start = 3.0 * \
                get_max_eam(self.gp.X_train_, self.r_cut,
                            self.gp.kernel.theta[2])
        else:
            self.grid_start = 3.0 * \
                get_max_eam_energy(self.gp.X_glob_train_, self.r_cut,
                                   self.gp.kernel.theta[2])
        self.grid_end = 0
        self.grid_num = num

        dists = list(np.linspace(self.grid_start,
                                 self.grid_end, self.grid_num))

        grid_data = self.gp.predict_energy(dists, ncores=ncores, mapping=True)
        self.grid = interpolation.Spline1D(dists, grid_data)

    def save(self, path):
        """ Save the model.
        This creates a .json file containing the parameters of the model and the
        paths to the GP object and the mapped potential, which are saved as
        separate .npy and .npz files, respectively.

        Args:
            path (str): path to the directory where the files are written
        """
        if not isinstance(path, Path):
            path = Path(path)

        params = {
            'model': self.__class__.__name__,
            'element': self.element,
            'r_cut': self.r_cut,
            'fitted': self.gp.fitted,
            'gp': {
                'kernel': self.gp.kernel.kernel_name,
                'n_train': self.gp.n_train,
                'sigma': self.gp.kernel.theta[0],
                'noise': self.gp.noise,
                'r0': self.gp.kernel.theta[2]
            },
            'grid': {
                'r_min': self.grid_start,
                'r_max': self.grid_end,
                'r_num': self.grid_num,
                'filename': {}
            } if self.grid else {}
        }

        gp_filename = "GP_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npy".format(
            p=params)
        params['gp']['filename'] = gp_filename
        self.gp.save(path / gp_filename)

        if self.grid:
            grid_filename = 'GRID_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npz'.format(
                p=params)
            params['grid']['filename'] = grid_filename
            self.grid.save(path / grid_filename)

        model_filename = 'MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json'.format(
            p=params)
        with open(path / model_filename, 'w') as fp:
            json.dump(params, fp, indent=4, cls=NpEncoder)

        print("Saved model with name: {}".format(model_filename))

    @classmethod
    def from_json(cls, path):
        """ Load the model.
        Loads the model, the associated GP and the mapped potential, if available.

        Args:
            path (str): path to the .json model file

        Return:
            model (obj): the model object
        """
        if not isinstance(path, Path):
            path = Path(path)

        directory = path.parent

        with open(path) as fp:
            params = json.load(fp)

        # BUGFIX: the constructor signature is (element, r_cut, sigma, r0,
        # noise); previously r0 and noise were passed in swapped order,
        # silently corrupting the kernel parameters of loaded models.
        model = cls(params['element'],
                    params['r_cut'],
                    params['gp']['sigma'],
                    params['gp']['r0'],
                    params['gp']['noise'])

        gp_filename = params['gp']['filename']
        model.gp.load(directory / gp_filename)

        if params['grid']:
            grid_filename = params['grid']['filename']
            model.grid = interpolation.Spline1D.load(directory / grid_filename)
            model.grid_start = params['grid']['r_min']
            model.grid_end = params['grid']['r_max']
            model.grid_num = params['grid']['r_num']

        return model
class EamManySpeciesModel(Model):
    """ Eam many species model class

    Class managing the Gaussian process and its mapped counterpart

    Args:
        elements (list): The atomic numbers of the elements considered
        r_cut (float): The cutoff radius used to carve the atomic environments
        sigma (float): Lengthscale parameter of the Gaussian process
        r0 (float): radius in the exponent of the eam descriptor
        noise (float): noise value associated with the training output data

    Attributes:
        gp (method): The eam many species Gaussian Process
        grid (dict): Mapped 1D spline per element, keyed by atomic number
        grid_start (float): Minimum descriptor value for which the grid is defined
        grid_end (float): Maximum descriptor value for which the grid is defined
        grid_num (int): number of points used to create the eam grid
    """

    def __init__(self, elements, r_cut, sigma, r0, noise, **kwargs):
        super().__init__()
        # Elements are kept sorted so the model is order-independent.
        self.elements = list(np.sort(elements))
        self.r_cut = r_cut

        # Kernel hyperparameters are ordered [sigma, r_cut, r0]; the same
        # ordering is assumed when reading theta back in save/from_json.
        kernel = kernels.EamManySpeciesKernel(
            theta=[sigma, r_cut, r0])
        self.gp = gp.GaussianProcess(kernel=kernel, noise=noise, **kwargs)

        self.grid, self.grid_start, self.grid_end, self.grid_num = {}, None, None, None

    def fit(self, confs, forces, ncores=1):
        """ Fit the GP to a set of training forces using a
        eam many species force-force kernel

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            forces (array) : Array containing the vector forces on
                the central atoms of the training configurations
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        self.gp.fit(confs, forces, ncores=ncores)

    def fit_energy(self, glob_confs, energies, ncores=1):
        """ Fit the GP to a set of training energies using a
        eam many species energy-energy kernel

        Args:
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            energies (array) : Array containing the total energy of each snapshot
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        self.gp.fit_energy(glob_confs, energies, ncores=ncores)

    def fit_force_and_energy(self, confs, forces, glob_confs, energies, ncores=1):
        """ Fit the GP to a set of training forces and energies using
        eam many species force-force, energy-force and energy-energy kernels

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            forces (array) : Array containing the vector forces on
                the central atoms of the training configurations
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            energies (array) : Array containing the total energy of each snapshot
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        self.gp.fit_force_and_energy(
            confs, forces, glob_confs, energies, ncores=ncores)

    def predict(self, confs, return_std=False, ncores=1):
        """ Predict the forces acting on the central atoms of confs using a GP

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            return_std (bool): if True, returns the standard deviation
                associated to predictions according to the GP framework

        Returns:
            forces (array): array of force vectors predicted by the GP
            forces_errors (array): errors associated to the force predictions,
                returned only if return_std is True
        """
        return self.gp.predict(confs, return_std, ncores=ncores)

    def predict_energy(self, glob_confs, return_std=False, ncores=1):
        """ Predict the global energies of the central atoms of confs using a GP

        Args:
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            return_std (bool): if True, returns the standard deviation
                associated to predictions according to the GP framework

        Returns:
            energies (array) : Array containing the total energy of each snapshot
            energies_errors (array): errors associated to the energies predictions,
                returned only if return_std is True
        """
        return self.gp.predict_energy(glob_confs, return_std, ncores=ncores)

    def save_gp(self, filename):
        """ Saves the GP object, now obsolete
        """
        warnings.warn('use save and load function', DeprecationWarning)
        self.gp.save(filename)

    def load_gp(self, filename):
        """ Loads the GP object, now obsolete
        """
        warnings.warn('use save and load function', DeprecationWarning)
        self.gp.load(filename)

    def build_grid(self, num, ncores=1):
        """ Build the mapped eam potential.
        Calculates the energy predicted by the GP for a configuration which eam descriptor
        is evalued between start and end. These energies are stored and a 1D spline
        interpolation is created for each element, which can be used to predict the
        energy and, through its analytic derivative, the force associated to any
        embedded atom.
        The prediction is done by the ``calculator`` module which is built to work within
        the ase python package.

        Args:
            num (int): number of points to use in the grid of the mapped potential
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        # The descriptor is non-positive, so the grid runs from the most
        # negative training value (scaled by 3 as a safety margin) up to 0.
        if 'force' in self.gp.fitted:
            self.grid_start = 3.0 * \
                get_max_eam(self.gp.X_train_, self.r_cut,
                            self.gp.kernel.theta[2])
        else:
            self.grid_start = 3.0 * \
                get_max_eam_energy(self.gp.X_glob_train_, self.r_cut,
                                   self.gp.kernel.theta[2])
        self.grid_end = 0
        self.grid_num = num

        dists = list(np.linspace(self.grid_start,
                                 self.grid_end, self.grid_num))

        # One tabulated spline per element, keyed by its atomic number.
        for el in self.elements:
            grid_data = self.gp.predict_energy(
                dists, ncores=ncores, mapping=True, alpha_1_descr=el)
            self.grid[el] = interpolation.Spline1D(dists, grid_data)

    def save(self, path):
        """ Save the model.
        This creates a .json file containing the parameters of the model and the
        paths to the GP object and the mapped potentials, which are saved as
        separate .npy and .npz files, respectively.

        Args:
            path (str): path to the directory where the files are written
        """
        if not isinstance(path, Path):
            path = Path(path)

        params = {
            'model': self.__class__.__name__,
            'elements': self.elements,
            'r_cut': self.r_cut,
            'fitted': self.gp.fitted,
            'gp': {
                'kernel': self.gp.kernel.kernel_name,
                'n_train': self.gp.n_train,
                'sigma': self.gp.kernel.theta[0],
                'noise': self.gp.noise,
                'r0': self.gp.kernel.theta[2]
            },
            'grid': {
                'r_min': self.grid_start,
                'r_max': self.grid_end,
                'r_num': self.grid_num,
                'filename': {}
            } if self.grid else {}
        }

        gp_filename = "GP_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npy".format(
            p=params)
        params['gp']['filename'] = gp_filename
        self.gp.save(path / gp_filename)

        # Grid keys are atomic numbers; JSON object keys must be strings,
        # so each key is stringified here and parsed back in from_json.
        for k, grid in self.grid.items():
            key = str(k)
            grid_filename = "GRID_{}_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npz".format(
                key, p=params)
            params['grid']['filename'][key] = grid_filename
            grid.save(path / grid_filename)

        model_filename = "MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json".format(
            p=params)
        with open(path / model_filename, 'w') as fp:
            json.dump(params, fp, indent=4, cls=NpEncoder)

        print("Saved model with name: {}".format(model_filename))

    @classmethod
    def from_json(cls, path):
        """ Load the model.
        Loads the model, the associated GP and the mapped potential, if available.

        Args:
            path (str): path to the .json model file

        Return:
            model (obj): the model object
        """
        if not isinstance(path, Path):
            path = Path(path)

        directory = path.parent

        with open(path) as fp:
            params = json.load(fp)

        # BUGFIX: the constructor signature is (elements, r_cut, sigma, r0,
        # noise); previously r0 and noise were passed in swapped order,
        # silently corrupting the kernel parameters of loaded models.
        model = cls(params['elements'],
                    params['r_cut'],
                    params['gp']['sigma'],
                    params['gp']['r0'],
                    params['gp']['noise'])

        gp_filename = params['gp']['filename']
        model.gp.load(directory / gp_filename)

        if params['grid']:
            model.grid_start = params['grid']['r_min']
            model.grid_end = params['grid']['r_max']
            model.grid_num = params['grid']['r_num']

            for key, grid_filename in params['grid']['filename'].items():
                # BUGFIX: keys were saved via str(element); ``tuple(key)``
                # split the string into characters (e.g. "26" -> ('2','6')),
                # so the int atomic number is restored instead.
                model.grid[int(key)] = interpolation.Spline1D.load(
                    directory / grid_filename)

        return model
| 20,669 | 36.718978 | 108 | py |
mff | mff-master/mff/models/__init__.py | from .twobody import TwoBodySingleSpeciesModel, TwoBodyManySpeciesModel
from .threebody import ThreeBodySingleSpeciesModel, ThreeBodyManySpeciesModel
from .manybody import ManyBodySingleSpeciesModel, ManyBodyManySpeciesModel
from .combined import CombinedSingleSpeciesModel, CombinedManySpeciesModel
from .eam import EamSingleSpeciesModel, EamManySpeciesModel
from .twothreeeam import TwoThreeEamSingleSpeciesModel, TwoThreeEamManySpeciesModel
__all__ = [TwoBodySingleSpeciesModel,
TwoBodyManySpeciesModel,
ThreeBodySingleSpeciesModel,
ThreeBodyManySpeciesModel,
ManyBodySingleSpeciesModel,
ManyBodyManySpeciesModel,
CombinedSingleSpeciesModel,
CombinedManySpeciesModel,
EamSingleSpeciesModel,
EamManySpeciesModel,
TwoThreeEamSingleSpeciesModel,
TwoThreeEamManySpeciesModel]
| 899 | 41.857143 | 83 | py |
mff | mff-master/mff/models/threebody.py | # -*- coding: utf-8 -*-
import json
import sys
import warnings
from itertools import combinations_with_replacement
from pathlib import Path
import numpy as np
from mff import gp, interpolation, kernels
from mff.models.base import Model
sys.setrecursionlimit(100000)
class NpEncoder(json.JSONEncoder):
    """JSON encoder that serializes NumPy scalars and arrays.

    Falls back to the base encoder for every other unsupported type.
    """

    def default(self, obj):
        # Arrays become nested lists; NumPy scalars become the closest
        # built-in Python number via ``.item()``.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, (np.integer, np.floating)):
            return obj.item()
        return super().default(obj)
class ThreeBodySingleSpeciesModel(Model):
""" 3-body single species model class
Class managing the Gaussian process and its mapped counterpart
Args:
element (int): The atomic number of the element considered
r_cut (foat): The cutoff radius used to carve the atomic environments
sigma (foat): Lengthscale parameter of the Gaussian process
theta (float): decay ratio of the cutoff function in the Gaussian Process
noise (float): noise value associated with the training output data
Attributes:
gp (method): The 3-body single species Gaussian Process
grid (method): The 3-body single species tabulated potential
grid_start (float): Minimum atomic distance for which the grid is defined (cannot be 0.0)
grid_num (int): number of points per side used to create the 3-body grid. This is a
3-dimensional grid, therefore the total number of grid points will be grid_num^3.
"""
def __init__(self, element, r_cut, sigma, theta, noise, **kwargs):
super().__init__()
self.element = element
self.r_cut = r_cut
kernel = kernels.ThreeBodySingleSpeciesKernel(
theta=[sigma, theta, r_cut])
self.gp = gp.GaussianProcess(kernel=kernel, noise=noise, **kwargs)
self.grid, self.grid_start, self.grid_num = None, None, None
    def fit(self, confs, forces, ncores=1):
        """ Fit the GP to a set of training forces using a
        3-body single species force-force kernel function

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            forces (array) : Array containing the vector forces on
                the central atoms of the training configurations
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        # Training is delegated entirely to the underlying Gaussian process.
        self.gp.fit(confs, forces, ncores=ncores)
    def fit_energy(self, glob_confs, energies, ncores=1):
        """ Fit the GP to a set of training energies using a
        3-body single species energy-energy kernel function

        Args:
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            energies (array) : Array containing the total energy of each snapshot
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        # Training is delegated entirely to the underlying Gaussian process.
        self.gp.fit_energy(glob_confs, energies, ncores=ncores)
    def fit_force_and_energy(self, confs, forces, glob_confs, energies, ncores=1):
        """ Fit the GP to a set of training forces and energies using
        3-body single species force-force, energy-force and energy-energy kernels

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            forces (array) : Array containing the vector forces on
                the central atoms of the training configurations
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            energies (array) : Array containing the total energy of each snapshot
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        # Training is delegated entirely to the underlying Gaussian process.
        self.gp.fit_force_and_energy(
            confs, forces, glob_confs, energies, ncores=ncores)
    def predict(self, confs, return_std=False, ncores=1):
        """ Predict the forces acting on the central atoms of confs using a GP

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            return_std (bool): if True, returns the standard deviation
                associated to predictions according to the GP framework
            ncores (int): number of CPUs to use for the evaluation

        Returns:
            forces (array): array of force vectors predicted by the GP
            forces_errors (array): errors associated to the force predictions,
                returned only if return_std is True
        """
        # Prediction is delegated entirely to the underlying Gaussian process.
        return self.gp.predict(confs, return_std, ncores=ncores)
def predict_energy(self, confs, return_std=False, ncores=1):
""" Predict the global energies of the central atoms of confs using a GP
Args:
confs (list): List of M x 5 arrays containing coordinates and
atomic numbers of atoms within a cutoff from the central one
return_std (bool): if True, returns the standard deviation
associated to predictions according to the GP framework
Returns:
energies (array): array of force vectors predicted by the GP
energies_errors (array): errors associated to the energies predictions,
returned only if return_std is True
"""
return self.gp.predict_energy(confs, return_std, ncores=ncores)
def save_gp(self, filename):
""" Saves the GP object, now obsolete
"""
warnings.warn('use save and load function', DeprecationWarning)
self.gp.save(filename)
def load_gp(self, filename):
""" Loads the GP object, now obsolete
"""
warnings.warn('use save and load function', DeprecationWarning)
self.gp.load(filename)
def build_grid(self, start, num, ncores=1):
""" Build the mapped 3-body potential.
Calculates the energy predicted by the GP for three atoms at all possible combination
of num distances ranging from start to r_cut. The energy is calculated only for ``valid``
triplets of atoms, i.e. sets of three distances which form a triangle (this is checked via
the triangle inequality). The grid building exploits all the permutation invariances to
reduce the number of energy calculations needed to fill the grid.
The computed energies are stored in a 3D cube of values, and a 3D spline interpolation is
created, which can be used to predict the energy and, through its analytic derivative,
the force associated to any triplet of atoms.
The total force or local energy can then be calculated for any atom by summing the
triplet contributions of every valid triplet of atoms of which one is always the central one.
The prediction is done by the ``calculator`` module which is built to work within
the ase python package.
Args:
start (float): smallest interatomic distance for which the energy is predicted
by the GP and stored inn the 3-body mapped potential
num (int): number of points to use to generate the list of distances used to
generate the triplets of atoms for the mapped potential
ncores (int): number of CPUs to use to calculate the energy predictions
"""
self.grid_start = start
self.grid_num = num
dists = np.linspace(start, self.r_cut, num)
inds, r_ij_x, r_ki_x, r_ki_y = self.generate_triplets(dists)
confs = np.zeros((len(r_ij_x), 2, 5))
confs[:, 0, 0] = r_ij_x # Element on the x axis
confs[:, 1, 0] = r_ki_x # Reshape into confs shape: this is x2
confs[:, 1, 1] = r_ki_y # Reshape into confs shape: this is y2
# Permutations of elements
confs[:, :, 3] = self.element # Central element is always element 1
# Element on the x axis is always element 2
confs[:, 0, 4] = self.element
# Element on the xy plane is always element 3
confs[:, 1, 4] = self.element
confs = np.nan_to_num(confs) # Avoid nans to ruin everything
confs = list(confs)
grid_data = np.zeros((num, num, num))
grid_data[inds] = self.gp.predict_energy(
confs, ncores=ncores, mapping=True).flatten()
for ind_i in range(num):
for ind_j in range(ind_i + 1):
for ind_k in range(ind_j + 1):
grid_data[ind_i, ind_k,
ind_j] = grid_data[ind_i, ind_j, ind_k]
grid_data[ind_j, ind_i,
ind_k] = grid_data[ind_i, ind_j, ind_k]
grid_data[ind_j, ind_k,
ind_i] = grid_data[ind_i, ind_j, ind_k]
grid_data[ind_k, ind_i,
ind_j] = grid_data[ind_i, ind_j, ind_k]
grid_data[ind_k, ind_j,
ind_i] = grid_data[ind_i, ind_j, ind_k]
self.grid = interpolation.Spline3D(dists, dists, dists, grid_data)
@staticmethod
def generate_triplets(dists):
""" Generate a list of all valid triplets using perutational invariance.
Calculates the energy predicted by the GP for three atoms at all possible combination
of num distances ranging from start to r_cut. The energy is calculated only for ``valid``
triplets of atoms, i.e. sets of three distances which form a triangle (this is checked via
the triangle inequality). The grid building exploits all the permutation invariances to
reduce the number of energy calculations needed to fill the grid.
The computed energies are stored in a 3D cube of values, and a 3D spline interpolation is
created, which can be used to predict the energy and, through its analytic derivative,
the force associated to any triplet of atoms.
The total force or local energy can then be calculated for any atom by summing the
triplet contributions of every valid triplet of atoms of which one is always the central one.
The prediction is done by the ``calculator`` module which is built to work within
the ase python package.
Args:
dists (array): array of floats containing all of the distances which can be used to
build triplets of atoms. This array is created by calling np.linspace(start, r_cut, num)
Returns:
inds (array): array of booleans indicating which triplets (three distance values) need to be
evaluated to fill the 3D grid of energy values.
r_ij_x (array): array containing the x coordinate of the second atom j w.r.t. the central atom i
r_ki_x (array): array containing the x coordinate of the third atom k w.r.t. the central atom i
r_ki_y (array): array containing the y coordinate of the third atom k w.r.t. the central atom i
"""
d_ij, d_jk, d_ki = np.meshgrid(
dists, dists, dists, indexing='ij', sparse=False, copy=True)
# Valid triangles according to triangle inequality
inds = np.logical_and(
d_ij <= d_jk + d_ki, np.logical_and(d_jk <= d_ki + d_ij, d_ki <= d_ij + d_jk))
# Utilizing permutation invariance
inds = np.logical_and(np.logical_and(d_ij >= d_jk, d_jk >= d_ki), inds)
# Element on the x axis
r_ij_x = d_ij[inds]
# Element on the xy plane
r_ki_x = (d_ij[inds] ** 2 - d_jk[inds] ** 2 +
d_ki[inds] ** 2) / (2 * d_ij[inds])
# using abs to avoid numerical error near to 0
r_ki_y = np.sqrt(np.abs(d_ki[inds] ** 2 - r_ki_x ** 2))
return inds, r_ij_x, r_ki_x, r_ki_y
def save(self, path):
""" Save the model.
This creates a .json file containing the parameters of the model and the
paths to the GP objects and the mapped potential, which are saved as
separate .gpy and .gpz files, respectively.
Args:
path (str): path to the file
"""
if not isinstance(path, Path):
path = Path(path)
params = {
'model': self.__class__.__name__,
'elements': self.element,
'r_cut': self.r_cut,
'fitted': self.gp.fitted,
'gp': {
'kernel': self.gp.kernel.kernel_name,
'n_train': self.gp.n_train,
'sigma': self.gp.kernel.theta[0],
'theta': self.gp.kernel.theta[1],
'noise': self.gp.noise
},
'grid': {
'r_min': self.grid_start,
'r_num': self.grid_num,
'filename': None
} if self.grid else {}
}
gp_filename = "GP_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npy".format(
p=params)
params['gp']['filename'] = gp_filename
self.gp.save(path / gp_filename)
if self.grid:
grid_filename = 'GRID_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npz'.format(
p=params)
params['grid']['filename'] = grid_filename
self.grid.save(path / grid_filename)
with open(path / 'MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json'.format(p=params), 'w') as fp:
json.dump(params, fp, indent=4, cls=NpEncoder)
print("Saved model with name: MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json".format(p=params))
@classmethod
def from_json(cls, path):
""" Load the model.
Loads the model, the associated GP and the mapped potential, if available.
Args:
path (str): path to the .json model file
Return:
model (obj): the model object
"""
if not isinstance(path, Path):
path = Path(path)
directory, prefix = path.parent, path.stem
with open(path) as fp:
params = json.load(fp)
model = cls(params['elements'],
params['r_cut'],
params['gp']['sigma'],
params['gp']['theta'],
params['gp']['noise'])
gp_filename = params['gp']['filename']
try:
model.gp.load(directory / gp_filename)
except:
warnings.warn("The 3-body GP file is missing")
pass
if params['grid']:
grid_filename = params['grid']['filename']
model.grid = interpolation.Spline3D.load(directory / grid_filename)
model.grid_start = params['grid']['r_min']
model.grid_num = params['grid']['r_num']
return model
class ThreeBodyManySpeciesModel(Model):
    """ 3-body many species model class
    Class managing the Gaussian process and its mapped counterpart

    Args:
        elements (list): List containing the atomic numbers in increasing order
        r_cut (float): The cutoff radius used to carve the atomic environments
        sigma (float): Lengthscale parameter of the Gaussian process
        theta (float): decay ratio of the cutoff function in the Gaussian Process
        noise (float): noise value associated with the training output data

    Attributes:
        gp (class): The 3-body many species Gaussian Process
        grid (dict): Maps each unordered triplet of elements to its tabulated
            3-body potential (e.g. key (el1, el1, el2))
        grid_start (float): Minimum atomic distance for which the grids are defined (cannot be 0)
        grid_num (int): number of points per side used to create the 3-body grids. These are
            3-dimensional grids, therefore the total number of grid points will be grid_num^3.
    """

    def __init__(self, elements, r_cut, sigma, theta, noise, **kwargs):
        super().__init__()
        self.elements = list(np.sort(elements))
        self.r_cut = r_cut

        kernel = kernels.ThreeBodyManySpeciesKernel(
            theta=[sigma, theta, r_cut])
        self.gp = gp.GaussianProcess(kernel=kernel, noise=noise, **kwargs)

        self.grid, self.grid_start, self.grid_num = {}, None, None

    def fit(self, confs, forces, ncores=1):
        """ Fit the GP to a set of training forces using a
        3-body many species force-force kernel function

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            forces (array): Array containing the vector forces on
                the central atoms of the training configurations
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        self.gp.fit(confs, forces, ncores=ncores)

    def fit_energy(self, glob_confs, energies, ncores=1):
        """ Fit the GP to a set of training energies using a
        3-body many species energy-energy kernel function

        Args:
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            energies (array): Array containing the scalar energies of
                the training snapshots
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        self.gp.fit_energy(glob_confs, energies, ncores=ncores)

    def fit_force_and_energy(self, confs, forces, glob_confs, energies, ncores=1):
        """ Fit the GP to a set of training forces and energies using
        3-body many species force-force, energy-force and energy-energy kernels

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            forces (array): Array containing the vector forces on
                the central atoms of the training configurations
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            energies (array): Array containing the scalar energies of
                the training snapshots
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        self.gp.fit_force_and_energy(
            confs, forces, glob_confs, energies, ncores=ncores)

    def update_force(self, confs, forces, ncores=1):
        """ Update a fitted GP with a set of forces using
        3-body many species force-force kernels

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            forces (array): Array containing the vector forces on
                the central atoms of the training configurations
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        self.gp.fit_update(confs, forces, ncores=ncores)

    def update_energy(self, glob_confs, energies, ncores=1):
        """ Update a fitted GP with a set of energies using
        3-body many species energy-energy kernels

        Args:
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            energies (array): Array containing the scalar energies of
                the training snapshots
            ncores (int): number of CPUs to use for the gram matrix evaluation
        """
        self.gp.fit_update_energy(glob_confs, energies, ncores=ncores)

    def predict(self, confs, return_std=False, ncores=1):
        """ Predict the forces acting on the central atoms of confs using a GP

        Args:
            confs (list): List of M x 5 arrays containing coordinates and
                atomic numbers of atoms within a cutoff from the central one
            return_std (bool): if True, returns the standard deviation
                associated to predictions according to the GP framework

        Returns:
            forces (array): array of force vectors predicted by the GP
            forces_errors (array): errors associated to the force predictions,
                returned only if return_std is True
        """
        return self.gp.predict(confs, return_std, ncores=ncores)

    def predict_energy(self, glob_confs, return_std=False, ncores=1):
        """ Predict the local energies of the central atoms of confs using a GP

        Args:
            glob_confs (list of lists): List of configurations arranged so that
                grouped configurations belong to the same snapshot
            return_std (bool): if True, returns the standard deviation
                associated to predictions according to the GP framework

        Returns:
            energies (array): array of energies predicted by the GP
            energies_errors (array): errors associated to the energies predictions,
                returned only if return_std is True
        """
        return self.gp.predict_energy(glob_confs, return_std, ncores=ncores)

    def save_gp(self, filename):
        """ Saves the GP object, now obsolete
        """
        # Consistency fix: emit the same deprecation warning as the
        # single species model does.
        warnings.warn('use save and load function', DeprecationWarning)
        self.gp.save(filename)

    def load_gp(self, filename):
        """ Loads the GP object, now obsolete
        """
        warnings.warn('use save and load function', DeprecationWarning)
        self.gp.load(filename)

    def build_grid(self, start, num, ncores=1):
        """Create the 3-body energy grids for every unordered triplet of the
        model's elements (obtained via combinations_with_replacement), by
        calling ``build_grid_3b`` once per triplet.

        Args:
            start (float): smallest interatomic distance for which the energy is predicted
                by the GP and stored in the 3-body mapped potential
            num (int): number of points to use to generate the list of distances used to
                generate the triplets of atoms for the mapped potential
            ncores (int): number of CPUs to use to calculate the energy predictions
        """
        self.grid_start = start
        self.grid_num = num

        dists = np.linspace(start, self.r_cut, num)
        perm_list = list(combinations_with_replacement(self.elements, 3))
        for trip in perm_list:
            self.grid[trip] = self.build_grid_3b(
                dists, trip[0], trip[1], trip[2], ncores)

    def build_grid_3b(self, dists, element_i, element_j, element_k, ncores):
        """ Build a mapped 3-body potential for one triplet of elements.

        Calculates the energy predicted by the GP for three atoms of elements
        element_i, element_j, element_k at all valid combinations of the given
        distances. Only ``valid`` triplets (sets of three distances forming a
        triangle, found via ``generate_triplets_all``) are evaluated. The
        energies are stored in a 3D cube of values over which a 3D spline
        interpolation is created; its analytic derivative yields the force for
        any triplet. Prediction is done by the ``calculator`` module, built to
        work within the ase python package.

        Args:
            dists (array): array of floats containing all of the distances which can be used to
                build triplets of atoms. Created by calling np.linspace(start, r_cut, num)
            element_i (int): atomic number of the central atom i in a triplet
            element_j (int): atomic number of the second atom j in a triplet
            element_k (int): atomic number of the third atom k in a triplet
            ncores (int): number of CPUs to use when computing the triplet local energies

        Returns:
            spline3D (obj): a 3D spline object that can be used to predict the energy and
                the force associated to the central atom of a triplet.
        """
        num = len(dists)
        inds, r_ij_x, r_ki_x, r_ki_y = self.generate_triplets_all(dists)

        confs = np.zeros((len(r_ij_x), 2, 5))
        confs[:, 0, 0] = r_ij_x  # Element on the x axis
        confs[:, 1, 0] = r_ki_x  # Reshape into confs shape: this is x2
        confs[:, 1, 1] = r_ki_y  # Reshape into confs shape: this is y2

        confs[:, :, 3] = element_i  # Central element is always element 1
        confs[:, 0, 4] = element_j  # Element on the x axis is always element 2
        # Element on the xy plane is always element 3
        confs[:, 1, 4] = element_k

        confs = np.nan_to_num(confs)  # Avoid nans to ruin everything
        confs = list(confs)

        grid_3b = np.zeros((num, num, num))
        grid_3b[inds] = self.gp.predict_energy(
            confs, ncores=ncores, mapping=True).flatten()

        return interpolation.Spline3D(dists, dists, dists, grid_3b)

    @staticmethod
    def generate_triplets_with_permutation_invariance(dists):
        """ Generate a list of valid triplets exploiting permutation invariance.

        Keeps only sets of three distances that form a triangle (triangle
        inequality) AND obey the canonical ordering d_ij >= d_jk >= d_ki, so
        each permutation class is evaluated once.

        Args:
            dists (array): array of floats containing all of the distances which can be used to
                build triplets of atoms. Created by calling np.linspace(start, r_cut, num)

        Returns:
            inds (array): array of booleans indicating which triplets (three distance values)
                need to be evaluated to fill the 3D grid of energy values.
            r_ij_x (array): x coordinate of the second atom j w.r.t. the central atom i
            r_ki_x (array): x coordinate of the third atom k w.r.t. the central atom i
            r_ki_y (array): y coordinate of the third atom k w.r.t. the central atom i
        """
        d_ij, d_jk, d_ki = np.meshgrid(
            dists, dists, dists, indexing='ij', sparse=False, copy=True)

        # Valid triangles according to triangle inequality
        inds = np.logical_and(
            d_ij <= d_jk + d_ki, np.logical_and(d_jk <= d_ki + d_ij, d_ki <= d_ij + d_jk))
        # Utilizing permutation invariance
        inds = np.logical_and(np.logical_and(d_ij >= d_jk, d_jk >= d_ki), inds)

        # Element on the x axis
        r_ij_x = d_ij[inds]
        # Element on the xy plane
        r_ki_x = (d_ij[inds] ** 2 - d_jk[inds] ** 2 +
                  d_ki[inds] ** 2) / (2 * d_ij[inds])
        # using abs to avoid numerical error near to 0
        r_ki_y = np.sqrt(np.abs(d_ki[inds] ** 2 - r_ki_x ** 2))

        return inds, r_ij_x, r_ki_x, r_ki_y

    @staticmethod
    def generate_triplets_all(dists):
        """ Generate a list of all valid triplets.

        Unlike the permutation-invariant variant, every triangle-forming set
        of three distances is kept: with several species, triplets made of
        different elements are not interchangeable.

        Args:
            dists (array): array of floats containing all of the distances which can be used to
                build triplets of atoms. Created by calling np.linspace(start, r_cut, num)

        Returns:
            inds (array): array of booleans indicating which triplets (three distance values)
                need to be evaluated to fill the 3D grid of energy values.
            r_ij_x (array): x coordinate of the second atom j w.r.t. the central atom i
            r_ki_x (array): x coordinate of the third atom k w.r.t. the central atom i
            r_ki_y (array): y coordinate of the third atom k w.r.t. the central atom i
        """
        d_ij, d_jk, d_ki = np.meshgrid(
            dists, dists, dists, indexing='ij', sparse=False, copy=True)

        # Valid triangles according to triangle inequality
        inds = np.logical_and(
            d_ij <= d_jk + d_ki, np.logical_and(d_jk <= d_ki + d_ij, d_ki <= d_ij + d_jk))

        # Element on the x axis
        r_ij_x = d_ij[inds]
        # Element on the xy plane
        r_ki_x = (d_ij[inds] ** 2 - d_jk[inds] ** 2 +
                  d_ki[inds] ** 2) / (2 * d_ij[inds])
        # using abs to avoid numerical error near to 0
        r_ki_y = np.sqrt(np.abs(d_ki[inds] ** 2 - r_ki_x ** 2))

        return inds, r_ij_x, r_ki_x, r_ki_y

    def save(self, path):
        """ Save the model.

        This creates a .json file containing the parameters of the model and the
        paths to the GP object and the mapped potentials, which are saved as
        separate .npy and .npz files, respectively.

        Args:
            path (str): directory the files are written into
        """
        if not isinstance(path, Path):
            path = Path(path)

        params = {
            'model': self.__class__.__name__,
            'elements': self.elements,
            'r_cut': self.r_cut,
            'fitted': self.gp.fitted,
            'gp': {
                'kernel': self.gp.kernel.kernel_name,
                'n_train': self.gp.n_train,
                'sigma': self.gp.kernel.theta[0],
                'theta': self.gp.kernel.theta[1],
                'noise': self.gp.noise
            },
            'grid': {
                'r_min': self.grid_start,
                'r_num': self.grid_num,
                'filename': {}
            } if self.grid else {}
        }

        # Common kernel/training-size tag shared by every emitted file name.
        tag = "ker_{}_ntr_{}".format(
            params['gp']['kernel'], params['gp']['n_train'])

        gp_filename = "GP_{}.npy".format(tag)
        params['gp']['filename'] = gp_filename
        self.gp.save(path / gp_filename)

        for k, grid in self.grid.items():
            key = '_'.join(str(element) for element in k)
            grid_filename = "GRID_{}_{}.npz".format(key, tag)
            params['grid']['filename'][key] = grid_filename
            grid.save(path / grid_filename)

        with open(path / "MODEL_{}.json".format(tag), 'w') as fp:
            json.dump(params, fp, indent=4, cls=NpEncoder)

        print('Saved model with name:', str(
            path / "MODEL_{}.json".format(tag)))

    @classmethod
    def from_json(cls, path):
        """ Load the model.

        Loads the model, the associated GP and the mapped potentials, if available.

        Args:
            path (str): path to the .json model file

        Returns:
            model (obj): the model object
        """
        if not isinstance(path, Path):
            path = Path(path)
        directory = path.parent

        with open(path) as fp:
            params = json.load(fp)
        model = cls(params['elements'],
                    params['r_cut'],
                    params['gp']['sigma'],
                    params['gp']['theta'],
                    params['gp']['noise'])

        gp_filename = params['gp']['filename']
        try:
            model.gp.load(directory / gp_filename)
        except Exception:
            # Best-effort: the model stays usable via its mapped grids.
            # (Previously a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit.)
            warnings.warn("The 3-body GP file is missing")

        if params['grid']:
            for key, grid_filename in params['grid']['filename'].items():
                k = tuple(int(ind) for ind in key.split('_'))
                model.grid[k] = interpolation.Spline3D.load(
                    directory / grid_filename)
            model.grid_start = params['grid']['r_min']
            model.grid_num = params['grid']['r_num']

        return model
if __name__ == '__main__':

    def test_three_body_single_species_model():
        """Smoke test: fit, map and save a single species 3-body model."""
        # The two environments have different numbers of neighbours (4 and 5),
        # so wrapping them in np.array(...) would build a ragged array —
        # an error on NumPy >= 1.24. A plain list is what the GP API expects.
        confs = [
            np.hstack([np.random.randn(4, 3), 26 * np.ones((4, 2))]),
            np.hstack([np.random.randn(5, 3), 26 * np.ones((5, 2))])
        ]

        forces = np.random.randn(2, 3)

        element, r_cut, sigma, theta, noise = 26, 2., 3., 4., 5.
        filename = Path() / 'test_model'

        m = ThreeBodySingleSpeciesModel(element, r_cut, sigma, theta, noise)
        print(m)

        m.fit(confs, forces)
        print(m)

        m.build_grid(1., 10)
        print(m)

        m.save(filename)

    def test_three_body_two_species_model():
        """Smoke test: fit, map and save a two species 3-body model."""
        elements = [2, 4]
        # Plain list for the same ragged-array reason as above.
        confs = [
            np.hstack([np.random.randn(4, 3),
                       np.random.choice(elements, size=(4, 2))]),
            np.hstack([np.random.randn(5, 3),
                       np.random.choice(elements, size=(5, 2))])
        ]

        forces = np.random.randn(2, 3)

        r_cut, sigma, theta, noise = 2., 3., 4., 5.
        filename = Path() / 'test_model'

        m = ThreeBodyManySpeciesModel(elements, r_cut, sigma, theta, noise)
        print(m)

        m.fit(confs, forces)
        print(m)

        m.build_grid(1., 10)
        print(m)

        m.save(filename)

    test_three_body_single_species_model()
    # test_three_body_two_species_model()
| 35,154 | 41.406514 | 111 | py |
mff | mff-master/mff/models/combined.py | # -*- coding: utf-8 -*-
import json
import logging
import warnings
from itertools import combinations_with_replacement
from pathlib import Path
import numpy as np
from mff import gp, interpolation, kernels, utility, models
from .base import Model
logger = logging.getLogger(__name__)
class NpEncoder(json.JSONEncoder):
    """JSON encoder that converts NumPy scalars and arrays to native types."""

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Anything else falls back to the stock encoder (raises TypeError).
        return super().default(obj)
class CombinedSingleSpeciesModel(Model):
""" 2- and 3-body single species model class
Class managing the Gaussian processes and their mapped counterparts
Args:
element (int): The atomic number of the element considered
r_cut (foat): The cutoff radius used to carve the atomic environments
sigma_2b (foat): Lengthscale parameter of the 2-body Gaussian process
sigma_3b (foat): Lengthscale parameter of the 2-body Gaussian process
theta_2b (float): decay ratio of the cutoff function in the 2-body Gaussian Process
theta_3b (float): decay ratio of the cutoff function in the 3-body Gaussian Process
noise (float): noise value associated with the training output data
Attributes:
gp_2b (method): The 2-body single species Gaussian Process
gp_3b (method): The 3-body single species Gaussian Process
grid_2b (method): The 2-body single species tabulated potential
grid_3b (method): The 3-body single species tabulated potential
grid_start (float): Minimum atomic distance for which the grids are defined (cannot be 0.0)
grid_num (int): number of points per side used to create the 2- and 3-body grid. The 3-body
grid is 3-dimensional, therefore its total number of grid points will be grid_num^3
"""
def __init__(self, element, r_cut, sigma_2b, sigma_3b, theta_2b, theta_3b, noise, rep_sig=1, **kwargs):
super().__init__()
self.element = element
self.r_cut = r_cut
self.rep_sig = rep_sig
kernel_2b = kernels.TwoBodySingleSpeciesKernel(
theta=[sigma_2b, theta_2b, r_cut])
self.gp_2b = gp.GaussianProcess(
kernel=kernel_2b, noise=noise, **kwargs)
kernel_3b = kernels.ThreeBodySingleSpeciesKernel(
theta=[sigma_3b, theta_3b, r_cut])
self.gp_3b = gp.GaussianProcess(
kernel=kernel_3b, noise=noise, **kwargs)
self.grid_2b, self.grid_3b, self.grid_start, self.grid_num = None, None, None, None
def fit(self, confs, forces, ncores=1):
""" Fit the GP to a set of training forces using a 2- and
3-body single species force-force kernel functions. The 2-body Gaussian
process is first fitted, then the 3-body GP is fitted to the difference
between the training forces and the 2-body predictions of force on the
training configurations
Args:
confs (list): List of M x 5 arrays containing coordinates and
atomic numbers of atoms within a cutoff from the central one
forces (array) : Array containing the vector forces on
the central atoms of the training configurations
ncores (int): number of CPUs to use for the gram matrix evaluation
"""
hypotetical_model_name = ("models/MODEL_ker_TwoBodySingleSpecies_ntr_%i.json" %(len(forces)))
try:
model_2b = models.TwoBodySingleSpeciesModel.from_json(hypotetical_model_name)
self.rep_sig = model_2b.rep_sig
self.gp_2b = model_2b.gp
if self.rep_sig:
self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
forces -= self.rep_forces
print("Loaded 2-body model to bootstart training")
except:
if self.rep_sig:
self.rep_sig = utility.find_repulstion_sigma(confs)
self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
forces -= self.rep_forces
self.gp_2b.fit(confs, forces, ncores=ncores)
two_body_forces = self.gp_2b.predict(confs, ncores=ncores)
self.gp_3b.fit(confs, forces - two_body_forces, ncores=ncores)
def fit_energy(self, glob_confs, energies, ncores=1):
""" Fit the GP to a set of training energies using a 2- and
3-body single species energy-energy kernel functions. The 2-body Gaussian
process is first fitted, then the 3-body GP is fitted to the difference
between the training energies and the 2-body predictions of energies on the
training configurations.
Args:
glob_confs (list of lists): List of configurations arranged so that
grouped configurations belong to the same snapshot
energies (array) : Array containing the total energy of each snapshot
ncores (int): number of CPUs to use for the gram matrix evaluation
"""
hypotetical_model_name = "models/MODEL_ker_TwoBodySingleSpecies_ntr_%i.json" %(len(energies))
try:
model_2b = models.TwoBodySingleSpeciesModel.from_json(hypotetical_model_name)
self.rep_sig = model_2b.rep_sig
self.gp_2b = model_2b.gp
if self.rep_sig:
self.rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
energies -= self.rep_energies
print("Loaded 2-body model to bootstart training")
except:
if self.rep_sig:
self.rep_sig = utility.find_repulstion_sigma(glob_confs)
self.rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
energies -= self.rep_energies
self.gp_2b.fit_energy(glob_confs, energies, ncores=ncores)
two_body_energies = self.gp_2b.predict_energy(
glob_confs, ncores=ncores)
self.gp_3b.fit_energy(glob_confs, energies -
two_body_energies, ncores=ncores)
def fit_force_and_energy(self, confs, forces, glob_confs, energies, ncores=1):
""" Fit the GP to a set of training energies using a 2- and
3-body single species force-force, energy-energy, and energy-forces kernel
functions. The 2-body Gaussian process is first fitted, then the 3-body GP
is fitted to the difference between the training energies (and forces) and
the 2-body predictions of energies (and forces) on the training configurations.
Args:
confs (list): List of M x 5 arrays containing coordinates and
atomic numbers of atoms within a cutoff from the central one
forces (array) : Array containing the vector forces on
the central atoms of the training configurations
glob_confs (list of lists): List of configurations arranged so that
grouped configurations belong to the same snapshot
energies (array) : Array containing the total energy of each snapshot
ncores (int): number of CPUs to use for the gram matrix evaluation
"""
hypotetical_model_name = "models/MODEL_ker_TwoBodySingleSpecies_ntr_%i.json" %(len(energies)+len(forces))
try:
model_2b = models.TwoBodySingleSpeciesModel.from_json(hypotetical_model_name)
self.rep_sig = model_2b.rep_sig
self.gp_2b = model_2b.gp
if self.rep_sig:
self.rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
energies -= self.rep_energies
self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
forces -= self.rep_forces
print("Loaded 2-body model to bootstart training")
except:
if self.rep_sig:
self.rep_sig = utility.find_repulstion_sigma(confs)
self.rep_energies = utility.get_repulsive_energies(
glob_confs, self.rep_sig)
energies -= self.rep_energies
self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
forces -= self.rep_forces
self.gp_2b.fit_force_and_energy(
confs, forces, glob_confs, energies, ncores=ncores)
two_body_forces = self.gp_2b.predict(confs, ncores=ncores)
two_body_energies = self.gp_2b.predict_energy(
glob_confs, ncores=ncores)
self.gp_3b.fit_force_and_energy(
confs, forces - two_body_forces, glob_confs, energies - two_body_energies, ncores=ncores)
def predict(self, confs, return_std=False, ncores=1):
    """ Predict the forces acting on the central atoms of confs using the
    2- and 3-body GPs. The total force is the sum of the two predictions.

    Args:
        confs (list): List of M x 5 arrays containing coordinates and
            atomic numbers of atoms within a cutoff from the central one
        return_std (bool): if True, returns the standard deviation
            associated to predictions according to the GP framework
        ncores (int): number of CPUs to use for the prediction

    Returns:
        forces (array): array of force vectors predicted by the GPs
        forces_errors (array): errors associated to the force predictions,
            returned only if return_std is True
    """
    if return_std:
        if self.rep_sig:
            rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
            # pass ncores here too, matching the other branch
            force_2b, std_2b = self.gp_2b.predict(
                confs, return_std, ncores=ncores)
            force_2b += rep_forces
        else:
            force_2b, std_2b = self.gp_2b.predict(
                confs, return_std, ncores=ncores)
        # BUG FIX: the 3-body contribution must come from gp_3b (was gp_2b)
        force_3b, std_3b = self.gp_3b.predict(
            confs, return_std, ncores=ncores)
        return force_2b + force_3b, std_2b + std_3b
    else:
        if self.rep_sig:
            rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
            return self.gp_2b.predict(confs, return_std, ncores=ncores) + rep_forces + \
                self.gp_3b.predict(confs, return_std, ncores=ncores)
        else:
            return self.gp_2b.predict(confs, return_std, ncores=ncores) + \
                self.gp_3b.predict(confs, return_std, ncores=ncores)
def predict_energy(self, glob_confs, return_std=False, ncores=1):
    """ Predict the global energies of the snapshots in glob_confs using the
    2- and 3-body GPs. The total energy is the sum of the two predictions.

    Args:
        glob_confs (list of lists): List of configurations arranged so that
            grouped configurations belong to the same snapshot
        return_std (bool): if True, returns the standard deviation
            associated to predictions according to the GP framework
        ncores (int): number of CPUs to use for the prediction

    Returns:
        energies (array): Array containing the total energy of each snapshot
        energies_errors (array): errors associated to the energies predictions,
            returned only if return_std is True
    """
    if return_std:
        if self.rep_sig:
            rep_energies = utility.get_repulsive_energies(
                glob_confs, self.rep_sig)
            # BUG FIX: result was bound to `force_2b` while `energy_2b`
            # was incremented below, raising NameError
            energy_2b, std_2b = self.gp_2b.predict_energy(
                glob_confs, return_std, ncores=ncores)
            energy_2b += rep_energies
        else:
            # BUG FIX: `ncoress` typo raised NameError in this branch
            energy_2b, std_2b = self.gp_2b.predict_energy(
                glob_confs, return_std, ncores=ncores)
        # BUG FIX: the 3-body contribution must come from gp_3b (was gp_2b)
        energy_3b, std_3b = self.gp_3b.predict_energy(
            glob_confs, return_std, ncores=ncores)
        return energy_2b + energy_3b, std_2b + std_3b
    else:
        if self.rep_sig:
            rep_energies = utility.get_repulsive_energies(
                glob_confs, self.rep_sig)
            return self.gp_2b.predict_energy(glob_confs, return_std, ncores=ncores) + rep_energies + \
                self.gp_3b.predict_energy(
                    glob_confs, return_std, ncores=ncores)
        else:
            return self.gp_2b.predict_energy(glob_confs, return_std, ncores=ncores) + \
                self.gp_3b.predict_energy(
                    glob_confs, return_std, ncores=ncores)
def build_grid(self, start, num_2b, num_3b, ncores=1):
    """ Build the mapped 2- and 3-body potentials.
    Calculates the energy predicted by the GP for two and three atoms at all possible combination
    of num distances ranging from start to r_cut. The energy for the 3-body mapped grid is
    calculated only for ``valid`` triplets of atoms, i.e. sets of three distances which
    form a triangle (this is checked via the triangle inequality).
    The grid building exploits all the permutation invariances to reduce the number of energy
    calculations needed to fill the grid.
    The computed 2-body energies are stored in an array of values, and a 1D spline interpolation is created.
    The computed 3-body energies are stored in a 3D cube of values, and a 3D spline interpolation is
    created.
    The total force or local energy can then be calculated for any atom by summing the pairwise and
    triplet contributions of every valid couple and triplet of atoms of which one is always the central one.
    The prediction is done by the ``calculator`` module, which is built to work within
    the ase python package.

    Args:
        start (float): smallest interatomic distance for which the energy is predicted
            by the GP and stored in the 3-body mapped potential
        num_2b (int): number of points to use in the grid of the 2-body mapped potential
        num_3b (int): number of points to use to generate the list of distances used to
            generate the triplets of atoms for the 3-body mapped potential
        ncores (int): number of CPUs to use to calculate the energy predictions
    """
    # --- 2-body mapping: one pair of atoms at increasing distances ---
    dists_2b = np.linspace(start, self.r_cut, num_2b)
    confs = np.zeros((num_2b, 1, 5))
    confs[:, 0, 0] = dists_2b
    # single-species model: both atoms of the pair carry self.element
    confs[:, 0, 3], confs[:, 0, 4] = self.element, self.element
    grid_data = self.gp_2b.predict_energy(
        confs, ncores=ncores, mapping=True)
    if self.rep_sig:
        # add back the repulsive baseline subtracted before fitting
        grid_data += utility.get_repulsive_energies(
            confs, self.rep_sig, mapping=True)
    grid_2b = interpolation.Spline1D(dists_2b, grid_data)
    # Mapping 3 body part
    dists_3b = np.linspace(start, self.r_cut, num_3b)
    # generate_triplets returns only triplets with d_ij >= d_jk >= d_ki
    # satisfying the triangle inequality; the rest is filled by symmetry below
    inds, r_ij_x, r_ki_x, r_ki_y = self.generate_triplets(dists_3b)
    confs = np.zeros((len(r_ij_x), 2, 5))
    confs[:, 0, 0] = r_ij_x  # Element on the x axis
    confs[:, 1, 0] = r_ki_x  # Reshape into confs shape: this is x2
    confs[:, 1, 1] = r_ki_y  # Reshape into confs shape: this is y2
    # Permutations of elements
    confs[:, :, 3] = self.element  # Central element is always element 1
    # Element on the x axis is always element 2
    confs[:, 0, 4] = self.element
    # Element on the xy plane is always element 3
    confs[:, 1, 4] = self.element
    grid_3b = np.zeros((num_3b, num_3b, num_3b))
    grid_3b[inds] = self.gp_3b.predict_energy(
        confs, ncores=ncores, mapping=True).flatten()
    # propagate each computed entry to all 6 index permutations of (i, j, k)
    for ind_i in range(num_3b):
        for ind_j in range(ind_i + 1):
            for ind_k in range(ind_j + 1):
                grid_3b[ind_i, ind_k, ind_j] = grid_3b[ind_i, ind_j, ind_k]
                grid_3b[ind_j, ind_i, ind_k] = grid_3b[ind_i, ind_j, ind_k]
                grid_3b[ind_j, ind_k, ind_i] = grid_3b[ind_i, ind_j, ind_k]
                grid_3b[ind_k, ind_i, ind_j] = grid_3b[ind_i, ind_j, ind_k]
                grid_3b[ind_k, ind_j, ind_i] = grid_3b[ind_i, ind_j, ind_k]
    grid_3b = interpolation.Spline3D(dists_3b, dists_3b, dists_3b, grid_3b)
    self.grid_2b = grid_2b
    self.grid_3b = grid_3b
    self.grid_num_2b = num_2b
    self.grid_num_3b = num_3b
    self.grid_start = start
def save(self, path):
    """ Serialise the combined model to disk.

    Writes a .json file with the model parameters plus the file names of
    the two GPs (.npy) and, when present, of the mapped grids (.npz),
    which are stored as separate files in the same directory.

    Args:
        path (str): path to the directory where the files are written
    """
    path = Path(path) if not isinstance(path, Path) else path

    ### SAVE THE 2B MODEL ###
    meta_2b = {
        'kernel': self.gp_2b.kernel.kernel_name,
        'n_train': self.gp_2b.n_train,
        'sigma': self.gp_2b.kernel.theta[0],
        'theta': self.gp_2b.kernel.theta[1],
        'noise': self.gp_2b.noise
    }
    meta_3b = {
        'kernel': self.gp_3b.kernel.kernel_name,
        'n_train': self.gp_3b.n_train,
        'sigma': self.gp_3b.kernel.theta[0],
        'theta': self.gp_3b.kernel.theta[1],
        'noise': self.gp_3b.noise
    }
    params = {
        'model': self.__class__.__name__,
        'element': self.element,
        'r_cut': self.r_cut,
        'rep_sig': self.rep_sig,
        'fitted': self.gp_2b.fitted,
        'gp_2b': meta_2b,
        'gp_3b': meta_3b,
        'grid_2b': {
            'r_min': self.grid_start,
            'r_num': self.grid_num_2b,
            'filename': {}
        } if self.grid_2b else {},
        'grid_3b': {
            'r_min': self.grid_start,
            'r_num': self.grid_num_3b,
            'filename': {}
        } if self.grid_3b else {}
    }

    gp_filename_2b = f"GP_ker_{meta_2b['kernel']}_ntr_{meta_2b['n_train']}.npy"
    params['gp_2b']['filename'] = gp_filename_2b
    self.gp_2b.save(path / gp_filename_2b)
    if self.grid_2b:
        grid_filename_2b = f"GRID_ker_{meta_2b['kernel']}_ntr_{meta_2b['n_train']}.npz"
        print("Saved 2-body grid under name %s" % (grid_filename_2b))
        params['grid_2b']['filename'] = grid_filename_2b
        self.grid_2b.save(path / grid_filename_2b)

    ### SAVE THE 3B MODEL ###
    gp_filename_3b = f"GP_ker_{meta_3b['kernel']}_ntr_{meta_3b['n_train']}.npy"
    params['gp_3b']['filename'] = gp_filename_3b
    self.gp_3b.save(path / gp_filename_3b)
    if self.grid_3b:
        grid_filename_3b = f"GRID_ker_{meta_3b['kernel']}_ntr_{meta_3b['n_train']}.npz"
        print("Saved 3-body grid under name %s" % (grid_filename_3b))
        params['grid_3b']['filename'] = grid_filename_3b
        self.grid_3b.save(path / grid_filename_3b)

    json_name = f"MODEL_combined_ntr_{meta_2b['n_train']}.json"
    with open(path / json_name, 'w') as fp:
        json.dump(params, fp, indent=4, cls=NpEncoder)
    print("Saved model with name: " + json_name)
@classmethod
def from_json(cls, path):
    """ Load the model.

    Loads the model parameters, the associated GPs and the mapped
    potentials, if available, from the files referenced by the .json file.

    Args:
        path (str): path to the .json model file

    Return:
        model (obj): the model object
    """
    if not isinstance(path, Path):
        path = Path(path)
    directory = path.parent  # removed unused `prefix = path.stem`
    with open(path) as fp:
        params = json.load(fp)
    model = cls(params['element'],
                params['r_cut'],
                params['gp_2b']['sigma'],
                params['gp_3b']['sigma'],
                params['gp_2b']['theta'],
                params['gp_3b']['theta'],
                params['gp_2b']['noise'],
                params['rep_sig'])
    gp_filename_2b = params['gp_2b']['filename']
    gp_filename_3b = params['gp_3b']['filename']
    try:
        model.gp_2b.load(directory / gp_filename_2b)
    except Exception:  # narrowed from a bare except
        warnings.warn("The 2-body GP file is missing")
    try:
        model.gp_3b.load(directory / gp_filename_3b)
    except Exception:  # narrowed from a bare except
        warnings.warn("The 3-body GP file is missing")
    if params['grid_2b']:
        grid_filename_2b = params['grid_2b']['filename']
        model.grid_2b = interpolation.Spline1D.load(
            directory / grid_filename_2b)
        grid_filename_3b = params['grid_3b']['filename']
        model.grid_3b = interpolation.Spline3D.load(
            directory / grid_filename_3b)
        model.grid_start = params['grid_2b']['r_min']
        model.grid_num_2b = params['grid_2b']['r_num']
        model.grid_num_3b = params['grid_3b']['r_num']
    return model
def save_gp(self, filename_2b, filename_3b):
    """ Persist the two underlying GP objects to file (obsolete).

    Deprecated: use the model-level save/load functions instead.
    """
    warnings.warn('use save and load function', DeprecationWarning)
    for gp_obj, target in ((self.gp_2b, filename_2b), (self.gp_3b, filename_3b)):
        gp_obj.save(target)
def load_gp(self, filename_2b, filename_3b):
    """ Restore the two underlying GP objects from file (obsolete).

    Deprecated: use the model-level save/load functions instead.
    """
    warnings.warn('use save and load function', DeprecationWarning)
    for gp_obj, source in ((self.gp_2b, filename_2b), (self.gp_3b, filename_3b)):
        gp_obj.load(source)
@staticmethod
def generate_triplets(dists):
""" Generate a list of all valid triplets using perutational invariance.
Calculates the energy predicted by the GP for three atoms at all possible combination
of num distances ranging from start to r_cut. The energy is calculated only for ``valid``
triplets of atoms, i.e. sets of three distances which form a triangle (this is checked via
the triangle inequality). The grid building exploits all the permutation invariances to
reduce the number of energy calculations needed to fill the grid.
The computed energies are stored in a 3D cube of values, and a 3D spline interpolation is
created, which can be used to predict the energy and, through its analytic derivative,
the force associated to any triplet of atoms.
The total force or local energy can then be calculated for any atom by summing the
triplet contributions of every valid triplet of atoms of which one is always the central one.
The prediction is done by the ``calculator`` module which is built to work within
the ase python package.
Args:
dists (array): array of floats containing all of the distances which can be used to
build triplets of atoms. This array is created by calling np.linspace(start, r_cut, num)
Returns:
inds (array): array of booleans indicating which triplets (three distance values) need to be
evaluated to fill the 3D grid of energy values.
r_ij_x (array): array containing the x coordinate of the second atom j w.r.t. the central atom i
r_ki_x (array): array containing the x coordinate of the third atom k w.r.t. the central atom i
r_ki_y (array): array containing the y coordinate of the third atom k w.r.t. the central atom i
"""
d_ij, d_jk, d_ki = np.meshgrid(
dists, dists, dists, indexing='ij', sparse=False, copy=True)
# Valid triangles according to triangle inequality
inds = np.logical_and(
d_ij <= d_jk + d_ki, np.logical_and(d_jk <= d_ki + d_ij, d_ki <= d_ij + d_jk))
# Utilizing permutation invariance
inds = np.logical_and(np.logical_and(d_ij >= d_jk, d_jk >= d_ki), inds)
# Element on the x axis
r_ij_x = d_ij[inds]
# Element on the xy plane
r_ki_x = (d_ij[inds] ** 2 - d_jk[inds] ** 2 +
d_ki[inds] ** 2) / (2 * d_ij[inds])
# using abs to avoid numerical error near to 0
r_ki_y = np.sqrt(np.abs(d_ki[inds] ** 2 - r_ki_x ** 2))
return inds, r_ij_x, r_ki_x, r_ki_y
class CombinedManySpeciesModel(Model):
    """ 2- and 3-body many species model class

    Class managing the Gaussian processes and their mapped counterparts.

    Args:
        elements (list): List containing the atomic numbers in increasing order
        r_cut (float): The cutoff radius used to carve the atomic environments
        sigma_2b (float): Lengthscale parameter of the 2-body Gaussian process
        sigma_3b (float): Lengthscale parameter of the 3-body Gaussian process
        theta_2b (float): decay ratio of the cutoff function in the 2-body Gaussian Process
        theta_3b (float): decay ratio of the cutoff function in the 3-body Gaussian Process
        noise (float): noise value associated with the training output data
        rep_sig: parameter of the repulsive baseline correction; a truthy
            value enables the correction (default 1)

    Attributes:
        gp_2b (method): The 2-body many species Gaussian Process
        gp_3b (method): The 3-body many species Gaussian Process
        grid_2b (dict): Maps each unordered pair of elements to its tabulated
            2-body potential (e.g. pairs 0-0, 0-1, 1-1 for two species)
        grid_3b (dict): Maps each unordered triplet of elements to its tabulated
            3-body potential (e.g. triplets 0-0-0, 0-0-1, 0-1-1, 1-1-1)
        grid_start (float): Minimum atomic distance for which the grids are defined (cannot be 0.0)
        grid_num_2b (int): number of points to use in the grid of the 2-body mapped potential
        grid_num_3b (int): number of points to use to generate the list of distances used to
            generate the triplets of atoms for the 3-body mapped potential
    """

    def __init__(self, elements, r_cut, sigma_2b, sigma_3b, theta_2b, theta_3b, noise, rep_sig=1, **kwargs):
        super().__init__()
        # elements are stored sorted so grid dictionary keys are canonical
        self.elements = list(np.sort(elements))
        self.r_cut = r_cut
        self.rep_sig = rep_sig
        kernel_2b = kernels.TwoBodyManySpeciesKernel(
            theta=[sigma_2b, theta_2b, r_cut])
        self.gp_2b = gp.GaussianProcess(
            kernel=kernel_2b, noise=noise, **kwargs)
        kernel_3b = kernels.ThreeBodyManySpeciesKernel(
            theta=[sigma_3b, theta_3b, r_cut])
        self.gp_3b = gp.GaussianProcess(
            kernel=kernel_3b, noise=noise, **kwargs)
        # grids are populated by build_grid(); empty / None until then
        self.grid_2b, self.grid_3b, self.grid_start, self.grid_num_2b, self.grid_num_3b = {
        }, {}, None, None, None
def fit(self, confs, forces, ncores=1):
    """ Fit the GP to a set of training forces using 2- and 3-body
    many species force-force kernels.

    The 2-body GP is fitted first (reusing a previously saved 2-body model
    of matching training-set size under ``models/`` when one exists); the
    3-body GP is then fitted to the residual between the training forces
    and the 2-body force predictions.

    Args:
        confs (list): List of M x 5 arrays containing coordinates and
            atomic numbers of atoms within a cutoff from the central one
        forces (array): Array containing the vector forces on
            the central atoms of the training configurations
        ncores (int): number of CPUs to use for the gram matrix evaluation
    """
    candidate_model_name = "models/MODEL_ker_TwoBodyManySpecies_ntr_%i.json" % (len(forces))
    try:
        model_2b = models.TwoBodyManySpeciesModel.from_json(candidate_model_name)
        self.rep_sig = model_2b.rep_sig
        self.gp_2b = model_2b.gp
        if self.rep_sig:
            # BUG FIX: use the rep_sig loaded from the saved model instead of
            # recomputing it here, matching the loaded branch of the other
            # fit_* methods (recomputing defeated the purpose of loading).
            self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
            forces -= self.rep_forces
        print("Loaded 2-body model to bootstart training")
    except Exception:  # narrowed from a bare except
        if self.rep_sig:
            self.rep_sig = utility.find_repulstion_sigma(confs)
            self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
            forces -= self.rep_forces
        self.gp_2b.fit(confs, forces, ncores=ncores)
    two_body_forces = self.gp_2b.predict(confs, ncores=ncores)
    # the 3-body GP learns the residual left by the 2-body model
    self.gp_3b.fit(confs, forces - two_body_forces, ncores=ncores)
def fit_energy(self, glob_confs, energies, ncores=1):
    """ Fit the GP to a set of training energies using 2- and 3-body
    many species energy-energy kernels.

    The 2-body GP is fitted first (reusing a previously saved 2-body model
    of matching training-set size under ``models/`` when one exists); the
    3-body GP is then fitted to the residual between the training energies
    and the 2-body energy predictions.

    Args:
        glob_confs (list of lists): List of configurations arranged so that
            grouped configurations belong to the same snapshot
        energies (array): Array containing the total energy of each snapshot
        ncores (int): number of CPUs to use for the gram matrix evaluation
    """
    candidate_model_name = "models/MODEL_ker_TwoBodyManySpecies_ntr_%i.json" % (len(energies))
    try:
        model_2b = models.TwoBodyManySpeciesModel.from_json(candidate_model_name)
        self.rep_sig = model_2b.rep_sig
        self.gp_2b = model_2b.gp
        if self.rep_sig:
            self.rep_energies = utility.get_repulsive_energies(
                glob_confs, self.rep_sig)
            energies -= self.rep_energies
        print("Loaded 2-body model to bootstart training")
    except Exception:  # narrowed from a bare except
        if self.rep_sig:
            self.rep_sig = utility.find_repulstion_sigma(glob_confs)
            self.rep_energies = utility.get_repulsive_energies(
                glob_confs, self.rep_sig)
            energies -= self.rep_energies
        # BUG FIX: ncores was hard-coded to 1 here, ignoring the caller's value
        self.gp_2b.fit_energy(glob_confs, energies, ncores=ncores)
    two_body_energies = self.gp_2b.predict_energy(
        glob_confs, ncores=ncores)
    # the 3-body GP learns the residual left by the 2-body model
    self.gp_3b.fit_energy(glob_confs, energies -
                          two_body_energies, ncores=ncores)
def fit_force_and_energy(self, confs, forces, glob_confs, energies, ncores=1):
    """ Fit the GP to a set of training forces and energies using 2- and
    3-body many species kernels.

    The 2-body GP is fitted first (reusing a previously saved 2-body model
    of matching training-set size under ``models/`` when one exists); the
    3-body GP is then fitted to the residual between the training data and
    the 2-body predictions.

    Args:
        confs (list): List of M x 5 arrays containing coordinates and
            atomic numbers of atoms within a cutoff from the central one
        forces (array): Array containing the vector forces on
            the central atoms of the training configurations
        glob_confs (list of lists): List of configurations arranged so that
            grouped configurations belong to the same snapshot
        energies (array): Array containing the total energy of each snapshot
        ncores (int): number of CPUs to use for the gram matrix evaluation
    """
    candidate_model_name = "models/MODEL_ker_TwoBodyManySpecies_ntr_%i.json" % (
        len(forces) + len(energies))
    try:
        model_2b = models.TwoBodyManySpeciesModel.from_json(candidate_model_name)
        self.rep_sig = model_2b.rep_sig
        self.gp_2b = model_2b.gp
        if self.rep_sig:
            # subtract the repulsive baseline before fitting the GPs
            self.rep_energies = utility.get_repulsive_energies(
                glob_confs, self.rep_sig)
            energies -= self.rep_energies
            self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
            forces -= self.rep_forces
        print("Loaded 2-body model to bootstart training")
    except Exception:  # narrowed from a bare except
        if self.rep_sig:
            self.rep_sig = utility.find_repulstion_sigma(confs)
            self.rep_energies = utility.get_repulsive_energies(
                glob_confs, self.rep_sig)
            energies -= self.rep_energies
            self.rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
            forces -= self.rep_forces
        self.gp_2b.fit_force_and_energy(
            confs, forces, glob_confs, energies, ncores=ncores)
    two_body_forces = self.gp_2b.predict(confs, ncores=ncores)
    two_body_energies = self.gp_2b.predict_energy(
        glob_confs, ncores=ncores)
    # the 3-body GP learns the residual left by the 2-body model
    self.gp_3b.fit_force_and_energy(
        confs, forces - two_body_forces, glob_confs,
        energies - two_body_energies, ncores=ncores)
def predict(self, confs, return_std=False, ncores=1):
    """ Predict the forces acting on the central atoms of confs using the
    2- and 3-body GPs. The total force is the sum of the two predictions.

    Args:
        confs (list): List of M x 5 arrays containing coordinates and
            atomic numbers of atoms within a cutoff from the central one
        return_std (bool): if True, returns the standard deviation
            associated to predictions according to the GP framework
        ncores (int): number of CPUs to use for the prediction

    Returns:
        forces (array): array of force vectors predicted by the GPs
        forces_errors (array): errors associated to the force predictions,
            returned only if return_std is True
    """
    if return_std:
        if self.rep_sig:
            rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
            force_2b, std_2b = self.gp_2b.predict(
                confs, return_std, ncores=ncores)
            force_2b += rep_forces
        else:
            force_2b, std_2b = self.gp_2b.predict(
                confs, return_std, ncores=ncores)
        # BUG FIX: the 3-body contribution must come from gp_3b (was gp_2b)
        force_3b, std_3b = self.gp_3b.predict(
            confs, return_std, ncores=ncores)
        return force_2b + force_3b, std_2b + std_3b
    else:
        if self.rep_sig:
            rep_forces = utility.get_repulsive_forces(confs, self.rep_sig)
            return self.gp_2b.predict(confs, return_std, ncores=ncores) + rep_forces + \
                self.gp_3b.predict(confs, return_std, ncores=ncores)
        else:
            return self.gp_2b.predict(confs, return_std, ncores=ncores) + \
                self.gp_3b.predict(confs, return_std, ncores=ncores)
def predict_energy(self, glob_confs, return_std=False, ncores=1):
    """ Predict the global energies of the snapshots in glob_confs using the
    2- and 3-body GPs. The total energy is the sum of the two predictions.

    Args:
        glob_confs (list of lists): List of configurations arranged so that
            grouped configurations belong to the same snapshot
        return_std (bool): if True, returns the standard deviation
            associated to predictions according to the GP framework
        ncores (int): number of CPUs to use for the prediction

    Returns:
        energies (array): Array containing the total energy of each snapshot
        energies_errors (array): errors associated to the energies predictions,
            returned only if return_std is True
    """
    if return_std:
        if self.rep_sig:
            rep_energies = utility.get_repulsive_energies(
                glob_confs, self.rep_sig)
            # BUG FIX: result was bound to `force_2b` while `energy_2b`
            # was incremented below, raising NameError
            energy_2b, std_2b = self.gp_2b.predict_energy(
                glob_confs, return_std, ncores=ncores)
            energy_2b += rep_energies
        else:
            energy_2b, std_2b = self.gp_2b.predict_energy(
                glob_confs, return_std, ncores=ncores)
        # BUG FIX: the 3-body contribution must come from gp_3b (was gp_2b)
        energy_3b, std_3b = self.gp_3b.predict_energy(
            glob_confs, return_std, ncores=ncores)
        return energy_2b + energy_3b, std_2b + std_3b
    else:
        if self.rep_sig:
            rep_energies = utility.get_repulsive_energies(
                glob_confs, self.rep_sig)
            return self.gp_2b.predict_energy(glob_confs, return_std, ncores=ncores) + rep_energies + \
                self.gp_3b.predict_energy(
                    glob_confs, return_std, ncores=ncores)
        else:
            return self.gp_2b.predict_energy(glob_confs, return_std, ncores=ncores) + \
                self.gp_3b.predict_energy(
                    glob_confs, return_std, ncores=ncores)
def build_grid(self, start, num_2b, num_3b, ncores=1):
    """ Create the per-element-combination 2- and 3-body mapped potentials.

    Builds one 2-body energy grid for each unordered pair of elements and
    one 3-body energy grid for each unordered triplet of elements (the
    latter via ``build_grid_3b``), storing them in the ``grid_2b`` and
    ``grid_3b`` dictionaries keyed by the element tuples.

    Args:
        start (float): smallest interatomic distance for which the energy is
            predicted by the GP and stored in the mapped potentials
        num_2b (int): number of points to use in the grid of the 2-body
            mapped potentials
        num_3b (int): number of points to use to generate the list of
            distances used to generate the triplets of atoms for the 3-body
            mapped potentials
        ncores (int): number of CPUs to use to calculate the energy predictions
    """
    self.grid_start = start
    self.grid_num_2b = num_2b
    # BUG FIX: was `num_2b`, recording the wrong 3-body grid size
    self.grid_num_3b = num_3b

    perm_list_2b = list(combinations_with_replacement(self.elements, 2))
    perm_list_3b = list(combinations_with_replacement(self.elements, 3))

    dists_2b = np.linspace(start, self.r_cut, num_2b)
    confs_2b = np.zeros((num_2b, 1, 5))
    confs_2b[:, 0, 0] = dists_2b
    for pair in perm_list_2b:  # predict, then save, each pair's grid
        confs_2b[:, 0, 3], confs_2b[:, 0,
                                    4] = pair[0], pair[1]
        mapped_energies = self.gp_2b.predict_energy(
            list(confs_2b), ncores=ncores, mapping=True)
        if self.rep_sig:
            # add back the repulsive baseline subtracted before fitting
            mapped_energies += utility.get_repulsive_energies(
                confs_2b, self.rep_sig, mapping=True)
        self.grid_2b[pair] = interpolation.Spline1D(dists_2b, mapped_energies)

    dists_3b = np.linspace(start, self.r_cut, num_3b)
    for trip in perm_list_3b:
        self.grid_3b[trip] = self.build_grid_3b(
            dists_3b, trip[0], trip[1], trip[2], ncores=ncores)
def build_grid_3b(self, dists, element_k, element_i, element_j, ncores=1):
    """ Tabulate the 3-body energy for one combination of element types.

    Evaluates the GP energy of a triplet (element_i central, element_j on
    the x axis, element_k in the xy plane) for every valid combination of
    the distances in ``dists``. Valid means the three distances satisfy
    the triangle inequality, as selected by ``generate_triplets_all``.
    The resulting cube of energies is wrapped in a 3D spline whose
    analytic derivative yields the triplet force; it is consumed by the
    ``calculator`` module built to work within the ase python package.

    Args:
        dists (array): distances used to build the triplets, typically
            created by calling np.linspace(start, r_cut, num)
        element_k (int): atomic number of the third atom k in a triplet
        element_i (int): atomic number of the central atom i in a triplet
        element_j (int): atomic number of the second atom j in a triplet
        ncores (int): number of CPUs to use when computing the triplet local energies

    Returns:
        spline3D (obj): a 3D spline object that can be used to predict the
            energy and the force associated to the central atom of a triplet.
    """
    n_dist = len(dists)
    valid, x_j, x_k, y_k = self.generate_triplets_all(dists)

    triplet_confs = np.zeros((len(x_j), 2, 5))
    triplet_confs[:, 0, 0] = x_j  # second atom sits on the x axis
    triplet_confs[:, 1, 0] = x_k  # third atom, x component
    triplet_confs[:, 1, 1] = y_k  # third atom, y component
    triplet_confs[:, :, 3] = element_i  # central element
    triplet_confs[:, 0, 4] = element_j  # element on the x axis
    triplet_confs[:, 1, 4] = element_k  # element in the xy plane

    energy_cube = np.zeros((n_dist, n_dist, n_dist))
    energy_cube[valid] = self.gp_3b.predict_energy(
        triplet_confs, ncores=ncores, mapping=True).flatten()
    return interpolation.Spline3D(dists, dists, dists, energy_cube)
def save(self, path):
    """ Serialise the combined many-species model to disk.

    Writes a .json file with the model parameters plus the file names of
    the two GPs (.npy) and of every per-element-combination mapped grid
    (.npz), which are stored as separate files in the same directory.

    Args:
        path (str): path to the directory where the files are written
    """
    path = Path(path) if not isinstance(path, Path) else path

    ### SAVE THE MODEL ###
    meta_2b = {
        'kernel': self.gp_2b.kernel.kernel_name,
        'n_train': self.gp_2b.n_train,
        'sigma': self.gp_2b.kernel.theta[0],
        'theta': self.gp_2b.kernel.theta[1],
        'noise': self.gp_2b.noise
    }
    meta_3b = {
        'kernel': self.gp_3b.kernel.kernel_name,
        'n_train': self.gp_3b.n_train,
        'sigma': self.gp_3b.kernel.theta[0],
        'theta': self.gp_3b.kernel.theta[1],
        'noise': self.gp_3b.noise
    }
    params = {
        'model': self.__class__.__name__,
        'elements': self.elements,
        'r_cut': self.r_cut,
        'rep_sig': self.rep_sig,
        'fitted': self.gp_2b.fitted,
        'gp_2b': meta_2b,
        'gp_3b': meta_3b,
        'grid_2b': {
            'r_min': self.grid_start,
            'r_num': self.grid_num_2b,
            'filename': {}
        } if self.grid_2b else {},
        'grid_3b': {
            'r_min': self.grid_start,
            'r_num': self.grid_num_3b,
            'filename': {}
        } if self.grid_3b else {}
    }

    gp_filename_2b = f"GP_ker_{meta_2b['kernel']}_ntr_{meta_2b['n_train']}.npy"
    params['gp_2b']['filename'] = gp_filename_2b
    self.gp_2b.save(path / gp_filename_2b)
    for pair, spline in self.grid_2b.items():
        key = '_'.join(str(element) for element in pair)
        grid_filename_2b = f"GRID_{key}_ker_{meta_2b['kernel']}_ntr_{meta_2b['n_train']}.npz"
        print("Saved 2-body grid under name %s" % (grid_filename_2b))
        params['grid_2b']['filename'][key] = grid_filename_2b
        spline.save(path / grid_filename_2b)

    ### SAVE THE 3B MODEL ###
    gp_filename_3b = f"GP_ker_{meta_3b['kernel']}_ntr_{meta_3b['n_train']}.npy"
    params['gp_3b']['filename'] = gp_filename_3b
    self.gp_3b.save(path / gp_filename_3b)
    for trip, spline in self.grid_3b.items():
        key = '_'.join(str(element) for element in trip)
        grid_filename_3b = f"GRID_{key}_ker_{meta_3b['kernel']}_ntr_{meta_3b['n_train']}.npz"
        print("Saved 3-body grid under name %s" % (grid_filename_3b))
        params['grid_3b']['filename'][key] = grid_filename_3b
        spline.save(path / grid_filename_3b)

    json_name = f"MODEL_combined_ntr_{meta_2b['n_train']}.json"
    with open(path / json_name, 'w') as fp:
        json.dump(params, fp, indent=4, cls=NpEncoder)
    print("Saved model with name: " + json_name)
@classmethod
def from_json(cls, path):
    """ Load the model.

    Loads the model parameters, the associated GPs and the mapped
    potentials, if available, from the files referenced by the .json file.

    Args:
        path (str): path to the .json model file

    Return:
        model (obj): the model object
    """
    if not isinstance(path, Path):
        path = Path(path)
    directory = path.parent  # removed unused `prefix = path.stem`
    with open(path) as fp:
        params = json.load(fp)
    model = cls(params['elements'],
                params['r_cut'],
                params['gp_2b']['sigma'],
                params['gp_3b']['sigma'],
                params['gp_2b']['theta'],
                params['gp_3b']['theta'],
                params['gp_2b']['noise'],
                params['rep_sig'])
    gp_filename_2b = params['gp_2b']['filename']
    gp_filename_3b = params['gp_3b']['filename']
    try:
        model.gp_2b.load(directory / gp_filename_2b)
    except Exception:  # narrowed from a bare except
        warnings.warn("The 2-body GP file is missing")
    try:
        model.gp_3b.load(directory / gp_filename_3b)
    except Exception:  # narrowed from a bare except
        warnings.warn("The 3-body GP file is missing")
    if params['grid_2b']:
        # grid filenames are keyed by '_'-joined element tuples
        for key, grid_filename_2b in params['grid_2b']['filename'].items():
            k = tuple(int(ind) for ind in key.split('_'))
            model.grid_2b[k] = interpolation.Spline1D.load(
                directory / grid_filename_2b)
        for key, grid_filename_3b in params['grid_3b']['filename'].items():
            k = tuple(int(ind) for ind in key.split('_'))
            model.grid_3b[k] = interpolation.Spline3D.load(
                directory / grid_filename_3b)
        model.grid_start = params['grid_2b']['r_min']
        model.grid_num_2b = params['grid_2b']['r_num']
        model.grid_num_3b = params['grid_3b']['r_num']
    return model
def save_gp(self, filename_2b, filename_3b):
    """ Saves the GP objects, now obsolete
    """
    # consistency fix: emit the same DeprecationWarning as the
    # single-species model's save_gp
    warnings.warn('use save and load function', DeprecationWarning)
    self.gp_2b.save(filename_2b)
    self.gp_3b.save(filename_3b)
def load_gp(self, filename_2b, filename_3b):
    """ Loads the GP objects, now obsolete
    """
    # consistency fix: emit the same DeprecationWarning as the
    # single-species model's load_gp
    warnings.warn('use save and load function', DeprecationWarning)
    self.gp_2b.load(filename_2b)
    self.gp_3b.load(filename_3b)
@staticmethod
def generate_triplets_all(dists):
""" Generate a list of all valid triplets.
Calculates the energy predicted by the GP for three atoms at all possible combination
of num distances ranging from start to r_cut. The energy is calculated only for ``valid``
triplets of atoms, i.e. sets of three distances which form a triangle (this is checked via
the triangle inequality).
The computed energies are stored in a 3D cube of values, and a 3D spline interpolation is
created, which can be used to predict the energy and, through its analytic derivative,
the force associated to any triplet of atoms.
The total force or local energy can then be calculated for any atom by summing the
triplet contributions of every valid triplet of atoms of which one is always the central one.
The prediction is done by the ``calculator`` module which is built to work within
the ase python package.
Args:
dists (array): array of floats containing all of the distances which can be used to
build triplets of atoms. This array is created by calling np.linspace(start, r_cut, num)
Returns:
inds (array): array of booleans indicating which triplets (three distance values) need to be
evaluated to fill the 3D grid of energy values.
r_ij_x (array): array containing the x coordinate of the second atom j w.r.t. the central atom i
r_ki_x (array): array containing the x coordinate of the third atom k w.r.t. the central atom i
r_ki_y (array): array containing the y coordinate of the third atom k w.r.t. the central atom i
"""
d_ij, d_jk, d_ki = np.meshgrid(
dists, dists, dists, indexing='ij', sparse=False, copy=True)
# Valid triangles according to triangle inequality
inds = np.logical_and(
d_ij <= d_jk + d_ki, np.logical_and(d_jk <= d_ki + d_ij, d_ki <= d_ij + d_jk))
# Element on the x axis
r_ij_x = d_ij[inds]
# Element on the xy plane
r_ki_x = (d_ij[inds] ** 2 - d_jk[inds] ** 2 +
d_ki[inds] ** 2) / (2 * d_ij[inds])
# using abs to avoid numerical error near to 0
r_ki_y = np.sqrt(np.abs(d_ki[inds] ** 2 - r_ki_x ** 2))
return inds, r_ij_x, r_ki_x, r_ki_y
| 49,756 | 44.316029 | 113 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.