keyword stringclasses 7 values | repo_name stringlengths 8 98 | file_path stringlengths 4 244 | file_extension stringclasses 29 values | file_size int64 0 84.1M | line_count int64 0 1.6M | content stringlengths 1 84.1M ⌀ | language stringclasses 14 values |
|---|---|---|---|---|---|---|---|
2D | lherron2/thermomaps-ising | thermomaps-root/tm/core/__init__.py | .py | 0 | 0 | null | Python |
2D | lherron2/thermomaps-ising | thermomaps-root/tm/core/loader.py | .py | 12,578 | 459 | from torch.utils.data import Dataset
import torch
import numpy as np
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Transform:
    """
    Base class for data transforms.

    Subclasses implement ``forward`` (standardize) and ``reverse`` (invert
    the standardization); the base constructor intentionally does nothing.
    """
    def __init__(self, data):
        """
        Initialize a Transform.

        Args:
            data (torch.Tensor): Input data (unused by the base class).
        """
        pass
class NormalTransform(Transform):
    """
    Whitening (z-score) data transform.

    Per-feature mean/std are computed over the batch (first) dimension at
    construction time and reused by forward/reverse.
    """
    def __init__(self, data):
        """
        Initialize a NormalTransform.

        Args:
            data (torch.Tensor): Input data; statistics are taken over dim 0,
                so self.mean/self.std have shape data.shape[1:].
        """
        super().__init__(data)
        self.mean = data.mean(0)
        self.std = data.std(0)
    def forward(self, x):
        """
        Forward transformation: standardizes the input data.

        Args:
            x (torch.Tensor): Input data; 4D (batch, channel, H, W) uses the
                first ``nc`` channels' statistics, anything else falls back to
                the last channel's statistics.

        Returns:
            torch.Tensor: Standardized data.
        """
        try:
            (_, nc, _, _) = x.shape
            return (x - self.mean[:nc, :, :]) / (self.std[:nc, :, :])
        except (ValueError, IndexError):
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and hid real errors. The unpack
            # above raises ValueError for non-4D input; IndexError covers a
            # channel-count mismatch against the stored statistics.
            return (x - self.mean[-1, :, :]) / (self.std[-1, :, :])
    def reverse(self, x):
        """
        Reverse transformation: reverts standardized data to its original scale.

        Args:
            x (torch.Tensor): Standardized data.

        Returns:
            torch.Tensor: Original-scale data.
        """
        try:
            (_, nc, _, _) = x.shape
            return x * (self.std[:nc, :, :]) + self.mean[:nc, :, :]
        except (ValueError, IndexError):
            return x * (self.std[-1, :, :]) + self.mean[-1, :, :]
class MinMaxTransform(Transform):
    """
    Min-Max scaling data transform.
    """
    def __init__(self, data, dim, pos):
        """
        Initialize a MinMaxTransform.

        Args:
            data (torch.Tensor): Input data.
            dim (int): Dimension to standardize (kept for API compatibility;
                not used by the current implementation).
            pos (tuple): Index into the result of ``data.min(0)``/``data.max(0)``.
                NOTE(review): for torch tensors ``min(0)`` returns a
                (values, indices) namedtuple — confirm ``pos`` selects the
                intended field (0 for values).
        """
        # BUG FIX: Transform.__init__ accepts only ``data``; forwarding dim
        # and pos raised "TypeError: __init__() takes 2 positional arguments".
        super().__init__(data)
        self.min_data = data.min(0)[pos]
        self.max_data = data.max(0)[pos]
    def forward(self, x):
        """
        Forward transformation: applies Min-Max scaling to the input data.

        Note the factor of 2 in the denominator: in-range data maps to
        [0, 0.5]; ``reverse`` applies the matching inverse.

        Args:
            x (torch.Tensor): Input data.

        Returns:
            torch.Tensor: Transformed data.
        """
        return (x - self.min_data) / (2 * (self.max_data - self.min_data))
    def reverse(self, x):
        """
        Reverse transformation: reverts Min-Max scaled data to its original scale.

        Args:
            x (torch.Tensor): Transformed data.

        Returns:
            torch.Tensor: Original-scale data.
        """
        return 2 * (self.max_data - self.min_data) * x + self.min_data
class IdentityTransform(Transform):
    """No-op transform: both directions return the input untouched."""
    def __init__(self, *args, **kwargs):
        """Accept and ignore any arguments, deferring to the Transform base."""
        super().__init__(*args, **kwargs)
    def forward(self, x):
        """Return *x* unchanged."""
        return x
    def reverse(self, x):
        """Return *x* unchanged."""
        return x
# Registry mapping a transform-type keyword (as accepted by Loader's
# ``transform_type`` argument) to the Transform subclass implementing it.
TRANSFORMS = {
    "normal": NormalTransform,
    "min_max": MinMaxTransform,
    "identity": IdentityTransform,
}
class Dequantizer:
    """
    Base class for dequantization methods.

    Stores the shared noise ``scale``; subclasses implement ``forward``.
    """
    def __init__(self, scale):
        """
        Initialize a Dequantizer.

        Args:
            scale (float): Dequantization noise scale.
        """
        self.scale = scale
class NormalDequantization(Dequantizer):
    """Dequantize by adding zero-mean Gaussian noise scaled by ``scale``."""
    def __init__(self, scale):
        """
        Initialize a NormalDequantization.

        Args:
            scale (float): Standard-deviation multiplier for the noise.
        """
        super().__init__(scale)
    def forward(self, x):
        """
        Add Gaussian noise of matching shape to *x*.

        Args:
            x (torch.Tensor): Input data.

        Returns:
            torch.Tensor: Dequantized data.
        """
        noise = torch.randn(*x.shape)
        return x + noise * self.scale
class UniformDequantization(Dequantizer):
    """Dequantize by adding uniform [0, 1) noise scaled by ``scale``."""
    def __init__(self, scale):
        """
        Initialize a UniformDequantization.

        Args:
            scale (float): Multiplier for the uniform noise.
        """
        super().__init__(scale)
    def forward(self, x):
        """
        Add uniform noise of matching shape to *x*.

        Args:
            x (torch.Tensor): Input data.

        Returns:
            torch.Tensor: Dequantized data.
        """
        noise = torch.rand(*x.shape)
        return x + noise * self.scale
# Registry of dequantization strategies selectable via Loader's ``dequantize_type``.
DEQUANTIZERS = {"normal": NormalDequantization, "uniform": UniformDequantization}
class Loader(Dataset):
    """
    Dataset loader with optional data transformations and dequantization.

    Holds a 4D sample tensor (batch, channels, H, W) plus per-sample
    temperatures, optionally dequantizes the samples with noise, and fits a
    standardizing transform whose statistics are reused for the "control"
    channels (a slice along one axis, e.g. the temperature channels).
    """
    def __init__(
        self,
        data: torch.Tensor = None,
        temperatures: np.ndarray = None,
        transform_type: str = "normal",
        control_axis: int = 1,
        control_dims: tuple = (3,5),
        dequantize: bool = True,
        dequantize_type: str = "normal",
        dequantize_scale: float = 1e-2,
        TRANSFORMS: dict = TRANSFORMS,
        DEQUANTIZERS: dict = DEQUANTIZERS,
    ):
        """
        Initialize a Loader instance.

        Args:
            data (torch.Tensor | np.ndarray | str, optional): Sample tensor,
                or a path to a file loadable with ``np.load``.
            temperatures (np.ndarray | str, optional): Per-sample temperatures,
                or a path loadable with ``np.load``.
            transform_type (str, optional): Key into TRANSFORMS. Defaults to "normal".
            control_axis (int, optional): Axis holding the control channels. Defaults to 1.
            control_dims (tuple, optional): (start, stop) slice bounds along
                ``control_axis`` selecting the control channels. Defaults to (3, 5).
            dequantize (bool, optional): Whether to apply dequantization. Defaults to True.
            dequantize_type (str, optional): Key into DEQUANTIZERS. Defaults to "normal".
            dequantize_scale (float, optional): Dequantization scale. Defaults to 1e-2.
            TRANSFORMS (dict, optional): Registry of data transforms.
            DEQUANTIZERS (dict, optional): Registry of dequantization methods.
        """
        self.control_tuple = (control_axis, control_dims)
        # Accept an in-memory tensor/array or a path string.
        # NOTE(review): if 'data' matches none of these branches, self.data is
        # never set and the attribute accesses below fail — confirm callers.
        if isinstance(data, str):
            self.data = torch.from_numpy(np.load(data)).float()
        elif isinstance(data, np.ndarray):
            self.data = torch.from_numpy(data).float()
        elif isinstance(data, torch.Tensor):
            self.data = data
        # Same duck-typing for 'temperatures'.
        if isinstance(temperatures, str):
            self.temps = np.load(temperatures)
        elif isinstance(temperatures, np.ndarray):
            self.temps = temperatures
        # Optionally add small noise so discrete-valued data (e.g. spins)
        # becomes continuous before modeling.
        if dequantize:
            self.dequantizer = DEQUANTIZERS[dequantize_type](dequantize_scale)
            self.data = self.dequantize(self.data)
        # Cache tensor geometry (4D batch-first layout in practice).
        self.data_dim = self.data.shape[-1]
        self.num_channels = self.data.shape[1]
        self.num_dims = len(self.data.shape)
        # Build slice objects to retrieve control params and batch from Tensor.
        self.control_slice = self.build_control_slice(control_axis, control_dims, self.num_dims)
        self.batch_slice = self.build_batch_slice(self.num_dims)
        # Fit the standardizing transform on the (possibly dequantized) data.
        self.transform = TRANSFORMS[transform_type](self.data)
        # Keep raw and standardized views of the control channels.
        self.unstd_control = self.data[self.control_slice][self.batch_slice]
        self.std_control = self.standardize(self.data)[self.batch_slice]
    def build_control_slice(self, control_axis, control_dims, data_dim):
        """
        Builds a slice object to retrieve the control parameters from the tensor.

        Args:
            control_axis (int | None): Axis holding the control channels.
            control_dims (tuple | None): (start, stop) bounds along that axis.
            data_dim (int): Number of dimensions in the tensor.

        Returns:
            tuple: Per-dimension slices; all-elements everywhere except the
            control axis, which is restricted to control_dims.
        """
        if control_axis is None and control_dims is None:
            control_slice = [slice(None) for _ in range(data_dim)]
        else:
            control_slice = [slice(None, None) for _ in range(data_dim)]
            for axis in [control_axis]:
                control_slice[axis] = slice(control_dims[0], control_dims[1])
        return tuple(control_slice)
    def build_batch_slice(self, data_dim, batch_dim=0):
        """
        Preserves the batch dimension of a tensor while taking the first element along
        the other dimensions.

        Args:
            data_dim (int): Number of dimensions in the tensor.
            batch_dim (int, optional): Batch dimension. Defaults to 0.

        Returns:
            list: Per-dimension slices (first element everywhere except the
            batch axis, which is kept whole).
        """
        batch_slice = [slice(0, 1) for dim in range(data_dim)]
        batch_slice[batch_dim] = slice(None, None)
        return batch_slice
    def dequantize(self, x):
        """
        Calls the dequantization method defined in DEQUANTIZERS.

        Args:
            x (torch.Tensor): Input data.

        Returns:
            torch.Tensor: Dequantized data.
        """
        return self.dequantizer.forward(x)
    def standardize(self, x):
        """
        Calls the standardizing transform defined in TRANSFORMS.

        Args:
            x (torch.Tensor): Input data.

        Returns:
            torch.Tensor: Standardized data.
        """
        return self.transform.forward(x)
    def unstandardize(self, x):
        """
        Calls the inverse of the standardizing transform defined in TRANSFORMS.

        Args:
            x (torch.Tensor): Input data.

        Returns:
            torch.Tensor: Unstandardized data.
        """
        return self.transform.reverse(x)
    def get_data_dim(self):
        """
        Get the size of the last (spatial) dimension.

        Returns:
            int: Data dimension.
        """
        return self.data_dim
    def get_num_dims(self):
        """
        Get the number of dimensions.

        Returns:
            int: Number of dimensions.
        """
        return self.num_dims
    def get_num_channels(self):
        """
        Get the number of channels.

        Returns:
            int: Number of channels.
        """
        return self.num_channels
    def get_all_but_batch_dim(self):
        """
        Get dimensions of data excluding the batch dimension.

        Returns:
            tuple: Dimensions of data excluding batch dimension.
        """
        return self.data.shape[1:]
    def get_batch(self, index):
        """
        Get a batch of data by index (for testing purposes).

        Args:
            index (int): Index of the batch.

        Returns:
            torch.Tensor: Batch with standardized control channels spliced in.
        """
        x = self.data[index : index + 1]
        std_control = self.std_control[index : index + 1]
        # NOTE(review): ``x`` is a view into self.data, so this assignment
        # writes the standardized control values back into the stored dataset
        # — confirm this in-place mutation is intended.
        x[self.control_slice] = std_control
        return x.float()
    def __getitem__(self, index):
        """
        Get an item from the dataset.

        Args:
            index (int): Index of the item.

        Returns:
            tuple: (temperature(s), sample tensor) for the given index.
        """
        x = torch.clone(self.data[index])
        temps = self.temps[index]
        logger.debug(f"{x.shape}")
        # Magnetization diagnostic; assumes channel 0 holds spins on a square
        # lattice of side x.shape[-1] — TODO confirm.
        mag = abs(x[0].sum())/x.shape[-1]**2
        logger.debug(f"Fetching sample with magnetization {mag} and temperature {temps}")
        return temps, x.float()
    def __len__(self):
        """
        Get the total number of samples in the dataset.

        Returns:
            int: Total number of samples.
        """
        return np.shape(self.data)[0]
| Python |
2D | lherron2/thermomaps-ising | thermomaps-root/tm/core/utils.py | .py | 2,164 | 90 | import yaml
import torch.nn.functional as F
import numpy as np
import os
import re
def exists(x) -> bool:
    """
    Check if a variable exists (is not None).

    Args:
        x: Any variable.

    Returns:
        bool: True if the variable is not None, False otherwise.
    """
    return x is not None
def default(val, d):
    """
    Return the value if it exists, otherwise return a default value or result.

    Args:
        val: Any variable.
        d: Default value, or a zero-argument callable invoked lazily.

    Returns:
        Any: val if it is not None; otherwise d (or d() when d is callable).
    """
    if val is None:
        # BUG FIX: previously fell through and returned None (the lazy-default
        # line was commented out). Return the default, calling it lazily when
        # callable — matching the identical helper in the UNet modules.
        return d() if callable(d) else d
    return val
def compute_model_dim(data_dim, groups):
    """
    Round *data_dim* up to the nearest multiple of *groups*.

    Args:
        data_dim (int): Dimension of the data.
        groups (int): Number of groups.

    Returns:
        int: Model dimension (smallest multiple of groups >= data_dim).
    """
    n_blocks = np.ceil(data_dim / groups)
    return int(n_blocks * groups)
class Interpolater:
    """
    Reshapes irregularly (or unconventionally) shaped data to be compatible with a model.
    """
    def __init__(self, data_shape: tuple, target_shape: tuple):
        """
        Store the original and target spatial shapes.

        Args:
            data_shape (tuple): Shape of the original data.
            target_shape (tuple): Target shape for interpolation.
        """
        self.data_shape = data_shape
        self.target_shape = target_shape
    def to_target(self, x):
        """
        Resize *x* to the target shape with nearest-exact interpolation.

        Args:
            x (torch.Tensor): Input data (batched, channel-first).

        Returns:
            torch.Tensor: Interpolated data.
        """
        return F.interpolate(x, size=self.target_shape, mode="nearest-exact")
    def from_target(self, x):
        """
        Resize *x* from the target shape back to the original data shape.

        Args:
            x (torch.Tensor): Input data.

        Returns:
            torch.Tensor: Interpolated data.
        """
        return F.interpolate(x, size=self.data_shape, mode="nearest-exact")
| Python |
2D | lherron2/thermomaps-ising | thermomaps-root/tm/core/Prior_old.py | .py | 13,014 | 429 | import torch
from scipy.optimize import curve_fit
import numpy as np
from typing import Any, Callable, Dict, List
import logging
logging.basicConfig(level=logging.DEBUG)
def temperature_density_rescaling(std_temp, ref_temp):
    """
    Temperature density rescaling factor sqrt(std_temp / ref_temp).

    Args:
        std_temp (torch.Tensor): Standardized temperature.
        ref_temp (float): Reference temperature.

    Returns:
        torch.Tensor: Rescaling factor.
    """
    ratio = std_temp / ref_temp
    return ratio.pow(0.5)
def identity(t, *args, **kwargs):
    """
    Identity function; extra positional/keyword arguments are accepted and
    ignored so it can stand in for rescaling functions with richer signatures.

    Args:
        t (torch.Tensor): Input tensor.

    Returns:
        torch.Tensor: Unchanged input tensor.
    """
    return t
# Maps rescaling-mode keywords to the corresponding rescaling callables.
RESCALE_FUNCS = {"density": temperature_density_rescaling, "no_rescale": identity}
def linear_fit(x, a, b):
    """
    Linear model a*x + b, in the signature expected by scipy curve_fit.

    Args:
        x (torch.Tensor): Input tensor.
        a (float): Slope.
        b (float): Intercept.

    Returns:
        torch.Tensor: Fitted values.
    """
    return a * x + b
# Fit-function registry used by the priors, keyed by fit name.
FIT_FUNCS = {"linear": linear_fit}
# (lower, upper) parameter bounds for scipy.optimize.curve_fit, keyed by fit name.
BOUNDS = {"linear": ([0, -np.inf], [np.inf, np.inf])}
# Initial parameter guesses, keyed by fit name.
INITIAL_GUESS = {"linear": [1, 1]}
def parse_kwargs(**kwargs):
    """
    Merge user keyword arguments over the default prior parameters.

    Args:
        **kwargs: Keyword arguments to be parsed.

    Returns:
        dict: {"mean": 0, "std": 1} overridden/extended by *kwargs*.
    """
    defaults = {"mean": 0, "std": 1}
    return {**defaults, **kwargs}
def rmsd(v, temp):
    """
    Per-site mean-square deviation, summed over channels.

    Args:
        v (torch.Tensor): Input tensor (batch, channel, H, W).
        temp: Temperature (unused; kept for the registry's call signature).

    Returns:
        torch.Tensor: Shape (1, H, W) map of fluctuations.
    """
    centered = v - v.mean(0)
    per_site = centered.pow(2).mean(0).sum(0)
    return per_site.unsqueeze(0)
def filter_bounds(RMSD, mult=None, cutoff=None):
    """
    Clamp stacked RMSD maps to statistical bounds and/or a lower cutoff.

    Args:
        RMSD (np.ndarray): Stacked RMSD arrays, shape (n_temps, ...).
        mult (float, optional): If given, clamp each array to
            mean +/- mult*std (statistics taken across axis 0). Defaults to None.
        cutoff (float, optional): If given, floor values at this cutoff.
            Defaults to None.

    Returns:
        np.ndarray: Stacked, clamped RMSD array.

    Note:
        Clamping writes through to the rows of *RMSD* (np.putmask and boolean
        assignment operate in place), mirroring the original behavior.
    """
    mean, std = RMSD.mean(0), RMSD.std(0)
    if mult is not None:
        ub = mean + mult * std
        lb = mean - mult * std
    RMSD_ = []
    for x in RMSD:
        if mult is not None:
            np.putmask(x, x > ub, ub)
            np.putmask(x, x < lb, lb)
        # BUG FIX: the floor was applied unconditionally, so the default
        # cutoff=None raised a TypeError on the `x < None` comparison.
        if cutoff is not None:
            x[x < cutoff] = cutoff
        RMSD_.append(x)
    return np.stack(RMSD_)
def parallel_curve_fit(func, x, y, **kwargs):
    """
    Fit a two-parameter curve independently at every (i, j) site.

    Args:
        func (callable): Two-parameter model passed to scipy curve_fit.
        x (array-like): Shared independent variable.
        y (np.ndarray): Observations, shape (n_points, xdims, ydims).
        **kwargs: Must contain "bounds", forwarded to curve_fit.

    Returns:
        np.ndarray: Fitted parameters, shape (2, xdims, ydims).
    """
    (_, xdims, ydims) = y.shape
    params = np.ones((2, xdims, ydims), dtype=y.dtype)
    for i, j in np.ndindex(xdims, ydims):
        popt, _ = curve_fit(func, x, y[:, i, j], bounds=kwargs["bounds"])
        params[:, i, j] = popt
    return params
class NormalPrior:
    """
    Normal (Gaussian) prior distribution.
    """
    def __init__(self, **kwargs):
        """
        Initialize a NormalPrior.

        Args:
            **kwargs: Prior parameters; recognized keys are ``mean`` and
                ``std`` (defaults 0 and 1 supplied by parse_kwargs).
        """
        # BUG FIX: parse_kwargs accepts only keyword arguments, so the
        # previous positional call parse_kwargs(kwargs) raised
        # "TypeError: parse_kwargs() takes 0 positional arguments".
        self.kwargs = parse_kwargs(**kwargs)
        self.prior = torch.distributions.normal.Normal(
            self.kwargs["mean"], self.kwargs["std"]
        )
    def sample_prior(self, batch_size, *args, **kwargs):
        """
        Sample from the prior distribution.

        Args:
            batch_size: Sample shape forwarded to Normal.sample.
                NOTE(review): torch expects a Size/tuple here, so callers
                should pass e.g. (batch_size,) rather than a bare int — confirm
                call sites.

        Returns:
            torch.Tensor: Sampled values from the prior.
        """
        return self.prior.sample(sample_shape=batch_size)
class LocalEquilibriumHarmonicPrior(NormalPrior):
    """
    Local equilibrium harmonic prior.

    Models per-site fluctuations as independent zero-mean Gaussians whose
    variances come either from per-temperature RMSD maps measured on data or
    from a curve fitted to those maps as a function of temperature.
    """
    def __init__(
        self,
        data,
        temps,
        fit_key,
        cutoff,
        BOUNDS=BOUNDS,
        INITIAL_GUESS=INITIAL_GUESS,
        FIT_FUNCS=FIT_FUNCS,
        **kwargs
    ):
        """
        Initialize a LocalEquilibriumHarmonicPrior.

        Args:
            data (dict): Mapping from temperature key to sample tensor.
            temps (list): Temperature keys; assumed formatted like "300_..."
                so that float(key.split("_")[0]) parses the temperature.
            fit_key (str): Key for the fitting function in FIT_FUNCS.
            cutoff (float): Lower floor imposed on fluctuations / fitted stds.
            BOUNDS (dict, optional): Bounds for fitting. Defaults to BOUNDS.
            INITIAL_GUESS (dict, optional): Initial guess for fitting. Defaults to INITIAL_GUESS.
            FIT_FUNCS (dict, optional): Fitting functions. Defaults to FIT_FUNCS.
            **kwargs: Keyword arguments for the prior distribution (mean and std).
        """
        self.kwargs = parse_kwargs(**kwargs)
        self.fit = FIT_FUNCS[fit_key]
        self.cutoff = cutoff
        # Per-temperature, per-site mean-square fluctuations stacked along dim 0.
        RMSD = np.concatenate([rmsd(v, k) for k, v in data.items()], axis=0)
        RMSD_arr = filter_bounds(RMSD, mult=None, cutoff=cutoff)
        T = [float(k.split("_")[0]) for k in temps]
        # NOTE(review): the curve fit uses the unfiltered RMSD while RMSD_d
        # below stores the filtered RMSD_arr, and the bounds key is hard-coded
        # to "linear" regardless of fit_key — confirm both are intended.
        self.params = parallel_curve_fit(self.fit, T, RMSD, bounds=BOUNDS["linear"])
        self.RMSD_d = {}
        for i, temp in enumerate(data.keys()):
            self.RMSD_d[temp] = RMSD_arr[i]
    def sample_prior_from_data(self, batch_size, temp, n_dims=4):
        """
        Sample prior noise using measured per-temperature fluctuations.

        Args:
            batch_size (int): Batch size (unused; one sample per entry of temp).
            temp (list): Temperature keys indexing self.RMSD_d.
            n_dims (int, optional): Channels to replicate the std map over. Defaults to 4.

        Returns:
            torch.Tensor: Samples of shape (len(temp), n_dims + 1, H, W).
        """
        stds = []
        for t in temp:
            std = torch.Tensor(self.RMSD_d[t]) ** 0.5
            std = torch.repeat_interleave(std[None, :, :], n_dims, dim=0)
            stds.append(std)
        stds = torch.stack(stds)
        stds[stds < self.cutoff] = self.cutoff
        # Extra all-ones channel appended after the replicated std maps —
        # presumably the control/temperature channel of the diffusion tensor;
        # TODO confirm.
        extra_dims = torch.ones_like(stds)[:, 0, :, :]
        stds = torch.cat([stds, extra_dims[:, None, :, :]], dim=1)
        prior = torch.distributions.normal.Normal(0, stds)
        return prior.sample()
    def fit_prior(self, batch_size, temp, **kwargs):
        """
        Evaluate the fitted fluctuation curve at *temp*.

        Args:
            batch_size (int): Number of rows to tile.
            temp (float): Temperature.
            **kwargs: Unused.

        Returns:
            torch.Tensor: Standard deviations, with NaNs from the fit replaced
            by the cutoff.
        """
        temp = np.repeat(np.array([temp]), batch_size, axis=0)
        stds = self.fit(temp[:, None, None, None], *self.params)
        stds = torch.Tensor(stds) ** 0.5
        stds[torch.isnan(stds)] = self.cutoff
        return stds
    def sample_prior_from_fit(self, batch_size, temp, n_dims=4):
        """
        Sample prior noise with stds taken from the fitted curve.

        Args:
            batch_size (int): Batch size.
            temp (float): Temperature.
            n_dims (int, optional): Number of replicated channels. Defaults to 4.

        Returns:
            torch.Tensor: Samples of shape (batch_size, n_dims + 1, H, W).
        """
        stds = self.fit_prior(batch_size, temp)
        stds[stds < self.cutoff] = self.cutoff
        stds = torch.repeat_interleave(stds, n_dims, dim=1)
        extra_dims = torch.ones_like(stds)[:, 0, :, :]
        stds = torch.cat([stds, extra_dims[:, None, :, :]], dim=1)
        prior = torch.distributions.normal.Normal(0, torch.Tensor(stds))
        return prior.sample()
    def sample_prior(self, batch_size, temp, sample_type, n_dims, *args, **kwargs):
        """
        Dispatch to from-data or from-fit sampling.

        Args:
            batch_size (int): Batch size.
            temp (list | float): Temperature key(s) or value, per sample_type.
            sample_type (str): 'from_data' or 'from_fit'.
            n_dims (int): Number of replicated channels.
            **kwargs: Unused.

        Returns:
            torch.Tensor: Sampled values. NOTE(review): silently returns None
            when sample_type matches neither branch — confirm intended.
        """
        if sample_type == "from_data":
            return self.sample_prior_from_data(batch_size, temp, n_dims=n_dims)
        if sample_type == "from_fit":
            samp = self.sample_prior_from_fit(batch_size, temp, n_dims=n_dims)
            return samp
class GlobalEquilibriumHarmonicPrior(LocalEquilibriumHarmonicPrior):
    """
    Global equilibrium harmonic prior.

    Variant of the local prior where every site's fluctuation is replaced by a
    constant equal to the temperature itself (variance proportional to T),
    rather than being measured from the data.
    """
    def __init__(
        self,
        data: Dict,
        fit_key: str = "linear",
        BOUNDS: Dict[str, tuple] = BOUNDS,
        INITIAL_GUESS: Dict[str, float] = INITIAL_GUESS,
        FIT_FUNCS: Dict[str, Callable] = FIT_FUNCS,
        **kwargs: Any
    ):
        """
        Initialize a GlobalEquilibriumHarmonicPrior.

        Args:
            data (dict): Mapping from temperature key (parsed via
                float(str(key).split("_")[0])) to sample tensor.
            fit_key (str): Key for the fitting function. Defaults to "linear".
            BOUNDS (dict, optional): Bounds for fitting. Defaults to BOUNDS.
            INITIAL_GUESS (dict, optional): Initial guess for fitting. Defaults to INITIAL_GUESS.
            FIT_FUNCS (dict, optional): Fitting functions. Defaults to FIT_FUNCS.
            **kwargs: Keyword arguments for the prior distribution (mean and std).
        """
        self.kwargs = parse_kwargs(**kwargs)
        self.fit = FIT_FUNCS[fit_key]
        # Hard-coded floor and disabled statistical clamping.
        self.cutoff = 1e-2
        self.mult = None
        T = [float(str(k).split("_")[0]) for k in data.keys()]
        RMSD = np.concatenate([rmsd(v, k) for k, v in data.items()], axis=0)
        # Overwrite each measured map with a constant equal to its temperature,
        # so the subsequent fit sees variance == T at every site.
        for i, temp in enumerate(T):
            RMSD[i] = np.ones_like(RMSD[i]) * temp
        self.params = parallel_curve_fit(self.fit, T, RMSD, bounds=BOUNDS["linear"])
        self.RMSD_d = {}
        # NOTE(review): keys are str(float(temp)) (e.g. "300.0"); callers of
        # sample_prior_from_data must index with the same string form — confirm.
        for i, temp in enumerate(T):
            self.RMSD_d[str(temp)] = np.ones_like(RMSD[i]) * float(temp)
        logging.debug(f"Fluctuation keys: {self.RMSD_d.keys()}")
    def sample_prior_from_data(self, batch_size, temp, n_dims=4):
        """
        Sample prior noise from the stored constant fluctuation maps.

        Identical to the parent implementation (duplicated here); see
        LocalEquilibriumHarmonicPrior.sample_prior_from_data.

        Args:
            batch_size (int): Batch size (unused; one sample per temp entry).
            temp (list): Temperature keys indexing self.RMSD_d.
            n_dims (int, optional): Channels to replicate. Defaults to 4.

        Returns:
            torch.Tensor: Samples of shape (len(temp), n_dims + 1, H, W).
        """
        stds = []
        for t in temp:
            std = torch.Tensor(self.RMSD_d[t]) ** 0.5
            std = torch.repeat_interleave(std[None, :, :], n_dims, dim=0)
            stds.append(std)
        stds = torch.stack(stds)
        stds[stds < self.cutoff] = self.cutoff
        extra_dims = torch.ones_like(stds)[:, 0, :, :]
        stds = torch.cat([stds, extra_dims[:, None, :, :]], dim=1)
        prior = torch.distributions.normal.Normal(0, stds)
        return prior.sample()
    def fit_prior(self, batch_size, temp, **kwargs):
        """
        Evaluate the fitted fluctuation curve at *temp* (duplicates parent).

        Args:
            batch_size (int): Number of rows to tile.
            temp (float): Temperature.
            **kwargs: Unused.

        Returns:
            torch.Tensor: Standard deviations with NaNs replaced by the cutoff.
        """
        temp = np.repeat(np.array([temp]), batch_size, axis=0)
        stds = self.fit(temp[:, None, None, None], *self.params)
        stds = torch.Tensor(stds) ** 0.5
        stds[torch.isnan(stds)] = self.cutoff
        return stds
    def sample_prior_from_fit(self, batch_size, temp, n_dims=4):
        """
        Sample prior noise with stds from the fitted curve (duplicates parent).

        Args:
            batch_size (int): Batch size.
            temp (float): Temperature.
            n_dims (int, optional): Number of replicated channels. Defaults to 4.

        Returns:
            torch.Tensor: Samples of shape (batch_size, n_dims + 1, H, W).
        """
        stds = self.fit_prior(batch_size, temp)
        stds[stds < self.cutoff] = self.cutoff
        stds = torch.repeat_interleave(stds, n_dims, dim=1)
        extra_dims = torch.ones_like(stds)[:, 0, :, :]
        stds = torch.cat([stds, extra_dims[:, None, :, :]], dim=1)
        prior = torch.distributions.normal.Normal(0, torch.Tensor(stds))
        return prior.sample()
    def sample_prior(self, batch_size, temp, sample_type, n_dims, *args, **kwargs):
        """
        Dispatch to from-data or from-fit sampling.

        Args:
            batch_size (int): Batch size.
            temp (list | float): Temperature key(s) or value, per sample_type.
            sample_type (str): 'from_data' or 'from_fit'.
            n_dims (int): Number of replicated channels.
            **kwargs: Unused.

        Returns:
            torch.Tensor: Sampled values. NOTE(review): if sample_type matches
            neither branch, `samp` is unbound and this raises NameError —
            confirm callers always pass a valid sample_type.
        """
        if sample_type == "from_data":
            samp = self.sample_prior_from_data(batch_size, temp, n_dims=n_dims)
        if sample_type == "from_fit":
            samp = self.sample_prior_from_fit(batch_size, temp, n_dims=n_dims)
        return samp
| Python |
2D | lherron2/thermomaps-ising | thermomaps-root/tm/architectures/unet_2d_mid_attn.py | .py | 12,485 | 445 | import math
from functools import partial
from collections import namedtuple
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, reduce
from einops.layers.torch import Rearrange
# constants
# Named pair returned by diffusion model wrappers: (pred_noise, pred_x_start).
ModelPrediction = namedtuple("ModelPrediction", ["pred_noise", "pred_x_start"])
# helpers functions
def exists(x) -> bool:
    """Return True when *x* is not None."""
    return x is not None
def default(val, d):
    """Return *val* if it is not None; otherwise *d* (called when callable)."""
    if val is None:
        return d() if callable(d) else d
    return val
def identity(t, *args, **kwargs):
    """Return *t* unchanged; extra arguments are accepted and ignored."""
    return t
def cycle(dl):
    """Yield items from *dl* forever, restarting a fresh pass at exhaustion."""
    while True:
        yield from dl
def has_int_squareroot(num):
    """Return True when *num* is a perfect square.

    Uses integer math.isqrt, which is exact for arbitrarily large ints; the
    previous float-based `math.sqrt(num) ** 2 == num` gave wrong answers for
    values past 2**53. Expects a non-negative integer.
    """
    return math.isqrt(num) ** 2 == num
def num_to_groups(num, divisor):
    """Split *num* into group sizes [divisor, ..., divisor, remainder]."""
    full, rem = divmod(num, divisor)
    groups = [divisor] * full
    if rem > 0:
        groups.append(rem)
    return groups
def convert_image_to_fn(img_type, image):
    """Convert *image* to mode *img_type* when it differs; else return as-is."""
    if image.mode == img_type:
        return image
    return image.convert(img_type)
# normalization functions
def normalize_to_neg_one_to_one(img):
    """Map values from [0, 1] to [-1, 1]."""
    return 2 * img - 1
def unnormalize_to_zero_to_one(t):
    """Map values from [-1, 1] back to [0, 1]."""
    return 0.5 * (t + 1)
# small helper modules
class Residual(nn.Module):
    """Skip-connection wrapper: forward(x, ...) = fn(x, ...) + x."""
    def __init__(self, fn):
        super().__init__()
        self.fn = fn
    def forward(self, x, *args, **kwargs):
        out = self.fn(x, *args, **kwargs)
        return out + x
def Upsample(dim, dim_out=None):
    """2x nearest-neighbor upsample followed by a 3x3 conv (dim -> dim_out or dim)."""
    layers = [
        nn.Upsample(scale_factor=2, mode="nearest"),
        nn.Conv2d(dim, default(dim_out, dim), 3, padding=1),
    ]
    return nn.Sequential(*layers)
def Downsample(dim, dim_out=None):
    """2x downsample via space-to-depth then a 1x1 conv.

    The Rearrange packs each 2x2 spatial patch into the channel axis
    (c -> 4c, h/2, w/2); the 1x1 conv maps 4*dim channels to dim_out (or dim
    when dim_out is None). Requires even spatial dimensions.
    """
    return nn.Sequential(
        Rearrange("b c (h p1) (w p2) -> b (c p1 p2) h w", p1=2, p2=2),
        nn.Conv2d(dim * 4, default(dim_out, dim), 1),
    )
class WeightStandardizedConv2d(nn.Conv2d):
    """
    Conv2d whose weights are standardized per output filter on every call.

    https://arxiv.org/abs/1903.10520
    weight standardization purportedly works synergistically with group normalization
    """
    def forward(self, x):
        # Looser epsilon for reduced-precision inputs.
        eps = 1e-5 if x.dtype == torch.float32 else 1e-3
        w = self.weight
        # Per-output-filter statistics over (in_channels, kh, kw).
        mean = w.mean(dim=(1, 2, 3), keepdim=True)
        var = w.var(dim=(1, 2, 3), unbiased=False, keepdim=True)
        w_hat = (w - mean) * (var + eps).rsqrt()
        return F.conv2d(
            x,
            w_hat,
            self.bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
        )
class LayerNorm(nn.Module):
    """Channel-dimension layer norm for NCHW tensors with a learnable gain."""
    def __init__(self, dim):
        super().__init__()
        self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
    def forward(self, x):
        eps = 1e-5 if x.dtype == torch.float32 else 1e-3
        mean = torch.mean(x, dim=1, keepdim=True)
        var = torch.var(x, dim=1, unbiased=False, keepdim=True)
        return (x - mean) * (var + eps).rsqrt() * self.g
class PreNorm(nn.Module):
    """Normalize the input with LayerNorm, then apply the wrapped module."""
    def __init__(self, dim, fn):
        super().__init__()
        # Registration order (fn, then norm) kept identical to the original
        # so state_dict key ordering is unchanged.
        self.fn = fn
        self.norm = LayerNorm(dim)
    def forward(self, x):
        return self.fn(self.norm(x))
# sinusoidal positional embeds
class SinusoidalPosEmb(nn.Module):
    """Transformer-style sinusoidal embedding of a scalar position/timestep."""
    def __init__(self, dim):
        """dim: total embedding width (half sine, half cosine features)."""
        super().__init__()
        self.dim = dim
    def forward(self, x):
        """Embed a 1D batch of positions into shape (batch, dim)."""
        half_dim = self.dim // 2
        log_scale = math.log(10000) / (half_dim - 1)
        freqs = torch.exp(torch.arange(half_dim, device=x.device) * -log_scale)
        angles = x[:, None] * freqs[None, :]
        return torch.cat((angles.sin(), angles.cos()), dim=-1)
class RandomOrLearnedSinusoidalPosEmb(nn.Module):
    """Random (or learned) Fourier-feature embedding of a scalar timestep.

    Following @crowsonkb's lead with random (learned optional) sinusoidal pos emb:
    https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8
    """
    def __init__(self, dim, is_random=False):
        super().__init__()
        assert (dim % 2) == 0
        half_dim = dim // 2
        # Frozen when is_random, trainable otherwise.
        self.weights = nn.Parameter(torch.randn(half_dim), requires_grad=not is_random)
    def forward(self, x):
        pos = x[:, None]
        freqs = pos * self.weights[None, :] * 2 * math.pi
        fouriered = torch.cat((freqs.sin(), freqs.cos()), dim=-1)
        # Prepend the raw position, giving dim + 1 output features.
        return torch.cat((pos, fouriered), dim=-1)
# building block modules
class Block(nn.Module):
    """Weight-standardized conv -> group norm -> optional FiLM -> SiLU."""
    def __init__(self, dim, dim_out, groups=8):
        super().__init__()
        self.proj = WeightStandardizedConv2d(dim, dim_out, 3, padding=1)
        self.norm = nn.GroupNorm(groups, dim_out)
        self.act = nn.SiLU()
    def forward(self, x, scale_shift=None):
        h = self.norm(self.proj(x))
        if scale_shift is not None:
            scale, shift = scale_shift
            h = h * (scale + 1) + shift
        return self.act(h)
class ResnetBlock(nn.Module):
    """Two conditioned Blocks with a (possibly projected) residual connection."""
    def __init__(self, dim, dim_out, *, time_emb_dim=None, groups=8):
        super().__init__()
        # FiLM generator: time embedding -> per-channel (scale, shift) pair.
        self.mlp = (
            nn.Sequential(nn.SiLU(), nn.Linear(time_emb_dim, dim_out * 2))
            if time_emb_dim is not None
            else None
        )
        self.block1 = Block(dim, dim_out, groups=groups)
        self.block2 = Block(dim_out, dim_out, groups=groups)
        self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
    def forward(self, x, time_emb=None):
        scale_shift = None
        if self.mlp is not None and time_emb is not None:
            cond = self.mlp(time_emb)[:, :, None, None]
            scale_shift = cond.chunk(2, dim=1)
        h = self.block2(self.block1(x, scale_shift=scale_shift))
        return h + self.res_conv(x)
class LinearAttention(nn.Module):
    """
    Linear-complexity self-attention over the spatial positions of an NCHW map.

    Uses the kernelized-attention trick: softmax is applied to q over the
    channel axis and to k over the position axis, letting the context be
    aggregated in O(N) rather than O(N^2) in the number of positions.
    """
    def __init__(self, dim, heads=4, dim_head=32):
        """
        Args:
            dim (int): Number of input/output channels.
            heads (int, optional): Attention heads. Defaults to 4.
            dim_head (int, optional): Channels per head. Defaults to 32.
        """
        super().__init__()
        self.scale = dim_head**-0.5
        self.heads = heads
        hidden_dim = dim_head * heads
        # Single 1x1 conv producing q, k, v stacked along channels.
        self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
        self.to_out = nn.Sequential(nn.Conv2d(hidden_dim, dim, 1), LayerNorm(dim))
    def forward(self, x):
        """Apply linear attention; output has the same shape as the input."""
        b, c, h, w = x.shape
        qkv = self.to_qkv(x).chunk(3, dim=1)
        # Split heads and flatten the spatial grid into one axis of length h*w.
        q, k, v = map(
            lambda t: rearrange(t, "b (h c) x y -> b h c (x y)", h=self.heads), qkv
        )
        q = q.softmax(dim=-2)
        k = k.softmax(dim=-1)
        q = q * self.scale
        # Normalize v by the number of positions to keep magnitudes bounded.
        v = v / (h * w)
        # Per-head feature-feature summary aggregated over all positions.
        context = torch.einsum("b h d n, b h e n -> b h d e", k, v)
        out = torch.einsum("b h d e, b h d n -> b h e n", context, q)
        out = rearrange(out, "b h c (x y) -> b (h c) x y", h=self.heads, x=h, y=w)
        return self.to_out(out)
class Attention(nn.Module):
    """
    Full (quadratic) self-attention over the spatial positions of an NCHW map.
    """
    def __init__(self, dim, heads=4, dim_head=32):
        """
        Args:
            dim (int): Number of input/output channels.
            heads (int, optional): Attention heads. Defaults to 4.
            dim_head (int, optional): Channels per head. Defaults to 32.
        """
        super().__init__()
        self.scale = dim_head**-0.5
        self.heads = heads
        hidden_dim = dim_head * heads
        self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
        self.to_out = nn.Conv2d(hidden_dim, dim, 1)
    def forward(self, x):
        """Apply softmax attention; output has the same shape as the input."""
        b, c, h, w = x.shape
        qkv = self.to_qkv(x).chunk(3, dim=1)
        q, k, v = map(
            lambda t: rearrange(t, "b (h c) x y -> b h c (x y)", h=self.heads), qkv
        )
        q = q * self.scale
        # Pairwise similarity between all h*w positions: O((h*w)^2) memory.
        sim = einsum("b h d i, b h d j -> b h i j", q, k)
        attn = sim.softmax(dim=-1)
        out = einsum("b h i j, b h d j -> b h i d", attn, v)
        out = rearrange(out, "b h (x y) d -> b (h d) x y", x=h, y=w)
        return self.to_out(out)
# model
class Unet2D(nn.Module):
    """
    2D U-Net denoiser with time conditioning and mid-block self-attention.

    Four ResnetBlocks per resolution on both the down and up paths, with a
    skip connection saved after every block; attention is applied only at the
    bottleneck. Input/output are NCHW with ``channels`` channels (doubled on
    input when self-conditioning is enabled).
    """
    def __init__(
        self,
        dim,
        init_dim=None,
        out_dim=None,
        dim_mults=(1, 2, 4, 8),
        channels=3,
        self_condition=False,
        resnet_block_groups=8,
        learned_variance=False,
        learned_sinusoidal_cond=False,
        random_fourier_features=False,
        learned_sinusoidal_dim=16,
    ):
        """
        Args:
            dim (int): Base channel width (also the default stem width).
            init_dim (int, optional): Channels after the stem conv. Defaults to dim.
            out_dim (int, optional): Output channels. Defaults to channels
                (doubled when learned_variance).
            dim_mults (tuple, optional): Width multiplier per resolution.
            channels (int, optional): Input image channels.
            self_condition (bool, optional): Concatenate a self-conditioning
                input along the channel axis.
            resnet_block_groups (int, optional): GroupNorm groups per Block.
            learned_variance (bool, optional): Predict variance alongside mean.
            learned_sinusoidal_cond (bool, optional): Use the learned Fourier
                time embedding.
            random_fourier_features (bool, optional): Use the random (frozen)
                Fourier time embedding.
            learned_sinusoidal_dim (int, optional): Width of that embedding.
        """
        super().__init__()
        # determine dimensions
        self.channels = channels
        self.self_condition = self_condition
        input_channels = channels * (2 if self_condition else 1)
        init_dim = default(init_dim, dim)
        self.init_conv = nn.Conv2d(input_channels, init_dim, 7, padding=3)
        # Channel widths per resolution, e.g. dim=64 -> [64, 64, 128, 256, 512].
        dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
        in_out = list(zip(dims[:-1], dims[1:]))
        block_klass = partial(ResnetBlock, groups=resnet_block_groups)
        # time embeddings
        time_dim = dim * 4
        self.random_or_learned_sinusoidal_cond = (
            learned_sinusoidal_cond or random_fourier_features
        )
        if self.random_or_learned_sinusoidal_cond:
            sinu_pos_emb = RandomOrLearnedSinusoidalPosEmb(
                learned_sinusoidal_dim, random_fourier_features
            )
            # +1 because that embedding prepends the raw position.
            fourier_dim = learned_sinusoidal_dim + 1
        else:
            sinu_pos_emb = SinusoidalPosEmb(dim)
            fourier_dim = dim
        self.time_mlp = nn.Sequential(
            sinu_pos_emb,
            nn.Linear(fourier_dim, time_dim),
            nn.GELU(),
            nn.Linear(time_dim, time_dim),
        )
        # layers
        self.downs = nn.ModuleList([])
        self.ups = nn.ModuleList([])
        num_resolutions = len(in_out)
        # Down path: 4 time-conditioned blocks then a downsample (a stride-1
        # conv at the last resolution, where no downsampling happens).
        for ind, (dim_in, dim_out) in enumerate(in_out):
            is_last = ind >= (num_resolutions - 1)
            self.downs.append(
                nn.ModuleList(
                    [
                        block_klass(dim_in, dim_in, time_emb_dim=time_dim),
                        block_klass(dim_in, dim_in, time_emb_dim=time_dim),
                        block_klass(dim_in, dim_in, time_emb_dim=time_dim),
                        block_klass(dim_in, dim_in, time_emb_dim=time_dim),
                        Downsample(dim_in, dim_out)
                        if not is_last
                        else nn.Conv2d(dim_in, dim_out, 3, padding=1),
                    ]
                )
            )
        # Bottleneck: resnet -> attention (pre-norm, residual) -> resnet.
        mid_dim = dims[-1]
        self.mid_block1 = block_klass(mid_dim, mid_dim, time_emb_dim=time_dim)
        self.mid_attn = Residual(PreNorm(mid_dim, Attention(mid_dim)))
        self.mid_block2 = block_klass(mid_dim, mid_dim, time_emb_dim=time_dim)
        # Up path mirrors the down path; each block consumes a skip tensor,
        # hence the dim_out + dim_in input widths.
        for ind, (dim_in, dim_out) in enumerate(reversed(in_out)):
            is_last = ind == (len(in_out) - 1)
            self.ups.append(
                nn.ModuleList(
                    [
                        block_klass(dim_out + dim_in, dim_out, time_emb_dim=time_dim),
                        block_klass(dim_out + dim_in, dim_out, time_emb_dim=time_dim),
                        block_klass(dim_out + dim_in, dim_out, time_emb_dim=time_dim),
                        block_klass(dim_out + dim_in, dim_out, time_emb_dim=time_dim),
                        Upsample(dim_out, dim_in)
                        if not is_last
                        else nn.Conv2d(dim_out, dim_in, 3, padding=1),
                    ]
                )
            )
        default_out_dim = channels * (1 if not learned_variance else 2)
        self.out_dim = default(out_dim, default_out_dim)
        # Final block consumes the stem activation concatenated with the
        # up-path output (hence dim * 2).
        self.final_res_block = block_klass(dim * 2, dim, time_emb_dim=time_dim)
        self.final_conv = nn.Conv2d(dim, self.out_dim, 1)
    def forward(self, x, time, x_self_cond=None):
        """
        Denoise *x* conditioned on *time*.

        Args:
            x (torch.Tensor): NCHW input.
            time (torch.Tensor): 1D batch of timesteps.
            x_self_cond (torch.Tensor, optional): Self-conditioning input;
                zeros are substituted when enabled but not provided.

        Returns:
            torch.Tensor: NCHW output with self.out_dim channels.
        """
        if self.self_condition:
            x_self_cond = default(x_self_cond, lambda: torch.zeros_like(x))
            x = torch.cat((x_self_cond, x), dim=1)
        x = self.init_conv(x)
        # Keep the stem activation for the final skip connection.
        r = x.clone()
        t = self.time_mlp(time)
        # Stack of skip tensors; 4 pushed per resolution on the way down,
        # 4 popped per resolution on the way up (LIFO order).
        h = []
        for block1, block2, block3, block4, downsample in self.downs:
            x = block1(x, t)
            h.append(x)
            x = block2(x, t)
            h.append(x)
            x = block3(x, t)
            h.append(x)
            x = block4(x, t)
            h.append(x)
            x = downsample(x)
        x = self.mid_block1(x, t)
        x = self.mid_attn(x)
        x = self.mid_block2(x, t)
        for block1, block2, block3, block4, upsample in self.ups:
            x = torch.cat((x, h.pop()), dim=1)
            x = block1(x, t)
            x = torch.cat((x, h.pop()), dim=1)
            x = block2(x, t)
            x = torch.cat((x, h.pop()), dim=1)
            x = block3(x, t)
            x = torch.cat((x, h.pop()), dim=1)
            x = block4(x, t)
            x = upsample(x)
        x = torch.cat((x, r), dim=1)
        x = self.final_res_block(x, t)
        return self.final_conv(x)
| Python |
2D | lherron2/thermomaps-ising | thermomaps-root/tm/architectures/UNet2D.py | .py | 12,922 | 454 | import math
from functools import partial
from collections import namedtuple
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, reduce
from einops.layers.torch import Rearrange
# constants
ModelPrediction = namedtuple("ModelPrediction", ["pred_noise", "pred_x_start"])
# helpers functions
def exists(x):
    """Return True iff *x* carries a value (i.e. is not None)."""
    return not (x is None)
def default(val, d):
    """Return *val* when it is not None; otherwise the fallback *d*.

    A callable fallback is invoked lazily so expensive defaults are only
    built when actually needed.
    """
    if val is not None:
        return val
    if callable(d):
        return d()
    return d
def identity(t, *args, **kwargs):
    """Return *t* unchanged; extra positional/keyword arguments are ignored."""
    return t
def cycle(dl):
    """Yield items from *dl* forever, restarting iteration once exhausted."""
    while True:
        yield from dl
def has_int_squareroot(num):
    """Return True when integer *num* is a perfect square.

    Uses math.isqrt for exact integer arithmetic: the previous float-based
    check ((math.sqrt(num) ** 2) == num) can misclassify large integers due
    to double-precision rounding. Negative inputs return False instead of
    raising ValueError.
    """
    return num >= 0 and math.isqrt(num) ** 2 == num
def num_to_groups(num, divisor):
    """Split *num* into a list of group sizes, each at most *divisor*."""
    full, rem = divmod(num, divisor)
    groups = [divisor] * full
    if rem:
        groups.append(rem)
    return groups
def convert_image_to_fn(img_type, image):
    """Convert *image* (PIL-like, exposing .mode/.convert) to *img_type* if needed."""
    if image.mode != img_type:
        return image.convert(img_type)
    return image
# normalization functions
def normalize_to_neg_one_to_one(img):
    """Map values from [0, 1] to [-1, 1]."""
    return 2 * img - 1
def unnormalize_to_zero_to_one(t):
    """Map values from [-1, 1] back to [0, 1]."""
    return 0.5 * (t + 1)
# small helper modules
class Residual(nn.Module):
    """Wrap *fn* with a skip connection: forward(x, ...) = fn(x, ...) + x."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, *args, **kwargs):
        out = self.fn(x, *args, **kwargs)
        return out + x
def Upsample(dim, dim_out=None):
    """2x nearest-neighbour upsampling followed by a 3x3 conv (dim -> dim_out or dim)."""
    out_channels = dim if dim_out is None else dim_out
    return nn.Sequential(
        nn.Upsample(scale_factor=2, mode="nearest"),
        nn.Conv2d(dim, out_channels, 3, padding=1),
    )
def Downsample(dim, dim_out=None):
    """2x downsampling via space-to-depth (2x2 pixel unshuffle) plus a 1x1 conv.

    Rearranging each 2x2 patch into channels quadruples the channel count
    without discarding information, unlike strided conv or pooling.
    """
    return nn.Sequential(
        Rearrange("b c (h p1) (w p2) -> b (c p1 p2) h w", p1=2, p2=2),
        nn.Conv2d(dim * 4, default(dim_out, dim), 1),
    )
class WeightStandardizedConv2d(nn.Conv2d):
    """
    https://arxiv.org/abs/1903.10520
    weight standardization purportedly works synergistically with group normalization
    """

    def forward(self, x):
        # fp16 needs a larger epsilon for the variance rsqrt to stay stable
        eps = 1e-5 if x.dtype == torch.float32 else 1e-3
        weight = self.weight
        # standardize each output filter to zero mean / unit variance
        mean = reduce(weight, "o ... -> o 1 1 1", "mean")
        var = reduce(weight, "o ... -> o 1 1 1", partial(torch.var, unbiased=False))
        normalized_weight = (weight - mean) * (var + eps).rsqrt()
        # convolve with the standardized weights; bias is left untouched
        return F.conv2d(
            x,
            normalized_weight,
            self.bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
        )
class LayerNorm(nn.Module):
    """Channel-wise layer norm for NCHW tensors with a learnable per-channel gain."""

    def __init__(self, dim):
        super().__init__()
        self.g = nn.Parameter(torch.ones(1, dim, 1, 1))

    def forward(self, x):
        # fp16 needs a larger epsilon to stay numerically stable
        eps = 1e-5 if x.dtype == torch.float32 else 1e-3
        mean = torch.mean(x, dim=1, keepdim=True)
        var = torch.var(x, dim=1, unbiased=False, keepdim=True)
        return (x - mean) * torch.rsqrt(var + eps) * self.g
class PreNorm(nn.Module):
    """Apply LayerNorm to the input before running *fn* (pre-normalisation)."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = LayerNorm(dim)

    def forward(self, x):
        return self.fn(self.norm(x))
# sinusoidal positional embeds
class SinusoidalPosEmb(nn.Module):
    """Transformer-style sinusoidal embedding of a scalar (e.g. diffusion timestep)."""

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        half_dim = self.dim // 2
        # geometric frequency ladder from 1 down to ~1/10000
        scale = math.log(10000) / (half_dim - 1)
        freqs = torch.exp(-scale * torch.arange(half_dim, device=x.device))
        angles = x[:, None] * freqs[None, :]
        return torch.cat((angles.sin(), angles.cos()), dim=-1)
class RandomOrLearnedSinusoidalPosEmb(nn.Module):
    """Sinusoidal embedding with random (frozen) or learned frequencies.

    following @crowsonkb 's lead with random (learned optional) sinusoidal pos emb
    https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8
    """

    def __init__(self, dim, is_random=False):
        super().__init__()
        assert (dim % 2) == 0
        half_dim = dim // 2
        # frozen when is_random (random fourier features), trainable otherwise
        self.weights = nn.Parameter(torch.randn(half_dim), requires_grad=not is_random)

    def forward(self, x):
        x = x[:, None]
        freqs = x * self.weights[None, :] * 2 * math.pi
        # prepend the raw input, so the output width is dim + 1
        return torch.cat((x, freqs.sin(), freqs.cos()), dim=-1)
# building block modules
class Block(nn.Module):
    """Weight-standardized conv -> GroupNorm -> optional FiLM scale/shift -> SiLU."""

    def __init__(self, dim, dim_out, groups=8):
        super().__init__()
        self.proj = WeightStandardizedConv2d(dim, dim_out, 3, padding=1)
        self.norm = nn.GroupNorm(groups, dim_out)
        self.act = nn.SiLU()

    def forward(self, x, scale_shift=None):
        x = self.proj(x)
        x = self.norm(x)
        if exists(scale_shift):
            # FiLM-style conditioning (scale, shift) from the time embedding
            scale, shift = scale_shift
            x = x * (scale + 1) + shift
        x = self.act(x)
        return x
class ResnetBlock(nn.Module):
    """Two Blocks with time-embedding FiLM conditioning and a residual 1x1 projection."""

    def __init__(self, dim, dim_out, *, time_emb_dim=None, groups=8):
        super().__init__()
        # projects the time embedding to (scale, shift) pairs for block1
        self.mlp = (
            nn.Sequential(nn.SiLU(), nn.Linear(time_emb_dim, dim_out * 2))
            if exists(time_emb_dim)
            else None
        )
        self.block1 = Block(dim, dim_out, groups=groups)
        self.block2 = Block(dim_out, dim_out, groups=groups)
        # 1x1 conv matches channel counts for the residual sum when needed
        self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()

    def forward(self, x, time_emb=None):
        scale_shift = None
        if exists(self.mlp) and exists(time_emb):
            time_emb = self.mlp(time_emb)
            time_emb = rearrange(time_emb, "b c -> b c 1 1")
            # split channel dim into the (scale, shift) halves
            scale_shift = time_emb.chunk(2, dim=1)
        h = self.block1(x, scale_shift=scale_shift)
        h = self.block2(h)
        return h + self.res_conv(x)
class LinearAttention(nn.Module):
    """Linear-complexity attention: softmax over different axes for q and k.

    Cheaper than full attention over all pixel pairs, so it is used at the
    higher resolutions of the U-Net.
    """

    def __init__(self, dim, heads=4, dim_head=32):
        super().__init__()
        self.scale = dim_head**-0.5
        self.heads = heads
        hidden_dim = dim_head * heads
        self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
        self.to_out = nn.Sequential(nn.Conv2d(hidden_dim, dim, 1), LayerNorm(dim))

    def forward(self, x):
        b, c, h, w = x.shape
        qkv = self.to_qkv(x).chunk(3, dim=1)
        q, k, v = map(
            lambda t: rearrange(t, "b (h c) x y -> b h c (x y)", h=self.heads), qkv
        )
        # softmax over feature dim for q and over pixels for k (linear attn trick)
        q = q.softmax(dim=-2)
        k = k.softmax(dim=-1)
        q = q * self.scale
        v = v / (h * w)
        # aggregate a per-head context matrix, then read it out per query
        context = torch.einsum("b h d n, b h e n -> b h d e", k, v)
        out = torch.einsum("b h d e, b h d n -> b h e n", context, q)
        out = rearrange(out, "b h c (x y) -> b (h c) x y", h=self.heads, x=h, y=w)
        return self.to_out(out)
class Attention(nn.Module):
    """Standard softmax attention over all pixel pairs; used at the bottleneck."""

    def __init__(self, dim, heads=4, dim_head=32):
        super().__init__()
        self.scale = dim_head**-0.5
        self.heads = heads
        hidden_dim = dim_head * heads
        self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
        self.to_out = nn.Conv2d(hidden_dim, dim, 1)

    def forward(self, x):
        b, c, h, w = x.shape
        qkv = self.to_qkv(x).chunk(3, dim=1)
        q, k, v = map(
            lambda t: rearrange(t, "b (h c) x y -> b h c (x y)", h=self.heads), qkv
        )
        q = q * self.scale
        # full (pixels x pixels) similarity matrix per head
        sim = einsum("b h d i, b h d j -> b h i j", q, k)
        attn = sim.softmax(dim=-1)
        out = einsum("b h i j, b h d j -> b h i d", attn, v)
        out = rearrange(out, "b h (x y) d -> b (h d) x y", x=h, y=w)
        return self.to_out(out)
# model
class Unet2D(nn.Module):
    """2D U-Net noise predictor for diffusion models.

    Encoder/decoder with four ResNet blocks (plus two linear-attention
    residuals) per resolution, a full-attention bottleneck, sinusoidal time
    conditioning, and a long skip from the stem to the final block.
    """

    def __init__(
        self,
        dim,
        init_dim=None,
        out_dim=None,
        dim_mults=(1, 2, 4, 8),
        channels=3,
        self_condition=False,
        resnet_block_groups=8,
        learned_variance=False,
        learned_sinusoidal_cond=False,
        random_fourier_features=False,
        learned_sinusoidal_dim=16,
    ):
        super().__init__()
        # determine dimensions
        self.channels = channels
        self.self_condition = self_condition
        # self-conditioning concatenates the previous prediction -> 2x channels
        input_channels = channels * (2 if self_condition else 1)
        init_dim = default(init_dim, dim)
        self.init_conv = nn.Conv2d(input_channels, init_dim, 7, padding=3)
        dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
        in_out = list(zip(dims[:-1], dims[1:]))
        block_klass = partial(ResnetBlock, groups=resnet_block_groups)
        # time embeddings
        time_dim = dim * 4
        self.random_or_learned_sinusoidal_cond = (
            learned_sinusoidal_cond or random_fourier_features
        )
        if self.random_or_learned_sinusoidal_cond:
            sinu_pos_emb = RandomOrLearnedSinusoidalPosEmb(
                learned_sinusoidal_dim, random_fourier_features
            )
            fourier_dim = learned_sinusoidal_dim + 1
        else:
            sinu_pos_emb = SinusoidalPosEmb(dim)
            fourier_dim = dim
        self.time_mlp = nn.Sequential(
            sinu_pos_emb,
            nn.Linear(fourier_dim, time_dim),
            nn.GELU(),
            nn.Linear(time_dim, time_dim),
        )
        # layers
        self.downs = nn.ModuleList([])
        self.ups = nn.ModuleList([])
        num_resolutions = len(in_out)
        for ind, (dim_in, dim_out) in enumerate(in_out):
            is_last = ind >= (num_resolutions - 1)
            # last stage keeps resolution (3x3 conv) instead of downsampling
            self.downs.append(
                nn.ModuleList(
                    [
                        block_klass(dim_in, dim_in, time_emb_dim=time_dim),
                        block_klass(dim_in, dim_in, time_emb_dim=time_dim),
                        Residual(PreNorm(dim_in, LinearAttention(dim_in))),
                        block_klass(dim_in, dim_in, time_emb_dim=time_dim),
                        block_klass(dim_in, dim_in, time_emb_dim=time_dim),
                        Residual(PreNorm(dim_in, LinearAttention(dim_in))),
                        Downsample(dim_in, dim_out)
                        if not is_last
                        else nn.Conv2d(dim_in, dim_out, 3, padding=1),
                    ]
                )
            )
        mid_dim = dims[-1]
        self.mid_block1 = block_klass(mid_dim, mid_dim, time_emb_dim=time_dim)
        self.mid_attn = Residual(PreNorm(mid_dim, Attention(mid_dim)))
        self.mid_block2 = block_klass(mid_dim, mid_dim, time_emb_dim=time_dim)
        for ind, (dim_in, dim_out) in enumerate(reversed(in_out)):
            is_last = ind == (len(in_out) - 1)
            # decoder blocks take dim_out + dim_in channels: upsampled features
            # concatenated with the matching encoder skip
            self.ups.append(
                nn.ModuleList(
                    [
                        block_klass(dim_out + dim_in, dim_out, time_emb_dim=time_dim),
                        block_klass(dim_out + dim_in, dim_out, time_emb_dim=time_dim),
                        Residual(PreNorm(dim_out, LinearAttention(dim_out))),
                        block_klass(dim_out + dim_in, dim_out, time_emb_dim=time_dim),
                        block_klass(dim_out + dim_in, dim_out, time_emb_dim=time_dim),
                        Residual(PreNorm(dim_out, LinearAttention(dim_out))),
                        Upsample(dim_out, dim_in)
                        if not is_last
                        else nn.Conv2d(dim_out, dim_in, 3, padding=1),
                    ]
                )
            )
        # learned_variance doubles the output channels (mean + variance)
        default_out_dim = channels * (1 if not learned_variance else 2)
        self.out_dim = default(out_dim, default_out_dim)
        self.final_res_block = block_klass(dim * 2, dim, time_emb_dim=time_dim)
        self.final_conv = nn.Conv2d(dim, self.out_dim, 1)

    def forward(self, x, time, x_self_cond=None):
        """Predict the model output for inputs x at the given timesteps."""
        if self.self_condition:
            x_self_cond = default(x_self_cond, lambda: torch.zeros_like(x))
            x = torch.cat((x_self_cond, x), dim=1)
        x = self.init_conv(x)
        r = x.clone()  # long skip into final_res_block
        t = self.time_mlp(time)
        h = []  # four skips pushed per resolution, popped in reverse below
        for block1, block2, attn1, block3, block4, attn2, downsample in self.downs:
            x = block1(x, t)
            h.append(x)
            x = block2(x, t)
            x = attn1(x)
            h.append(x)
            x = block3(x, t)
            h.append(x)
            x = block4(x, t)
            x = attn2(x)
            h.append(x)
            x = downsample(x)
        x = self.mid_block1(x, t)
        x = self.mid_attn(x)
        x = self.mid_block2(x, t)
        for block1, block2, attn1, block3, block4, attn2, upsample in self.ups:
            x = torch.cat((x, h.pop()), dim=1)
            x = block1(x, t)
            x = torch.cat((x, h.pop()), dim=1)
            x = block2(x, t)
            x = attn1(x)
            x = torch.cat((x, h.pop()), dim=1)
            x = block3(x, t)
            x = torch.cat((x, h.pop()), dim=1)
            x = block4(x, t)
            x = attn2(x)
            x = upsample(x)
        x = torch.cat((x, r), dim=1)
        x = self.final_res_block(x, t)
        return self.final_conv(x)
| Python |
2D | lherron2/thermomaps-ising | thermomaps-root/tm/architectures/__init__.py | .py | 0 | 0 | null | Python |
2D | lherron2/thermomaps-ising | thermomaps-root/tm/architectures/UNet2D_pbc.py | .py | 32,108 | 908 | import math
import copy
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
from multiprocessing import cpu_count
import torch
from torch import nn, einsum
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
#from torchvision import transforms as T, utils
from einops import rearrange, reduce
from einops.layers.torch import Rearrange
#from PIL import Image
from tqdm.auto import tqdm
#from ema_pytorch import EMA
#from accelerate import Accelerator
#from denoising_diffusion_pytorch.version import __version__
# constants
ModelPrediction = namedtuple('ModelPrediction', ['pred_noise', 'pred_x_start'])
# helpers functions
def exists(x):
    """Return True iff *x* carries a value (i.e. is not None)."""
    return not (x is None)
def default(val, d):
    """Return *val* when it is not None; otherwise the fallback *d*.

    A callable fallback is invoked lazily.
    """
    if val is not None:
        return val
    if callable(d):
        return d()
    return d
def identity(t, *args, **kwargs):
    """Return *t* unchanged; extra positional/keyword arguments are ignored."""
    return t
def cycle(dl):
    """Yield items from *dl* forever, restarting iteration once exhausted."""
    while True:
        yield from dl
def has_int_squareroot(num):
    """Return True when integer *num* is a perfect square.

    Uses math.isqrt for exact integer arithmetic: the previous float-based
    check ((math.sqrt(num) ** 2) == num) can misclassify large integers due
    to double-precision rounding. Negative inputs return False instead of
    raising ValueError.
    """
    return num >= 0 and math.isqrt(num) ** 2 == num
def num_to_groups(num, divisor):
    """Split *num* into a list of group sizes, each at most *divisor*."""
    full, rem = divmod(num, divisor)
    groups = [divisor] * full
    if rem:
        groups.append(rem)
    return groups
def convert_image_to_fn(img_type, image):
    """Convert *image* (PIL-like, exposing .mode/.convert) to *img_type* if needed."""
    if image.mode != img_type:
        return image.convert(img_type)
    return image
# normalization functions
def normalize_to_neg_one_to_one(img):
    """Map values from [0, 1] to [-1, 1]."""
    return 2 * img - 1
def unnormalize_to_zero_to_one(t):
    """Map values from [-1, 1] back to [0, 1]."""
    return 0.5 * (t + 1)
# small helper modules
class Residual(nn.Module):
    """Wrap *fn* with a skip connection: forward(x, ...) = fn(x, ...) + x."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, *args, **kwargs):
        out = self.fn(x, *args, **kwargs)
        return out + x
def Upsample(dim, dim_out=None):
    """2x nearest upsampling + 3x3 conv with circular (periodic-boundary) padding."""
    out_channels = dim if dim_out is None else dim_out
    return nn.Sequential(
        nn.Upsample(scale_factor=2, mode='nearest'),
        nn.Conv2d(dim, out_channels, 3, padding=1, padding_mode='circular'),
    )
def Downsample(dim, dim_out = None):
    """2x downsampling via space-to-depth (2x2 pixel unshuffle) plus a 1x1 conv.

    padding_mode='circular' has no effect on a 1x1 conv with zero padding,
    but keeps the declaration consistent with the rest of this file.
    """
    return nn.Sequential(
        Rearrange('b c (h p1) (w p2) -> b (c p1 p2) h w', p1 = 2, p2 = 2),
        nn.Conv2d(dim * 4, default(dim_out, dim), 1, padding_mode='circular')
    )
class WeightStandardizedConv2d(nn.Conv2d):
    """
    https://arxiv.org/abs/1903.10520
    weight standardization purportedly works synergistically with group normalization
    """
    def forward(self, x):
        # fp16 needs a larger epsilon for the variance rsqrt to stay stable
        eps = 1e-5 if x.dtype == torch.float32 else 1e-3
        weight = self.weight
        # standardize each output filter to zero mean / unit variance
        # (torch-native equivalent of the einops reduce over "o ... -> o 1 1 1")
        mean = weight.mean(dim=(1, 2, 3), keepdim=True)
        var = weight.var(dim=(1, 2, 3), unbiased=False, keepdim=True)
        normalized_weight = (weight - mean) * (var + eps).rsqrt()
        # BUG FIX: the previous call omitted self.padding, so self.dilation was
        # passed in the padding slot, self.groups in the dilation slot, and the
        # groups argument was dropped entirely. F.conv2d's positional order is
        # (input, weight, bias, stride, padding, dilation, groups).
        return F.conv2d(
            x,
            normalized_weight,
            self.bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
        )
class LayerNorm(nn.Module):
    """Channel-wise layer norm for NCHW tensors with a learnable per-channel gain."""

    def __init__(self, dim):
        super().__init__()
        self.g = nn.Parameter(torch.ones(1, dim, 1, 1))

    def forward(self, x):
        # fp16 needs a larger epsilon to stay numerically stable
        eps = 1e-5 if x.dtype == torch.float32 else 1e-3
        mean = torch.mean(x, dim=1, keepdim=True)
        var = torch.var(x, dim=1, unbiased=False, keepdim=True)
        return (x - mean) * torch.rsqrt(var + eps) * self.g
class PreNorm(nn.Module):
    """Apply LayerNorm to the input before running *fn* (pre-normalisation)."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = LayerNorm(dim)

    def forward(self, x):
        return self.fn(self.norm(x))
# sinusoidal positional embeds
class SinusoidalPosEmb(nn.Module):
    """Transformer-style sinusoidal embedding of a scalar (e.g. diffusion timestep)."""

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        half_dim = self.dim // 2
        # geometric frequency ladder from 1 down to ~1/10000
        scale = math.log(10000) / (half_dim - 1)
        freqs = torch.exp(-scale * torch.arange(half_dim, device=x.device))
        angles = x[:, None] * freqs[None, :]
        return torch.cat((angles.sin(), angles.cos()), dim=-1)
class RandomOrLearnedSinusoidalPosEmb(nn.Module):
    """Sinusoidal embedding with random (frozen) or learned frequencies.

    following @crowsonkb 's lead with random (learned optional) sinusoidal pos emb
    https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8
    """

    def __init__(self, dim, is_random=False):
        super().__init__()
        assert (dim % 2) == 0
        half_dim = dim // 2
        # frozen when is_random (random fourier features), trainable otherwise
        self.weights = nn.Parameter(torch.randn(half_dim), requires_grad=not is_random)

    def forward(self, x):
        x = x[:, None]
        freqs = x * self.weights[None, :] * 2 * math.pi
        # prepend the raw input, so the output width is dim + 1
        return torch.cat((x, freqs.sin(), freqs.cos()), dim=-1)
# building block modules
class Block(nn.Module):
    """Weight-standardized conv -> GroupNorm -> optional FiLM scale/shift -> SiLU."""
    def __init__(self, dim, dim_out, groups = 8):
        super().__init__()
        self.proj = WeightStandardizedConv2d(dim, dim_out, 3, padding = 1)
        self.norm = nn.GroupNorm(groups, dim_out)
        self.act = nn.SiLU()
    def forward(self, x, scale_shift = None):
        x = self.proj(x)
        x = self.norm(x)
        if exists(scale_shift):
            # FiLM-style conditioning (scale, shift) from the time embedding
            scale, shift = scale_shift
            x = x * (scale + 1) + shift
        x = self.act(x)
        return x
class ResnetBlock(nn.Module):
    """Two Blocks with time-embedding FiLM conditioning and a residual 1x1 projection."""
    def __init__(self, dim, dim_out, *, time_emb_dim = None, groups = 8):
        super().__init__()
        # projects the time embedding to (scale, shift) pairs for block1
        self.mlp = nn.Sequential(
            nn.SiLU(),
            nn.Linear(time_emb_dim, dim_out * 2)
        ) if exists(time_emb_dim) else None
        self.block1 = Block(dim, dim_out, groups = groups)
        self.block2 = Block(dim_out, dim_out, groups = groups)
        # 1x1 conv matches channel counts for the residual sum when needed
        self.res_conv = nn.Conv2d(dim, dim_out, 1, padding_mode='circular') if dim != dim_out else nn.Identity()
    def forward(self, x, time_emb = None):
        scale_shift = None
        if exists(self.mlp) and exists(time_emb):
            time_emb = self.mlp(time_emb)
            time_emb = rearrange(time_emb, 'b c -> b c 1 1')
            # split channel dim into the (scale, shift) halves
            scale_shift = time_emb.chunk(2, dim = 1)
        h = self.block1(x, scale_shift = scale_shift)
        h = self.block2(h)
        return h + self.res_conv(x)
class LinearAttention(nn.Module):
    """Linear-complexity attention (softmax over different axes for q and k).

    Cheaper than full attention over all pixel pairs, so it is used at the
    higher resolutions of the U-Net.
    """
    def __init__(self, dim, heads = 4, dim_head = 32):
        super().__init__()
        self.scale = dim_head ** -0.5
        self.heads = heads
        hidden_dim = dim_head * heads
        self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False, padding_mode='circular')
        self.to_out = nn.Sequential(
            nn.Conv2d(hidden_dim, dim, 1, padding_mode='circular'),
            LayerNorm(dim)
        )
    def forward(self, x):
        b, c, h, w = x.shape
        qkv = self.to_qkv(x).chunk(3, dim = 1)
        q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)
        # softmax over feature dim for q and over pixels for k (linear attn trick)
        q = q.softmax(dim = -2)
        k = k.softmax(dim = -1)
        q = q * self.scale
        v = v / (h * w)
        # aggregate a per-head context matrix, then read it out per query
        context = torch.einsum('b h d n, b h e n -> b h d e', k, v)
        out = torch.einsum('b h d e, b h d n -> b h e n', context, q)
        out = rearrange(out, 'b h c (x y) -> b (h c) x y', h = self.heads, x = h, y = w)
        return self.to_out(out)
class Attention(nn.Module):
    """Standard softmax attention over all pixel pairs; used at the bottleneck."""
    def __init__(self, dim, heads = 4, dim_head = 32):
        super().__init__()
        self.scale = dim_head ** -0.5
        self.heads = heads
        hidden_dim = dim_head * heads
        self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False, padding_mode='circular')
        self.to_out = nn.Conv2d(hidden_dim, dim, 1, padding_mode='circular')
    def forward(self, x):
        b, c, h, w = x.shape
        qkv = self.to_qkv(x).chunk(3, dim = 1)
        q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> b h c (x y)', h = self.heads), qkv)
        q = q * self.scale
        # full (pixels x pixels) similarity matrix per head
        sim = einsum('b h d i, b h d j -> b h i j', q, k)
        attn = sim.softmax(dim = -1)
        out = einsum('b h i j, b h d j -> b h i d', attn, v)
        out = rearrange(out, 'b h (x y) d -> b (h d) x y', x = h, y = w)
        return self.to_out(out)
# model
class Unet(nn.Module):
    """2D U-Net noise predictor with circular (periodic-boundary) padding.

    Same overall architecture as the standard denoising-diffusion U-Net:
    two ResNet blocks plus a linear-attention residual per resolution, a
    full-attention bottleneck, sinusoidal time conditioning, and a long
    skip from the stem to the final block. Every spatial conv declares
    padding_mode='circular' to respect periodic boundary conditions.
    """
    def __init__(
        self,
        dim,
        init_dim = None,
        out_dim = None,
        dim_mults=(1, 2, 4, 8),
        channels = 3,
        self_condition = False,
        resnet_block_groups = 8,
        learned_variance = False,
        learned_sinusoidal_cond = False,
        random_fourier_features = False,
        learned_sinusoidal_dim = 16
    ):
        super().__init__()
        # determine dimensions
        self.channels = channels
        self.self_condition = self_condition
        # self-conditioning concatenates the previous prediction -> 2x channels
        input_channels = channels * (2 if self_condition else 1)
        init_dim = default(init_dim, dim)
        self.init_conv = nn.Conv2d(input_channels, init_dim, 7, padding = 3, padding_mode='circular')
        dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
        in_out = list(zip(dims[:-1], dims[1:]))
        block_klass = partial(ResnetBlock, groups = resnet_block_groups)
        # time embeddings
        time_dim = dim * 4
        self.random_or_learned_sinusoidal_cond = learned_sinusoidal_cond or random_fourier_features
        if self.random_or_learned_sinusoidal_cond:
            sinu_pos_emb = RandomOrLearnedSinusoidalPosEmb(learned_sinusoidal_dim, random_fourier_features)
            fourier_dim = learned_sinusoidal_dim + 1
        else:
            sinu_pos_emb = SinusoidalPosEmb(dim)
            fourier_dim = dim
        self.time_mlp = nn.Sequential(
            sinu_pos_emb,
            nn.Linear(fourier_dim, time_dim),
            nn.GELU(),
            nn.Linear(time_dim, time_dim)
        )
        # layers
        self.downs = nn.ModuleList([])
        self.ups = nn.ModuleList([])
        num_resolutions = len(in_out)
        for ind, (dim_in, dim_out) in enumerate(in_out):
            is_last = ind >= (num_resolutions - 1)
            # last stage keeps resolution (3x3 conv) instead of downsampling
            self.downs.append(nn.ModuleList([
                block_klass(dim_in, dim_in, time_emb_dim = time_dim),
                block_klass(dim_in, dim_in, time_emb_dim = time_dim),
                Residual(PreNorm(dim_in, LinearAttention(dim_in))),
                Downsample(dim_in, dim_out) if not is_last else nn.Conv2d(dim_in, dim_out, 3, padding = 1, padding_mode='circular')
            ]))
        mid_dim = dims[-1]
        self.mid_block1 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)
        self.mid_attn = Residual(PreNorm(mid_dim, Attention(mid_dim)))
        self.mid_block2 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)
        for ind, (dim_in, dim_out) in enumerate(reversed(in_out)):
            is_last = ind == (len(in_out) - 1)
            # decoder blocks take dim_out + dim_in channels: upsampled features
            # concatenated with the matching encoder skip
            self.ups.append(nn.ModuleList([
                block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),
                block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),
                Residual(PreNorm(dim_out, LinearAttention(dim_out))),
                Upsample(dim_out, dim_in) if not is_last else nn.Conv2d(dim_out, dim_in, 3, padding = 1, padding_mode='circular')
            ]))
        # learned_variance doubles the output channels (mean + variance)
        default_out_dim = channels * (1 if not learned_variance else 2)
        self.out_dim = default(out_dim, default_out_dim)
        self.final_res_block = block_klass(dim * 2, dim, time_emb_dim = time_dim)
        self.final_conv = nn.Conv2d(dim, self.out_dim, 1, padding_mode='circular')
    def forward(self, x, time, x_self_cond = None):
        """Predict the model output for inputs x at the given timesteps."""
        if self.self_condition:
            x_self_cond = default(x_self_cond, lambda: torch.zeros_like(x))
            x = torch.cat((x_self_cond, x), dim = 1)
        x = self.init_conv(x)
        r = x.clone()  # long skip into final_res_block
        t = self.time_mlp(time)
        h = []  # two skips pushed per resolution, popped in reverse below
        for block1, block2, attn, downsample in self.downs:
            x = block1(x, t)
            h.append(x)
            x = block2(x, t)
            x = attn(x)
            h.append(x)
            # print ("shape before downsampling",x.shape)
            x = downsample(x)
        x = self.mid_block1(x, t)
        x = self.mid_attn(x)
        x = self.mid_block2(x, t)
        for block1, block2, attn, upsample in self.ups:
            x = torch.cat((x, h.pop()), dim = 1)
            x = block1(x, t)
            x = torch.cat((x, h.pop()), dim = 1)
            x = block2(x, t)
            x = attn(x)
            x = upsample(x)
        x = torch.cat((x, r), dim = 1)
        x = self.final_res_block(x, t)
        return self.final_conv(x)
# gaussian diffusion trainer class
def extract(a, t, x_shape):
    """Gather per-timestep coefficients a[t] and reshape to broadcast over x_shape."""
    batch = t.shape[0]
    gathered = a.gather(-1, t)
    # (b,) -> (b, 1, 1, ...) with one trailing singleton per non-batch dim
    return gathered.reshape(batch, *((1,) * (len(x_shape) - 1)))
def linear_beta_schedule(timesteps):
    """Linear beta schedule, rescaled so endpoints match the 1000-step reference."""
    scale = 1000 / timesteps
    start = scale * 0.0001
    end = scale * 0.02
    return torch.linspace(start, end, timesteps, dtype=torch.float64)
def cosine_beta_schedule(timesteps, s=0.008):
    """
    cosine schedule
    as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
    """
    grid = torch.linspace(0, timesteps, timesteps + 1, dtype=torch.float64)
    f = torch.cos(((grid / timesteps) + s) / (1 + s) * math.pi * 0.5) ** 2
    alphas_cumprod = f / f[0]
    betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
    return torch.clip(betas, 0, 0.999)
class GaussianDiffusion(nn.Module):
    """DDPM/DDIM wrapper around a noise-prediction model.

    Precomputes the beta/alpha schedule quantities as float32 buffers and
    exposes training (forward -> p_losses) and sampling (sample, which
    dispatches to p_sample_loop or ddim_sample) entry points.
    """
    def __init__(
        self,
        model,
        *,
        image_size,
        timesteps = 1000,
        sampling_timesteps = None,
        loss_type = 'l1',
        objective = 'pred_noise',
        beta_schedule = 'cosine',
        p2_loss_weight_gamma = 0., # p2 loss weight, from https://arxiv.org/abs/2204.00227 - 0 is equivalent to weight of 1 across time - 1. is recommended
        p2_loss_weight_k = 1,
        ddim_sampling_eta = 1.
    ):
        super().__init__()
        assert not (type(self) == GaussianDiffusion and model.channels != model.out_dim)
        assert not model.random_or_learned_sinusoidal_cond
        self.model = model
        self.channels = self.model.channels
        self.self_condition = self.model.self_condition
        self.image_size = image_size
        self.objective = objective
        assert objective in {'pred_noise', 'pred_x0', 'pred_v'}, 'objective must be either pred_noise (predict noise) or pred_x0 (predict image start) or pred_v (predict v [v-parameterization as defined in appendix D of progressive distillation paper, used in imagen-video successfully])'
        if beta_schedule == 'linear':
            betas = linear_beta_schedule(timesteps)
        elif beta_schedule == 'cosine':
            betas = cosine_beta_schedule(timesteps)
        else:
            raise ValueError(f'unknown beta schedule {beta_schedule}')
        alphas = 1. - betas
        alphas_cumprod = torch.cumprod(alphas, dim=0)
        alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value = 1.)
        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        self.loss_type = loss_type
        # sampling related parameters
        self.sampling_timesteps = default(sampling_timesteps, timesteps) # default num sampling timesteps to number of timesteps at training
        assert self.sampling_timesteps <= timesteps
        # fewer sampling steps than training steps implies DDIM sampling
        self.is_ddim_sampling = self.sampling_timesteps < timesteps
        self.ddim_sampling_eta = ddim_sampling_eta
        # helper function to register buffer from float64 to float32
        register_buffer = lambda name, val: self.register_buffer(name, val.to(torch.float32))
        register_buffer('betas', betas)
        register_buffer('alphas_cumprod', alphas_cumprod)
        register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
        # calculations for diffusion q(x_t | x_{t-1}) and others
        register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
        register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
        register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
        register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
        register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))
        # calculations for posterior q(x_{t-1} | x_t, x_0)
        posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
        register_buffer('posterior_variance', posterior_variance)
        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
        register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min =1e-20)))
        register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
        register_buffer('posterior_mean_coef2', (1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod))
        # calculate p2 reweighting
        register_buffer('p2_loss_weight', (p2_loss_weight_k + alphas_cumprod / (1 - alphas_cumprod)) ** -p2_loss_weight_gamma)
    def predict_start_from_noise(self, x_t, t, noise):
        """Recover x_0 from x_t and predicted noise (inverse of q_sample)."""
        return (
            extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
            extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
        )
    def predict_noise_from_start(self, x_t, t, x0):
        """Recover the noise from x_t and a predicted x_0."""
        return (
            (extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - x0) / \
            extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
        )
    def predict_v(self, x_start, t, noise):
        """Compute the v-parameterization target from x_0 and noise."""
        return (
            extract(self.sqrt_alphas_cumprod, t, x_start.shape) * noise -
            extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * x_start
        )
    def predict_start_from_v(self, x_t, t, v):
        """Recover x_0 from x_t and a predicted v."""
        return (
            extract(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t -
            extract(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
        )
    def q_posterior(self, x_start, x_t, t):
        """Mean/variance/log-variance of the posterior q(x_{t-1} | x_t, x_0)."""
        posterior_mean = (
            extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
            extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
        posterior_variance = extract(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
        return posterior_mean, posterior_variance, posterior_log_variance_clipped
    def model_predictions(self, x, t, x_self_cond = None, clip_x_start = False):
        """Run the model and convert its output to (pred_noise, pred_x_start)."""
        model_output = self.model(x, t, x_self_cond)
        maybe_clip = partial(torch.clamp, min = -1., max = 1.) if clip_x_start else identity
        if self.objective == 'pred_noise':
            pred_noise = model_output
            x_start = self.predict_start_from_noise(x, t, pred_noise)
            x_start = maybe_clip(x_start)
        elif self.objective == 'pred_x0':
            x_start = model_output
            x_start = maybe_clip(x_start)
            pred_noise = self.predict_noise_from_start(x, t, x_start)
        elif self.objective == 'pred_v':
            v = model_output
            x_start = self.predict_start_from_v(x, t, v)
            x_start = maybe_clip(x_start)
            pred_noise = self.predict_noise_from_start(x, t, x_start)
        return ModelPrediction(pred_noise, x_start)
    def p_mean_variance(self, x, t, x_self_cond = None, clip_denoised = True):
        """Posterior mean/variance for the reverse step, using the model's x_0."""
        preds = self.model_predictions(x, t, x_self_cond)
        x_start = preds.pred_x_start
        if clip_denoised:
            x_start.clamp_(-1., 1.)
        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start = x_start, x_t = x, t = t)
        return model_mean, posterior_variance, posterior_log_variance, x_start
    @torch.no_grad()
    def p_sample(self, x, t: int, x_self_cond = None, clip_denoised = True):
        """One ancestral (DDPM) reverse step; returns (pred_img, x_start)."""
        b, *_, device = *x.shape, x.device
        batched_times = torch.full((x.shape[0],), t, device = x.device, dtype = torch.long)
        model_mean, _, model_log_variance, x_start = self.p_mean_variance(x = x, t = batched_times, x_self_cond = x_self_cond, clip_denoised = clip_denoised)
        noise = torch.randn_like(x) if t > 0 else 0. # no noise if t == 0
        pred_img = model_mean + (0.5 * model_log_variance).exp() * noise
        return pred_img, x_start
    @torch.no_grad()
    def p_sample_loop(self, shape):
        """Full DDPM sampling loop from pure noise; returns images in [0, 1]."""
        batch, device = shape[0], self.betas.device
        img = torch.randn(shape, device=device)
        x_start = None
        for t in tqdm(reversed(range(0, self.num_timesteps)), desc = 'sampling loop time step', total = self.num_timesteps):
            self_cond = x_start if self.self_condition else None
            img, x_start = self.p_sample(img, t, self_cond)
        img = unnormalize_to_zero_to_one(img)
        return img
    @torch.no_grad()
    def ddim_sample(self, shape, clip_denoised = True):
        """DDIM sampling over a strided subset of timesteps; returns images in [0, 1]."""
        batch, device, total_timesteps, sampling_timesteps, eta, objective = shape[0], self.betas.device, self.num_timesteps, self.sampling_timesteps, self.ddim_sampling_eta, self.objective
        times = torch.linspace(-1, total_timesteps - 1, steps=sampling_timesteps + 1) # [-1, 0, 1, 2, ..., T-1] when sampling_timesteps == total_timesteps
        times = list(reversed(times.int().tolist()))
        time_pairs = list(zip(times[:-1], times[1:])) # [(T-1, T-2), (T-2, T-3), ..., (1, 0), (0, -1)]
        img = torch.randn(shape, device = device)
        x_start = None
        for time, time_next in tqdm(time_pairs, desc = 'sampling loop time step'):
            time_cond = torch.full((batch,), time, device=device, dtype=torch.long)
            self_cond = x_start if self.self_condition else None
            pred_noise, x_start, *_ = self.model_predictions(img, time_cond, self_cond, clip_x_start = clip_denoised)
            if time_next < 0:
                img = x_start
                continue
            alpha = self.alphas_cumprod[time]
            alpha_next = self.alphas_cumprod[time_next]
            # eta scales the stochastic part; eta=0 gives deterministic DDIM
            sigma = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt()
            c = (1 - alpha_next - sigma ** 2).sqrt()
            noise = torch.randn_like(img)
            img = x_start * alpha_next.sqrt() + \
                  c * pred_noise + \
                  sigma * noise
        img = unnormalize_to_zero_to_one(img)
        return img
    @torch.no_grad()
    def sample(self, batch_size = 16):
        """Generate batch_size images, dispatching to DDPM or DDIM sampling."""
        image_size, channels = self.image_size, self.channels
        sample_fn = self.p_sample_loop if not self.is_ddim_sampling else self.ddim_sample
        return sample_fn((batch_size, channels, image_size, image_size))
    @torch.no_grad()
    def interpolate(self, x1, x2, t = None, lam = 0.5):
        """Interpolate between two images in the noised latent space at step t."""
        b, *_, device = *x1.shape, x1.device
        t = default(t, self.num_timesteps - 1)
        assert x1.shape == x2.shape
        t_batched = torch.stack([torch.tensor(t, device = device)] * b)
        xt1, xt2 = map(lambda x: self.q_sample(x, t = t_batched), (x1, x2))
        img = (1 - lam) * xt1 + lam * xt2
        for i in tqdm(reversed(range(0, t)), desc = 'interpolation sample time step', total = t):
            # NOTE(review): p_sample returns (pred_img, x_start), so this rebinds
            # img to a tuple on the first iteration and will fail on the next —
            # likely should be `img, _ = self.p_sample(...)`. Confirm before use.
            img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long))
        return img
    def q_sample(self, x_start, t, noise=None):
        """Forward diffusion: sample x_t ~ q(x_t | x_0) at timesteps t."""
        noise = default(noise, lambda: torch.randn_like(x_start))
        return (
            extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
            extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
        )
    @property
    def loss_fn(self):
        """Elementwise loss selected by self.loss_type ('l1' or 'l2')."""
        if self.loss_type == 'l1':
            return F.l1_loss
        elif self.loss_type == 'l2':
            return F.mse_loss
        else:
            raise ValueError(f'invalid loss type {self.loss_type}')
    def p_losses(self, x_start, t, noise = None):
        """Training loss for a batch of clean images x_start at timesteps t."""
        b, c, h, w = x_start.shape
        noise = default(noise, lambda: torch.randn_like(x_start))
        # noise sample
        x = self.q_sample(x_start = x_start, t = t, noise = noise)
        # if doing self-conditioning, 50% of the time, predict x_start from current set of times
        # and condition with unet with that
        # this technique will slow down training by 25%, but seems to lower FID significantly
        x_self_cond = None
        if self.self_condition and random() < 0.5:
            with torch.no_grad():
                x_self_cond = self.model_predictions(x, t).pred_x_start
                x_self_cond.detach_()
        # predict and take gradient step
        model_out = self.model(x, t, x_self_cond)
        if self.objective == 'pred_noise':
            target = noise
        elif self.objective == 'pred_x0':
            target = x_start
        elif self.objective == 'pred_v':
            v = self.predict_v(x_start, t, noise)
            target = v
        else:
            raise ValueError(f'unknown objective {self.objective}')
        loss = self.loss_fn(model_out, target, reduction = 'none')
        loss = reduce(loss, 'b ... -> b (...)', 'mean')
        loss = loss * extract(self.p2_loss_weight, t, loss.shape)
        return loss.mean()
    def forward(self, img, *args, **kwargs):
        """Sample random timesteps, normalise img to [-1, 1], and return p_losses."""
        b, c, h, w, device, img_size, = *img.shape, img.device, self.image_size
        assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
        t = torch.randint(0, self.num_timesteps, (b,), device=device).long()
        img = normalize_to_neg_one_to_one(img)
        return self.p_losses(img, t, *args, **kwargs)
# dataset classes
class Dataset(Dataset):
    """Image-folder dataset yielding resized/cropped image tensors.

    NOTE(review): this shadows the imported torch Dataset name, and depends
    on `T` (torchvision.transforms) and `Image` (PIL) whose imports are
    commented out at the top of this file — instantiating/indexing it will
    raise NameError until those imports are restored. Confirm before use.
    """
    def __init__(
        self,
        folder,
        image_size,
        exts = ['jpg', 'jpeg', 'png', 'tiff'],
        augment_horizontal_flip = False,
        convert_image_to = None
    ):
        super().__init__()
        self.folder = folder
        self.image_size = image_size
        # recursive glob over all accepted extensions
        self.paths = [p for ext in exts for p in Path(f'{folder}').glob(f'**/*.{ext}')]
        maybe_convert_fn = partial(convert_image_to_fn, convert_image_to) if exists(convert_image_to) else nn.Identity()
        self.transform = T.Compose([
            T.Lambda(maybe_convert_fn),
            T.Resize(image_size),
            T.RandomHorizontalFlip() if augment_horizontal_flip else nn.Identity(),
            T.CenterCrop(image_size),
            T.ToTensor()
        ])
    def __len__(self):
        return len(self.paths)
    def __getitem__(self, index):
        path = self.paths[index]
        img = Image.open(path)
        return self.transform(img)
# trainer class
class Trainer(object):
    """Training loop for a GaussianDiffusion model using HF Accelerate.

    Handles dataset/dataloader construction, optimizer setup, EMA weight
    tracking (main process only), periodic sampling to image grids, and
    checkpoint save/load.
    """

    def __init__(
        self,
        diffusion_model,
        folder,
        *,
        train_batch_size = 16,
        gradient_accumulate_every = 1,
        augment_horizontal_flip = True,
        train_lr = 1e-4,
        train_num_steps = 100000,
        ema_update_every = 10,
        ema_decay = 0.995,
        adam_betas = (0.9, 0.99),
        save_and_sample_every = 1000,
        num_samples = 25,
        results_folder = './results',
        amp = False,
        fp16 = False,
        split_batches = True,
        convert_image_to = None
    ):
        """
        Args:
            diffusion_model: GaussianDiffusion instance to train.
            folder: root directory of training images.
            train_batch_size: per optimizer micro-batch size.
            gradient_accumulate_every: micro-batches accumulated per optimizer step.
            save_and_sample_every: steps between EMA sampling + checkpointing.
            num_samples: number of images sampled at each milestone (must be a perfect square).
        """
        super().__init__()

        # Accelerator handles device placement and (optional) fp16 mixed precision.
        self.accelerator = Accelerator(
            split_batches = split_batches,
            mixed_precision = 'fp16' if fp16 else 'no'
        )

        self.accelerator.native_amp = amp

        self.model = diffusion_model

        # num_samples must form a square grid for utils.save_image below.
        assert has_int_squareroot(num_samples), 'number of samples must have an integer square root'
        self.num_samples = num_samples
        self.save_and_sample_every = save_and_sample_every

        self.batch_size = train_batch_size
        self.gradient_accumulate_every = gradient_accumulate_every

        self.train_num_steps = train_num_steps
        self.image_size = diffusion_model.image_size

        # dataset and dataloader

        self.ds = Dataset(folder, self.image_size, augment_horizontal_flip = augment_horizontal_flip, convert_image_to = convert_image_to)
        dl = DataLoader(self.ds, batch_size = train_batch_size, shuffle = True, pin_memory = True, num_workers = cpu_count())

        dl = self.accelerator.prepare(dl)
        self.dl = cycle(dl)  # infinite iterator over the dataloader

        # optimizer

        self.opt = Adam(diffusion_model.parameters(), lr = train_lr, betas = adam_betas)

        # for logging results in a folder periodically

        if self.accelerator.is_main_process:
            # EMA copy and results dir exist only on the main process.
            self.ema = EMA(diffusion_model, beta = ema_decay, update_every = ema_update_every)

            self.results_folder = Path(results_folder)
            self.results_folder.mkdir(exist_ok = True)

        # step counter state

        self.step = 0

        # prepare model, dataloader, optimizer with accelerator

        self.model, self.opt = self.accelerator.prepare(self.model, self.opt)

    def save(self, milestone):
        """Checkpoint model/EMA/optimizer/scaler state to results_folder (local-main process only)."""
        if not self.accelerator.is_local_main_process:
            return

        data = {
            'step': self.step,
            'model': self.accelerator.get_state_dict(self.model),
            'opt': self.opt.state_dict(),
            'ema': self.ema.state_dict(),
            'scaler': self.accelerator.scaler.state_dict() if exists(self.accelerator.scaler) else None,
            'version': __version__
        }

        torch.save(data, str(self.results_folder / f'model-{milestone}.pt'))

    def load(self, milestone):
        """Restore a checkpoint previously written by save().

        NOTE(review): self.ema (and self.results_folder) are created only on
        the main process in __init__, so calling load() from a non-main
        process would raise AttributeError — confirm intended usage.
        """
        accelerator = self.accelerator
        device = accelerator.device

        data = torch.load(str(self.results_folder / f'model-{milestone}.pt'), map_location=device)

        model = self.accelerator.unwrap_model(self.model)
        model.load_state_dict(data['model'])

        self.step = data['step']
        self.opt.load_state_dict(data['opt'])
        self.ema.load_state_dict(data['ema'])

        if 'version' in data:
            print(f"loading from version {data['version']}")

        if exists(self.accelerator.scaler) and exists(data['scaler']):
            self.accelerator.scaler.load_state_dict(data['scaler'])

    def train(self):
        """Main optimization loop.

        Accumulates gradients over gradient_accumulate_every micro-batches,
        clips gradients, steps the optimizer, updates the EMA, and on the
        main process periodically samples from the EMA model and checkpoints.
        """
        accelerator = self.accelerator
        device = accelerator.device

        with tqdm(initial = self.step, total = self.train_num_steps, disable = not accelerator.is_main_process) as pbar:

            while self.step < self.train_num_steps:

                total_loss = 0.

                for _ in range(self.gradient_accumulate_every):
                    data = next(self.dl).to(device)

                    with self.accelerator.autocast():
                        loss = self.model(data)
                        # scale so the accumulated gradients match one full-batch step
                        loss = loss / self.gradient_accumulate_every
                        total_loss += loss.item()

                    self.accelerator.backward(loss)

                accelerator.clip_grad_norm_(self.model.parameters(), 1.0)
                pbar.set_description(f'loss: {total_loss:.4f}')

                accelerator.wait_for_everyone()

                self.opt.step()
                self.opt.zero_grad()

                accelerator.wait_for_everyone()

                self.step += 1
                if accelerator.is_main_process:
                    self.ema.to(device)
                    self.ema.update()

                    if self.step != 0 and self.step % self.save_and_sample_every == 0:
                        self.ema.ema_model.eval()

                        with torch.no_grad():
                            milestone = self.step // self.save_and_sample_every
                            # split num_samples into <= batch_size chunks
                            batches = num_to_groups(self.num_samples, self.batch_size)
                            all_images_list = list(map(lambda n: self.ema.ema_model.sample(batch_size=n), batches))

                        all_images = torch.cat(all_images_list, dim = 0)
                        utils.save_image(all_images, str(self.results_folder / f'sample-{milestone}.png'), nrow = int(math.sqrt(self.num_samples)))
                        self.save(milestone)

                pbar.update(1)

        accelerator.print('training complete')
# ── begin file: thermomaps-root/tm/architectures/unet_1d.py (Python) ──
import math
from functools import partial
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, reduce
# helpers functions
def exists(x):
    """Return True when *x* is not None."""
    return not (x is None)
def default(val, d):
    """Return *val* unless it is None; otherwise return *d* (called first if callable)."""
    if val is not None:
        return val
    if callable(d):
        return d()
    return d
def identity(t, *args, **kwargs):
    """Return *t* unchanged, ignoring any extra positional/keyword arguments."""
    del args, kwargs  # accepted for interface compatibility, intentionally unused
    return t
def cycle(dl):
    """Yield items from *dl* forever, restarting iteration whenever it is exhausted."""
    while True:
        yield from dl
def has_int_squareroot(num):
    """Return True when *num* is a perfect square.

    For integers this uses exact integer arithmetic (``math.isqrt``); the
    previous float-based ``math.sqrt(num) ** 2 == num`` check loses precision
    for large integers (e.g. 10**30). Non-integer inputs keep the original
    float comparison for backward compatibility.
    """
    if isinstance(num, int):
        return num >= 0 and math.isqrt(num) ** 2 == num
    return (math.sqrt(num) ** 2) == num
def num_to_groups(num, divisor):
    """Split *num* into a list of full chunks of size *divisor* plus a smaller remainder chunk."""
    full, rem = divmod(num, divisor)
    groups = [divisor] * full
    if rem:
        groups.append(rem)
    return groups
def convert_image_to_fn(img_type, image):
    """Convert *image* to mode *img_type*, or return it unchanged if it already matches."""
    return image if image.mode == img_type else image.convert(img_type)
# normalization functions
def normalize_to_neg_one_to_one(img):
    """Map values from [0, 1] to [-1, 1]."""
    return 2 * img - 1
def unnormalize_to_zero_to_one(t):
    """Map values from [-1, 1] back to [0, 1]."""
    return (t + 1) / 2
# small helper modules
class Residual(nn.Module):
    """Wrap *fn* with a skip connection: the module computes ``fn(x, ...) + x``."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, *args, **kwargs):
        out = self.fn(x, *args, **kwargs)
        return out + x
def Upsample(dim, dim_out=None):
    """2x nearest-neighbour upsampling followed by a width-3 Conv1d (dim -> dim_out or dim)."""
    out_channels = dim if dim_out is None else dim_out
    return nn.Sequential(
        nn.Upsample(scale_factor=2, mode="nearest"),
        nn.Conv1d(dim, out_channels, 3, padding=1),
    )
def Downsample(dim, dim_out=None):
    """Halve the temporal resolution with a kernel-4, stride-2, padding-1 Conv1d."""
    out_channels = dim if dim_out is None else dim_out
    return nn.Conv1d(dim, out_channels, 4, 2, 1)
class WeightStandardizedConv2d(nn.Conv1d):
    """Conv1d with weight standardization (https://arxiv.org/abs/1903.10520),
    which purportedly works synergistically with group normalization.

    NOTE(review): despite the ``2d`` in the name this is a 1-D convolution;
    the name is kept for interface compatibility with its callers.
    """

    def forward(self, x):
        # Larger epsilon under reduced precision keeps rsqrt stable.
        eps = 1e-5 if x.dtype == torch.float32 else 1e-3

        w = self.weight
        # Standardize each output filter to zero mean / unit variance.
        mu = w.mean(dim=(1, 2), keepdim=True)
        var = w.var(dim=(1, 2), unbiased=False, keepdim=True)
        w_hat = (w - mu) * (var + eps).rsqrt()

        return F.conv1d(
            x,
            w_hat,
            self.bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
        )
class LayerNorm(nn.Module):
    """Channel-wise layer norm for (b, c, n) tensors with a learnable per-channel gain."""

    def __init__(self, dim):
        super().__init__()
        self.g = nn.Parameter(torch.ones(1, dim, 1))

    def forward(self, x):
        # Larger epsilon under reduced precision keeps rsqrt stable.
        eps = 1e-5 if x.dtype == torch.float32 else 1e-3
        mean = x.mean(dim=1, keepdim=True)
        var = x.var(dim=1, unbiased=False, keepdim=True)
        return (x - mean) * (var + eps).rsqrt() * self.g
class PreNorm(nn.Module):
    """Apply LayerNorm to the input before delegating to *fn*."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = LayerNorm(dim)

    def forward(self, x):
        return self.fn(self.norm(x))
# sinusoidal positional embeds
class SinusoidalPosEmb(nn.Module):
    """Transformer-style sinusoidal embedding of a scalar (timestep) input."""

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        half = self.dim // 2
        # Geometric frequency ladder from 1 down to 1/10000.
        scale = math.log(10000) / (half - 1)
        freqs = torch.exp(torch.arange(half, device=x.device) * -scale)
        angles = x[:, None] * freqs[None, :]
        return torch.cat((angles.sin(), angles.cos()), dim=-1)
class RandomOrLearnedSinusoidalPosEmb(nn.Module):
    """Random (or learned, when ``is_random`` is False) sinusoidal features.

    Following @crowsonkb's lead:
    https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8
    Output is (b, dim + 1): the raw input concatenated with sin/cos features.
    """

    def __init__(self, dim, is_random=False):
        super().__init__()
        assert (dim % 2) == 0
        self.weights = nn.Parameter(torch.randn(dim // 2), requires_grad=not is_random)

    def forward(self, x):
        col = x[:, None]  # b -> b 1
        freqs = col * self.weights[None, :] * 2 * math.pi
        return torch.cat((col, freqs.sin(), freqs.cos()), dim=-1)
# building block modules
class Block(nn.Module):
    """Weight-standardized conv -> GroupNorm -> optional FiLM scale/shift -> SiLU."""

    def __init__(self, dim, dim_out, groups=8):
        super().__init__()
        self.proj = WeightStandardizedConv2d(dim, dim_out, 3, padding=1)
        self.norm = nn.GroupNorm(groups, dim_out)
        self.act = nn.SiLU()

    def forward(self, x, scale_shift=None):
        h = self.norm(self.proj(x))
        if scale_shift is not None:
            scale, shift = scale_shift
            # FiLM conditioning; (scale + 1) keeps the zero-init case neutral.
            h = h * (scale + 1) + shift
        return self.act(h)
class ResnetBlock(nn.Module):
    """Two conv Blocks with optional FiLM time conditioning and a 1x1 residual projection."""

    def __init__(self, dim, dim_out, *, time_emb_dim=None, groups=8):
        super().__init__()
        self.mlp = None
        if time_emb_dim is not None:
            # Projects the time embedding to per-channel (scale, shift).
            self.mlp = nn.Sequential(nn.SiLU(), nn.Linear(time_emb_dim, dim_out * 2))

        self.block1 = Block(dim, dim_out, groups=groups)
        self.block2 = Block(dim_out, dim_out, groups=groups)
        self.res_conv = nn.Conv1d(dim, dim_out, 1) if dim != dim_out else nn.Identity()

    def forward(self, x, time_emb=None):
        scale_shift = None
        if self.mlp is not None and time_emb is not None:
            emb = self.mlp(time_emb)[:, :, None]  # b c -> b c 1
            scale_shift = emb.chunk(2, dim=1)

        h = self.block2(self.block1(x, scale_shift=scale_shift))
        return h + self.res_conv(x)
class LinearAttention(nn.Module):
    """Linear-complexity attention: softmax over queries and keys separately,
    then two einsums instead of an explicit n x n attention matrix."""

    def __init__(self, dim, heads=4, dim_head=32):
        super().__init__()
        self.scale = dim_head**-0.5
        self.heads = heads
        hidden_dim = dim_head * heads
        self.to_qkv = nn.Conv1d(dim, hidden_dim * 3, 1, bias=False)
        self.to_out = nn.Sequential(nn.Conv1d(hidden_dim, dim, 1), LayerNorm(dim))

    def forward(self, x):
        b, c, n = x.shape
        # b (h c) n -> b h c n for each of q, k, v
        q, k, v = (
            t.view(b, self.heads, -1, n) for t in self.to_qkv(x).chunk(3, dim=1)
        )

        q = q.softmax(dim=-2) * self.scale
        k = k.softmax(dim=-1)

        context = torch.einsum("b h d n, b h e n -> b h d e", k, v)
        out = torch.einsum("b h d e, b h d n -> b h e n", context, q)
        return self.to_out(out.reshape(b, -1, n))  # b h c n -> b (h c) n
class Attention(nn.Module):
    """Full softmax self-attention over the sequence dimension."""

    def __init__(self, dim, heads=4, dim_head=32):
        super().__init__()
        self.scale = dim_head**-0.5
        self.heads = heads
        hidden_dim = dim_head * heads
        self.to_qkv = nn.Conv1d(dim, hidden_dim * 3, 1, bias=False)
        self.to_out = nn.Conv1d(hidden_dim, dim, 1)

    def forward(self, x):
        b, c, n = x.shape
        # b (h c) n -> b h c n for each of q, k, v
        q, k, v = (
            t.view(b, self.heads, -1, n) for t in self.to_qkv(x).chunk(3, dim=1)
        )

        sim = torch.einsum("b h d i, b h d j -> b h i j", q * self.scale, k)
        attn = sim.softmax(dim=-1)
        out = torch.einsum("b h i j, b h d j -> b h i d", attn, v)

        # b h n d -> b (h d) n
        out = out.permute(0, 1, 3, 2).reshape(b, -1, n)
        return self.to_out(out)
# model
class Unet1D(nn.Module):
    """1-D U-Net denoiser for diffusion models.

    Encoder/decoder stacks of ResnetBlocks with linear attention at each
    resolution, full attention at the bottleneck, sinusoidal (or learned)
    time embeddings, and optional self-conditioning (input channels doubled).
    """

    def __init__(
        self,
        dim,
        init_dim=None,
        out_dim=None,
        dim_mults=(1, 2, 4, 8),
        channels=3,
        self_condition=False,
        resnet_block_groups=8,
        learned_variance=False,
        learned_sinusoidal_cond=False,
        random_fourier_features=False,
        learned_sinusoidal_dim=16,
    ):
        """
        Args:
            dim: base channel width; per-level widths are dim * dim_mults[i].
            init_dim: width after the stem conv (defaults to dim).
            out_dim: output channels (defaults to channels, doubled if learned_variance).
            channels: input channels of the data.
            self_condition: concatenate a previous x_0 estimate to the input.
            learned_variance: predict a variance channel per data channel.
        """
        super().__init__()

        # determine dimensions

        self.channels = channels
        self.self_condition = self_condition
        # self-conditioning doubles the input channels (x concatenated with x_0 estimate)
        input_channels = channels * (2 if self_condition else 1)

        init_dim = default(init_dim, dim)
        self.init_conv = nn.Conv1d(input_channels, init_dim, 7, padding=3)

        dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
        in_out = list(zip(dims[:-1], dims[1:]))

        block_klass = partial(ResnetBlock, groups=resnet_block_groups)

        # time embeddings

        time_dim = dim * 4

        self.random_or_learned_sinusoidal_cond = (
            learned_sinusoidal_cond or random_fourier_features
        )

        if self.random_or_learned_sinusoidal_cond:
            sinu_pos_emb = RandomOrLearnedSinusoidalPosEmb(
                learned_sinusoidal_dim, random_fourier_features
            )
            # +1 because the raw timestep is concatenated to the features
            fourier_dim = learned_sinusoidal_dim + 1
        else:
            sinu_pos_emb = SinusoidalPosEmb(dim)
            fourier_dim = dim

        self.time_mlp = nn.Sequential(
            sinu_pos_emb,
            nn.Linear(fourier_dim, time_dim),
            nn.GELU(),
            nn.Linear(time_dim, time_dim),
        )

        # layers

        self.downs = nn.ModuleList([])
        self.ups = nn.ModuleList([])
        num_resolutions = len(in_out)

        for ind, (dim_in, dim_out) in enumerate(in_out):
            is_last = ind >= (num_resolutions - 1)

            self.downs.append(
                nn.ModuleList(
                    [
                        block_klass(dim_in, dim_in, time_emb_dim=time_dim),
                        block_klass(dim_in, dim_in, time_emb_dim=time_dim),
                        Residual(PreNorm(dim_in, LinearAttention(dim_in))),
                        # last level keeps resolution with a plain conv
                        Downsample(dim_in, dim_out)
                        if not is_last
                        else nn.Conv1d(dim_in, dim_out, 3, padding=1),
                    ]
                )
            )

        mid_dim = dims[-1]
        self.mid_block1 = block_klass(mid_dim, mid_dim, time_emb_dim=time_dim)
        self.mid_attn = Residual(PreNorm(mid_dim, Attention(mid_dim)))
        self.mid_block2 = block_klass(mid_dim, mid_dim, time_emb_dim=time_dim)

        for ind, (dim_in, dim_out) in enumerate(reversed(in_out)):
            is_last = ind == (len(in_out) - 1)

            self.ups.append(
                nn.ModuleList(
                    [
                        # +dim_in: decoder blocks consume concatenated skip features
                        block_klass(dim_out + dim_in, dim_out, time_emb_dim=time_dim),
                        block_klass(dim_out + dim_in, dim_out, time_emb_dim=time_dim),
                        Residual(PreNorm(dim_out, LinearAttention(dim_out))),
                        Upsample(dim_out, dim_in)
                        if not is_last
                        else nn.Conv1d(dim_out, dim_in, 3, padding=1),
                    ]
                )
            )

        default_out_dim = channels * (1 if not learned_variance else 2)
        self.out_dim = default(out_dim, default_out_dim)

        self.final_res_block = block_klass(dim * 2, dim, time_emb_dim=time_dim)
        self.final_conv = nn.Conv1d(dim, self.out_dim, 1)

    def forward(self, x, time, x_self_cond=None):
        """Denoise *x* at timestep *time*.

        Args:
            x: (b, channels, n) input sequence.
            time: (b,) timesteps fed to the time MLP.
            x_self_cond: optional previous x_0 estimate (zeros when absent).
        Returns:
            (b, out_dim, n) prediction (noise / x_0 / v, per the diffusion objective).
        """
        if self.self_condition:
            x_self_cond = default(x_self_cond, lambda: torch.zeros_like(x))
            x = torch.cat((x_self_cond, x), dim=1)

        x = self.init_conv(x)
        r = x.clone()  # kept for the final long skip connection

        t = self.time_mlp(time)

        h = []  # stack of encoder activations for skip connections

        for block1, block2, attn, downsample in self.downs:
            x = block1(x, t)
            h.append(x)

            x = block2(x, t)
            x = attn(x)
            h.append(x)

            x = downsample(x)

        x = self.mid_block1(x, t)
        x = self.mid_attn(x)
        x = self.mid_block2(x, t)

        for block1, block2, attn, upsample in self.ups:
            x = torch.cat((x, h.pop()), dim=1)
            x = block1(x, t)

            x = torch.cat((x, h.pop()), dim=1)
            x = block2(x, t)
            x = attn(x)

            x = upsample(x)

        x = torch.cat((x, r), dim=1)

        x = self.final_res_block(x, t)
        return self.final_conv(x)
# ── begin file: thermomaps-root/tm/architectures/UNet_PBC.py (Python) ──
import math
import copy
from pathlib import Path
from random import random
from functools import partial
from collections import namedtuple
from multiprocessing import cpu_count
import torch
from torch import nn, einsum
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
#from torchvision import transforms as T, utils
from einops import rearrange, reduce
from einops.layers.torch import Rearrange
#from PIL import Image
from tqdm.auto import tqdm
#from ema_pytorch import EMA
#from accelerate import Accelerator
#from denoising_diffusion_pytorch.version import __version__
# constants
ModelPrediction = namedtuple('ModelPrediction', ['pred_noise', 'pred_x_start'])
# helpers functions
def exists(x):
    """Return True when *x* is not None."""
    return not (x is None)
def default(val, d):
    """Return *val* unless it is None; otherwise return *d* (called first if callable)."""
    if val is not None:
        return val
    if callable(d):
        return d()
    return d
def identity(t, *args, **kwargs):
    """Return *t* unchanged, ignoring any extra positional/keyword arguments."""
    del args, kwargs  # accepted for interface compatibility, intentionally unused
    return t
def cycle(dl):
    """Yield items from *dl* forever, restarting iteration whenever it is exhausted."""
    while True:
        yield from dl
def has_int_squareroot(num):
    """Return True when *num* is a perfect square.

    For integers this uses exact integer arithmetic (``math.isqrt``); the
    previous float-based ``math.sqrt(num) ** 2 == num`` check loses precision
    for large integers. Non-integer inputs keep the original float comparison
    for backward compatibility.
    """
    if isinstance(num, int):
        return num >= 0 and math.isqrt(num) ** 2 == num
    return (math.sqrt(num) ** 2) == num
def num_to_groups(num, divisor):
    """Split *num* into a list of full chunks of size *divisor* plus a smaller remainder chunk."""
    full, rem = divmod(num, divisor)
    groups = [divisor] * full
    if rem:
        groups.append(rem)
    return groups
def convert_image_to_fn(img_type, image):
    """Convert *image* to mode *img_type*, or return it unchanged if it already matches."""
    return image if image.mode == img_type else image.convert(img_type)
# normalization functions
def normalize_to_neg_one_to_one(img):
    """Map values from [0, 1] to [-1, 1]."""
    return 2 * img - 1
def unnormalize_to_zero_to_one(t):
    """Map values from [-1, 1] back to [0, 1]."""
    return (t + 1) / 2
# small helper modules
class Residual(nn.Module):
    """Wrap *fn* with a skip connection: the module computes ``fn(x, ...) + x``."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, *args, **kwargs):
        out = self.fn(x, *args, **kwargs)
        return out + x
def Upsample(dim, dim_out = None):
    """2x nearest-neighbour upsampling followed by a 3x3 circular-padded Conv2d."""
    out_channels = dim if dim_out is None else dim_out
    return nn.Sequential(
        nn.Upsample(scale_factor = 2, mode = 'nearest'),
        nn.Conv2d(dim, out_channels, 3, padding = 1, padding_mode='circular')
    )
def Downsample(dim, dim_out = None):
    """Halve H and W via space-to-depth (2x2 patches -> channels) plus a 1x1 Conv2d,
    so no information is discarded by striding."""
    out_channels = dim if dim_out is None else dim_out
    return nn.Sequential(
        Rearrange('b c (h p1) (w p2) -> b (c p1 p2) h w', p1 = 2, p2 = 2),
        nn.Conv2d(dim * 4, out_channels, 1, padding_mode='circular')
    )
class WeightStandardizedConv2d(nn.Conv2d):
    """Conv2d with weight standardization (https://arxiv.org/abs/1903.10520);
    weight standardization purportedly works synergistically with group normalization.

    Bug fix: the previous ``F.conv2d(x, w, bias, stride, dilation, groups)``
    call omitted the positional ``padding`` argument, so ``dilation`` was
    silently consumed as padding and ``groups`` as dilation. Delegating to
    ``self._conv_forward`` forwards stride/padding/dilation/groups correctly
    and also honours ``padding_mode='circular'`` (periodic boundary
    conditions), which the custom forward previously bypassed.
    """

    def forward(self, x):
        # Larger epsilon under reduced precision keeps rsqrt stable.
        eps = 1e-5 if x.dtype == torch.float32 else 1e-3

        weight = self.weight
        # Standardize each output filter to zero mean / unit variance.
        mean = weight.mean(dim=(1, 2, 3), keepdim=True)
        var = weight.var(dim=(1, 2, 3), unbiased=False, keepdim=True)
        normalized_weight = (weight - mean) * (var + eps).rsqrt()

        return self._conv_forward(x, normalized_weight, self.bias)
class LayerNorm(nn.Module):
    """Channel-wise layer norm for (b, c, h, w) tensors with a learnable per-channel gain."""

    def __init__(self, dim):
        super().__init__()
        self.g = nn.Parameter(torch.ones(1, dim, 1, 1))

    def forward(self, x):
        # Larger epsilon under reduced precision keeps rsqrt stable.
        eps = 1e-5 if x.dtype == torch.float32 else 1e-3
        mean = x.mean(dim=1, keepdim=True)
        var = x.var(dim=1, unbiased=False, keepdim=True)
        return (x - mean) * (var + eps).rsqrt() * self.g
class PreNorm(nn.Module):
    """Apply LayerNorm to the input before delegating to *fn*."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = LayerNorm(dim)

    def forward(self, x):
        return self.fn(self.norm(x))
# sinusoidal positional embeds
class SinusoidalPosEmb(nn.Module):
    """Transformer-style sinusoidal embedding of a scalar (timestep) input."""

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        half = self.dim // 2
        # Geometric frequency ladder from 1 down to 1/10000.
        scale = math.log(10000) / (half - 1)
        freqs = torch.exp(torch.arange(half, device=x.device) * -scale)
        angles = x[:, None] * freqs[None, :]
        return torch.cat((angles.sin(), angles.cos()), dim=-1)
class RandomOrLearnedSinusoidalPosEmb(nn.Module):
    """Random (or learned, when ``is_random`` is False) sinusoidal features.

    Following @crowsonkb's lead:
    https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8
    Output is (b, dim + 1): the raw input concatenated with sin/cos features.
    """

    def __init__(self, dim, is_random = False):
        super().__init__()
        assert (dim % 2) == 0
        self.weights = nn.Parameter(torch.randn(dim // 2), requires_grad = not is_random)

    def forward(self, x):
        col = x[:, None]  # b -> b 1
        freqs = col * self.weights[None, :] * 2 * math.pi
        return torch.cat((col, freqs.sin(), freqs.cos()), dim = -1)
# building block modules
class Block(nn.Module):
    """Weight-standardized conv -> GroupNorm -> optional FiLM scale/shift -> SiLU."""

    def __init__(self, dim, dim_out, groups = 8):
        super().__init__()
        self.proj = WeightStandardizedConv2d(dim, dim_out, 3, padding = 1)
        self.norm = nn.GroupNorm(groups, dim_out)
        self.act = nn.SiLU()

    def forward(self, x, scale_shift = None):
        h = self.norm(self.proj(x))
        if scale_shift is not None:
            scale, shift = scale_shift
            # FiLM conditioning; (scale + 1) keeps the zero-init case neutral.
            h = h * (scale + 1) + shift
        return self.act(h)
class ResnetBlock(nn.Module):
    """Two conv Blocks with optional FiLM time conditioning and a 1x1 residual projection."""

    def __init__(self, dim, dim_out, *, time_emb_dim = None, groups = 8):
        super().__init__()
        self.mlp = None
        if time_emb_dim is not None:
            # Projects the time embedding to per-channel (scale, shift).
            self.mlp = nn.Sequential(nn.SiLU(), nn.Linear(time_emb_dim, dim_out * 2))

        self.block1 = Block(dim, dim_out, groups = groups)
        self.block2 = Block(dim_out, dim_out, groups = groups)
        self.res_conv = nn.Conv2d(dim, dim_out, 1, padding_mode='circular') if dim != dim_out else nn.Identity()

    def forward(self, x, time_emb = None):
        scale_shift = None
        if self.mlp is not None and time_emb is not None:
            emb = self.mlp(time_emb)[:, :, None, None]  # b c -> b c 1 1
            scale_shift = emb.chunk(2, dim = 1)

        h = self.block2(self.block1(x, scale_shift = scale_shift))
        return h + self.res_conv(x)
class LinearAttention(nn.Module):
    """Linear-complexity attention over flattened spatial positions:
    softmax over queries and keys separately, then two einsums instead of an
    explicit (h*w) x (h*w) attention matrix."""

    def __init__(self, dim, heads = 4, dim_head = 32):
        super().__init__()
        self.scale = dim_head ** -0.5
        self.heads = heads
        hidden_dim = dim_head * heads
        self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False, padding_mode='circular')

        self.to_out = nn.Sequential(
            nn.Conv2d(hidden_dim, dim, 1, padding_mode='circular'),
            LayerNorm(dim)
        )

    def forward(self, x):
        b, c, h, w = x.shape
        # b (heads c) h w -> b heads c (h w) for each of q, k, v
        q, k, v = (
            t.view(b, self.heads, -1, h * w) for t in self.to_qkv(x).chunk(3, dim = 1)
        )

        q = q.softmax(dim = -2) * self.scale
        k = k.softmax(dim = -1)
        v = v / (h * w)

        context = torch.einsum('b h d n, b h e n -> b h d e', k, v)
        out = torch.einsum('b h d e, b h d n -> b h e n', context, q)
        return self.to_out(out.reshape(b, -1, h, w))
class Attention(nn.Module):
    """Full softmax self-attention over flattened spatial positions."""

    def __init__(self, dim, heads = 4, dim_head = 32):
        super().__init__()
        self.scale = dim_head ** -0.5
        self.heads = heads
        hidden_dim = dim_head * heads

        self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False, padding_mode='circular')
        self.to_out = nn.Conv2d(hidden_dim, dim, 1, padding_mode='circular')

    def forward(self, x):
        b, c, h, w = x.shape
        # b (heads c) h w -> b heads c (h w) for each of q, k, v
        q, k, v = (
            t.view(b, self.heads, -1, h * w) for t in self.to_qkv(x).chunk(3, dim = 1)
        )

        sim = torch.einsum('b h d i, b h d j -> b h i j', q * self.scale, k)
        attn = sim.softmax(dim = -1)
        out = torch.einsum('b h i j, b h d j -> b h i d', attn, v)

        # b heads (h w) d -> b (heads d) h w
        out = out.permute(0, 1, 3, 2).reshape(b, -1, h, w)
        return self.to_out(out)
# model
class Unet(nn.Module):
    """2-D U-Net denoiser with circular ('periodic boundary condition') padded
    convolutions throughout.

    Encoder/decoder stacks of ResnetBlocks with linear attention per
    resolution, full attention at the bottleneck, sinusoidal (or learned)
    time embeddings, and optional self-conditioning (input channels doubled).
    """

    def __init__(
        self,
        dim,
        init_dim = None,
        out_dim = None,
        dim_mults=(1, 2, 4, 8),
        channels = 3,
        self_condition = False,
        resnet_block_groups = 8,
        learned_variance = False,
        learned_sinusoidal_cond = False,
        random_fourier_features = False,
        learned_sinusoidal_dim = 16
    ):
        """
        Args:
            dim: base channel width; per-level widths are dim * dim_mults[i].
            init_dim: width after the stem conv (defaults to dim).
            out_dim: output channels (defaults to channels, doubled if learned_variance).
            channels: input channels of the data.
            self_condition: concatenate a previous x_0 estimate to the input.
        """
        super().__init__()

        # determine dimensions

        self.channels = channels
        self.self_condition = self_condition
        # self-conditioning doubles the input channels (x concatenated with x_0 estimate)
        input_channels = channels * (2 if self_condition else 1)

        init_dim = default(init_dim, dim)
        self.init_conv = nn.Conv2d(input_channels, init_dim, 7, padding = 3, padding_mode='circular')

        dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
        in_out = list(zip(dims[:-1], dims[1:]))

        block_klass = partial(ResnetBlock, groups = resnet_block_groups)

        # time embeddings

        time_dim = dim * 4

        self.random_or_learned_sinusoidal_cond = learned_sinusoidal_cond or random_fourier_features

        if self.random_or_learned_sinusoidal_cond:
            sinu_pos_emb = RandomOrLearnedSinusoidalPosEmb(learned_sinusoidal_dim, random_fourier_features)
            # +1 because the raw timestep is concatenated to the features
            fourier_dim = learned_sinusoidal_dim + 1
        else:
            sinu_pos_emb = SinusoidalPosEmb(dim)
            fourier_dim = dim

        self.time_mlp = nn.Sequential(
            sinu_pos_emb,
            nn.Linear(fourier_dim, time_dim),
            nn.GELU(),
            nn.Linear(time_dim, time_dim)
        )

        # layers

        self.downs = nn.ModuleList([])
        self.ups = nn.ModuleList([])
        num_resolutions = len(in_out)

        for ind, (dim_in, dim_out) in enumerate(in_out):
            is_last = ind >= (num_resolutions - 1)

            self.downs.append(nn.ModuleList([
                block_klass(dim_in, dim_in, time_emb_dim = time_dim),
                block_klass(dim_in, dim_in, time_emb_dim = time_dim),
                Residual(PreNorm(dim_in, LinearAttention(dim_in))),
                # last level keeps resolution with a plain conv
                Downsample(dim_in, dim_out) if not is_last else nn.Conv2d(dim_in, dim_out, 3, padding = 1, padding_mode='circular')
            ]))

        mid_dim = dims[-1]
        self.mid_block1 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)
        self.mid_attn = Residual(PreNorm(mid_dim, Attention(mid_dim)))
        self.mid_block2 = block_klass(mid_dim, mid_dim, time_emb_dim = time_dim)

        for ind, (dim_in, dim_out) in enumerate(reversed(in_out)):
            is_last = ind == (len(in_out) - 1)

            self.ups.append(nn.ModuleList([
                # +dim_in: decoder blocks consume concatenated skip features
                block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),
                block_klass(dim_out + dim_in, dim_out, time_emb_dim = time_dim),
                Residual(PreNorm(dim_out, LinearAttention(dim_out))),
                Upsample(dim_out, dim_in) if not is_last else nn.Conv2d(dim_out, dim_in, 3, padding = 1, padding_mode='circular')
            ]))

        default_out_dim = channels * (1 if not learned_variance else 2)
        self.out_dim = default(out_dim, default_out_dim)

        self.final_res_block = block_klass(dim * 2, dim, time_emb_dim = time_dim)
        self.final_conv = nn.Conv2d(dim, self.out_dim, 1, padding_mode='circular')

    def forward(self, x, time, x_self_cond = None):
        """Denoise *x* at timestep *time*.

        Args:
            x: (b, channels, h, w) input image.
            time: (b,) timesteps fed to the time MLP.
            x_self_cond: optional previous x_0 estimate (zeros when absent).
        Returns:
            (b, out_dim, h, w) prediction (noise / x_0 / v, per the diffusion objective).
        """
        if self.self_condition:
            x_self_cond = default(x_self_cond, lambda: torch.zeros_like(x))
            x = torch.cat((x_self_cond, x), dim = 1)

        x = self.init_conv(x)
        r = x.clone()  # kept for the final long skip connection

        t = self.time_mlp(time)

        h = []  # stack of encoder activations for skip connections

        for block1, block2, attn, downsample in self.downs:
            x = block1(x, t)
            h.append(x)

            x = block2(x, t)
            x = attn(x)
            h.append(x)

            # print ("shape before downsampling",x.shape)
            x = downsample(x)

        x = self.mid_block1(x, t)
        x = self.mid_attn(x)
        x = self.mid_block2(x, t)

        for block1, block2, attn, upsample in self.ups:
            x = torch.cat((x, h.pop()), dim = 1)
            x = block1(x, t)

            x = torch.cat((x, h.pop()), dim = 1)
            x = block2(x, t)
            x = attn(x)

            x = upsample(x)

        x = torch.cat((x, r), dim = 1)

        x = self.final_res_block(x, t)
        return self.final_conv(x)
# gaussian diffusion trainer class
def extract(a, t, x_shape):
    """Gather a[t] per batch element and reshape to (b, 1, 1, ...) so it broadcasts against x_shape."""
    batch = t.shape[0]
    vals = a.gather(-1, t)
    return vals.reshape(batch, *((1,) * (len(x_shape) - 1)))
def linear_beta_schedule(timesteps):
    """Linear beta schedule, scaled so total noise matches the 1000-step DDPM reference."""
    scale = 1000 / timesteps
    return torch.linspace(scale * 0.0001, scale * 0.02, timesteps, dtype = torch.float64)
def cosine_beta_schedule(timesteps, s = 0.008):
    """Cosine beta schedule as proposed in https://openreview.net/forum?id=-NEXDKk8gZ.

    Betas are derived from consecutive ratios of a squared-cosine
    alpha-bar curve and clipped to [0, 0.999].
    """
    grid = torch.linspace(0, timesteps, timesteps + 1, dtype = torch.float64)
    alphas_cumprod = torch.cos(((grid / timesteps) + s) / (1 + s) * math.pi * 0.5) ** 2
    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
    betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
    return torch.clip(betas, 0, 0.999)
class GaussianDiffusion(nn.Module):
    def __init__(
        self,
        model,
        *,
        image_size,
        timesteps = 1000,
        sampling_timesteps = None,
        loss_type = 'l1',
        objective = 'pred_noise',
        beta_schedule = 'cosine',
        p2_loss_weight_gamma = 0., # p2 loss weight, from https://arxiv.org/abs/2204.00227 - 0 is equivalent to weight of 1 across time - 1. is recommended
        p2_loss_weight_k = 1,
        ddim_sampling_eta = 1.
    ):
        """Gaussian diffusion wrapper around a denoising *model*.

        Precomputes the beta/alpha schedules and every derived quantity
        needed for q-sampling, posterior computation, and loss weighting,
        registering them as float32 buffers.

        Args:
            model: denoiser network (e.g. Unet) mapping (x, t, self_cond) -> prediction.
            image_size: spatial resolution of square inputs.
            timesteps: number of training diffusion steps.
            sampling_timesteps: steps used at sampling time; fewer than
                *timesteps* switches sampling to DDIM.
            objective: 'pred_noise', 'pred_x0' or 'pred_v'.
        """
        super().__init__()
        # subclasses may relax this; the base class requires the model output
        # channels to match its input channels (no learned variance)
        assert not (type(self) == GaussianDiffusion and model.channels != model.out_dim)
        assert not model.random_or_learned_sinusoidal_cond

        self.model = model
        self.channels = self.model.channels
        self.self_condition = self.model.self_condition

        self.image_size = image_size

        self.objective = objective

        assert objective in {'pred_noise', 'pred_x0', 'pred_v'}, 'objective must be either pred_noise (predict noise) or pred_x0 (predict image start) or pred_v (predict v [v-parameterization as defined in appendix D of progressive distillation paper, used in imagen-video successfully])'

        if beta_schedule == 'linear':
            betas = linear_beta_schedule(timesteps)
        elif beta_schedule == 'cosine':
            betas = cosine_beta_schedule(timesteps)
        else:
            raise ValueError(f'unknown beta schedule {beta_schedule}')

        alphas = 1. - betas
        alphas_cumprod = torch.cumprod(alphas, dim=0)
        # alpha-bar shifted right by one, with alpha-bar_{-1} = 1
        alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value = 1.)

        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        self.loss_type = loss_type

        # sampling related parameters

        self.sampling_timesteps = default(sampling_timesteps, timesteps) # default num sampling timesteps to number of timesteps at training

        assert self.sampling_timesteps <= timesteps
        self.is_ddim_sampling = self.sampling_timesteps < timesteps
        self.ddim_sampling_eta = ddim_sampling_eta

        # helper function to register buffer from float64 to float32

        register_buffer = lambda name, val: self.register_buffer(name, val.to(torch.float32))

        register_buffer('betas', betas)
        register_buffer('alphas_cumprod', alphas_cumprod)
        register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)

        # calculations for diffusion q(x_t | x_{t-1}) and others

        register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
        register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
        register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
        register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
        register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))

        # calculations for posterior q(x_{t-1} | x_t, x_0)

        posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)

        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)

        register_buffer('posterior_variance', posterior_variance)

        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain

        register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min =1e-20)))
        register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
        register_buffer('posterior_mean_coef2', (1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod))

        # calculate p2 reweighting

        register_buffer('p2_loss_weight', (p2_loss_weight_k + alphas_cumprod / (1 - alphas_cumprod)) ** -p2_loss_weight_gamma)
def predict_start_from_noise(self, x_t, t, noise):
return (
extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
)
def predict_noise_from_start(self, x_t, t, x0):
return (
(extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - x0) / \
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
)
def predict_v(self, x_start, t, noise):
return (
extract(self.sqrt_alphas_cumprod, t, x_start.shape) * noise -
extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * x_start
)
def predict_start_from_v(self, x_t, t, v):
return (
extract(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t -
extract(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
)
def q_posterior(self, x_start, x_t, t):
posterior_mean = (
extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = extract(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def model_predictions(self, x, t, x_self_cond = None, clip_x_start = False):
model_output = self.model(x, t, x_self_cond)
maybe_clip = partial(torch.clamp, min = -1., max = 1.) if clip_x_start else identity
if self.objective == 'pred_noise':
pred_noise = model_output
x_start = self.predict_start_from_noise(x, t, pred_noise)
x_start = maybe_clip(x_start)
elif self.objective == 'pred_x0':
x_start = model_output
x_start = maybe_clip(x_start)
pred_noise = self.predict_noise_from_start(x, t, x_start)
elif self.objective == 'pred_v':
v = model_output
x_start = self.predict_start_from_v(x, t, v)
x_start = maybe_clip(x_start)
pred_noise = self.predict_noise_from_start(x, t, x_start)
return ModelPrediction(pred_noise, x_start)
def p_mean_variance(self, x, t, x_self_cond = None, clip_denoised = True):
preds = self.model_predictions(x, t, x_self_cond)
x_start = preds.pred_x_start
if clip_denoised:
x_start.clamp_(-1., 1.)
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start = x_start, x_t = x, t = t)
return model_mean, posterior_variance, posterior_log_variance, x_start
@torch.no_grad()
def p_sample(self, x, t: int, x_self_cond = None, clip_denoised = True):
b, *_, device = *x.shape, x.device
batched_times = torch.full((x.shape[0],), t, device = x.device, dtype = torch.long)
model_mean, _, model_log_variance, x_start = self.p_mean_variance(x = x, t = batched_times, x_self_cond = x_self_cond, clip_denoised = clip_denoised)
noise = torch.randn_like(x) if t > 0 else 0. # no noise if t == 0
pred_img = model_mean + (0.5 * model_log_variance).exp() * noise
return pred_img, x_start
@torch.no_grad()
def p_sample_loop(self, shape):
batch, device = shape[0], self.betas.device
img = torch.randn(shape, device=device)
x_start = None
for t in tqdm(reversed(range(0, self.num_timesteps)), desc = 'sampling loop time step', total = self.num_timesteps):
self_cond = x_start if self.self_condition else None
img, x_start = self.p_sample(img, t, self_cond)
img = unnormalize_to_zero_to_one(img)
return img
    @torch.no_grad()
    def ddim_sample(self, shape, clip_denoised = True):
        """DDIM sampling (Song et al., https://arxiv.org/abs/2010.02502).

        Steps through a subsampled time grid; ``eta`` interpolates between
        deterministic DDIM (0) and DDPM-like stochastic sampling (1).
        Returns images mapped back to [0, 1].
        """
        batch, device, total_timesteps, sampling_timesteps, eta, objective = shape[0], self.betas.device, self.num_timesteps, self.sampling_timesteps, self.ddim_sampling_eta, self.objective

        times = torch.linspace(-1, total_timesteps - 1, steps=sampling_timesteps + 1)   # [-1, 0, 1, 2, ..., T-1] when sampling_timesteps == total_timesteps
        times = list(reversed(times.int().tolist()))
        time_pairs = list(zip(times[:-1], times[1:])) # [(T-1, T-2), (T-2, T-3), ..., (1, 0), (0, -1)]

        img = torch.randn(shape, device = device)

        x_start = None

        for time, time_next in tqdm(time_pairs, desc = 'sampling loop time step'):
            time_cond = torch.full((batch,), time, device=device, dtype=torch.long)
            self_cond = x_start if self.self_condition else None
            pred_noise, x_start, *_ = self.model_predictions(img, time_cond, self_cond, clip_x_start = clip_denoised)

            if time_next < 0:
                # final step: the DDIM update degenerates to returning x_0
                img = x_start
                continue

            alpha = self.alphas_cumprod[time]
            alpha_next = self.alphas_cumprod[time_next]

            # DDIM noise scale (eq. 16 of the DDIM paper)
            sigma = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt()
            c = (1 - alpha_next - sigma ** 2).sqrt()

            noise = torch.randn_like(img)

            img = x_start * alpha_next.sqrt() + \
                  c * pred_noise + \
                  sigma * noise

        img = unnormalize_to_zero_to_one(img)
        return img
@torch.no_grad()
def sample(self, batch_size = 16):
image_size, channels = self.image_size, self.channels
sample_fn = self.p_sample_loop if not self.is_ddim_sampling else self.ddim_sample
return sample_fn((batch_size, channels, image_size, image_size))
@torch.no_grad()
def interpolate(self, x1, x2, t = None, lam = 0.5):
b, *_, device = *x1.shape, x1.device
t = default(t, self.num_timesteps - 1)
assert x1.shape == x2.shape
t_batched = torch.stack([torch.tensor(t, device = device)] * b)
xt1, xt2 = map(lambda x: self.q_sample(x, t = t_batched), (x1, x2))
img = (1 - lam) * xt1 + lam * xt2
for i in tqdm(reversed(range(0, t)), desc = 'interpolation sample time step', total = t):
img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long))
return img
def q_sample(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
return (
extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
)
@property
def loss_fn(self):
if self.loss_type == 'l1':
return F.l1_loss
elif self.loss_type == 'l2':
return F.mse_loss
else:
raise ValueError(f'invalid loss type {self.loss_type}')
    def p_losses(self, x_start, t, noise = None):
        """Training loss for clean images `x_start` at (batched) timesteps `t`.

        Diffuses the images forward, runs the model, and regresses against the
        target implied by `self.objective` (noise, x_0, or v), with per-timestep
        reweighting by `self.p2_loss_weight`.
        """
        b, c, h, w = x_start.shape
        noise = default(noise, lambda: torch.randn_like(x_start))
        # noise sample
        x = self.q_sample(x_start = x_start, t = t, noise = noise)
        # if doing self-conditioning, 50% of the time, predict x_start from current set of times
        # and condition with unet with that
        # this technique will slow down training by 25%, but seems to lower FID significantly
        x_self_cond = None
        if self.self_condition and random() < 0.5:
            with torch.no_grad():
                x_self_cond = self.model_predictions(x, t).pred_x_start
                x_self_cond.detach_()
        # predict and take gradient step
        model_out = self.model(x, t, x_self_cond)
        if self.objective == 'pred_noise':
            target = noise
        elif self.objective == 'pred_x0':
            target = x_start
        elif self.objective == 'pred_v':
            v = self.predict_v(x_start, t, noise)
            target = v
        else:
            raise ValueError(f'unknown objective {self.objective}')
        loss = self.loss_fn(model_out, target, reduction = 'none')
        # Per-sample mean over all non-batch dimensions, so the timestep
        # reweighting below applies per example.
        loss = reduce(loss, 'b ... -> b (...)', 'mean')
        loss = loss * extract(self.p2_loss_weight, t, loss.shape)
        return loss.mean()
    def forward(self, img, *args, **kwargs):
        """Sample uniform random timesteps for the batch and return the training loss."""
        b, c, h, w, device, img_size, = *img.shape, img.device, self.image_size
        assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
        t = torch.randint(0, self.num_timesteps, (b,), device=device).long()
        # Inputs arrive in [0, 1]; the diffusion process operates in [-1, 1].
        img = normalize_to_neg_one_to_one(img)
        return self.p_losses(img, t, *args, **kwargs)
# dataset classes
class Dataset(Dataset):
    """Image-folder dataset: recursively collects files with the given
    extensions under `folder` and yields resized, center-cropped tensors."""
    def __init__(
        self,
        folder,
        image_size,
        exts = ['jpg', 'jpeg', 'png', 'tiff'],
        augment_horizontal_flip = False,
        convert_image_to = None
    ):
        super().__init__()
        self.folder = folder
        self.image_size = image_size
        # Gather every matching file under `folder`, grouped by extension.
        paths = []
        for ext in exts:
            paths.extend(Path(f'{folder}').glob(f'**/*.{ext}'))
        self.paths = paths
        if exists(convert_image_to):
            maybe_convert_fn = partial(convert_image_to_fn, convert_image_to)
        else:
            maybe_convert_fn = nn.Identity()
        flip = T.RandomHorizontalFlip() if augment_horizontal_flip else nn.Identity()
        self.transform = T.Compose([
            T.Lambda(maybe_convert_fn),
            T.Resize(image_size),
            flip,
            T.CenterCrop(image_size),
            T.ToTensor()
        ])
    def __len__(self):
        return len(self.paths)
    def __getitem__(self, index):
        image = Image.open(self.paths[index])
        return self.transform(image)
# trainer class
class Trainer(object):
    """Training harness for a diffusion model.

    Wraps the model, dataset, optimizer and EMA copy behind a HuggingFace
    Accelerate setup (multi-GPU / mixed precision), and periodically saves
    checkpoints and sample grids to `results_folder`.
    """
    def __init__(
        self,
        diffusion_model,
        folder,
        *,
        train_batch_size = 16,
        gradient_accumulate_every = 1,
        augment_horizontal_flip = True,
        train_lr = 1e-4,
        train_num_steps = 100000,
        ema_update_every = 10,
        ema_decay = 0.995,
        adam_betas = (0.9, 0.99),
        save_and_sample_every = 1000,
        num_samples = 25,
        results_folder = './results',
        amp = False,
        fp16 = False,
        split_batches = True,
        convert_image_to = None
    ):
        super().__init__()
        self.accelerator = Accelerator(
            split_batches = split_batches,
            mixed_precision = 'fp16' if fp16 else 'no'
        )
        self.accelerator.native_amp = amp
        self.model = diffusion_model
        # Samples are saved as a square grid, hence the square-root constraint.
        assert has_int_squareroot(num_samples), 'number of samples must have an integer square root'
        self.num_samples = num_samples
        self.save_and_sample_every = save_and_sample_every
        self.batch_size = train_batch_size
        self.gradient_accumulate_every = gradient_accumulate_every
        self.train_num_steps = train_num_steps
        self.image_size = diffusion_model.image_size
        # dataset and dataloader
        self.ds = Dataset(folder, self.image_size, augment_horizontal_flip = augment_horizontal_flip, convert_image_to = convert_image_to)
        dl = DataLoader(self.ds, batch_size = train_batch_size, shuffle = True, pin_memory = True, num_workers = cpu_count())
        dl = self.accelerator.prepare(dl)
        # cycle() turns the loader into an infinite iterator keyed off `next()`.
        self.dl = cycle(dl)
        # optimizer
        self.opt = Adam(diffusion_model.parameters(), lr = train_lr, betas = adam_betas)
        # for logging results in a folder periodically
        # EMA copy is kept only on the main process; workers never sample/save.
        if self.accelerator.is_main_process:
            self.ema = EMA(diffusion_model, beta = ema_decay, update_every = ema_update_every)
        self.results_folder = Path(results_folder)
        self.results_folder.mkdir(exist_ok = True)
        # step counter state
        self.step = 0
        # prepare model, dataloader, optimizer with accelerator
        self.model, self.opt = self.accelerator.prepare(self.model, self.opt)
    def save(self, milestone):
        """Checkpoint model/optimizer/EMA/scaler state as model-{milestone}.pt (main process only)."""
        if not self.accelerator.is_local_main_process:
            return
        data = {
            'step': self.step,
            'model': self.accelerator.get_state_dict(self.model),
            'opt': self.opt.state_dict(),
            'ema': self.ema.state_dict(),
            'scaler': self.accelerator.scaler.state_dict() if exists(self.accelerator.scaler) else None,
            'version': __version__
        }
        torch.save(data, str(self.results_folder / f'model-{milestone}.pt'))
    def load(self, milestone):
        """Restore training state from a model-{milestone}.pt checkpoint."""
        accelerator = self.accelerator
        device = accelerator.device
        data = torch.load(str(self.results_folder / f'model-{milestone}.pt'), map_location=device)
        # Unwrap the accelerate wrapper so state-dict keys line up.
        model = self.accelerator.unwrap_model(self.model)
        model.load_state_dict(data['model'])
        self.step = data['step']
        self.opt.load_state_dict(data['opt'])
        self.ema.load_state_dict(data['ema'])
        if 'version' in data:
            print(f"loading from version {data['version']}")
        if exists(self.accelerator.scaler) and exists(data['scaler']):
            self.accelerator.scaler.load_state_dict(data['scaler'])
    def train(self):
        """Main training loop: gradient accumulation, EMA updates, and
        periodic sampling/checkpointing until `train_num_steps` is reached."""
        accelerator = self.accelerator
        device = accelerator.device
        with tqdm(initial = self.step, total = self.train_num_steps, disable = not accelerator.is_main_process) as pbar:
            while self.step < self.train_num_steps:
                total_loss = 0.
                for _ in range(self.gradient_accumulate_every):
                    data = next(self.dl).to(device)
                    with self.accelerator.autocast():
                        loss = self.model(data)
                        # Scale so the accumulated gradient matches one large batch.
                        loss = loss / self.gradient_accumulate_every
                        total_loss += loss.item()
                    self.accelerator.backward(loss)
                accelerator.clip_grad_norm_(self.model.parameters(), 1.0)
                pbar.set_description(f'loss: {total_loss:.4f}')
                accelerator.wait_for_everyone()
                self.opt.step()
                self.opt.zero_grad()
                accelerator.wait_for_everyone()
                self.step += 1
                if accelerator.is_main_process:
                    self.ema.to(device)
                    self.ema.update()
                    if self.step != 0 and self.step % self.save_and_sample_every == 0:
                        self.ema.ema_model.eval()
                        with torch.no_grad():
                            milestone = self.step // self.save_and_sample_every
                            # Split num_samples into batches no larger than batch_size.
                            batches = num_to_groups(self.num_samples, self.batch_size)
                            all_images_list = list(map(lambda n: self.ema.ema_model.sample(batch_size=n), batches))
                        all_images = torch.cat(all_images_list, dim = 0)
                        utils.save_image(all_images, str(self.results_folder / f'sample-{milestone}.png'), nrow = int(math.sqrt(self.num_samples)))
                        self.save(milestone)
                pbar.update(1)
        accelerator.print('training complete')
| Python |
2D | lherron2/thermomaps-ising | thermomaps-root/data/dataset.py | .py | 6,050 | 151 | from data.trajectory import Trajectory
from data.generic import Summary
from typing import List, Dict, Union, Iterable
from slurmflow.serializer import ObjectSerializer
from sklearn.model_selection import ShuffleSplit
from tm.core.loader import Loader
import numpy as np
import pandas as pd
import logging
import sys
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))
class MultiEnsembleDataset:
    """Collection of trajectories from different thermodynamic ensembles,
    convertible to pandas summaries and to thermomaps train/test Loaders."""
    def __init__(self, trajectories: Union[Iterable[Trajectory], Iterable[str]], summary: Summary = Summary()):
        """
        Initialize a MultiEnsembleDataset.

        Args:
            trajectories (Union[Iterable[Trajectory], Iterable[str]]): Either an iterable of Trajectory objects or an iterable of strings.
                If an iterable of strings is provided, it is assumed that these are paths to the trajectories, and the
                trajectories are loaded from these paths using the ObjectSerializer.
            summary (Summary): The summary of the dataset.
        """
        # If trajectories is an iterable of strings, load the trajectories from the provided paths
        # NOTE(review): `trajectories` is iterated twice below (the all() check
        # and the loading loop) — assumes a re-iterable collection, not a
        # one-shot generator. Confirm callers pass lists.
        trajectories_ = []
        is_iterable = isinstance(trajectories, Iterable)
        is_strs = all(isinstance(path, str) for path in trajectories)
        if is_iterable and is_strs:
            for path in trajectories:
                OS = ObjectSerializer(path)
                trajectories_.append(OS.load())
        else:
            trajectories_ = trajectories
        self.trajectories = trajectories_
        self.summary = summary
    def save(self, filename: str, overwrite: bool = True):
        """
        Save the dataset to disk.

        Args:
            filename (str): The filename to save to.
            overwrite (bool, optional): Whether to overwrite an existing file. Defaults to False.
        """
        OS = ObjectSerializer(filename)
        OS.serialize(self, overwrite=overwrite)
    @classmethod
    def load(cls, filename: str) -> 'Dataset':
        """
        Load a dataset from disk.

        Args:
            filename (str): The filename to load from.

        Returns:
            Dataset: The loaded dataset.
        """
        OS = ObjectSerializer(filename)
        return OS.load()
    def to_dataframe(self) -> pd.DataFrame:
        """
        Convert the dataset to a pandas DataFrame.

        Returns:
            pd.DataFrame: The dataset as a DataFrame (one row per trajectory,
            columns taken from each trajectory's summary plus its index).
        """
        def create_or_append_df(existing_df, new_data):
            # Build the frame lazily so an empty dataset yields None, not an empty frame.
            new_df = pd.DataFrame([new_data])
            if existing_df is None:
                return new_df
            else:
                return pd.concat([existing_df, new_df], ignore_index=True)
        df = None
        for index, traj in enumerate(self.trajectories):
            row = {"index": index, **traj.summary.__dict__}
            df = create_or_append_df(df, row)
        return df
    def from_dataframe(self, df: pd.DataFrame) -> 'Dataset':
        """
        Convert a pandas DataFrame to a dataset.

        Args:
            df (pd.DataFrame): The DataFrame to convert. Must contain an
                'index' column referring back to `self.trajectories`
                (e.g. a filtered result of `to_dataframe`).

        Returns:
            Dataset: The dataset containing only the referenced trajectories.
        """
        trajectories = [self.trajectories[row['index']] for _, row in df.iterrows()]
        new_dataset = MultiEnsembleDataset(trajectories, summary=self.summary)
        return new_dataset
    def get_loader_args(self, state_variables: List[str]) -> Dict[str, List]:
        """Assemble the stacked training array for the Loader.

        For each trajectory, the listed state variables (e.g. temperature)
        are broadcast into constant channels appended after the coordinate
        channels, and also collected as per-frame vectors.

        Returns:
            tuple: (complete_dataset, paired_state_vars, control_dims) — note
            the return annotation above predates this and does not match.
        """
        complete_dataset, paired_state_vars = [], []
        for trajectory in self.trajectories:
            state_var_chs = []
            state_var_vector = []
            if len(trajectory.coordinates.shape) == 3: # No channel dim
                coord_ch = np.expand_dims(trajectory.coordinates, 1)
            else:
                coord_ch = trajectory.coordinates
            for k in state_variables:
                # Constant channel(s) carrying the state-variable value per frame.
                state_var_chs.append(np.ones_like(coord_ch) * trajectory.summary[k])
                state_var_vector.append(np.ones((len(coord_ch), 1)) * trajectory.summary[k])
            state_var_chs = np.concatenate(state_var_chs, 1)
            state_vector = np.concatenate([coord_ch, state_var_chs], 1)
            state_var_vector = np.concatenate(state_var_vector, 1)
            complete_dataset.append(state_vector)
            paired_state_vars.append(state_var_vector)
        complete_dataset = np.concatenate(complete_dataset)
        paired_state_vars = np.concatenate(paired_state_vars)
        # NOTE(review): relies on `coord_ch`/`state_var_vector` from the last
        # loop iteration — raises NameError if `self.trajectories` is empty.
        n_coords_ch = coord_ch.shape[1]
        n_state_var_ch = state_var_vector.shape[1]
        control_dims = (n_coords_ch, n_coords_ch + n_state_var_ch)
        return complete_dataset, paired_state_vars, control_dims
    def to_TMLoader(self, train_size: float, test_size: float, state_variables: List[str], **TMLoader_kwargs) -> Loader:
        """
        Convert the dataset to a DataLoader.

        Args:
            train_size (float): fraction of frames assigned to training.
            test_size (float): fraction of frames assigned to testing.
            state_variables (List[str]): summary keys broadcast into channels.
            TMLoader_kwargs: Additional keyword arguments for the Loader.

        Returns:
            tuple: (train_loader, test_loader) Loader instances over a single
            random shuffle-split of the stacked frames.
        """
        tm_dataset, paired_state_vars, control_dims = self.get_loader_args(state_variables)
        splitter = ShuffleSplit(n_splits=1, test_size=test_size, train_size=train_size)
        train_idxs, test_idxs = next(splitter.split(tm_dataset))
        train_loader = Loader(data=tm_dataset[train_idxs], temperatures=paired_state_vars[train_idxs], control_dims=control_dims, **TMLoader_kwargs)
        test_loader = Loader(data=tm_dataset[test_idxs], temperatures=paired_state_vars[test_idxs], control_dims=control_dims, **TMLoader_kwargs)
        return train_loader, test_loader
2D | lherron2/thermomaps-ising | thermomaps-root/data/__init__.py | .py | 0 | 0 | null | Python |
2D | lherron2/thermomaps-ising | thermomaps-root/data/trajectory.py | .py | 7,609 | 196 | import os
import numpy as np
from data.observables import Observable
from data.generic import DataFormat, Summary
from typing import List, Optional, Dict, Union, Iterable
import collections
import logging
import sys
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))
class Trajectory(DataFormat):
    """A stack of coordinate frames plus named per-frame observables."""
    def __init__(self, summary: Summary, coordinates: np.ndarray = None):
        """
        Initialize a Trajectory object.

        Args:
            summary (Summary): The summary of the trajectory.
            coordinates (np.ndarray, optional): frame stack; None means empty.
        """
        super().__init__(summary)
        self.summary = summary
        # Observables keyed by name; values are Observable instances.
        self.observables = {}
        self.coordinates = coordinates
    def add_observable(self, observables: Union[Observable, List[Observable]]):
        """
        Add one or more observables to the trajectory.

        Args:
            observables (Union[Observable, List[Observable]]): The observable or list of observables to add.
        """
        if not isinstance(observables, list):
            observables = [observables]
        for observable in observables:
            # Later observables with the same name silently replace earlier ones.
            self.observables[observable.name] = observable
    def __getitem__(self, index: Union[int, slice, Iterable[int]]):
        """
        Get a specific frame or a slice of frames from the trajectory.

        Args:
            index (int or slice): The index or slice to retrieve.

        Returns:
            Trajectory: A new Trajectory object with the specific frame or slice of frames and the corresponding observables.

        Raises:
            IndexError: If the index is out of range.
        """
        if self.coordinates is None:
            frame = None
        else:
            frame = self.coordinates[index]
        # Slice every observable with the same index so frames stay aligned.
        observables = {name: obs[index] for name, obs in self.observables.items()}
        # Create a new Trajectory object with the specific frame and observables
        new_trajectory = Trajectory(self.summary, frame)
        for name, value in observables.items():
            new_trajectory.add_observable(value)
        return new_trajectory
    @classmethod
    def merge(cls, summary: Summary, trajectories: List['Trajectory'], frame_indices: Optional[List[List[int]]] = None) -> 'Trajectory':
        """
        Merge multiple Trajectory objects into a single Trajectory.

        Args:
            summary (Summary): The summary of the merged trajectory.
            trajectories (List[Trajectory]): The list of Trajectory objects to merge.
            frame_indices (Optional[List[List[int]]]): The list of frame indices to include from each trajectory.
                If None, all frames from each trajectory are included.

        Returns:
            Trajectory: The merged Trajectory object.

        Raises:
            ValueError: If the number of trajectories does not match the number of frame index lists.
        """
        if frame_indices is not None and len(trajectories) != len(frame_indices):
            raise ValueError("The number of trajectories must match the number of frame index lists.")
        if frame_indices is None:
            frame_indices = [list(range(len(traj))) for traj in trajectories]
        # Concatenate frames from each trajectory
        # NOTE(review): traj[i] yields Trajectory objects (see __getitem__),
        # which are then handed to np.concatenate — confirm this produces the
        # intended frame array rather than relying on array coercion.
        merged_frames = [traj[i] for traj, indices in zip(trajectories, frame_indices) for i in indices]
        merged_frames = np.concatenate(merged_frames)
        # Initialize the merged trajectory
        merged_trajectory = cls(summary, merged_frames)
        # Merge observables
        merged_observables = {}
        for traj, indices in zip(trajectories, frame_indices):
            for name, obs in traj.observables.items():
                if name not in merged_observables:
                    merged_observables[name] = obs[indices]
                else:
                    merged_observables[name] = merged_observables[name].__listadd__(obs[indices])
        # Set the merged observables to the new trajectory
        merged_trajectory.observables = merged_observables
        return merged_trajectory
    def sort_by(self, observable_name: str, reverse: bool = False):
        """
        Sort the frames in the trajectory by an observable, in place.

        Args:
            observable_name (str): The name of the observable to sort by.
            reverse (bool, optional): Whether to sort in reverse order. Defaults to False.

        Raises:
            ValueError: If no observable with the given name is found.
        """
        if observable_name not in self.observables:
            raise ValueError(f"No observable named '{observable_name}' found.")
        # Get the observable values and sort the indices
        quantity = self.observables[observable_name].quantity
        sorted_indices = sorted(range(len(quantity)),  # indices
                                key=quantity.__getitem__,  # sort values[indices]
                                reverse=reverse)
        # Create a new trajectory with the sorted frames
        sorted_trajectory = self[sorted_indices]
        # Reset the current trajectory's attributes to the sorted trajectory's attributes
        self.__dict__ = sorted_trajectory.__dict__
    def __len__(self):
        """
        Get the number of frames in the trajectory.

        Returns:
            int: The number of frames in the trajectory.
        """
        return len(self.coordinates)
class EnsembleTrajectory(Trajectory):
    """Trajectory annotated with the thermodynamic state variables
    (e.g. temperature) of the ensemble it was sampled from."""
    def __init__(self, summary: Summary, state_variables: Summary, coordinates: np.ndarray = None):
        super().__init__(summary, coordinates)
        # Summary holding the ensemble's state variables, keyed by name.
        self.state_variables = state_variables
class EnsembleIsingTrajectory(EnsembleTrajectory):
    """Ensemble trajectory for the 2D Ising model.

    Coordinates are stored with shape (num_frames, size, size); a single
    (size, size) lattice is promoted to a one-frame stack.
    """
    def __init__(self, summary: Summary, state_variables: Summary, coordinates: np.ndarray = None):
        # super() already stores state_variables; the original re-assigned it redundantly.
        super().__init__(summary, state_variables, coordinates)
        if coordinates is not None:
            coordinates = np.array(coordinates)
            if coordinates.ndim == 3:
                self.coordinates = coordinates
            elif coordinates.ndim == 2:
                # Promote a single lattice to a stack of one frame.
                self.coordinates = coordinates.reshape((1, *coordinates.shape))
    def add_frame(self, frame: np.ndarray):
        """
        Add a frame (or stack of frames) to the trajectory.

        Args:
            frame (np.ndarray): shape (size, size) or (n_frames, size, size).
        """
        frame = np.array(frame)
        # Promote a single lattice to a one-frame stack; 3D stacks pass through.
        if frame.ndim == 2:
            frame = frame.reshape((1, *frame.shape))
        logger.debug(f"Adding frame of shape {frame.shape} to trajectory.")
        if self.coordinates is None:
            logger.debug(f"Initializing trajectory with frame of shape {frame.shape}.")
            self.coordinates = frame
            logger.debug(f"Initialized trajectory with shape {self.coordinates.shape}.")
        else:
            logger.debug(f"Concatenating frame of shape {frame.shape} to trajectory.")
            self.coordinates = np.concatenate((self.coordinates, frame))
            logger.debug(f"Concatenated frame to trajectory with shape {self.coordinates.shape}.")
class MultiEnsembleTrajectory:
    """Collection of ensemble trajectories, indexed by their position in the input list."""
    def __init__(self, trajectories: List[EnsembleTrajectory]):
        # Map list position -> trajectory.
        self.trajectories = dict(enumerate(trajectories))
2D | lherron2/thermomaps-ising | thermomaps-root/data/generic.py | .py | 2,438 | 70 |
import pandas as pd
from typing import Any, Iterable, List
import logging
from slurmflow.serializer import ObjectSerializer
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Summary:
    """Lightweight attribute bag: keyword arguments become instance attributes,
    retrievable either as attributes or via subscript (s['key'])."""
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
    def __getitem__(self, key):
        # Subscript access delegates to attribute lookup.
        return getattr(self, key)
class Report:
    """Plain-text report loaded from disk at construction; can be re-saved elsewhere."""
    def __init__(self, report_path: str):
        # Keep the original path as the report's name.
        self.name = report_path
        with open(report_path, 'r') as handle:
            self.report = handle.read()
    def save(self, filename: str):
        """Write the report text to `filename`."""
        with open(filename, 'w') as handle:
            handle.write(self.report)
class DataFormat:
    """Base class for serializable data containers; persistence is delegated
    to slurmflow's ObjectSerializer."""
    def __init__(self, summary: Summary):
        # Metadata describing this object.
        self.summary = summary
    def save(self, filename: str, overwrite: bool = False):
        """Serialize this object to `filename` (refuses to overwrite by default)."""
        OS = ObjectSerializer(filename)
        OS.serialize(self, overwrite=overwrite)
    @classmethod
    def load(cls, filename: str) -> object:
        """Deserialize an object previously saved with `save`."""
        OS = ObjectSerializer(filename)
        return OS.load()
class Registry:
    """Registry of Summary/DataFormat objects with a pandas lookup table that
    is rebuilt on every insertion."""
    def __init__(self, objects: Iterable[object]):
        self.objects = []
        for obj in objects:
            self.add_object(obj)
        # Ensure the table exists even when `objects` is empty.
        self.lookup_table = self.create_lookup_table()
    def add_object(self, obj: object):
        """Register an object and refresh the lookup table."""
        assert isinstance(obj, Summary) or isinstance(obj, DataFormat), "Object must be a Summary or a subclass of DataFormat."
        self.objects.append(obj)
        self.lookup_table = self.create_lookup_table()
    def create_lookup_table(self, method: str = 'intersection') -> pd.DataFrame:
        """Build a DataFrame of object attributes.

        method='intersection' keeps only attributes common to all objects;
        'union' keeps every attribute seen, filling gaps with None.
        """
        if not self.objects:
            # BUGFIX: set.intersection/union cannot be called with zero sets;
            # an empty registry previously raised TypeError here.
            return pd.DataFrame()
        if method == 'intersection':
            common_attrs = set.intersection(*(set(vars(obj)) for obj in self.objects))
            return pd.DataFrame([{attr: getattr(obj, attr, None) for attr in common_attrs} for obj in self.objects])
        elif method == 'union':
            all_attrs = set.union(*(set(vars(obj)) for obj in self.objects))
            return pd.DataFrame([{attr: getattr(obj, attr, None) for attr in all_attrs} for obj in self.objects])
        else:
            raise ValueError("Method must be 'intersection' or 'union'.")
    def lookup_by_index(self, index: int) -> object:
        """Return the index-th registered object."""
        return self.objects[index]
    def lookup_by_attribute(self, attr_name: str, attr_value: Any) -> List[object]:
        """Return all objects whose `attr_name` attribute equals `attr_value`."""
        return [obj for obj in self.objects if getattr(obj, attr_name, None) == attr_value]
| Python |
2D | lherron2/thermomaps-ising | thermomaps-root/data/utils.py | .py | 682 | 33 | import torch
import numpy as np
import re
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
import logging
def regex_list(regex, l):
    """
    Filter a list using a regular expression.

    Args:
        regex (str): The regular expression pattern (matched at the start of
            each element, via re.match).
        l (list): The list to be filtered.

    Returns:
        list: Filtered list containing elements that match the pattern.
    """
    pattern = re.compile(regex)
    return [item for item in l if pattern.match(item)]
class ArrayWrapper:
    """Wraps a numpy array, offering torch and numpy views of the same data."""
    def __init__(self, array):
        # Underlying numpy array; shared (not copied) by both accessors.
        self.array = array
    def as_torch(self):
        """Return the data as a torch tensor (shares memory with the array)."""
        return torch.from_numpy(self.array)
    def as_numpy(self):
        """Return the underlying numpy array unchanged."""
        return self.array
| Python |
2D | lherron2/thermomaps-ising | thermomaps-root/data/observables.py | .py | 4,162 | 129 |
import copy
import numpy as np
from abc import ABC, abstractmethod
from typing import Union, Type
from data.utils import ArrayWrapper
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Observable(ABC):
    """Abstract per-frame observable of a trajectory.

    Subclasses implement `evaluate`; the stored `quantity` array supports
    slicing and elementwise arithmetic, each producing a new Observable of
    the same concrete type. Arithmetic assumes the subclass constructor
    accepts a `quantity` keyword.
    """
    def __init__(self, name: str = None):
        self.name = name
    @abstractmethod
    def evaluate(self, trajectory: Type['Trajectory']):
        raise NotImplementedError
    def set(self, quantity: np.ndarray):
        """Store the evaluated values."""
        self.quantity = quantity
    def as_vector(self):
        """Return the quantity flattened to (n_frames, -1), wrapped."""
        if len(self.quantity.shape) > 1:
            return ArrayWrapper(self.quantity.reshape(self.quantity.shape[0], -1))
        else:
            raise ValueError("Quantity cannot be reshaped into a 2D array.")
    def as_tensor(self):
        """Return the quantity with its original shape, wrapped."""
        return ArrayWrapper(self.quantity)
    def __getitem__(self, index: Union[int, slice]) -> 'Observable':
        """
        Create a new Observable instance with a subset of the quantity.

        Args:
            index (int or slice): The index or slice to retrieve.

        Returns:
            Observable: A new Observable instance with the sliced quantity.
        """
        if hasattr(self.quantity, '__getitem__'):
            # Shallow-copy so metadata (e.g. name) carries over.
            new_obs = copy.copy(self)
            new_obs.set(self.quantity[index])
            return new_obs
        else:
            raise TypeError("Quantity does not support indexing or slicing.")
    def __add__(self, other: 'Observable') -> 'Observable':
        """
        Add two Observable instances together.

        Args:
            other (Observable): The other Observable instance to add.

        Returns:
            Observable: A new Observable instance with the summed quantity.
        """
        try:
            new_quantity = self.quantity + other.quantity
            return type(self)(name=self.name, quantity=new_quantity)
        except TypeError:
            raise TypeError("Quantity cannot be added.")
    def __sub__(self, other: 'Observable') -> 'Observable':
        """
        Subtract two Observable instances.

        Args:
            other (Observable): The other Observable instance to subtract.

        Returns:
            Observable: A new Observable instance with the subtracted quantity.
        """
        try:
            new_quantity = self.quantity - other.quantity
            return type(self)(name=self.name, quantity=new_quantity)
        except TypeError:
            raise TypeError("Quantity cannot be subtracted.")
    def __mul__(self, other: 'Observable') -> 'Observable':
        """
        Multiply two Observable instances together.

        Args:
            other (Observable): The other Observable instance to multiply.

        Returns:
            Observable: A new Observable instance with the multiplied quantity.
        """
        try:
            new_quantity = self.quantity * other.quantity
            return type(self)(name=self.name, quantity=new_quantity)
        except TypeError:
            raise TypeError("Quantity cannot be multiplied.")
    def __truediv__(self, other: 'Observable') -> 'Observable':
        """
        Divide two Observable instances.

        Args:
            other (Observable): The other Observable instance to divide.

        Returns:
            Observable: A new Observable instance with the divided quantity.
        """
        try:
            new_quantity = self.quantity / other.quantity
            return type(self)(name=self.name, quantity=new_quantity)
        except TypeError:
            raise TypeError("Quantity cannot be divided.")
    def __listadd__(self, other: 'Observable') -> 'Observable':
        """
        Concatenate the quantities of two Observable instances.

        Args:
            other (Observable): The other Observable instance to append.

        Returns:
            Observable: A new Observable instance with the concatenated quantity.
        """
        try:
            new_quantity = list(self.quantity) + list(other.quantity)
            # BUGFIX: the original built the new instance but never returned it,
            # so callers (e.g. Trajectory.merge) received None.
            return type(self)(name=self.name, quantity=new_quantity)
        except TypeError:
            raise TypeError("Quantity cannot be converted to a list or added.")
| Python |
2D | kirchhausenlab/Cryosamba | setup.py | .py | 526 | 24 | from setuptools import setup
# Package metadata and dependencies for the CryoSamba denoiser.
setup(
    name="cryosamba",
    version="0.1",
    description="Arkash Jain added the automate folder CI/CD, reach out to him for assistance",
    author="Jose Inacio da Costa Filho",
    author_email="",
    license="MIT",
    # Runtime dependencies; cupy-cuda11x pins the CUDA 11.x toolchain.
    install_requires=[
        "torch",
        "torchvision",
        "torchaudio",
        "tensorboard",
        "cupy-cuda11x",
        "easydict",
        "loguru",
        "mrcfile",
        "numpy",
        "tifffile",
    ],
    # NOTE(review): the README instructs creating a python=3.11 conda env,
    # but this caps support at <3.10 — confirm which constraint is intended.
    python_requires=">=3.8, <3.10",
)
| Python |
2D | kirchhausenlab/Cryosamba | advanced_instructions.md | .md | 11,041 | 220 | # Advanced instructions
## Table of Contents
1. [Installation](#installation) 🐍
2. [Training](#training)
- [Setup Training](#setup-training) 🛠️
- [Run Training](#run-training) 🚀
- [Visualization with TensorBoard](#visualization-with-tensorboard) 📈
3. [Inference](#inference)
- [Setup Inference](#setup-inference) 🛠️
- [Run Inference](#run-inference) 🚀
## Installation
1. Open a terminal window and run `conda create --name your-env-name python=3.11 -y` to create the environment (replace `your-env-name` with a desired name).
2. Activate the environment with `conda activate your-env-name`. In the future, you will have to activate the environment anytime you want to use CryoSamba.
3. Install PyTorch (for CUDA 11.8):
```bash
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118
```
4. Install the remaining libraries:
```bash
pip install tifffile mrcfile easydict loguru tensorboard cupy-cuda11x typer
```
5. Navigate to the directory where you want to save the Cryosamba code (via `cd /path/to/dir`). Then run
```bash
git clone https://github.com/kirchhausenlab/Cryosamba.git
```
in this directory. If you only have access to a Cryosamba `.zip` file instead, simply extract it there.
## Training
### Setup Training
Create a `your_train_config.json` config file somewhere. Use as default the template below:
```json
{
"train_dir": "/path/to/dir/Cryosamba/runs/exp-name/train",
"data_path": ["/path/to/file/volume.mrc"],
"train_data": {
"max_frame_gap": 6,
"patch_shape": [256, 256],
"patch_overlap": [16, 16],
"split_ratio": 0.95,
"batch_size": 32,
"num_workers": 4
},
"train": {
"load_ckpt_path": null,
"print_freq": 100,
"save_freq": 1000,
"val_freq": 1000,
"num_iters": 200000,
"warmup_iters": 300,
"mixed_precision": true,
"compile": false,
"do_early_stopping": false
},
"optimizer": {
"lr": 2e-4,
"lr_decay": 0.99995,
"weight_decay": 0.0001,
"epsilon": 1e-8,
"betas": [0.9, 0.999]
},
"biflownet": {
"pyr_dim": 24,
"pyr_level": 3,
"corr_radius": 4,
"kernel_size": 3,
"warp_type": "soft_splat",
"padding_mode": "reflect",
"fix_params": false
},
"fusionnet": {
"num_channels": 16,
"padding_mode": "reflect",
"fix_params": false
}
}
```
Explanation of parameters:
- `train_dir`: Folder where the checkpoints will be saved (e.g., `exp-name/train`).
- `data_path`: full path to a single (3D) .tif, .mrc or .rec file, or full path to a folder containing a sequence of (2D) .tif files, ordered alphanumerically matching the Z-stack order. You can train on multiple volumes by including the paths as elements of a list.
- `train_data`: parameters related to the raw data used for training
- `max_frame_gap`: Maximum frame gap used for training (see manuscript). For our data, we used values of 3, 6 and 10 for resolutions of 15.72, 7.86 and 2.62 angstroms/voxel, respectively.
- `patch_shape`: X and Y resolution of the patches the model will be trained on (must be a multiple of 32).
- `patch_overlap`: overlap (on X and Y) between consecutive patches (see manuscript).
- `split_ratio`: train and validation data split ratio. Must be a float between 0 and 1. E.g., 0.95 means that 95% of the data will be assigned for training and 5% for validation.
- `batch_size`: Number of data points loaded into the GPU at once.
- `num_workers`: Number of simultaneous CPU workers used by the Pytorch Dataloader.
- `train`: parameters related to the training routine
- `load_ckpt_path`: `null` to start a training run from scratch, or the path to a (`.pt` or `.pth`) model checkpoint if you want to start from a pretrained model.
- `print_freq`: frequency of the print statements in number of iterations.
- `save_freq`: frequency of model checkpoint saving in number of iterations.
  - `val_freq`: frequency of validation runs in number of iterations.
- `num_iters`: Length of the training run (default is 200k iterations).
  - `warmup_iters`: number of iterations for the learning rate warm-up.
- `mixed_precision`: if `true`, uses mixed precision training.
- `compile`: If `true`, uses `torch.compile` for faster training (might lead to errors, and has a few-minutes overhead time before the training iterations).
- `do_early_stopping`: If activated, training will be halted if, starting after 20 epochs, the validation loss doesn't decrease for at least 3 consecutive epochs.
- `optimizer`: parameters related to the optimization algorithm
- `lr`: base learning rate.
- `lr_decay`: multiplicative factor for the learning rate decay.
- `weight_decay`: weight decay for the AdamW optimizer.
- `epsilon`: epsilon for the AdamW optimizer.
- `betas`: betas for the AdamW optimizer.
- `biflownet`: parameters related to the Bi-Directional Optical Flow module (see manuscript and EBME paper)
- `pyr_dim`: base number of channels of the Feature Pyramid and Flow Estimator networks' layers.
- `pyr_level`: number of pyramid levels of the Feature Pyramid network.
- `corr_radius`: radius of the correlation volume function.
- `kernel_size`: kernel size of the biflownet convolutional layers.
- `warp_type`: type of Optical Flow warping (`soft_splat`, `avg_splat`, `fw_splat` or `backwarp`)
- `padding_mode`: padding mode of biflownet convolutional layers.
- `fix_params`: set to true in order to fix biflownet's weights and disable learning.
- `fusionnet`:
- `num_channels`: base number of channels of fusionnet layers.
- `padding_mode`: padding mode of fusionnet convolutional layers.
- `fix_params`: set to true in order to fix fusionnet's weights and disable learning.
Recommended folder structure for each experiment:
```
exp-name
├── train
└── inference
```
Running a training session may overwrite the `exp-name/train` folder but won't affect `exp-name/inference`, and vice versa.
### Run Training
1. In the terminal, run `nvidia-smi` to check available GPUs. For example, if you have 8 GPUs they will be numbered from 0 to 7.
2. To train on GPUs 0 and 1, go to the CryoSamba folder and run:
```bash
CUDA_VISIBLE_DEVICES=0,1 torchrun --standalone --nproc_per_node=2 train.py --config path/to/your_train_config.json
```
Adjust `--nproc_per_node` to change the number of GPUs. Use `--seed 1234` for reproducibility.
3. To interrupt training, press `CTRL + C`. You can resume training or start from scratch if prompted.
Training will run until the maximum number of iterations is reached. However, training and validation losses might converge/stabilize before that, at which point you can safely halt the process and save time and money on your electricity bill. In order to monitor the losses' progress you can: 1) see the logs printed on your screen, 2) see the `runtime.log` file inside your training folder, or 3) visualize their plots with TensorBoard.
**The output of the training run will be checkpoint files containing the trained model weights**. There is no denoised data output at this point yet. You can use the trained model weights to run inference on your data and then get the denoised outputs.
### Visualization with TensorBoard
1. Open a terminal window inside a graphical interface (e.g., a regular desktop computer, Chrome Remote Desktop, XDesk).
2. Activate the environment and run:
```bash
tensorboard --logdir path/to/exp-name/train
```
3. In a browser, open `localhost:6006`.
4. Use the slider under `SCALARS` to smooth noisy plots.
## Inference
### Setup Inference
Create a `your_inference_config.json` config file somewhere. Use as default the template below:
```json
{
"train_dir": "/path/to/dir/Cryosamba/runs/exp-name/train",
"data_path": "/path/to/file/volume.mrc",
"inference_dir": "/path/to/dir/Cryosamba/runs/exp-name/inference",
"inference_data": {
"max_frame_gap": 12,
"patch_shape": [256, 256],
"patch_overlap": [16, 16],
"batch_size": 32,
"num_workers": 4
},
"inference": {
"output_format": "same",
"load_ckpt_name": null,
"pyr_level": 3,
"TTA": true,
"mixed_precision": true,
"compile": true
}
}
```
Explanation of parameters:
- `train_dir`: Folder from which the checkpoints will be loaded.
- `data_path`: full path to a single (3D) .tif, .mrc or .rec file, or full path to a folder containing a sequence of (2D) .tif files, ordered alphanumerically matching the Z-stack order.
- `inference_dir`: Folder where the denoised data will be saved (e.g., `exp-name/inference`).
- `inference_data`: parameters related to the raw data used for inference
- `max_frame_gap`: maximum frame gap used for inference (see manuscript). For our data, we used values of 6, 12 and 20 for resolutions of 15.72, 7.86 and 2.62 angstroms/voxel, respectively.
- `patch_shape`: X and Y resolution of the patches the model will be trained on (must be a multiple of 32).
- `patch_overlap`: overlap (on X and Y) between consecutive patches (see manuscript).
- `batch_size`: Number of data points loaded into the GPU at once.
- `num_workers`: Number of simultaneous CPU workers used by the Pytorch Dataloader.
- `inference`: parameters related to the inference routine
- `output_format`: `"same"` to save the denoised result in the same format as the input raw data. Otherwise, specify either `"tif_file"`, `"mrc_file"`, `"rec_file"` or `"tif_sequence"`.
  - `load_ckpt_name`: `null` to load model weights from `train_dir/last.pt`, otherwise the name (without the `.pt` extension) of a custom checkpoint file inside `train_dir`.
- `pyr_level`: number of pyramid levels of the Feature Pyramid network of the biflownet.
- `TTA`: if `true`, uses Test-Time Augmentation (see manuscript), for slightly better results at the cost of longer inference times.
  - `mixed_precision`: if `true`, uses mixed precision inference.
- `compile`: If `true`, uses `torch.compile` for faster inference (might lead to errors, and has a few-minutes overhead time before the inference iterations).
### Run Inference
1. In the terminal, run `nvidia-smi` to check available GPUs. For example, if you have 8 GPUs they will be numbered from 0 to 7.
2. To run inference on GPUs 0 and 1, go to the CryoSamba folder and run:
```bash
CUDA_VISIBLE_DEVICES=0,1 torchrun --standalone --nproc_per_node=2 inference.py --config path/to/your_inference_config.json
```
Adjust `--nproc_per_node` to change the number of GPUs. Use `--seed 1234` for reproducibility.
3. To interrupt inference, press `CTRL + C`. You can resume or start from scratch if prompted.
4. The final denoised volume will be located at `/path/to/dir/runs/exp-name/inference`. It will be either a file named `result.tif`, `result.mrc`, `result.rec` or a folder named `result`.
You can simply open the final denoised volume in your preferred data visualization/processing software and check how it looks.
| Markdown |
2D | kirchhausenlab/Cryosamba | train.py | .py | 9,788 | 304 | import os, sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import argparse
from time import time
import torch
from torch import GradScaler
from torch import autocast
from core.model import get_model, get_loss
from core.dataset import get_dataloader
from core.utils.utils import (
setup_run,
load_json,
logger_info,
set_writer_train,
listify,
)
from core.utils.data_utils import get_data
from core.utils.torch_utils import (
count_model_params,
setup_DDP,
sync_nodes,
cleanup,
save_ckpt,
load_ckpt,
get_lr,
get_optimizer,
get_scheduler,
)
class EarlyStopper:
    """Stop training when the validation loss stops improving.

    Tracks the best (lowest) validation loss seen so far and counts
    consecutive checks whose loss exceeds that best value by more than
    ``min_delta``. Once the count reaches ``patience``, ``early_stop``
    reports True.
    """

    def __init__(self, patience=1, min_delta=0):
        self.patience = patience
        self.min_delta = min_delta
        self.counter = 0
        self.min_validation_loss = float("inf")

    def early_stop(self, validation_loss):
        """Return True when training should be halted."""
        if validation_loss < self.min_validation_loss:
            # New best loss: remember it and restart the streak.
            self.min_validation_loss = validation_loss
            self.counter = 0
            return False
        if validation_loss > self.min_validation_loss + self.min_delta:
            # Loss regressed beyond the tolerance band.
            self.counter += 1
            return self.counter >= self.patience
        # Within the tolerance band: neither improvement nor regression.
        return False
class Train:
    """DDP-aware training driver for CryoSamba.

    Loads a JSON config, builds the model/optimizer/scheduler/dataloaders,
    and runs the training loop with periodic logging, validation runs,
    checkpointing and optional early stopping.
    """

    def __init__(self, args):
        # args: argparse namespace with `config` (JSON config path) and
        # `seed` (-1 means "no fixed seed").
        self.run_stamp = time()  # wall-clock start, reported at the end
        self.world_size, self.rank, self.device = setup_DDP(args.seed)
        self.is_ddp = self.world_size > 1
        cfg = load_json(args.config)
        # Only rank 0 creates the run directory and the TensorBoard writer.
        if self.rank == 0:
            setup_run(cfg, mode="training")
            self.writer = set_writer_train(cfg)
        sync_nodes(self.is_ddp)
        self.log(
            f"Using seed number {args.seed}"
            if args.seed != -1
            else f"No random seed was set"
        )
        self.log(f"# of processes = {self.world_size}")
        # init params
        self.resume_ckpt = os.path.join(cfg.train_dir, "last.pt")
        self.load_ckpt_path = cfg.train.load_ckpt_path
        self.max_frame_gap = cfg.train_data.max_frame_gap
        self.num_iters = cfg.train.num_iters
        self.mixed_precision = cfg.train.mixed_precision
        self.train_dir = cfg.train_dir
        self.print_freq = cfg.train.print_freq
        self.val_freq = cfg.train.val_freq
        self.save_freq = cfg.train.save_freq
        self.compile = cfg.train.compile
        # Older configs may not define `do_early_stopping`; default to off.
        if hasattr(cfg.train, "do_early_stopping"):
            self.do_early_stopping = cfg.train.do_early_stopping
        else:
            self.do_early_stopping = False
        # Init model
        self.model = get_model(
            cfg, self.device, is_ddp=self.is_ddp, compile=self.compile
        )
        self.optimizer = get_optimizer(self.model, cfg.optimizer)
        self.scheduler = get_scheduler(
            self.optimizer, cfg.train.warmup_iters, cfg.optimizer.lr_decay
        )
        self.scaler = GradScaler("cuda", enabled=cfg.train.mixed_precision)
        self.loss_fn = get_loss()
        self.log(f"# of model parameters = {count_model_params(self.model)[1]}")
        # Init dataloaders
        cfg.data_path = listify(cfg.data_path)
        data_list, metadata_list = list(
            zip(*[get_data(path) for path in cfg.data_path])
        )
        self.train_loader = get_dataloader(
            cfg.train_data,
            data_list,
            metadata_list,
            split="train",
            is_ddp=self.is_ddp,
            shuffle=True,
        )
        # Validation always runs single-process (is_ddp=False), unshuffled.
        self.val_loader = get_dataloader(
            cfg.train_data,
            data_list,
            metadata_list,
            split="val",
            is_ddp=False,
            shuffle=False,
        )
        self.log(
            f"# of data samples = {len(self.train_loader)} (train), {len(self.val_loader)} (val)"
        )
        # Consulted only when `do_early_stopping` is enabled: halts after 3
        # consecutive non-improving validation epochs.
        self.early_stopper = EarlyStopper(patience=3, min_delta=0)

    def log(self, message):
        # Log only from rank 0 (logger_info filters on rank).
        return logger_info(self.rank, message)

    def print_train_loss(self, train_loss, print_time):
        # Rank-0, every `print_freq` iterations: persist the resume
        # checkpoint, push scalars to TensorBoard and log a human-readable
        # summary of the per-gap running losses.
        save_ckpt(
            self.model, self.optimizer, self.scheduler, self.iter, self.resume_ckpt
        )
        for key, value in train_loss.items():
            self.writer.add_scalar(f"train_loss/{key}", value, self.iter)
        self.writer.add_scalar("learning_rate/lr", get_lr(self.optimizer), self.iter)
        self.writer.flush()
        message = f"Iter {self.iter}/{self.num_iters} :: train loss: "
        message += ", ".join(
            f"{value:.6f} ({key})" for key, value in train_loss.items()
        )
        message += f", E.T.: {(time()-print_time):.3f}s"
        self.log(message)

    def save_model(self):
        # Save a numbered (zero-padded to the width of num_iters) checkpoint
        # alongside the rolling `last.pt`.
        self.zfill = len(str(self.num_iters))
        save_ckpt_path = os.path.join(
            self.train_dir, str(int(self.iter)).zfill(self.zfill) + ".pt"
        )
        save_ckpt(self.model, self.optimizer, self.scheduler, self.iter, save_ckpt_path)

    @torch.inference_mode()
    def validation(self, write=True):
        """Run one pass over the validation set and return the mean loss.

        Only frame gap t=1 is evaluated. When `write` is True the loss is
        also pushed to TensorBoard and logged (rank-0 callers only, since
        `self.writer` exists only on rank 0).
        """
        val_stamp = time()
        self.model.eval()
        val_loss = 0
        for i, imgs in enumerate(self.val_loader):
            imgs = imgs.to(device=self.device)
            # Split the stacked frame window into single-frame tensors.
            imgs = torch.split(imgs, 1, dim=1)
            t = 1
            # Endpoint frames (img0, img1) straddle the middle frame imgT.
            img0, imgT, img1 = (
                imgs[self.max_frame_gap - t].contiguous(),
                imgs[self.max_frame_gap].contiguous(),
                imgs[self.max_frame_gap + t].contiguous(),
            )
            if self.is_ddp:
                rec = self.model.module.validation(img0, img1)
            else:
                rec = self.model.validation(img0, img1)
            loss = self.loss_fn(rec, imgT)
            val_loss += float(loss.item()) / len(self.val_loader)
        val_time = time() - val_stamp
        self.model.train()
        if write == True:
            self.writer.add_scalar("val_loss/val", val_loss, self.iter)
            self.writer.flush()
            self.log(
                f"Iter {self.iter}/{self.num_iters} :: val loss: {val_loss:.6f}, E.T.: {val_time:.3f}s"
            )
        return val_loss

    def run_training(self):
        """Main training loop; resumes from `last.pt` when it exists."""
        # Load checkpoint
        self.model, self.optimizer, self.scheduler, self.iter = load_ckpt(
            self.resume_ckpt,
            self.model,
            self.optimizer,
            self.scheduler,
            is_ddp=self.is_ddp,
            compile=self.compile,
        )
        # Optionally warm-start weights from a user-supplied checkpoint
        # (weights only; optimizer/scheduler/iter keep the resume state).
        if self.load_ckpt_path is not None:
            self.model = load_ckpt(
                self.load_ckpt_path,
                self.model,
                is_ddp=self.is_ddp,
                compile=self.compile,
            )[0]
        # Training loop
        if self.rank == 0:
            # Per-gap running loss averages, reset every `print_freq` iters.
            train_loss = {f"gap {t}": 0 for t in range(1, self.max_frame_gap + 1)}
        epoch = self.iter // len(self.train_loader)
        print_time = time()
        # NOTE(review): the iteration cap is only re-checked once per epoch,
        # so the final epoch may overshoot `num_iters` by up to one epoch.
        while self.iter <= self.num_iters:
            self.log(f"*** Epoch {epoch} ***")
            self.model.train()
            if self.is_ddp:
                # Reshuffle the distributed sampler deterministically.
                self.train_loader.sampler.set_epoch(epoch)
            for i, imgs in enumerate(self.train_loader):
                imgs = imgs.to(device=self.device)
                imgs = torch.split(imgs, 1, dim=1)
                # One optimizer step per frame gap t in [1, max_frame_gap].
                for t in range(1, self.max_frame_gap + 1):
                    self.optimizer.zero_grad(set_to_none=True)
                    skip_lr_sch = False
                    img0, imgT, img1 = (
                        imgs[self.max_frame_gap - t].contiguous(),
                        imgs[self.max_frame_gap].contiguous(),
                        imgs[self.max_frame_gap + t].contiguous(),
                    )
                    with autocast("cuda", enabled=self.mixed_precision):
                        rec = self.model(img0, img1)
                        loss = self.loss_fn(rec, imgT)
                    if self.rank == 0:
                        train_loss[f"gap {t}"] += (
                            float(loss.detach().item()) / self.print_freq
                        )
                    old_scale = self.scaler.get_scale()
                    self.scaler.scale(loss).backward()
                    self.scaler.step(self.optimizer)
                    self.scaler.update()
                    # A shrinking scale means GradScaler skipped the
                    # optimizer step (inf/nan grads), so don't advance the
                    # LR scheduler either.
                    if old_scale > self.scaler.get_scale():
                        skip_lr_sch = True
                    if not skip_lr_sch:
                        self.scheduler.step()
                # Periodic logging / validation / checkpointing (rank 0).
                if self.rank == 0 and self.iter > 0:
                    if self.iter % self.print_freq == 0:
                        self.print_train_loss(train_loss, print_time)
                        train_loss = {key: 0 for key in train_loss.keys()}
                    if self.iter % self.val_freq == 0:
                        val_loss = self.validation()
                    if self.iter % self.save_freq == 0:
                        self.save_model()
                    if self.iter % self.print_freq == 0:
                        # Restart the elapsed-time window after printing.
                        print_time = time()
                self.iter += 1
            epoch += 1
            if self.do_early_stopping and epoch >= 20:
                if self.rank == 0:
                    val_loss = self.validation(write=False)
                    if self.early_stopper.early_stop(val_loss):
                        self.log(f"Early stopping training at epoch {epoch}.")
                        break
                # NOTE(review): on early stop, rank 0 breaks before this
                # barrier while other ranks reach it — confirm multi-GPU
                # shutdown behavior with early stopping enabled.
                sync_nodes(self.is_ddp)
        sync_nodes(self.is_ddp)
        self.log(
            f"Training completed successfully. Total training time = {time()-self.run_stamp:.3f}s"
        )
        cleanup(self.is_ddp)
        sys.exit()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", default="configs/default_train.json")
parser.add_argument("-s", "--seed", type=int, default=-1)
args = parser.parse_args()
task = Train(args)
task.run_training()
| Python |
2D | kirchhausenlab/Cryosamba | logging_config.py | .py | 1,752 | 59 | import logging
import logging.config
import sys
from pathlib import Path
# Set base directory for logs: a "logs" folder is created next to this file.
BASE_DIR = Path(__file__).resolve().parent
LOGS_DIR = Path(BASE_DIR, "logs")
LOGS_DIR.mkdir(parents=True, exist_ok=True)
# Logging configuration: bare messages to stdout, detailed records to
# size-rotated info/error log files.
logging_config = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "minimal": {"format": "%(message)s"},
        "detailed": {
            "format": "%(levelname)s %(asctime)s [%(name)s:%(filename)s:%(funcName)s:%(lineno)d]\n%(message)s\n"
        },
    },
    "handlers": {
        # Console: bare messages. Handler level is DEBUG, but the root
        # logger level below is INFO, so DEBUG records are still dropped.
        "console": {
            "class": "logging.StreamHandler",
            "stream": sys.stdout,
            "formatter": "minimal",
            "level": logging.DEBUG,
        },
        # info.log: INFO and above, rotated at 10 MiB, 10 backups kept.
        "info": {
            "class": "logging.handlers.RotatingFileHandler",
            "filename": Path(LOGS_DIR, "info.log"),
            "maxBytes": 10485760,
            "backupCount": 10,
            "formatter": "detailed",
            "level": logging.INFO,
        },
        # error.log: ERROR and above only, same rotation policy.
        "error": {
            "class": "logging.handlers.RotatingFileHandler",
            "filename": Path(LOGS_DIR, "error.log"),
            "maxBytes": 10485760,
            "backupCount": 10,
            "formatter": "detailed",
            "level": logging.ERROR,
        },
    },
    # NOTE(review): "propagate" has no effect on the root logger.
    "root": {
        "handlers": ["console", "info", "error"],
        "level": logging.INFO,
        "propagate": True,
    },
}
logging.config.dictConfig(logging_config)
logger = logging.getLogger()  # root logger, shared package-wide
# Usage examples:
# logger.debug("❗ For Debugging")
# logger.info("💻 Useful Messages from code ")
# logger.warning("⚠️ Something to be aware of")
# logger.error("💀 Mistake with the process")
# logger.critical("❌ critical error check")
| Python |
2D | kirchhausenlab/Cryosamba | __init__.py | .py | 143 | 5 | from __future__ import absolute_import
from . import automate, configs, core, requirements, scripts, tests
from .logging_config import logger
| Python |
2D | kirchhausenlab/Cryosamba | inference.py | .py | 8,649 | 267 | import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import argparse
from time import time
import numpy as np
import torch
from torch import autocast
from core.dataset import get_dataloader
from core.model import get_model
from core.utils.data_utils import (
denormalize_imgs,
get_data,
get_overlap_pad,
save_data,
unpad3D,
)
from core.utils.torch_utils import (
cleanup,
count_model_params,
load_ckpt,
setup_DDP,
sync_nodes,
)
from core.utils.utils import listify, load_json, logger_info, remove_file, setup_run
class Inference:
    """DDP-aware inference (denoising) driver for CryoSamba.

    Restores a trained model, streams patches of the input volume through
    it, and assembles the denoised output in a memory-mapped scratch file
    before saving it in the requested format.
    """

    def __init__(self, args):
        # args: argparse namespace with `config` (JSON config path) and
        # `seed` (-1 means "no fixed seed").
        self.run_stamp = time()
        self.world_size, self.rank, self.device = setup_DDP(args.seed)
        self.is_ddp = self.world_size > 1
        cfg = load_json(args.config)
        if self.rank == 0:
            setup_run(cfg, mode="inference")
        sync_nodes(self.is_ddp)
        self.log(
            f"Using seed number {args.seed}"
            if args.seed != -1
            else f"No random seed was set"
        )
        self.log(f"# of processes = {self.world_size}")
        # Reuse the saved training config as the model config, overriding
        # the pyramid level and the data section with inference settings.
        if os.path.exists(cfg.train_dir):
            cfg_train = load_json(os.path.join(cfg.train_dir, "config.json"))
        else:
            raise ValueError(f"Checkpoint dir {cfg.train_dir} does not exist")
        cfg_train.biflownet.pyr_level = cfg.inference.pyr_level
        cfg_train.train_data = cfg.inference_data
        # init params
        self.overlap_pad = get_overlap_pad(
            cfg.inference_data.patch_overlap, self.device
        )
        self.TTA = cfg.inference.TTA
        self.mixed_precision = cfg.inference.mixed_precision
        self.inference_dir = cfg.inference_dir
        self.output_format = cfg.inference.output_format
        self.max_frame_gap = cfg.inference_data.max_frame_gap
        self.batch_size = cfg.inference_data.batch_size
        self.output_temp_name = os.path.join(cfg.inference_dir, "temp.dat")
        self.compile = cfg.inference.compile
        # Init model: default to `last.pt` unless a checkpoint name is given.
        ckpt_name = (
            "last"
            if cfg.inference.load_ckpt_name is None
            else cfg.inference.load_ckpt_name
        )
        ckpt_path = os.path.join(cfg.train_dir, ckpt_name + ".pt")
        if not os.path.exists(ckpt_path):
            raise ValueError(f"Model checkpoint {ckpt_path} does not exist")
        self.model = get_model(
            cfg_train, self.device, is_ddp=self.is_ddp, compile=self.compile
        )
        self.model = load_ckpt(
            ckpt_path, self.model, is_ddp=self.is_ddp, compile=self.compile
        )[0]
        self.model.eval()
        self.log(f"# of model parameters = {count_model_params(self.model)[1]}")
        self.log(f"Using trained weights from {ckpt_path}")
        # Init dataloaders
        cfg.data_path = listify(cfg.data_path)
        data_list, metadata_list = list(
            zip(*[get_data(path) for path in cfg.data_path])
        )
        self.inference_loader = get_dataloader(
            cfg.inference_data,
            data_list,
            metadata_list,
            split="test",
            is_ddp=self.is_ddp,
        )
        # NOTE(review): only the first volume's metadata is used — confirm
        # behavior when `data_path` lists more than one volume.
        self.metadata = metadata_list[0]
        self.log(f"# of data samples = {len(self.inference_loader)}")
        # Make output temporary file
        self.make_output_temp_file()
        sync_nodes(self.is_ddp)
        # All ranks open the same on-disk array read-write and fill in the
        # patches they process.
        self.output_array = np.memmap(
            self.output_temp_name,
            dtype=self.metadata["dtype"],
            mode="r+",
            shape=self.metadata["shape"],
        )

    def log(self, message):
        # Log only from rank 0 (logger_info filters on rank).
        return logger_info(self.rank, message)

    def make_output_temp_file(self):
        # Create the shared output memmap once (rank 0 only). Z-slices
        # within `max_frame_gap + 1` of either border cannot be denoised,
        # so they are pre-filled with the volume mean.
        if not os.path.exists(self.output_temp_name):
            if self.rank == 0:
                output_array = np.memmap(
                    self.output_temp_name,
                    dtype=self.metadata["dtype"],
                    mode="w+",
                    shape=self.metadata["shape"],
                )
                z_border = self.max_frame_gap + 1
                output_array[0:z_border] = self.metadata["mean"]
                output_array[-z_border:] = self.metadata["mean"]
                output_array.flush()

    def process_crop_params(self, crop_params):
        # Split per-patch crop parameters into source coordinates and the
        # padding added at volume borders, and derive:
        #   pad        - padding to strip from each model output,
        #   out_coords - where the trimmed patch lands in the output array,
        #   z          - Z index of the frame being reconstructed.
        coords, border_pad = torch.split(crop_params, 3, dim=1)
        # Trim the patch overlap only where it wasn't already border padding.
        residual_pad = self.overlap_pad * (self.overlap_pad > border_pad)
        residual_pad[..., 1] *= -1
        pad = torch.maximum(border_pad, self.overlap_pad)
        out_coords = coords + residual_pad
        z = coords[:, 0, 0] + self.max_frame_gap
        return pad, out_coords, z

    def skip_iter(self, imgs, z, out_coords):
        # Resume support: if every target region in this batch already
        # holds nonzero output, it was written by a previous run — skip it.
        output_mimmax = min(
            [
                self.output_array[
                    z[j],
                    out_coords[j, 1, 0] : out_coords[j, 1, 1],
                    out_coords[j, 2, 0] : out_coords[j, 2, 1],
                ].max()
                for j in range(imgs.shape[0])
            ]
        )
        return True if output_mimmax != 0.0 else False

    def TTA_transforms(self, x):
        # Flip-based test-time augmentation. The flips are involutions, so
        # applying this a second time (to the reconstructions) undoes them.
        if self.TTA:
            return [
                x[0],
                x[1].flip(dims=[-1]),
                x[2].flip(dims=[-2]),
                x[3].flip(dims=[-1, -2]),
            ]
        else:
            return x

    def samba(self, img0, imgT, img1):
        # Two-stage reconstruction: interpolate each half-window, then
        # interpolate between the two intermediate reconstructions.
        rec_minus = self.model(img0, imgT)
        rec_plus = self.model(imgT, img1)
        rec = self.model(rec_minus, rec_plus)
        return rec

    def inference_fn(self, img0, imgT, img1):
        # Run samba on (optionally) four flipped copies, undo the flips on
        # the results, and average them into a single reconstruction.
        img0 = [img0, img0, img0, img0] if self.TTA == True else [img0]
        imgT = [imgT, imgT, imgT, imgT] if self.TTA == True else [imgT]
        img1 = [img1, img1, img1, img1] if self.TTA == True else [img1]
        img0 = self.TTA_transforms(img0)
        imgT = self.TTA_transforms(imgT)
        img1 = self.TTA_transforms(img1)
        recs = [self.samba(img0[i], imgT[i], img1[i]) for i in range(len(img0))]
        recs = self.TTA_transforms(recs)
        recs = torch.cat(recs, dim=1).mean(dim=1, keepdim=True)
        return recs

    def run_inference(self):
        """Denoise the volume patch-by-patch and save the assembled result."""
        for i, [imgs, crop_params] in enumerate(self.inference_loader):
            iter_time = time()
            imgs, crop_params = imgs.to(device=self.device), crop_params.to(
                device=self.device
            )
            pad, out_coords, z = self.process_crop_params(crop_params)
            if self.skip_iter(imgs, z, out_coords):
                continue
            imgs = torch.split(imgs, 1, dim=1)
            recs = []
            # Average reconstructions over every frame gap t.
            for t in range(1, self.max_frame_gap + 1):
                img0, imgT, img1 = (
                    imgs[self.max_frame_gap - t].contiguous(),
                    imgs[self.max_frame_gap].contiguous(),
                    imgs[self.max_frame_gap + t].contiguous(),
                )
                with autocast("cuda", enabled=self.mixed_precision):
                    with torch.inference_mode():
                        rec = self.inference_fn(img0, imgT, img1)
                recs.append(rec)
            rec = torch.cat(recs, dim=1).mean(dim=1, keepdim=True)
            # Back to the raw data scale/dtype before writing to disk.
            rec = denormalize_imgs(rec, params=self.metadata)
            rec = rec.cpu().detach().numpy().astype(self.metadata["dtype"])
            for j in range(rec.shape[0]):
                self.output_array[
                    z[j],
                    out_coords[j, 1, 0] : out_coords[j, 1, 1],
                    out_coords[j, 2, 0] : out_coords[j, 2, 1],
                ] = unpad3D(rec[j], pad[j])
            self.log(
                f"Iter {i}/{len(self.inference_loader)}, Elapsed time = {(time()-iter_time):.3f}"
            )
        self.output_array.flush()
        sync_nodes(self.is_ddp)
        if self.rank == 0:
            self.log(f"Saving results")
            save_data(
                path=self.inference_dir,
                name="result",
                data=self.output_array,
                metadata=self.metadata,
                output_format=self.output_format,
            )
        sync_nodes(self.is_ddp)
        if self.rank == 0:
            # The memmap scratch file is no longer needed once saved.
            remove_file(self.output_temp_name)
        self.log(
            f"Inference completed successfully. Total inference time = {time()-self.run_stamp:.3f}s"
        )
        cleanup(self.is_ddp)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", default="configs/default_inference.json")
parser.add_argument("-s", "--seed", type=int, default=-1)
args = parser.parse_args()
task = Inference(args)
task.run_inference()
| Python |
2D | kirchhausenlab/Cryosamba | run_cryosamba.py | .py | 32,247 | 839 | import os
import sys
import shutil
import json
import subprocess
from functools import wraps
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import typer
from loguru import logger
from rich import print as rprint
from rich.console import Console
# Typer CLI application object; commands are registered via @app.command().
app = typer.Typer()
# Experiment folders live under ./runs relative to the current working
# directory (expected to be the CryoSamba checkout).
RUNS_DIR = os.path.join(os.getcwd(), "runs")
def select_gpus() -> Optional[Union[List[str], int]]:
    """Interactively pick GPU indices using ``nvidia-smi``.

    Returns the list of selected GPU index strings, or -1 if the user
    finished without selecting any.
    """
    # NOTE(review): `simple_header` is not imported or defined in the
    # visible part of this module — confirm it exists elsewhere in the file.
    simple_header("GPU Selection")
    rprint(
        f"[yellow]Please note that you need a nvidia GPU to run CryoSamba. If you cannot see GPU information, your machine may not support CryoSamba.[/yellow]"
    )
    if typer.confirm("Do you want to see detailed GPU information?"):
        command1 = "nvidia-smi"
        res = subprocess.run(command1, shell=True, capture_output=True, text=True)
        print("")
        print(res.stdout)
    # Query GPU indices/utilization as CSV; the first column is the index.
    command2 = "nvidia-smi --query-gpu=index,utilization.gpu,memory.free,memory.total,memory.used --format=csv"
    res2 = subprocess.run(command2, shell=True, capture_output=True, text=True)
    lst_available_gpus = []
    lines = res2.stdout.split("\n")
    for i, line in enumerate(lines):
        # Skip the CSV header row and blank lines.
        if i == 0 or line == "":
            continue
        lst_available_gpus.append(line.split(",")[0])
    select_gpus = []  # NOTE: shadows the function name inside this scope
    while True:
        rprint(
            f"\n[bold]You have these GPUs left available now: [red]{lst_available_gpus}[/red] and have currently selected these GPUs: [green]{select_gpus}[/green][/bold]"
        )
        gpus = typer.prompt("Add a GPU number: (or Enter F to finish selection)")
        if gpus == "F":
            break
        # Move a valid choice from the available pool to the selection.
        if gpus in lst_available_gpus:
            select_gpus.append(gpus)
            lst_available_gpus.remove(gpus)
        else:
            rprint(f"[red]Invalid choice![/red]")
        print("")
    if len(select_gpus) == 0:
        rprint(f"[red]You didn't select any GPUs[/red]")
        return -1
    else:
        rprint(f"You have selected the following GPUs: [blue]{select_gpus}[/blue]\n")
        return select_gpus
def run_training(gpus: str, exp_name: str) -> None:
    """Launch a (possibly multi-GPU) torchrun training session.

    Args:
        gpus: comma-separated GPU indices, e.g. "0,1".
        exp_name: experiment folder name under RUNS_DIR that contains
            ``train_config.json``.
    """
    config_path = os.path.join(RUNS_DIR, exp_name, "train_config.json")
    # nproc_per_node is derived from the number of comma-separated GPU ids.
    cmd = f"OMP_NUM_THREADS=1 CUDA_VISIBLE_DEVICES={gpus} torchrun --standalone --nproc_per_node=$(echo {gpus} | tr ',' '\\n' | wc -l) train.py --config {config_path}"
    rprint(
        f"[yellow][bold]!!! Training instructions, read before proceeding !!![/bold][/yellow]"
    )
    rprint(
        f"[bold]* You can interrupt training at any time by pressing CTRL + C, and you can resume it later by running CryoSamba again *[/bold]"
    )
    rprint(
        f"[bold]* Training will run until your specified maximum number of iterations is reached. However, you can monitor the training and validation losses and halt training when you think they have converged/stabilized * [/bold]"
    )
    rprint(
        f"[bold]* You can monitor the losses through here, through the .log file in the experiment training folder, or through TensorBoard (see README on how to run it) *[/bold] \n"
    )
    rprint(
        f"[bold]* The output of the training run will be checkpoint files containing the trained model weights. There is no denoised data output at this point yet. You can used the trained model weights to run inference on your data and then get the denoised outputs. *[/bold] \n"
    )
    if typer.confirm("Do you want to start training?"):
        rprint(f"\n[blue]***********************************************[/blue]\n")
        subprocess.run(cmd, shell=True, text=True)
    else:
        rprint(f"[red]Training aborted[/red]")
def run_inference(gpus: str, exp_name: str) -> None:
    """Launch a (possibly multi-GPU) torchrun inference session.

    Args:
        gpus: comma-separated GPU indices, e.g. "0,1".
        exp_name: experiment folder name under RUNS_DIR that contains
            ``inference_config.json``.
    """
    config_path = os.path.join(RUNS_DIR, exp_name, "inference_config.json")
    # nproc_per_node is derived from the number of comma-separated GPU ids.
    cmd = f"OMP_NUM_THREADS=1 CUDA_VISIBLE_DEVICES={gpus} torchrun --standalone --nproc_per_node=$(echo {gpus} | tr ',' '\\n' | wc -l) inference.py --config {config_path}"
    rprint(
        f"[yellow][bold]!!! Inference instructions, read before proceeding !!![/bold][/yellow]"
    )
    rprint(
        f"[bold]* You can interrupt inference at any time by pressing CTRL + C, and you can resume it later by running CryoSamba again *[/bold]"
    )
    rprint(
        f"[bold]* You should have previously run a training session on this experiment in order to run inference * [/bold]"
    )
    rprint(
        f"[bold]* The denoised volume will be generated after the final iteration * [/bold] \n"
    )
    if typer.confirm("Do you want to start inference?"):
        rprint(f"\n[blue]***********************************************[/blue]\n")
        subprocess.run(cmd, shell=True, text=True)
    else:
        rprint(f"[red]Inference aborted[/red]")
def handle_exceptions(func):
    """Decorator: report any uncaught exception from *func* and exit(1)."""

    @wraps(func)
    def _wrapped(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
        except Exception as err:
            # Surface the failure to the user and the log, then abort.
            typer.echo(f"An error occurred: {str(err)}")
            logger.exception("An exception occurred")
            raise typer.Exit(code=1)
        return result

    return _wrapped
@handle_exceptions
def is_conda_installed() -> bool:
    """Return True if the ``conda`` executable is on PATH and responds."""
    try:
        subprocess.run(
            ["conda", "--version"],
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    except (FileNotFoundError, subprocess.CalledProcessError):
        # conda is missing from PATH, or present but erroring out.
        return False
    return True
@handle_exceptions
def is_env_active(env_name) -> bool:
    """Return True if a conda environment named *env_name* exists.

    Parses ``conda env list`` line by line and compares the first column
    (the environment name) exactly. The previous implementation used a
    substring test on the whole output, which false-positived whenever
    *env_name* was a substring of another environment's name or path.
    """
    cmd = "conda env list"
    result = subprocess.run(cmd, capture_output=True, text=True, shell=True)
    for line in result.stdout.splitlines():
        line = line.strip()
        # Skip comment and blank lines; the env name is the first token.
        if not line or line.startswith("#"):
            continue
        if line.split()[0] == env_name:
            return True
    return False
def run_command(command, shell=True):
    """Run *command* and return its captured (stdout, stderr) text.

    A non-zero exit status is reported via typer/loguru but not raised,
    so callers always receive the captured output.
    """
    completed = subprocess.run(
        command,
        shell=shell,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,
    )
    output, error = completed.stdout, completed.stderr
    if completed.returncode != 0:
        typer.echo(f"Error executing command: {command}\nError: {error}", err=True)
        logger.error(f"Error executing command: {command}\nError: {error}")
    return output, error
@app.command()
@handle_exceptions
def setup_conda():
    """Install Miniconda if ``conda`` is not already available.

    Linux/macOS get the shell-script installer; anything else is assumed
    to be Windows and gets the silent .exe installer.
    """
    typer.echo("Conda Installation")
    if is_conda_installed():
        rprint(f"[green]Conda is already installed.[/green]")
    else:
        if sys.platform.startswith("linux") or sys.platform == "darwin":
            # Linux/macOS: download and run the Miniconda installer script.
            # NOTE(review): the Linux x86_64 installer is also used on
            # darwin — confirm this is intended for macOS machines.
            typer.echo("Conda is not installed. Installing conda ....")
            subprocess.run(
                "wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh",
                shell=True,
            )
            subprocess.run("chmod +x Miniconda3-latest-Linux-x86_64.sh", shell=True)
            subprocess.run("bash Miniconda3-latest-Linux-x86_64.sh", shell=True)
            # NOTE(review): these run in child shells, so the PATH export
            # and `source ~/.bashrc` do not affect this Python process.
            subprocess.run("export PATH=~/miniconda3/bin:$PATH", shell=True)
            subprocess.run("source ~/.bashrc", shell=True)
        else:
            # Windows: download the .exe installer and run it silently.
            run_command(
                "powershell -Command \"(New-Object Net.WebClient).DownloadFile('https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe', 'Miniconda3-latest-Windows-x86_64.exe')\""
            )
            run_command(
                'start /wait "" Miniconda3-latest-Windows-x86_64.exe /InstallationType=JustMe /AddToPath=1 /RegisterPython=0 /S /D=%UserProfile%\\Miniconda3'
            )
@app.command()
@handle_exceptions
def setup_environment(
    env_name: str = typer.Option("cryosamba", prompt="Enter environment name")
):
    """Create (or activate) the conda environment used by CryoSamba.

    If the environment already exists it is just activated; otherwise it is
    created with Python 3.11 and the user is offered a follow-up command
    that installs the Python dependencies.
    """
    typer.echo(f"Setting up Conda Environment: {env_name}")
    cmd = f"conda init && conda activate {env_name}"
    if is_env_active(env_name):
        typer.echo(f"Environment '{env_name}' exists.")
        subprocess.run(cmd, shell=True)
    else:
        typer.echo(f"Creating conda environment: {env_name}")
        subprocess.run(f"conda create --name {env_name} python=3.11 -y", shell=True)
        subprocess.run(cmd, shell=True)
        typer.echo("Environment has been created")
        typer.echo("**please copy the command below in the terminal.**")
        # NOTE(review): `pip install ... webbrowser` looks wrong —
        # `webbrowser` is part of the standard library; confirm intent.
        cmd = f"conda init && sleep 3 && source ~/.bashrc && conda activate {env_name} && pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 && pip install tifffile mrcfile easydict loguru tensorboard streamlit pipreqs cupy-cuda11x typer webbrowser"
        typer.echo(
            f"Say you downloaded cryosamba in your downloads folder, open a NEW terminal window and run the following commands or hit yes to run it here: \n\n{cmd} "
        )
        # Re-prompt until a valid answer is given. The previous version read
        # the answer once before the loop, so any input other than "y"/"n"
        # spun forever without ever asking again.
        while True:
            run_cmd = typer.prompt("Enter (y/n): ")
            if run_cmd == "n":
                typer.echo(cmd)
                break
            elif run_cmd == "y":
                subprocess.run(cmd, shell=True, text=True)
                break
@app.command()
@handle_exceptions
def export_env():
    """Export the active Conda environment to ``environment.yml``.

    The file is written directly into the current working directory (the
    repository root when run from it). The previous version additionally
    ran ``mv environment.yml `` with no destination argument, which always
    failed; that broken no-op has been removed.
    """
    typer.echo("Exporting Conda Environment")
    subprocess.run("conda env export > environment.yml", shell=True)
    typer.echo("Environment exported and moved to root directory.")
def ask_user(prompt: str, default: Any = None) -> Any:
    """Prompt the user via typer, returning *default* on empty input."""
    return typer.prompt(prompt, default=default)
def ask_user_int(prompt: str, min_value: int, max_value: int, default: int) -> int:
    """Prompt repeatedly until the user gives an int in [min_value, max_value]."""
    while True:
        raw = ask_user(prompt, default)
        try:
            value = int(raw)
        except ValueError:
            rprint(f"[red]Please enter a valid integer.[/red]")
            continue
        if min_value <= value <= max_value:
            return value
        rprint(
            f"[red]Please enter a value between [bold]{min_value}[/bold] and [bold]{max_value}[/bold].[/red]"
        )
def ask_user_int_multiple(
    prompt: str, min_value: int, max_value: int, multiple: int, default: int
) -> int:
    """Prompt until the user gives an int in [min_value, max_value] that is
    also a multiple of *multiple*."""
    while True:
        raw = ask_user(prompt, default)
        try:
            value = int(raw)
        except ValueError:
            rprint(f"[red]Please enter a valid integer.[/red]")
            continue
        if not (min_value <= value <= max_value):
            rprint(
                f"[red]Please enter a value between [bold]{min_value}[/bold] and [bold]{max_value}[/bold].[/red]"
            )
        elif value % multiple != 0:
            rprint(
                f"[red]Please enter an integer value multiple of {multiple}.[/red]"
            )
        else:
            return value
def list_tif_files(path):
    """Return full paths of the plain ``.tif`` files directly inside *path*.

    The listing is not recursive, and directories are excluded even when
    their name ends in ``.tif``.
    """
    return [
        os.path.join(path, entry)
        for entry in os.listdir(path)
        if entry.endswith(".tif") and os.path.isfile(os.path.join(path, entry))
    ]
@app.command()
def generate_experiment(exp_name: str) -> None:
    """Interactively collect parameters for a new experiment and write its
    ``train_config.json`` and ``inference_config.json`` under RUNS_DIR/exp_name.

    Args:
        exp_name: Name of the experiment; also the folder name under RUNS_DIR.
    """
    rprint(f"[bold]Setting up new experiment [green]{exp_name}[/green][/bold]")
    rprint(
        "[bold]Please choose experiment parameters below. Values inside brackets will be chosen by default if you press Enter without providing any input.[/bold]"
    )
    exp_path = os.path.join(RUNS_DIR, exp_name)
    # Common parameters
    train_dir = f"{exp_path}/train"
    inference_dir = f"{exp_path}/inference"
    # --- data path: must be a supported single file or a folder of .tif slices ---
    while True:
        rprint(
            "\n[bold]DATA PATH[/bold]: The path to a single (3D) .tif, .mrc or .rec file, or the path to a folder containing a sequence of (2D) .tif files, ordered alphanumerically matching the Z-stack order. You can use the full path or a path relative from the CryoSamba folder."
        )
        data_path = ask_user(
            "Enter your data path",
            "data/sample_data.rec",
        )
        if not os.path.exists(data_path):
            rprint("[red]Data path is invalid. Try again.[/red]")
        elif os.path.isfile(data_path):
            extension = os.path.splitext(data_path)[1]
            if extension not in [".mrc", ".rec", ".tif"]:
                rprint(
                    f"[red]Extension [bold]{extension}[/bold] is not supported. Try another path.[/red]"
                )
            else:
                break
        elif os.path.isdir(data_path):
            files = list_tif_files(data_path)
            if len(files) == 0:
                rprint(
                    "[red]Your folder does not contain any tif files. Only sequences of tif files are currently supported. Try another path.[/red]"
                )
            else:
                break
    # Training specific parameters
    rprint(
        "\n[bold]MAXIMUM FRAME GAP FOR TRAINING[/bold]: explained in the manuscript. We empirically set values of 3, 6 and 10 for data at resolutions of 15.72, 7.86 and 2.62 Angstroms/voxel, respectively. For different resolutions, try a reasonable value interpolated from the reference ones."
    )
    train_max_frame_gap = ask_user_int("Enter Maximum Frame Gap for Training", 1, 40, 3)
    rprint(
        "\n[bold]NUMBER OF ITERATIONS[/bold]: for how many iterations the training session will run. This is an upper limit, and you can halt training before that."
    )
    num_iters = ask_user_int(
        "Enter the number of iterations you want to run", 1000, 200000, 50000
    )
    rprint(
        "\n[bold]BATCH SIZE[/bold]: number of data points passed at once to the GPUs. A higher number leads to faster training, but the whole batch might not fit into your GPU's memory, leading to out-of-memory errors or severe slowdowns. If you're getting these, try to decrease the batch size until they disappear. This number should be an even integer."
    )
    batch_size = ask_user_int_multiple("Enter the batch size", 2, 256, 2, 8)
    # Inference specific parameters
    rprint(
        "\n[bold]MAXIMUM FRAME GAP FOR INFERENCE[/bold]: explained in the manuscript. We recommend using twice the value used for training."
    )
    inference_max_frame_gap = ask_user_int(
        "Enter Maximum Frame Gap for Inference", 1, 80, train_max_frame_gap * 2
    )
    rprint(
        "\n[bold]TEST-TIME AUGMENTATION[/bold]: explained in the manuscript. Enabling it leads to slightly better denoising quality at the cost of much longer inference times."
    )
    tta = typer.confirm(
        "Enable Test Time Augmentation (TTA) for inference (disabled by default)?",
        default=False,
    )
    rprint(
        "\n[bold]TRAINING EARLY STOPPING[/bold]: If activated, training will be halted if, starting after 20 epochs, the validation loss doesn't decrease for at least 3 consecutive epochs."
    )
    early_stopping = typer.confirm(
        "Enable Early Stopping (disabled by default)?", default=False
    )
    rprint(
        "\n[yellow][bold]ADVANCED PARAMETERS[/bold]: only recommended for experienced users.[/yellow]"
    )
    advanced = typer.confirm(
        "Do you want to set up advanced parameters (No by default)?", default=False
    )
    # Defaults for the advanced parameters; used verbatim unless the user opts in.
    train_data_patch_shape_y = 256
    train_data_patch_shape_x = 256
    train_data_patch_overlap_y = 16
    train_data_patch_overlap_x = 16
    train_data_split_ratio = 0.95  # train/validation split; not user-configurable
    train_data_num_workers = 4
    train_load_ckpt_path = None
    train_print_freq = 100
    train_save_freq = 1000
    train_val_freq = 500
    train_warmup_iters = 300
    train_mixed_precision = True
    train_compile = False
    optimizer_lr = 2e-4
    optimizer_lr_decay = 0.99995
    optimizer_weight_decay = 0.0001
    optimizer_epsilon = 1e-08
    optimizer_betas_0 = 0.9
    optimizer_betas_1 = 0.999
    biflownet_pyr_dim = 24
    biflownet_pyr_level = 3
    biflownet_corr_radius = 4
    biflownet_kernel_size = 3
    biflownet_warp_type = "soft_splat"
    biflownet_padding_mode = "reflect"
    biflownet_fix_params = False
    fusionnet_num_channels = 16
    fusionnet_padding_mode = "reflect"
    fusionnet_fix_params = False
    inference_data_patch_shape_y = 256
    inference_data_patch_shape_x = 256
    inference_data_patch_overlap_y = 16
    inference_data_patch_overlap_x = 16
    inference_data_num_workers = 4
    inference_output_format = "same"
    inference_load_ckpt_name = None
    inference_pyr_level = 3
    inference_mixed_precision = True
    inference_compile = False
    if advanced:
        simple_header("[yellow] Advanced Parameters [/yellow]")
        rprint(
            "For explanations, refer to the [bold]advanced instructions[/bold] or the [bold]manuscript[/bold]."
        )
        train_data_patch_shape_y = ask_user_int_multiple(
            "Enter train_data.patch_shape on Y", 32, 1024, 32, 256
        )
        train_data_patch_shape_x = ask_user_int_multiple(
            "Enter train_data.patch_shape on X", 32, 1024, 32, 256
        )
        train_data_patch_overlap_y = ask_user_int_multiple(
            "Enter train_data.patch_overlap on Y", 0, 512, 4, 16
        )
        train_data_patch_overlap_x = ask_user_int_multiple(
            "Enter train_data.patch_overlap on X", 0, 512, 4, 16
        )
        train_data_num_workers = ask_user_int("Enter train_data.num_workers", 0, 512, 4)
        train_print_freq = ask_user_int("Enter train.print_freq", 1, 10000, 100)
        train_save_freq = ask_user_int("Enter train.save_freq", 1, 10000, 1000)
        train_val_freq = ask_user_int("Enter train.val_freq", 1, 10000, 500)
        train_warmup_iters = ask_user_int("Enter train.warmup_iters", 1, 10000, 300)
        train_mixed_precision = ask_user("Enter train.mixed_precision", True)
        train_compile = ask_user("Enter train.compile", False)
        optimizer_lr = ask_user("Enter optimizer.lr", 2e-4)
        optimizer_lr_decay = ask_user("Enter optimizer.lr_decay", 0.99995)
        optimizer_weight_decay = ask_user("Enter optimizer.weight_decay", 0.0001)
        optimizer_epsilon = ask_user("Enter optimizer.epsilon", 1e-08)
        optimizer_betas_0 = ask_user("Enter optimizer.betas_0", 0.9)
        optimizer_betas_1 = ask_user("Enter optimizer.betas_1", 0.999)
        biflownet_pyr_dim = ask_user_int_multiple(
            "Enter biflownet.pyr_dim", 4, 128, 4, 24
        )
        biflownet_pyr_level = ask_user_int("Enter biflownet.pyr_level", 1, 20, 3)
        biflownet_corr_radius = ask_user_int("Enter biflownet.corr_radius", 1, 20, 4)
        biflownet_kernel_size = ask_user_int("Enter biflownet.kernel_size", 1, 20, 3)
        biflownet_warp_type = ask_user("Enter biflownet.warp_type", "soft_splat")
        biflownet_padding_mode = ask_user("Enter biflownet.padding_mode", "reflect")
        biflownet_fix_params = ask_user("Enter biflownet.fix_params", False)
        fusionnet_num_channels = ask_user_int_multiple(
            "Enter fusionnet.num_channels", 4, 128, 4, 16
        )
        fusionnet_padding_mode = ask_user("Enter fusionnet.padding_mode", "reflect")
        fusionnet_fix_params = ask_user("Enter fusionnet.fix_params", False)
        inference_data_patch_shape_y = ask_user_int_multiple(
            "Enter inference_data.patch_shape on Y", 32, 1024, 32, 256
        )
        # BUG FIX: the prompt below said "on Y" in the original, mislabeling
        # the X-axis question.
        inference_data_patch_shape_x = ask_user_int_multiple(
            "Enter inference_data.patch_shape on X", 32, 1024, 32, 256
        )
        inference_data_patch_overlap_y = ask_user_int_multiple(
            "Enter inference_data.patch_overlap on Y", 0, 512, 4, 16
        )
        # BUG FIX: same mislabeling as above ("on Y" -> "on X").
        inference_data_patch_overlap_x = ask_user_int_multiple(
            "Enter inference_data.patch_overlap on X", 0, 512, 4, 16
        )
        inference_data_num_workers = ask_user_int(
            "Enter inference_data.num_workers", 0, 512, 4
        )
        inference_output_format = ask_user("Enter inference.output_format", "same")
        inference_pyr_level = ask_user_int("Enter inference.pyr_level", 1, 20, 3)
        inference_mixed_precision = ask_user("Enter inference.mixed_precision", True)
        inference_compile = ask_user("Enter inference.compile", False)
    # Generate training config
    train_config = {
        "train_dir": train_dir,
        "data_path": data_path,
        "train_data": {
            "max_frame_gap": train_max_frame_gap,
            "patch_shape": [train_data_patch_shape_y, train_data_patch_shape_x],
            "patch_overlap": [train_data_patch_overlap_y, train_data_patch_overlap_x],
            "split_ratio": train_data_split_ratio,
            "batch_size": batch_size,
            "num_workers": train_data_num_workers,
        },
        "train": {
            "num_iters": num_iters,
            "load_ckpt_path": train_load_ckpt_path,
            "print_freq": train_print_freq,
            "save_freq": train_save_freq,
            "val_freq": train_val_freq,
            "warmup_iters": train_warmup_iters,
            "mixed_precision": train_mixed_precision,
            "compile": train_compile,
            "do_early_stopping": early_stopping,
        },
        "optimizer": {
            "lr": optimizer_lr,
            "lr_decay": optimizer_lr_decay,
            "weight_decay": optimizer_weight_decay,
            "epsilon": optimizer_epsilon,
            "betas": [optimizer_betas_0, optimizer_betas_1],
        },
        "biflownet": {
            "pyr_dim": biflownet_pyr_dim,
            "pyr_level": biflownet_pyr_level,
            "corr_radius": biflownet_corr_radius,
            "kernel_size": biflownet_kernel_size,
            "warp_type": biflownet_warp_type,
            "padding_mode": biflownet_padding_mode,
            "fix_params": biflownet_fix_params,
        },
        "fusionnet": {
            "num_channels": fusionnet_num_channels,
            "padding_mode": fusionnet_padding_mode,
            "fix_params": fusionnet_fix_params,
        },
    }
    # Generate inference config
    inference_config = {
        "train_dir": train_dir,
        "data_path": data_path,
        "inference_dir": inference_dir,
        "inference_data": {
            "max_frame_gap": inference_max_frame_gap,
            "patch_shape": [inference_data_patch_shape_y, inference_data_patch_shape_x],
            "patch_overlap": [
                inference_data_patch_overlap_y,
                inference_data_patch_overlap_x,
            ],
            "batch_size": batch_size,
            "num_workers": inference_data_num_workers,
        },
        "inference": {
            "output_format": inference_output_format,
            "load_ckpt_name": inference_load_ckpt_name,
            "pyr_level": inference_pyr_level,
            "mixed_precision": inference_mixed_precision,
            "TTA": tta,
            "compile": inference_compile,
        },
    }
    # CONSISTENCY FIX: the original hard-coded "runs/{exp_name}" here while
    # every other path goes through RUNS_DIR; use exp_path so both agree.
    os.makedirs(exp_path, exist_ok=True)
    # Save configs to files
    with open(f"{exp_path}/train_config.json", "w") as f:
        json.dump(train_config, f, indent=4)
    with open(f"{exp_path}/inference_config.json", "w") as f:
        json.dump(inference_config, f, indent=4)
    simple_header(f"Experiment [green]{exp_name}[/green] created")
def return_screen() -> None:
    """Offer a jump back to the main menu; otherwise terminate the CLI."""
    if not typer.confirm("Return to main menu?", default=True):
        exit_screen()
        return
    clear_screen()
    main_menu()
def return_screen_exp_manager() -> None:
    """Offer a jump back to the experiment manager; otherwise defer to
    the main-menu return prompt."""
    if not typer.confirm("Return to experiment manager?", default=True):
        return_screen()
        return
    clear_screen()
    experiment_menu()
def exit_screen() -> None:
    """Print a goodbye message and terminate the CLI."""
    rprint("[bold]Thank you for using CryoSamba. Goodbye![/bold]")
    # ``quit()`` is an interactive-shell convenience injected by the site
    # module (absent under ``python -S``); raising SystemExit is the robust
    # equivalent and produces the same exit behaviour.
    raise SystemExit
def title_screen() -> None:
    """Render the CryoSamba ASCII banner, version line and contact info.

    Output text is exact console art; do not reflow or reformat it.
    """
    rprint("")
    rprint(
        "[green] ██████╗██████╗ ██╗ ██╗ ██████╗[/green] [yellow]███████╗ █████╗ ███╗ ███╗██████╗ █████╗[/yellow]"
    )
    rprint(
        "[green]██╔════╝██╔══██╗╚██╗ ██╔╝██╔═══██╗[/green][yellow]██╔════╝██╔══██╗████╗ ████║██╔══██╗██╔══██╗[/yellow]"
    )
    rprint(
        "[green]██║ ██████╔╝ ╚████╔╝ ██║ ██║[/green][yellow]███████╗███████║██╔████╔██║██████╔╝███████║[/yellow]"
    )
    rprint(
        "[green]██║ ██╔══██╗ ╚██╔╝ ██║ ██║[/green][yellow]╚════██║██╔══██║██║╚██╔╝██║██╔══██╗██╔══██║[/yellow]"
    )
    rprint(
        "[green]╚██████╗██║ ██║ ██║ ╚██████╔╝[/green][yellow]███████║██║ ██║██║ ╚═╝ ██║██████╔╝██║ ██║[/yellow]"
    )
    rprint(
        "[green] ╚═════╝╚═╝ ╚═╝ ╚═╝ ╚═════╝ [/green][yellow]╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝[/yellow]"
    )
    rprint("")
    rprint("[bold]Welcome to CryoSamba [white]v1.0[/white] [/bold]")
    rprint(
        "[bold]by Kirchhausen Lab [blue](https://kirchhausen.hms.harvard.edu/)[/blue][/bold]"
    )
    print("")
    rprint(
        "Please read the instructions carefully. If you experience any issues reach out to "
    )
    rprint("[bold]Jose Costa-Filho[/bold] @ joseinacio@tklab.hms.harvard.edu")
    rprint("[bold]Arkash Jain[/bold] @ arkash@tklab.hms.harvard.edu")
    rprint("We appreciate all feedback!")
def clear_screen() -> None:
    """Clear the terminal (``cls`` on Windows, ``clear`` elsewhere).

    PORTABILITY FIX: the original always ran ``clear``, which does not exist
    on Windows consoles.
    """
    os.system("cls" if os.name == "nt" else "clear")
@app.command()
def main():
    """CLI entry point: wipe the screen and show the interactive main menu."""
    clear_screen()
    main_menu()
def main_menu() -> None:
    """Top-level interactive menu: dispatch to the experiment manager,
    training, inference, or exit.
    """
    title_screen()
    rprint("\n[bold]*** MAIN MENU ***[/bold]\n")
    steps = [
        "[bold]|1| Manage experiments[/bold]",
        "[bold]|2| Run training[/bold]",
        "[bold]|3| Run inference[/bold]",
        "[bold]|4| Exit[/bold]",
    ]
    # exist_ok avoids the exists()/makedirs() race of the original.
    os.makedirs(RUNS_DIR, exist_ok=True)
    if not list_non_hidden_files(RUNS_DIR):
        # Nudge first-time users towards experiment creation.
        steps[0] = "[bold]|1| Manage experiments [red](start here!)[/red][/bold]"
    for step in steps:
        rprint(step)
    print("")
    while True:
        input_cmd = typer.prompt("Choose an option [1/2/3/4]")
        if input_cmd == "1":
            clear_screen()
            experiment_menu()
            break
        elif input_cmd == "2":
            clear_screen()
            run_cryosamba("Training")
            break
        elif input_cmd == "3":
            clear_screen()
            run_cryosamba("Inference")
            break
        elif input_cmd == "4":
            exit_screen()
            break
        else:
            rprint("[red]Invalid option. Please choose either 1, 2, 3 or 4.[/red]")
def simple_header(message) -> None:
    """Print *message* as a bold, starred section header with blank padding."""
    header = f"\n[bold]*** {message} ***[/bold]\n"
    rprint(header)
def setup_cryosamba() -> None:
    """First-time interactive setup: optional Conda install, environment
    creation and environment export, then back to the main menu."""
    simple_header("CryoSamba Setup")
    if typer.confirm("Do you want to setup Conda?"):
        setup_conda()
    if typer.confirm("Do you want to setup the environment?"):
        chosen_name = typer.prompt("Enter environment name", default="cryosamba")
        setup_environment(chosen_name)
    if typer.confirm("Do you want to export the environment? (Optional)"):
        export_env()
    rprint("[green]CryoSamba setup finished[/green]")
    return_screen()
def show_exp_list() -> None:
    """Print RUNS_DIR's location and the sorted list of experiments in it."""
    rprint(f"Your experiments are stored at [bold]{RUNS_DIR}[/bold]")
    exp_list = list_non_hidden_files(RUNS_DIR)
    if not exp_list:
        rprint("You have no existing experiments.")
    else:
        rprint(f"You have the following experiments: [bold]{sorted(exp_list)}[/bold]")
def experiment_menu() -> None:
    """Experiment manager menu: create, delete, or return to the main menu."""
    simple_header("Experiment Manager")
    show_exp_list()
    steps = [
        "[bold]|1| Create a new experiment[/bold]",
        "[bold]|2| Delete an experiment[/bold]",
        "[bold]|3| Return to Main Menu[/bold]",
    ]
    print("")
    for step in steps:
        rprint(step)
    print("")
    while True:
        input_cmd = typer.prompt("Choose an option [1/2/3]")
        if input_cmd == "1":
            clear_screen()
            setup_experiment()
            break
        elif input_cmd == "2":
            if not list_non_hidden_files(RUNS_DIR):
                rprint("You have no existing experiments to delete.")
            else:
                clear_screen()
                delete_experiment()
            # NOTE: the loop exits here even when there was nothing to
            # delete — matching the original control flow.
            break
        elif input_cmd == "3":
            clear_screen()
            main_menu()
            break
        else:
            rprint("[red]Invalid option. Please choose either 1, 2 or 3.[/red]")
def setup_experiment() -> None:
    """Prompt for a fresh experiment name and generate its config files."""
    simple_header("New Experiment Setup")
    while True:
        exp_name = typer.prompt(
            "Please enter the new experiment name (or enter E to Exit)"
        )
        if exp_name == "E":
            break
        if os.path.exists(os.path.join(RUNS_DIR, exp_name)):
            rprint(
                f"[red]Experiment [bold]{exp_name}[/bold] already exists. Please choose a new name.[/red]"
            )
            continue
        generate_experiment(exp_name)
        break
    return_screen_exp_manager()
def delete_experiment() -> None:
    """Interactively delete experiment directories until the user exits.

    Deletion requires explicit confirmation; the list is re-shown after
    every attempt so several experiments can be removed in one session.
    """
    simple_header("Experiment Deletion (be careful!)")
    while True:
        show_exp_list()
        exp_name = typer.prompt(
            "Please enter the name of the experiment you want to delete (or enter E to Exit)"
        )
        if exp_name == "E":
            break
        exp_path = os.path.join(RUNS_DIR, exp_name)
        if not os.path.exists(exp_path):
            rprint(
                f"[red]Experiment [bold]{exp_name}[/bold] not found. Please check the experiment name and try again.[/red]"
            )
            continue
        rprint(f"Experiment [bold]{exp_name}[/bold] found.")
        if typer.confirm(
            f"Do you really want to delete experiment {exp_name} and all its contents (config files, trained models, denoised results)?"
        ):
            shutil.rmtree(exp_path)
            rprint(f"Experiment [bold]{exp_name}[/bold] successfully deleted.")
    return_screen_exp_manager()
def list_non_hidden_files(path):
    """Return the entries of *path* whose names do not start with a dot."""
    return [entry for entry in os.listdir(path) if not entry.startswith(".")]
def run_cryosamba(mode) -> None:
    """Select an experiment and launch training or inference on chosen GPUs.

    Args:
        mode: Either "Training" or "Inference"; selects which runner is used.
    """
    simple_header(f"CryoSamba {mode}")
    os.makedirs(RUNS_DIR, exist_ok=True)
    rprint(f"Your experiments are stored at [bold]{RUNS_DIR}[/bold]")
    exp_list = list_non_hidden_files(RUNS_DIR)
    if not exp_list:
        rprint(
            "[red]You have no existing experiments. Set up a new experiment via the main menu.[/red]"
        )
        return_screen()
        # BUG FIX: the original fell through into the selection loop below
        # even though there is nothing to select; bail out instead.
        return
    rprint(f"You have the following experiments: [bold]{sorted(exp_list)}[/bold]")
    while True:
        exp_name = typer.prompt("Please enter the experiment name (or enter E to Exit)")
        if exp_name == "E":
            break
        exp_path = os.path.join(RUNS_DIR, exp_name)
        if not os.path.exists(exp_path):
            rprint(
                f"[red]Experiment [bold]{exp_name}[/bold] not found. Please check the experiment name and try again.[/red]"
            )
            continue
        rprint(f"* Experiment [green]{exp_name}[/green] selected *")
        selected_gpus = select_gpus()
        # A sentinel of -1 means GPU selection was aborted; re-prompt.
        if selected_gpus != -1:
            if mode == "Training":
                run_training(",".join(selected_gpus), exp_name)
            elif mode == "Inference":
                run_inference(",".join(selected_gpus), exp_name)
            break
    return_screen()
# Script entry point: delegate to Typer so CLI parsing/help are handled.
if __name__ == "__main__":
    typer.run(main)
| Python |
2D | kirchhausenlab/Cryosamba | core/fusionnet.py | .py | 5,963 | 181 | import torch
import torch.nn as nn
import torch.nn.functional as F
from core.utils.nn_utils import ConvBlock, conv2, conv4, deconv, deconv3, warp_fn
class DownsampleImage(nn.Module):
    """Learned 2x down-sampling via a convex combination of 5x5 patches.

    A small conv stack predicts 25 softmax weights per output pixel; the
    output is the weighted sum of the corresponding 5x5 neighbourhood of
    the full-resolution image.
    """

    def __init__(self, num_channels, padding_mode="zeros"):
        super().__init__()
        # Final 1x1 ConvBlock emits the 25 mask logits (5*5 window).
        self.downsample_mask = nn.Sequential(
            ConvBlock(
                num_channels, 2 * num_channels, kernel_size=2, padding_mode=padding_mode
            ),
            ConvBlock(
                2 * num_channels,
                2 * num_channels,
                kernel_size=5,
                padding_mode=padding_mode,
            ),
            ConvBlock(
                2 * num_channels,
                2 * num_channels,
                kernel_size=3,
                padding_mode=padding_mode,
            ),
            ConvBlock(
                2 * num_channels, 25, kernel_size=1, padding_mode=padding_mode, act=None
            ),
        )

    def forward(self, x, img):
        """down-sample the image [H*2, W*2, 1] -> [H, W, 1] using convex combination"""
        N, _, H, W = img.shape
        mask = self.downsample_mask(x)
        mask = mask.view(N, 1, 25, H // 2, W // 2)
        # Softmax over the 25 window positions -> convex weights.
        mask = torch.softmax(mask, dim=2)
        # unfold gathers every 5x5 patch (stride 2) as a 25-row column.
        down_img = F.unfold(img, [5, 5], stride=2, padding=2)
        down_img = down_img.view(N, 1, 25, H // 2, W // 2)
        down_img = torch.sum(mask * down_img, dim=2)
        return down_img
class ContextNet(nn.Module):
    """Build a 4-level feature pyramid from a 1-channel image and warp each
    level by an accordingly rescaled flow.

    Channel widths grow 1 -> num_channels -> 2x -> 4x -> 8x.
    """

    def __init__(self, num_channels, padding_mode="zeros"):
        super().__init__()
        chs = [
            1,
            1 * num_channels,
            2 * num_channels,
            4 * num_channels,
            8 * num_channels,
        ]
        self.convs = nn.ModuleList(
            [
                conv2(ch_in, ch_out, padding_mode=padding_mode)
                for ch_in, ch_out in zip(chs[:-1], chs[1:])
            ]
        )

    def forward(self, feat, flow):
        feat_pyramid = []
        for conv in self.convs:
            feat = conv(feat)
            # Halve the flow's resolution AND magnitude at each level
            # (flow values are expressed in pixels).
            flow = (
                F.interpolate(
                    flow, scale_factor=0.5, mode="bilinear", align_corners=False
                )
                * 0.5
            )
            warped_feat = warp_fn(feat, flow)
            feat_pyramid.append(warped_feat)
        return feat_pyramid
class RefineUNet(nn.Module):
    """U-Net refining an 8-channel stack (two warped images, two originals,
    4-channel flow) using the two warped context pyramids (c0, c1).

    Each encoder stage concatenates the matching pyramid levels; the decoder
    uses skip connections (s2, s1, s0) back to the encoder.
    """

    def __init__(self, num_channels, padding_mode="zeros"):
        super().__init__()
        self.down1 = conv4(8, 2 * num_channels, padding_mode=padding_mode)
        self.down2 = conv2(
            4 * num_channels, 4 * num_channels, padding_mode=padding_mode
        )
        self.down3 = conv2(
            8 * num_channels, 8 * num_channels, padding_mode=padding_mode
        )
        self.down4 = conv2(
            16 * num_channels, 16 * num_channels, padding_mode=padding_mode
        )
        self.up1 = deconv(
            32 * num_channels, 8 * num_channels, padding_mode=padding_mode
        )
        self.up2 = deconv(
            16 * num_channels, 4 * num_channels, padding_mode=padding_mode
        )
        self.up3 = deconv(8 * num_channels, 2 * num_channels, padding_mode=padding_mode)
        self.up4 = deconv3(4 * num_channels, num_channels, padding_mode=padding_mode)

    def forward(self, cat, c0, c1):
        # Encoder: fuse the two context pyramids level by level.
        s0 = self.down1(cat)
        s1 = self.down2(torch.cat((s0, c0[0], c1[0]), 1))
        s2 = self.down3(torch.cat((s1, c0[1], c1[1]), 1))
        s3 = self.down4(torch.cat((s2, c0[2], c1[2]), 1))
        # Decoder with encoder skip connections.
        x = self.up1(torch.cat((s3, c0[3], c1[3]), 1))
        x = self.up2(torch.cat((x, s2), 1))
        x = self.up3(torch.cat((x, s1), 1))
        x = self.up4(torch.cat((x, s0), 1))
        return x
class FusionNet(nn.Module):
    """Synthesise the middle frame from two images and their bi-directional flow.

    Processing happens at 2x the input resolution; a learned convex
    down-sampling (DownsampleImage) returns the result to input size.
    """

    def __init__(self, args):
        super().__init__()
        self.padding_mode = args.padding_mode
        self.contextnet = ContextNet(args.num_channels, padding_mode=self.padding_mode)
        self.unet = RefineUNet(args.num_channels, padding_mode=self.padding_mode)
        # 2 output channels: residual + blending mask (see forward).
        self.refine_pred = ConvBlock(
            args.num_channels, 2, kernel_size=3, padding_mode=self.padding_mode
        )
        self.downsample_image = DownsampleImage(
            args.num_channels, padding_mode=self.padding_mode
        )
        # fix the parameters if needed (freezes this sub-network entirely)
        if ("fix_params" in args) and (args.fix_params):
            for p in self.parameters():
                p.requires_grad = False

    def forward(self, img0, img1, bi_flow):
        # upsample input images and estimated bi_flow
        img0 = F.interpolate(
            input=img0, scale_factor=2, mode="bilinear", align_corners=False
        )
        img1 = F.interpolate(
            input=img1, scale_factor=2, mode="bilinear", align_corners=False
        )
        bi_flow = (
            F.interpolate(
                input=bi_flow, scale_factor=2, mode="bilinear", align_corners=False
            )
            * 2  # flow magnitudes scale with resolution
        )
        # input features for sythesis network: original images, warped images, warped features, and flow_0t_1t
        flow_0t = bi_flow[:, :2] * 0.5  # half flow: warp to the temporal midpoint
        flow_1t = bi_flow[:, 2:4] * 0.5
        flow_0t_1t = torch.cat((flow_0t, flow_1t), 1)
        warped_img0 = warp_fn(img0, flow_0t)
        warped_img1 = warp_fn(img1, flow_1t)
        c0 = self.contextnet(img0, flow_0t)
        c1 = self.contextnet(img1, flow_1t)
        # feature extraction by u-net
        x = self.unet(
            torch.cat((warped_img0, warped_img1, img0, img1, flow_0t_1t), 1), c0, c1
        )
        # prediction: channel 0 -> residual mapped to [-1, 1]; channel 1 -> blend mask
        refine = torch.sigmoid(self.refine_pred(x))
        refine_res = refine[:, 0:1] * 2 - 1
        refine_mask = refine[:, 1:2]
        merged_img = warped_img0 * refine_mask + warped_img1 * (1 - refine_mask)
        interp_img = merged_img + refine_res
        # convex down-sampling
        interp_img = self.downsample_image(x, interp_img)
        return interp_img
# No standalone behaviour; this module is meant to be imported.
if __name__ == "__main__":
    pass
| Python |
2D | kirchhausenlab/Cryosamba | core/biflownet.py | .py | 13,495 | 419 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from core.utils.nn_utils import ConvBlock, backwarp, conv2, deconv, warp_fn
class ImportanceMask(nn.Module):
    """Predict a 1-channel importance map from an image and its photometric
    error; used as the weighting metric for softmax splatting in ForwardWarp.
    """

    def __init__(self, mode):
        super().__init__()
        self.conv_img = ConvBlock(
            in_channels=1, out_channels=4, kernel_size=3, padding_mode=mode, act=None
        )
        self.conv_error = ConvBlock(
            in_channels=1, out_channels=4, kernel_size=3, padding_mode=mode, act=None
        )
        # Mixes the two 4-channel embeddings down to a single channel.
        self.conv_mix = MaskUNet(chs_in=8, num_channels=4, padding_mode=mode)

    def forward(self, img, error):
        img = self.conv_img(img)
        error = self.conv_error(error)
        feat = torch.cat((img, error), dim=1)
        feat = self.conv_mix(feat)
        return feat
class MaskUNet(nn.Module):
    """Small 4-level encoder-decoder mapping ``chs_in`` channels to a
    1-channel map, with skip connections (s3, s2, s1) into the decoder.
    """

    def __init__(self, chs_in, num_channels, padding_mode="zeros"):
        super().__init__()
        self.down1 = conv2(chs_in, 2 * num_channels, padding_mode=padding_mode)
        self.down2 = conv2(
            2 * num_channels, 4 * num_channels, padding_mode=padding_mode
        )
        self.down3 = conv2(
            4 * num_channels, 8 * num_channels, padding_mode=padding_mode
        )
        self.down4 = conv2(
            8 * num_channels, 16 * num_channels, padding_mode=padding_mode
        )
        self.up1 = deconv(
            16 * num_channels, 8 * num_channels, padding_mode=padding_mode
        )
        self.up2 = deconv(
            16 * num_channels, 4 * num_channels, padding_mode=padding_mode
        )
        self.up3 = deconv(8 * num_channels, 2 * num_channels, padding_mode=padding_mode)
        self.up4 = deconv(4 * num_channels, 1, padding_mode=padding_mode)

    def forward(self, s0):
        # Encoder path.
        s1 = self.down1(s0)
        s2 = self.down2(s1)
        s3 = self.down3(s2)
        x = self.down4(s3)
        # Decoder path with encoder skips.
        x = self.up1(x)
        x = self.up2(torch.cat((x, s3), 1))
        x = self.up3(torch.cat((x, s2), 1))
        x = self.up4(torch.cat((x, s1), 1))
        return x
# **************************************************************************************************#
# => Feature Pyramid
# **************************************************************************************************#
class FeatPyramid(nn.Module):
    """Two-level feature pyramid
    1) remove high-level feature pyramid (compared to PWC-Net), and add more conv layers to stage 2;
    2) do not increase the output channel of stage 2, in order to keep the cost of corr volume under control.
    """

    def __init__(self, num_channels=24, kernel_size=3, mode="zeros"):
        super().__init__()
        act = "lrelu"
        # NOTE(review): the kernel_size=2 ConvBlocks at the head of each
        # stage presumably perform the down-sampling between levels —
        # confirm against ConvBlock's definition in core.utils.nn_utils.
        self.conv_stage1 = nn.Sequential(
            ConvBlock(1, num_channels, kernel_size=2, padding_mode=mode, act=act),
            *[
                ConvBlock(
                    num_channels,
                    num_channels,
                    kernel_size=kernel_size,
                    padding_mode=mode,
                    act=act,
                )
                for _ in range(2)
            ]
        )
        self.conv_stage2 = nn.Sequential(
            ConvBlock(
                num_channels,
                2 * num_channels,
                kernel_size=2,
                padding_mode=mode,
                act=act,
            ),
            *[
                ConvBlock(
                    2 * num_channels,
                    2 * num_channels,
                    kernel_size=kernel_size,
                    padding_mode=mode,
                    act=act,
                )
                for _ in range(5)
            ]
        )

    def forward(self, img):
        # Only the deepest (stage-2) features are returned.
        x = self.conv_stage1(img)
        x = self.conv_stage2(x)
        return x
# **************************************************************************************************#
# => Estimator
# **************************************************************************************************#
class Correlation(nn.Module):
    """Local correlation (cost) volume between two feature maps.

    Emits (2*corr_radius + 1)**2 channels per pixel — one per offset in the
    search window — matching Estimator's expected cost-volume width.
    """

    def __init__(self, corr_radius=4):
        super().__init__()
        # Same radius on all four sides, in F.pad's (l, r, t, b) order.
        self.corr_radius = [corr_radius] * 4

    def forward(self, x1, x2):
        B, C, H, W = x1.size()
        x2 = F.pad(x2, self.corr_radius)
        # Using unfold function to create patches for correlation
        kv = x2.unfold(2, H, 1).unfold(3, W, 1)
        # We need to consider the correlation in the channel dimension. Therefore, reshape kv accordingly.
        kv = kv.contiguous().view(B, C, -1, H, W)
        # Calculating correlation (mean over the feature channels)
        cv = (x1.view(B, C, 1, H, W) * kv).mean(dim=1, keepdim=True)
        # Reshaping the output to match the shape of the output of the original function
        cv = cv.view(B, -1, H, W)
        return cv
class Estimator(nn.Module):
    """A 6-layer flow estimator, with correlation-injected features
    1) construct partial cost volume with the CNN features from stage 2 of the feature pyramid;
    2) estimate bi-directional flows, by feeding cost volume, CNN features for both warped images,
    CNN feature and estimated flow from previous iteration.
    """

    def __init__(self, pyr_dim=24, kernel_size=3, corr_radius=4, mode="zeros"):
        super().__init__()
        image_feat_channel = 2 * pyr_dim
        last_flow_feat_channel = 64
        # Cost volume + both image features + recurrent flow feature + 4 flow channels.
        in_channels = (
            (corr_radius * 2 + 1) ** 2
            + image_feat_channel * 2
            + last_flow_feat_channel
            + 4
        )
        self.corr = Correlation(corr_radius)
        act = "lrelu"
        self.convs = nn.Sequential(
            ConvBlock(
                in_channels=in_channels,
                out_channels=160,
                kernel_size=1,
                padding_mode=mode,
                act=act,
            ),
            ConvBlock(
                in_channels=160,
                out_channels=128,
                kernel_size=kernel_size,
                padding_mode=mode,
                act=act,
            ),
            ConvBlock(
                in_channels=128,
                out_channels=112,
                kernel_size=kernel_size,
                padding_mode=mode,
                act=act,
            ),
            ConvBlock(
                in_channels=112,
                out_channels=96,
                kernel_size=kernel_size,
                padding_mode=mode,
                act=act,
            ),
            ConvBlock(
                in_channels=96,
                out_channels=64,
                kernel_size=kernel_size,
                padding_mode=mode,
                act=act,
            ),
        )
        # Head: 4 channels = two 2D flows (0->1 and 1->0).
        self.final_conv = ConvBlock(
            in_channels=64,
            out_channels=4,
            kernel_size=kernel_size,
            padding_mode=mode,
            act=None,
        )

    def forward(self, feat0, feat1, last_feat, last_flow):
        # Cost volume, activated before being mixed with the other features.
        volume = F.leaky_relu(
            input=self.corr(feat0, feat1), negative_slope=0.1, inplace=False
        )
        feat = torch.cat([volume, feat0, feat1, last_feat, last_flow], 1)
        feat = self.convs(feat)
        flow = self.final_conv(feat)
        # Also return the 64-channel feature for the next pyramid level.
        return flow, feat
class ForwardWarp(nn.Module):
    """Warp two images towards the temporal midpoint using a bi-directional flow.

    Supported ``warp_type`` values:
      - "backwarp":   backward warping with the opposite-direction flow
      - "avg_splat":  forward splatting, average normalisation, half flow
      - "fw_splat":   forward splatting, average normalisation, full flow
      - "soft_splat": softmax splatting weighted by a learned ImportanceMask
    """

    def __init__(self, warp_type, padding_mode):
        super().__init__()
        # The importance mask is only needed (and trained) for softmax splatting.
        self.alpha = ImportanceMask(padding_mode) if warp_type == "soft_splat" else None
        if warp_type == "backwarp":
            self.fn = self.fn_backwarp
        elif warp_type == "avg_splat":
            self.fn = self.fn_avg_splat
        elif warp_type == "fw_splat":
            self.fn = self.fn_fw_splat
        elif warp_type == "soft_splat":
            self.fn = self.fn_soft_splat
        else:
            # ROBUSTNESS FIX: the original left self.fn unset for unknown
            # values, deferring failure to an opaque AttributeError at the
            # first forward() call. Fail fast at construction instead.
            raise ValueError(f"Unsupported warp_type: {warp_type!r}")

    def fn_backwarp(self, img0, img1, flow):
        # Backward warping uses the flow of the *other* direction.
        img0 = backwarp(tenInput=img0, tenFlow=flow[:, 2:])
        img1 = backwarp(tenInput=img1, tenFlow=flow[:, :2])
        return img0, img1

    def fn_avg_splat(self, img0, img1, flow):
        # Half flow: splat each image to the temporal midpoint.
        img0 = warp_fn(
            tenInput=img0, tenFlow=flow[:, :2] * 0.5, tenMetric=None, strType="average"
        )
        img1 = warp_fn(
            tenInput=img1, tenFlow=flow[:, 2:] * 0.5, tenMetric=None, strType="average"
        )
        return img0, img1

    def fn_fw_splat(self, img0, img1, flow):
        # Full flow: splat all the way to the opposite frame.
        img0 = warp_fn(
            tenInput=img0, tenFlow=flow[:, :2] * 1.0, tenMetric=None, strType="average"
        )
        img1 = warp_fn(
            tenInput=img1, tenFlow=flow[:, 2:] * 1.0, tenMetric=None, strType="average"
        )
        return img0, img1

    def fn_soft_splat(self, img0, img1, flow):
        # Photometric error of the backward-warped counterpart drives the
        # learned importance weights; clipped for numerical stability.
        tenMetric0 = F.l1_loss(
            input=img0,
            target=backwarp(tenInput=img1, tenFlow=flow[:, :2] * 0.5),
            reduction="none",
        ).mean([1], True)
        tenMetric0 = self.alpha(img0, -tenMetric0).neg().clip(-20.0, 20.0)
        img0 = warp_fn(
            tenInput=img0,
            tenFlow=flow[:, :2] * 0.5,
            tenMetric=tenMetric0,
            strType="softmax",
        )
        tenMetric1 = F.l1_loss(
            input=img1,
            target=backwarp(tenInput=img0, tenFlow=flow[:, 2:] * 0.5),
            reduction="none",
        ).mean([1], True)
        tenMetric1 = self.alpha(img1, -tenMetric1).neg().clip(-20.0, 20.0)
        img1 = warp_fn(
            tenInput=img1,
            tenFlow=flow[:, 2:] * 0.5,
            tenMetric=tenMetric1,
            strType="softmax",
        )
        return img0, img1

    def forward(self, img0, img1, flow):
        return self.fn(img0, img1, flow)
# **************************************************************************************************#
# => BiFlowNet
# **************************************************************************************************#
class BiFlowNet(nn.Module):
    """Our bi-directional flownet
    In general, we combine image pyramid, middle-oriented forward warping,
    lightweight feature encoder and cost volume for simultaneous bi-directional
    motion estimation.
    """

    def __init__(self, args):
        super().__init__()
        # Width of the recurrent flow-feature carried between pyramid levels.
        self.last_flow_feat_channel = 64
        self.pyr_level = args.pyr_level
        self.warp_type = args.warp_type
        self.feat_pyramid = FeatPyramid(
            args.pyr_dim, args.kernel_size, args.padding_mode
        )
        self.flow_estimator = Estimator(
            args.pyr_dim, args.kernel_size, args.corr_radius, args.padding_mode
        )
        self.warp_imgs = ForwardWarp(args.warp_type, args.padding_mode)
        # fix the parameters if needed (freezes this sub-network entirely)
        if ("fix_params" in args) and (args.fix_params):
            for p in self.parameters():
                p.requires_grad = False

    def pre_warp(self, img0, img1, last_flow):
        # Flow is estimated at 1/4 of the level's image resolution (see the
        # H // 2**(level+2) initialisation in forward), hence the 4x upsample.
        up_flow = (
            F.interpolate(
                input=last_flow, scale_factor=4.0, mode="bilinear", align_corners=False
            )
            * 4
        )
        img0, img1 = self.warp_imgs(img0, img1, up_flow)
        return img0, img1

    def forward_one_iteration(self, img0, img1, last_feat, last_flow):
        # One estimator pass at a single pyramid level.
        feat0 = self.feat_pyramid(img0)
        feat1 = self.feat_pyramid(img1)
        flow, feat = self.flow_estimator(feat0, feat1, last_feat, last_flow)
        return flow, feat

    def forward(self, img0, img1):
        """Coarse-to-fine bi-directional flow estimation.

        Returns a 4-channel flow (0->1 and 1->0) at input resolution.
        """
        N, _, H, W = img0.shape
        ###### First level: coarsest scale, zero-initialised flow and feature.
        level = self.pyr_level - 1
        scale_factor = 1 / 2**level
        img0_down = F.interpolate(
            input=img0, scale_factor=scale_factor, mode="bilinear", align_corners=False
        )
        img1_down = F.interpolate(
            input=img1, scale_factor=scale_factor, mode="bilinear", align_corners=False
        )
        last_flow = torch.zeros(
            (N, 4, H // (2 ** (level + 2)), W // (2 ** (level + 2))), device=img0.device
        )
        last_feat = torch.zeros(
            (
                N,
                self.last_flow_feat_channel,
                H // (2 ** (level + 2)),
                W // (2 ** (level + 2)),
            ),
            device=img0.device,
        )
        flow, feat = self.forward_one_iteration(
            img0_down, img1_down, last_feat, last_flow
        )
        ###### Remaining levels, finest last; each refines the previous estimate.
        for level in list(range(self.pyr_level - 1))[::-1]:
            scale_factor = 1 / 2**level
            img0_down = F.interpolate(
                input=img0,
                scale_factor=scale_factor,
                mode="bilinear",
                align_corners=False,
            )
            img1_down = F.interpolate(
                input=img1,
                scale_factor=scale_factor,
                mode="bilinear",
                align_corners=False,
            )
            # Upsample the previous flow 2x (values scaled with resolution).
            last_flow = (
                F.interpolate(
                    input=flow, scale_factor=2.0, mode="bilinear", align_corners=False
                )
                * 2
            )
            last_feat = F.interpolate(
                input=feat, scale_factor=2.0, mode="bilinear", align_corners=False
            )
            # Middle-oriented pre-warping before re-estimating.
            img0_down, img1_down = self.pre_warp(img0_down, img1_down, last_flow)
            flow, feat = self.forward_one_iteration(
                img0_down, img1_down, last_feat, last_flow
            )
        # directly up-sample estimated flow to full resolution with bi-linear interpolation
        output_flow = (
            F.interpolate(
                input=flow, scale_factor=4.0, mode="bilinear", align_corners=False
            )
            * 4
        )
        return output_flow
# No standalone behaviour; this module is meant to be imported.
if __name__ == "__main__":
    pass
| Python |
2D | kirchhausenlab/Cryosamba | core/model.py | .py | 3,207 | 112 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from core.dataset import DatasetBase
from core.biflownet import BiFlowNet
from core.fusionnet import FusionNet
class CharbonnierLoss(nn.Module):
    """Smooth L1-like penalty: mean(sqrt((x - y)^2 + eps)).

    The eps term keeps the gradient finite at x == y.
    """

    def __init__(self, eps=1e-6):
        super().__init__()
        self.eps = eps

    def forward(self, x, y):
        diff = x - y
        return torch.sqrt(diff * diff + self.eps).mean()
class TernaryLoss(nn.Module):
    """Census-transform (ternary) distance between two images.

    Each pixel is described by its 7x7 neighbourhood relative to its own
    value (a soft census transform); the loss is the mean soft Hamming-style
    distance over the valid (non-border) region.
    """

    def __init__(self):
        super().__init__()
        self.patch_size = 7
        self.out_channels = self.patch_size * self.patch_size
        self.padding = 1

    def transform(self, img):
        # self.w (one one-hot 7x7 kernel per channel) is built lazily in
        # forward(); padding=3 keeps the 7x7 patch extraction shape-preserving.
        patches = F.conv2d(img, self.w, padding=3, bias=None)
        transf = patches - img
        # Normalise differences into (-1, 1) — soft ternary census.
        transf_norm = transf / torch.sqrt(0.81 + transf**2)
        return transf_norm

    def hamming(self, t1, t2):
        # Soft Hamming distance, averaged over the 49 census channels.
        dist = (t1 - t2).pow(2)
        dist_norm = torch.mean(dist / (0.1 + dist), 1, True)
        return dist_norm

    def valid_mask(self, t):
        # Zero out a 1-pixel border where the census transform is unreliable.
        n, _, h, w = t.size()
        inner = torch.ones(
            n, 1, h - 2 * self.padding, w - 2 * self.padding, device=t.device
        ).type_as(t)
        mask = F.pad(inner, [self.padding] * 4)
        return mask

    def forward(self, x, y):
        # Build the identity "patch extractor" here so it is created on the
        # input's device each call.
        self.w = torch.eye(self.out_channels, device=x.device).reshape(
            (self.patch_size, self.patch_size, 1, self.out_channels)
        )
        self.w = self.w.permute(3, 2, 0, 1).float()
        x = self.transform(x)
        y = self.transform(y)
        return (self.hamming(x, y) * self.valid_mask(x)).mean()
class PhotometricLoss(nn.Module):
    """Combined reconstruction loss: 100 * (Charbonnier + 0.1 * ternary)."""

    def __init__(self):
        super().__init__()
        self.ter_loss = TernaryLoss()
        self.char_loss = CharbonnierLoss()

    def forward(self, interp_img, gt):
        char = self.char_loss(interp_img, gt)
        ter = self.ter_loss(interp_img, gt)
        return 100 * (char + 0.1 * ter)
def get_loss():
    """Factory for the training loss used by the trainer."""
    return PhotometricLoss()
class CryoSamba(nn.Module):
    """Frame-interpolation model: BiFlowNet predicts bidirectional flow between
    two frames, FusionNet fuses the warped frames into the reconstruction.

    The reconstruction is clamped to [-1, 1] (the normalized intensity range).
    """

    def __init__(self, cfg):
        super().__init__()
        self.biflownet = BiFlowNet(cfg.biflownet)
        self.fusionnet = FusionNet(cfg.fusionnet)
        # Maximum temporal gap between the two input frames during training.
        self.gap = cfg.train_data.max_frame_gap
        self.apply(self.init_weights)

    def init_weights(self, m):
        """Kaiming init for convolutions; small-normal weights and zero bias for linears."""
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight)
        elif isinstance(m, nn.Linear):
            nn.init.normal_(m.weight, 0, 0.01)
            nn.init.constant_(m.bias, 0)

    def forward(self, img0, img1):
        """Interpolate the middle frame from `img0` and `img1`."""
        biflow = self.biflownet(img0, img1).contiguous()
        rec = self.fusionnet(img0, img1, biflow)
        return torch.clamp(rec, -1, 1)

    def validation(self, img0, img1):
        # Previously a verbatim copy of forward(); delegate instead so the two
        # paths cannot drift apart. Kept as a separate entry point so eval call
        # sites can bypass DDP forward hooks when calling the raw module.
        return self.forward(img0, img1)
def get_model(cfg, device, is_ddp, compile):
    """Build CryoSamba on `device`, optionally torch.compile it, optionally wrap in DDP."""
    model = CryoSamba(cfg).to(device=device)
    if compile:
        model = torch.compile(model, dynamic=False)
    if is_ddp:
        model = DDP(model, device_ids=[device])
    return model
| Python |
2D | kirchhausenlab/Cryosamba | core/dataset.py | .py | 4,145 | 127 | import os, glob
import numpy as np
import tifffile
import mrcfile
import torch
from torch.utils.data import Dataset, DataLoader, ConcatDataset
from torch.utils.data.distributed import DistributedSampler
from core.utils.data_utils import normalize_imgs, denormalize_imgs, augment_dataset
class DatasetBase(Dataset):
    """Tiles a 3D volume into overlapping patches and serves them as samples.

    NOTE(review): `data` appears to be indexed [frames, height, width] — axis 0
    is temporal (patches span 2*max_frame_gap+1 consecutive frames); confirm
    against the code that builds `data` and `metadata`.
    """

    def __init__(self, args, data, metadata, split="train"):
        self.data = data
        self.metadata = metadata  # normalization parameters passed to normalize_imgs
        self.split = split  # "train", "val", or "test"
        self.shape = self.data.shape
        self.max_frame_gap = args.max_frame_gap
        # Per-axis overlap/patch size; temporal axis is fixed by max_frame_gap,
        # spatial axes come from the config.
        self.patch_overlap = [2 * self.max_frame_gap] + args.patch_overlap
        self.patch_shape = [2 * self.max_frame_gap + 1] + args.patch_shape
        # Patch start positions per axis; spatial axes start at -overlap so the
        # borders are covered (out-of-range parts are reflect-padded later).
        self.indices = [
            np.arange(-overlap, shape, patch_shape - overlap)
            for overlap, shape, patch_shape in zip(
                self.patch_overlap, self.shape, self.patch_shape
            )
        ]
        # Temporal axis never starts negative: a patch must contain real frames.
        self.indices[0] = np.arange(
            0,
            self.shape[0] - self.patch_overlap[0],
            self.patch_shape[0] - self.patch_overlap[0],
        )
        if self.split != "test":
            # Train takes the first split_ratio fraction of temporal windows;
            # val takes the remainder (falling back to the last window so the
            # val set is never empty).
            self.split_ratio = args.split_ratio
            length = int(len(self.indices[0]) * self.split_ratio)
            if self.split == "train":
                self.indices[0] = self.indices[0][:length]
            elif self.split == "val":
                if length < len(self.indices[0]):
                    self.indices[0] = self.indices[0][length:]
                else:
                    self.indices[0] = self.indices[0][-1:]
        self.index_shape = [len(idx) for idx in self.indices]
        self.dataset_length = (
            self.index_shape[0] * self.index_shape[1] * self.index_shape[2]
        )
        # Precompute crop coordinates and padding for every flat index.
        self.coords_list, self.border_pad_list = list(
            zip(*[self.get_crop_params(index) for index in range(self.dataset_length)])
        )

    def get_crop_params(self, index):
        """Map a flat sample index to clamped crop coords plus reflect-pad widths.

        Returns (coords, border_pad): coords is a (3, 2) array of per-axis
        [start, end) in-volume bounds; border_pad is (3, 2) amounts to pad
        after cropping so every patch has the full patch_shape.
        """
        index_unravel = np.unravel_index(index, self.index_shape)
        coords_start = np.asarray(
            [self.indices[i][idx] for i, idx in enumerate(index_unravel)]
        ).astype("int")
        coords_end = np.asarray(
            [coord + shape for coord, shape in zip(coords_start, self.patch_shape)]
        ).astype("int")
        border_pad = np.asarray([[0, 0], [0, 0], [0, 0]]).astype("int")
        for i in range(3):
            # Clamp to the volume; record how much padding restores full size.
            if coords_start[i] < 0:
                border_pad[i][0] = -coords_start[i]
                coords_start[i] = 0
            if coords_end[i] >= self.shape[i]:
                border_pad[i][1] = coords_end[i] - self.shape[i]
                coords_end[i] = self.shape[i]
        coords = np.stack((coords_start, coords_end), axis=-1)
        return coords, border_pad

    def __getitem__(self, index):
        """Return a normalized patch tensor (plus crop params for test split)."""
        coords, border_pad = self.coords_list[index], self.border_pad_list[index]
        imgs = self.data[
            coords[0, 0] : coords[0, 1],
            coords[1, 0] : coords[1, 1],
            coords[2, 0] : coords[2, 1],
        ]
        imgs = np.pad(imgs, border_pad, mode="reflect")
        # Force a real in-memory copy (self.data may be a memory-mapped array).
        imgs = np.array(imgs)
        imgs = torch.from_numpy(imgs).float()
        if self.split == "train":
            imgs = augment_dataset(imgs)
        imgs = normalize_imgs(imgs, params=self.metadata)
        if self.split == "test":
            # Test needs the crop geometry back to stitch predictions together.
            crop_params = np.concatenate((coords, border_pad), axis=0)
            return imgs, crop_params
        return imgs

    def __len__(self):
        return self.dataset_length
def get_dataloader(args, data_list, metadata_list, split, is_ddp, shuffle=False):
    """Build a DataLoader over the concatenation of one DatasetBase per volume.

    Under DDP a DistributedSampler owns the shuffling, so DataLoader's own
    `shuffle` argument is disabled in that case.
    """
    datasets = [
        DatasetBase(args, data, metadata, split=split)
        for data, metadata in zip(data_list, metadata_list)
    ]
    dataset = ConcatDataset(datasets)
    if is_ddp:
        sampler = DistributedSampler(dataset, shuffle=shuffle)
        shuffle = None
    else:
        sampler = None
    return DataLoader(
        dataset,
        batch_size=args.batch_size,
        pin_memory=True,
        num_workers=args.num_workers,
        drop_last=False,
        sampler=sampler,
        shuffle=shuffle,
    )
| Python |
2D | kirchhausenlab/Cryosamba | core/utils/softsplat.py | .py | 25,936 | 668 | #!/usr/bin/env python
import collections
import os
import re
import typing
import cupy
import torch
##########################################################
# Process-wide cache of specialized CUDA kernel sources, keyed by function
# name + argument signature + device name (filled by cuda_kernel below).
objCudacache = {}
def cuda_int32(intIn: int):
    """Wrap a Python int as a cupy int32 suitable for kernel launch args."""
    return cupy.int32(intIn)
def cuda_kernel(strFunction: str, strKernel: str, objVariables: typing.Dict):
    """Specialize a templated CUDA kernel source for `objVariables` and cache it.

    Substitutes ``{{name}}``/``{{type}}`` placeholders and expands the
    ``SIZE_n``/``OFFSET_n``/``VALUE_n`` macros into literal sizes and stride
    arithmetic taken from the actual tensors. The specialized source is stored
    in ``objCudacache`` and the cache key is returned (pass it to cuda_launch).
    """
    if "device" not in objCudacache:
        objCudacache["device"] = torch.cuda.get_device_name()
    # end

    # Cache key: function name + every variable's value (scalars) or
    # dtype/shape/strides (tensors) + GPU name — anything that changes the
    # generated source must be part of the key.
    strKey = strFunction

    for strVariable in objVariables:
        objValue = objVariables[strVariable]

        strKey += strVariable

        if objValue is None:
            continue

        elif type(objValue) == int:
            strKey += str(objValue)

        elif type(objValue) == float:
            strKey += str(objValue)

        elif type(objValue) == bool:
            strKey += str(objValue)

        elif type(objValue) == str:
            strKey += objValue

        elif type(objValue) == torch.Tensor:
            strKey += str(objValue.dtype)
            strKey += str(objValue.shape)
            strKey += str(objValue.stride())

        else:
            print(strVariable, type(objValue))
            assert False
        # end
    # end

    strKey += objCudacache["device"]

    if strKey not in objCudacache:
        # First time we see this signature: substitute the placeholders.
        for strVariable in objVariables:
            objValue = objVariables[strVariable]

            if objValue is None:
                continue

            elif type(objValue) == int:
                strKernel = strKernel.replace("{{" + strVariable + "}}", str(objValue))

            elif type(objValue) == float:
                strKernel = strKernel.replace("{{" + strVariable + "}}", str(objValue))

            elif type(objValue) == bool:
                strKernel = strKernel.replace("{{" + strVariable + "}}", str(objValue))

            elif type(objValue) == str:
                strKernel = strKernel.replace("{{" + strVariable + "}}", objValue)

            elif type(objValue) == torch.Tensor and objValue.dtype == torch.uint8:
                strKernel = strKernel.replace("{{type}}", "unsigned char")

            elif type(objValue) == torch.Tensor and objValue.dtype == torch.float16:
                strKernel = strKernel.replace("{{type}}", "half")

            elif type(objValue) == torch.Tensor and objValue.dtype == torch.float32:
                strKernel = strKernel.replace("{{type}}", "float")

            elif type(objValue) == torch.Tensor and objValue.dtype == torch.float64:
                strKernel = strKernel.replace("{{type}}", "double")

            elif type(objValue) == torch.Tensor and objValue.dtype == torch.int32:
                strKernel = strKernel.replace("{{type}}", "int")

            elif type(objValue) == torch.Tensor and objValue.dtype == torch.int64:
                strKernel = strKernel.replace("{{type}}", "long")

            elif type(objValue) == torch.Tensor:
                print(strVariable, objValue.dtype)
                assert False

            else:
                print(strVariable, type(objValue))
                assert False
            # end
        # end

        # Expand SIZE_d(tensor) -> literal size of dimension d.
        # BUG FIX: the regex patterns below are now raw strings; "\(" inside a
        # plain string literal is an invalid escape sequence (SyntaxWarning on
        # Python >= 3.12, slated to become an error).
        while True:
            objMatch = re.search(r"(SIZE_)([0-4])(\()([^\)]*)(\))", strKernel)

            if objMatch is None:
                break
            # end

            intArg = int(objMatch.group(2))

            strTensor = objMatch.group(4)
            intSizes = objVariables[strTensor].size()

            strKernel = strKernel.replace(
                objMatch.group(),
                str(
                    intSizes[intArg]
                    if torch.is_tensor(intSizes[intArg]) == False
                    else intSizes[intArg].item()
                ),
            )
        # end

        # Expand OFFSET_d(tensor, i0, ..., i{d-1}) -> flat stride arithmetic.
        while True:
            objMatch = re.search(r"(OFFSET_)([0-4])(\()", strKernel)

            if objMatch is None:
                break
            # end

            intStart = objMatch.span()[1]
            intStop = objMatch.span()[1]
            intParentheses = 1

            # Scan forward to the matching closing parenthesis of the macro.
            while True:
                intParentheses += 1 if strKernel[intStop] == "(" else 0
                intParentheses -= 1 if strKernel[intStop] == ")" else 0

                if intParentheses == 0:
                    break
                # end

                intStop += 1
            # end

            intArgs = int(objMatch.group(2))
            strArgs = strKernel[intStart:intStop].split(",")

            assert intArgs == len(strArgs) - 1

            strTensor = strArgs[0]
            intStrides = objVariables[strTensor].stride()

            strIndex = []

            for intArg in range(intArgs):
                strIndex.append(
                    "(("
                    + strArgs[intArg + 1].replace("{", "(").replace("}", ")").strip()
                    + ")*"
                    + str(
                        intStrides[intArg]
                        if torch.is_tensor(intStrides[intArg]) == False
                        else intStrides[intArg].item()
                    )
                    + ")"
                )
            # end

            strKernel = strKernel.replace(
                "OFFSET_" + str(intArgs) + "(" + strKernel[intStart:intStop] + ")",
                "(" + str.join("+", strIndex) + ")",
            )
        # end

        # Expand VALUE_d(tensor, i0, ..., i{d-1}) -> indexed element access.
        while True:
            objMatch = re.search(r"(VALUE_)([0-4])(\()", strKernel)

            if objMatch is None:
                break
            # end

            intStart = objMatch.span()[1]
            intStop = objMatch.span()[1]
            intParentheses = 1

            while True:
                intParentheses += 1 if strKernel[intStop] == "(" else 0
                intParentheses -= 1 if strKernel[intStop] == ")" else 0

                if intParentheses == 0:
                    break
                # end

                intStop += 1
            # end

            intArgs = int(objMatch.group(2))
            strArgs = strKernel[intStart:intStop].split(",")

            assert intArgs == len(strArgs) - 1

            strTensor = strArgs[0]
            intStrides = objVariables[strTensor].stride()

            strIndex = []

            for intArg in range(intArgs):
                strIndex.append(
                    "(("
                    + strArgs[intArg + 1].replace("{", "(").replace("}", ")").strip()
                    + ")*"
                    + str(
                        intStrides[intArg]
                        if torch.is_tensor(intStrides[intArg]) == False
                        else intStrides[intArg].item()
                    )
                    + ")"
                )
            # end

            strKernel = strKernel.replace(
                "VALUE_" + str(intArgs) + "(" + strKernel[intStart:intStop] + ")",
                strTensor + "[" + str.join("+", strIndex) + "]",
            )
        # end

        objCudacache[strKey] = {"strFunction": strFunction, "strKernel": strKernel}
    # end

    return strKey
@cupy.memoize(for_each_device=True)
def cuda_launch(strKey: str):
    """Compile the cached kernel source for `strKey` and return its entry function.

    Memoized per device so each specialized kernel is compiled at most once.
    """
    if "CUDA_HOME" not in os.environ:
        os.environ["CUDA_HOME"] = cupy.cuda.get_cuda_path()
    cuda_home = os.environ["CUDA_HOME"]
    compile_options = ("-I " + cuda_home, "-I" + os.path.join(cuda_home, "include"))
    raw_module = cupy.RawModule(
        code=objCudacache[strKey]["strKernel"], options=compile_options
    )
    return raw_module.get_function(objCudacache[strKey]["strFunction"])
##########################################################
def softsplat(
    tenIn: torch.Tensor, tenFlow: torch.Tensor, tenMetric: torch.Tensor, strMode: str
):
    """Forward-splat `tenIn` along `tenFlow`, combining colliding pixels per `strMode`.

    `strMode` is 'sum', 'avg', 'linear', or 'soft', optionally suffixed with
    '-addeps' / '-zeroeps' / '-clipeps' to pick how the normalization
    denominator guards against zero-weight output pixels.
    """
    strBase = strMode.split("-")[0]
    assert strBase in ["sum", "avg", "linear", "soft"]

    if strMode == "sum":
        assert tenMetric is None
    if strMode == "avg":
        assert tenMetric is None
    if strBase == "linear":
        assert tenMetric is not None
    if strBase == "soft":
        assert tenMetric is not None

    # Append a weight channel so the splat can be normalized afterwards.
    if strMode == "avg":
        tenOnes = tenIn.new_ones([tenIn.shape[0], 1, tenIn.shape[2], tenIn.shape[3]])
        tenIn = torch.cat([tenIn, tenOnes], 1)
    elif strBase == "linear":
        tenIn = torch.cat([tenIn * tenMetric, tenMetric], 1)
    elif strBase == "soft":
        tenIn = torch.cat([tenIn * tenMetric.exp(), tenMetric.exp()], 1)
    # end

    tenOut = _FunctionSoftsplat.apply(tenIn, tenFlow)

    if strBase in ["avg", "linear", "soft"]:
        tenNormalize = tenOut[:, -1:, :, :]
        strParts = strMode.split("-")

        if len(strParts) == 1 or strParts[1] == "addeps":
            tenNormalize = tenNormalize + 0.0000001
        elif strParts[1] == "zeroeps":
            tenNormalize[tenNormalize == 0.0] = 1.0
        elif strParts[1] == "clipeps":
            tenNormalize = tenNormalize.clip(0.0000001, None)
        # end

        tenOut = tenOut[:, :-1, :, :] / tenNormalize
    # end

    return tenOut
# end
class _FunctionSoftsplat(torch.autograd.Function):
    """Autograd wrapper around the custom forward-splatting CUDA kernels.

    Forward scatters each input pixel to the four output pixels around its
    flow-displaced position with bilinear weights (atomicAdd accumulation);
    backward computes gradients w.r.t. both the input and the flow.
    CUDA-only: CPU tensors are rejected.
    """

    @staticmethod
    @torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)
    def forward(self, tenIn, tenFlow):
        # Output accumulator, same shape as the input.
        tenOut = tenIn.new_zeros(
            [tenIn.shape[0], tenIn.shape[1], tenIn.shape[2], tenIn.shape[3]]
        )

        if tenIn.is_cuda == True:
            # Scatter kernel: each thread handles one (n, c, y, x) input element
            # and atomically adds its four bilinear contributions to tenOut.
            cuda_launch(
                cuda_kernel(
                    "softsplat_out",
                    """
        extern "C" __global__ void __launch_bounds__(512) softsplat_out(
            const int n,
            const {{type}}* __restrict__ tenIn,
            const {{type}}* __restrict__ tenFlow,
            {{type}}* __restrict__ tenOut
        ) { for (int intIndex = (blockIdx.x * blockDim.x) + threadIdx.x; intIndex < n; intIndex += blockDim.x * gridDim.x) {
            const int intN = ( intIndex / SIZE_3(tenOut) / SIZE_2(tenOut) / SIZE_1(tenOut) ) % SIZE_0(tenOut);
            const int intC = ( intIndex / SIZE_3(tenOut) / SIZE_2(tenOut)                  ) % SIZE_1(tenOut);
            const int intY = ( intIndex / SIZE_3(tenOut)                                   ) % SIZE_2(tenOut);
            const int intX = ( intIndex                                                    ) % SIZE_3(tenOut);
            assert(SIZE_1(tenFlow) == 2);
            {{type}} fltX = ({{type}}) (intX) + VALUE_4(tenFlow, intN, 0, intY, intX);
            {{type}} fltY = ({{type}}) (intY) + VALUE_4(tenFlow, intN, 1, intY, intX);
            if (isfinite(fltX) == false) { return; }
            if (isfinite(fltY) == false) { return; }
            {{type}} fltIn = VALUE_4(tenIn, intN, intC, intY, intX);
            int intNorthwestX = (int) (floor(fltX));
            int intNorthwestY = (int) (floor(fltY));
            int intNortheastX = intNorthwestX + 1;
            int intNortheastY = intNorthwestY;
            int intSouthwestX = intNorthwestX;
            int intSouthwestY = intNorthwestY + 1;
            int intSoutheastX = intNorthwestX + 1;
            int intSoutheastY = intNorthwestY + 1;
            {{type}} fltNorthwest = (({{type}}) (intSoutheastX) - fltX) * (({{type}}) (intSoutheastY) - fltY);
            {{type}} fltNortheast = (fltX - ({{type}}) (intSouthwestX)) * (({{type}}) (intSouthwestY) - fltY);
            {{type}} fltSouthwest = (({{type}}) (intNortheastX) - fltX) * (fltY - ({{type}}) (intNortheastY));
            {{type}} fltSoutheast = (fltX - ({{type}}) (intNorthwestX)) * (fltY - ({{type}}) (intNorthwestY));
            if ((intNorthwestX >= 0) && (intNorthwestX < SIZE_3(tenOut)) && (intNorthwestY >= 0) && (intNorthwestY < SIZE_2(tenOut))) {
                atomicAdd(&tenOut[OFFSET_4(tenOut, intN, intC, intNorthwestY, intNorthwestX)], fltIn * fltNorthwest);
            }
            if ((intNortheastX >= 0) && (intNortheastX < SIZE_3(tenOut)) && (intNortheastY >= 0) && (intNortheastY < SIZE_2(tenOut))) {
                atomicAdd(&tenOut[OFFSET_4(tenOut, intN, intC, intNortheastY, intNortheastX)], fltIn * fltNortheast);
            }
            if ((intSouthwestX >= 0) && (intSouthwestX < SIZE_3(tenOut)) && (intSouthwestY >= 0) && (intSouthwestY < SIZE_2(tenOut))) {
                atomicAdd(&tenOut[OFFSET_4(tenOut, intN, intC, intSouthwestY, intSouthwestX)], fltIn * fltSouthwest);
            }
            if ((intSoutheastX >= 0) && (intSoutheastX < SIZE_3(tenOut)) && (intSoutheastY >= 0) && (intSoutheastY < SIZE_2(tenOut))) {
                atomicAdd(&tenOut[OFFSET_4(tenOut, intN, intC, intSoutheastY, intSoutheastX)], fltIn * fltSoutheast);
            }
        } }
    """,
                    {"tenIn": tenIn, "tenFlow": tenFlow, "tenOut": tenOut},
                )
            )(
                grid=tuple([int((tenOut.nelement() + 512 - 1) / 512), 1, 1]),
                block=tuple([512, 1, 1]),
                args=[
                    cuda_int32(tenOut.nelement()),
                    tenIn.data_ptr(),
                    tenFlow.data_ptr(),
                    tenOut.data_ptr(),
                ],
                stream=collections.namedtuple("Stream", "ptr")(
                    torch.cuda.current_stream().cuda_stream
                ),
            )

        elif tenIn.is_cuda != True:
            assert False  # CPU path is intentionally unsupported

        # end

        self.save_for_backward(tenIn, tenFlow)

        return tenOut

    # end

    @staticmethod
    @torch.cuda.amp.custom_bwd
    def backward(self, tenOutgrad):
        tenIn, tenFlow = self.saved_tensors

        tenOutgrad = tenOutgrad.contiguous()
        assert tenOutgrad.is_cuda == True

        # Gradient buffers are only allocated for inputs that require grad.
        tenIngrad = (
            tenIn.new_zeros(
                [tenIn.shape[0], tenIn.shape[1], tenIn.shape[2], tenIn.shape[3]]
            )
            if self.needs_input_grad[0] == True
            else None
        )
        tenFlowgrad = (
            tenFlow.new_zeros(
                [tenFlow.shape[0], tenFlow.shape[1], tenFlow.shape[2], tenFlow.shape[3]]
            )
            if self.needs_input_grad[1] == True
            else None
        )

        if tenIngrad is not None:
            # Gather kernel: d(out)/d(in) reuses the forward bilinear weights,
            # reading the output gradient at the four splat targets.
            cuda_launch(
                cuda_kernel(
                    "softsplat_ingrad",
                    """
        extern "C" __global__ void __launch_bounds__(512) softsplat_ingrad(
            const int n,
            const {{type}}* __restrict__ tenIn,
            const {{type}}* __restrict__ tenFlow,
            const {{type}}* __restrict__ tenOutgrad,
            {{type}}* __restrict__ tenIngrad,
            {{type}}* __restrict__ tenFlowgrad
        ) { for (int intIndex = (blockIdx.x * blockDim.x) + threadIdx.x; intIndex < n; intIndex += blockDim.x * gridDim.x) {
            const int intN = ( intIndex / SIZE_3(tenIngrad) / SIZE_2(tenIngrad) / SIZE_1(tenIngrad) ) % SIZE_0(tenIngrad);
            const int intC = ( intIndex / SIZE_3(tenIngrad) / SIZE_2(tenIngrad)                     ) % SIZE_1(tenIngrad);
            const int intY = ( intIndex / SIZE_3(tenIngrad)                                         ) % SIZE_2(tenIngrad);
            const int intX = ( intIndex                                                             ) % SIZE_3(tenIngrad);
            assert(SIZE_1(tenFlow) == 2);
            {{type}} fltIngrad = 0.0f;
            {{type}} fltX = ({{type}}) (intX) + VALUE_4(tenFlow, intN, 0, intY, intX);
            {{type}} fltY = ({{type}}) (intY) + VALUE_4(tenFlow, intN, 1, intY, intX);
            if (isfinite(fltX) == false) { return; }
            if (isfinite(fltY) == false) { return; }
            int intNorthwestX = (int) (floor(fltX));
            int intNorthwestY = (int) (floor(fltY));
            int intNortheastX = intNorthwestX + 1;
            int intNortheastY = intNorthwestY;
            int intSouthwestX = intNorthwestX;
            int intSouthwestY = intNorthwestY + 1;
            int intSoutheastX = intNorthwestX + 1;
            int intSoutheastY = intNorthwestY + 1;
            {{type}} fltNorthwest = (({{type}}) (intSoutheastX) - fltX) * (({{type}}) (intSoutheastY) - fltY);
            {{type}} fltNortheast = (fltX - ({{type}}) (intSouthwestX)) * (({{type}}) (intSouthwestY) - fltY);
            {{type}} fltSouthwest = (({{type}}) (intNortheastX) - fltX) * (fltY - ({{type}}) (intNortheastY));
            {{type}} fltSoutheast = (fltX - ({{type}}) (intNorthwestX)) * (fltY - ({{type}}) (intNorthwestY));
            if ((intNorthwestX >= 0) && (intNorthwestX < SIZE_3(tenOutgrad)) && (intNorthwestY >= 0) && (intNorthwestY < SIZE_2(tenOutgrad))) {
                fltIngrad += VALUE_4(tenOutgrad, intN, intC, intNorthwestY, intNorthwestX) * fltNorthwest;
            }
            if ((intNortheastX >= 0) && (intNortheastX < SIZE_3(tenOutgrad)) && (intNortheastY >= 0) && (intNortheastY < SIZE_2(tenOutgrad))) {
                fltIngrad += VALUE_4(tenOutgrad, intN, intC, intNortheastY, intNortheastX) * fltNortheast;
            }
            if ((intSouthwestX >= 0) && (intSouthwestX < SIZE_3(tenOutgrad)) && (intSouthwestY >= 0) && (intSouthwestY < SIZE_2(tenOutgrad))) {
                fltIngrad += VALUE_4(tenOutgrad, intN, intC, intSouthwestY, intSouthwestX) * fltSouthwest;
            }
            if ((intSoutheastX >= 0) && (intSoutheastX < SIZE_3(tenOutgrad)) && (intSoutheastY >= 0) && (intSoutheastY < SIZE_2(tenOutgrad))) {
                fltIngrad += VALUE_4(tenOutgrad, intN, intC, intSoutheastY, intSoutheastX) * fltSoutheast;
            }
            tenIngrad[intIndex] = fltIngrad;
        } }
    """,
                    {
                        "tenIn": tenIn,
                        "tenFlow": tenFlow,
                        "tenOutgrad": tenOutgrad,
                        "tenIngrad": tenIngrad,
                        "tenFlowgrad": tenFlowgrad,
                    },
                )
            )(
                grid=tuple([int((tenIngrad.nelement() + 512 - 1) / 512), 1, 1]),
                block=tuple([512, 1, 1]),
                args=[
                    cuda_int32(tenIngrad.nelement()),
                    tenIn.data_ptr(),
                    tenFlow.data_ptr(),
                    tenOutgrad.data_ptr(),
                    tenIngrad.data_ptr(),
                    None,
                ],
                stream=collections.namedtuple("Stream", "ptr")(
                    torch.cuda.current_stream().cuda_stream
                ),
            )
        # end

        if tenFlowgrad is not None:
            # Flow gradient: derivative of the bilinear weights w.r.t. the
            # sampling position, summed over all input channels.
            cuda_launch(
                cuda_kernel(
                    "softsplat_flowgrad",
                    """
        extern "C" __global__ void __launch_bounds__(512) softsplat_flowgrad(
            const int n,
            const {{type}}* __restrict__ tenIn,
            const {{type}}* __restrict__ tenFlow,
            const {{type}}* __restrict__ tenOutgrad,
            {{type}}* __restrict__ tenIngrad,
            {{type}}* __restrict__ tenFlowgrad
        ) { for (int intIndex = (blockIdx.x * blockDim.x) + threadIdx.x; intIndex < n; intIndex += blockDim.x * gridDim.x) {
            const int intN = ( intIndex / SIZE_3(tenFlowgrad) / SIZE_2(tenFlowgrad) / SIZE_1(tenFlowgrad) ) % SIZE_0(tenFlowgrad);
            const int intC = ( intIndex / SIZE_3(tenFlowgrad) / SIZE_2(tenFlowgrad)                       ) % SIZE_1(tenFlowgrad);
            const int intY = ( intIndex / SIZE_3(tenFlowgrad)                                             ) % SIZE_2(tenFlowgrad);
            const int intX = ( intIndex                                                                   ) % SIZE_3(tenFlowgrad);
            assert(SIZE_1(tenFlow) == 2);
            {{type}} fltFlowgrad = 0.0f;
            {{type}} fltX = ({{type}}) (intX) + VALUE_4(tenFlow, intN, 0, intY, intX);
            {{type}} fltY = ({{type}}) (intY) + VALUE_4(tenFlow, intN, 1, intY, intX);
            if (isfinite(fltX) == false) { return; }
            if (isfinite(fltY) == false) { return; }
            int intNorthwestX = (int) (floor(fltX));
            int intNorthwestY = (int) (floor(fltY));
            int intNortheastX = intNorthwestX + 1;
            int intNortheastY = intNorthwestY;
            int intSouthwestX = intNorthwestX;
            int intSouthwestY = intNorthwestY + 1;
            int intSoutheastX = intNorthwestX + 1;
            int intSoutheastY = intNorthwestY + 1;
            {{type}} fltNorthwest = 0.0f;
            {{type}} fltNortheast = 0.0f;
            {{type}} fltSouthwest = 0.0f;
            {{type}} fltSoutheast = 0.0f;
            if (intC == 0) {
                fltNorthwest = (({{type}}) (-1.0f)) * (({{type}}) (intSoutheastY) - fltY);
                fltNortheast = (({{type}}) (+1.0f)) * (({{type}}) (intSouthwestY) - fltY);
                fltSouthwest = (({{type}}) (-1.0f)) * (fltY - ({{type}}) (intNortheastY));
                fltSoutheast = (({{type}}) (+1.0f)) * (fltY - ({{type}}) (intNorthwestY));
            } else if (intC == 1) {
                fltNorthwest = (({{type}}) (intSoutheastX) - fltX) * (({{type}}) (-1.0f));
                fltNortheast = (fltX - ({{type}}) (intSouthwestX)) * (({{type}}) (-1.0f));
                fltSouthwest = (({{type}}) (intNortheastX) - fltX) * (({{type}}) (+1.0f));
                fltSoutheast = (fltX - ({{type}}) (intNorthwestX)) * (({{type}}) (+1.0f));
            }
            for (int intChannel = 0; intChannel < SIZE_1(tenOutgrad); intChannel += 1) {
                {{type}} fltIn = VALUE_4(tenIn, intN, intChannel, intY, intX);
                if ((intNorthwestX >= 0) && (intNorthwestX < SIZE_3(tenOutgrad)) && (intNorthwestY >= 0) && (intNorthwestY < SIZE_2(tenOutgrad))) {
                    fltFlowgrad += VALUE_4(tenOutgrad, intN, intChannel, intNorthwestY, intNorthwestX) * fltIn * fltNorthwest;
                }
                if ((intNortheastX >= 0) && (intNortheastX < SIZE_3(tenOutgrad)) && (intNortheastY >= 0) && (intNortheastY < SIZE_2(tenOutgrad))) {
                    fltFlowgrad += VALUE_4(tenOutgrad, intN, intChannel, intNortheastY, intNortheastX) * fltIn * fltNortheast;
                }
                if ((intSouthwestX >= 0) && (intSouthwestX < SIZE_3(tenOutgrad)) && (intSouthwestY >= 0) && (intSouthwestY < SIZE_2(tenOutgrad))) {
                    fltFlowgrad += VALUE_4(tenOutgrad, intN, intChannel, intSouthwestY, intSouthwestX) * fltIn * fltSouthwest;
                }
                if ((intSoutheastX >= 0) && (intSoutheastX < SIZE_3(tenOutgrad)) && (intSoutheastY >= 0) && (intSoutheastY < SIZE_2(tenOutgrad))) {
                    fltFlowgrad += VALUE_4(tenOutgrad, intN, intChannel, intSoutheastY, intSoutheastX) * fltIn * fltSoutheast;
                }
            }
            tenFlowgrad[intIndex] = fltFlowgrad;
        } }
    """,
                    {
                        "tenIn": tenIn,
                        "tenFlow": tenFlow,
                        "tenOutgrad": tenOutgrad,
                        "tenIngrad": tenIngrad,
                        "tenFlowgrad": tenFlowgrad,
                    },
                )
            )(
                grid=tuple([int((tenFlowgrad.nelement() + 512 - 1) / 512), 1, 1]),
                block=tuple([512, 1, 1]),
                args=[
                    cuda_int32(tenFlowgrad.nelement()),
                    tenIn.data_ptr(),
                    tenFlow.data_ptr(),
                    tenOutgrad.data_ptr(),
                    None,
                    tenFlowgrad.data_ptr(),
                ],
                stream=collections.namedtuple("Stream", "ptr")(
                    torch.cuda.current_stream().cuda_stream
                ),
            )
        # end

        return tenIngrad, tenFlowgrad
# end
# end
def FunctionSoftsplat(tenInput, tenFlow, tenMetric, strType):
    """Legacy-named forward-splatting entry point.

    `strType` is 'summation', 'average', 'linear', or 'softmax'; for the
    weighted modes a single-channel `tenMetric` supplies the weights, and the
    splatted result is normalized by the splatted weight channel.
    """
    assert tenMetric is None or tenMetric.shape[1] == 1
    assert strType in ["summation", "average", "linear", "softmax"]

    if strType == "average":
        ones = tenInput.new_ones(
            tenInput.shape[0], 1, tenInput.shape[2], tenInput.shape[3]
        )
        tenInput = torch.cat([tenInput, ones], 1)
    elif strType == "linear":
        tenInput = torch.cat([tenInput * tenMetric, tenMetric], 1)
    elif strType == "softmax":
        weights = tenMetric.exp()
        tenInput = torch.cat([tenInput * weights, weights], 1)

    tenOutput = _FunctionSoftsplat.apply(tenInput, tenFlow)

    if strType != "summation":
        tenNormalize = tenOutput[:, -1:, :, :]
        # Empty output pixels get denominator 1 so they stay zero.
        tenNormalize[tenNormalize == 0.0] = 1.0
        tenOutput = tenOutput[:, :-1, :, :] / tenNormalize
    return tenOutput
| Python |
2D | kirchhausenlab/Cryosamba | core/utils/__init__.py | .py | 0 | 0 | null | Python |
2D | kirchhausenlab/Cryosamba | core/utils/torch_utils.py | .py | 5,030 | 177 | import os
import torch
import torch.distributed as dist
import numpy as np
import random
from core.utils.utils import make_dir, load_json, save_json
### DDP utils
def sync_nodes(is_ddp):
    """Barrier across all ranks when running under DDP; no-op otherwise."""
    if is_ddp:
        dist.barrier()
def cleanup(is_ddp):
    """Tear down the DDP process group when running distributed; no-op otherwise."""
    if is_ddp:
        dist.destroy_process_group()
def get_node_count():
    """Number of participating processes (WORLD_SIZE env var, default 1)."""
    return int(os.environ.get("WORLD_SIZE", 1))
def set_global_seed(seed, rank):
    """Seed torch/numpy/random for reproducibility; a seed of -1 disables seeding.

    torch gets a per-rank seed (seed + rank); numpy/random use the base seed.
    Also forces deterministic cuDNN behavior.
    """
    if seed == -1:
        return
    torch.manual_seed(seed + rank)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def setup_DDP(seed=-1):
    """Configure CUDA backends, initialize DDP when WORLD_SIZE > 1, and seed RNGs.

    Returns (world_size, rank, device) where `device` is the local GPU index
    this process should use.
    """
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    # Allow TF32 matmuls/convolutions for speed on Ampere+ GPUs.
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True
    world_size = get_node_count()
    if world_size > 1:
        dist.init_process_group("nccl")
        rank = dist.get_rank()
        device = rank % torch.cuda.device_count()
    else:
        rank, device = 0, 0
    set_global_seed(seed, rank)
    # BUG FIX: bind this process to its *local* device, not the global rank —
    # on multi-node runs the global rank can exceed the per-node GPU count,
    # which made set_device(rank) raise.
    torch.cuda.set_device(device)
    return world_size, rank, device
### Optimizer utils
def get_optimizer(model, args):
    """AdamW over all model parameters, configured from the args namespace."""
    return torch.optim.AdamW(
        model.parameters(),
        lr=args.lr,
        betas=args.betas,
        eps=args.epsilon,
        weight_decay=args.weight_decay,
    )
def get_lr(optimizer):
    """Learning rate of the optimizer's first param group.

    Raises ValueError if the optimizer has no parameter groups.
    """
    groups = optimizer.param_groups
    if not groups:
        raise ValueError("Optimizer does not have any parameter groups")
    return groups[0]["lr"]
class CombinedScheduler(torch.optim.lr_scheduler._LRScheduler):
    """Linear warmup for `warmup_steps` steps, then exponential decay by `lr_decay`."""

    def __init__(self, optimizer, warmup_steps, lr_decay, last_iter=-1):
        self.warmup_steps = warmup_steps
        self.lr_decay = lr_decay
        super().__init__(optimizer, last_epoch=last_iter)

    def get_lr(self):
        step = self._step_count
        if step < self.warmup_steps:
            # Ramp from 1/warmup_steps up to 1 over the warmup window.
            alpha = float(step) / self.warmup_steps
            scale = alpha + (1.0 - alpha) / self.warmup_steps
        else:
            scale = self.lr_decay ** (step - self.warmup_steps)
        return [base_lr * scale for base_lr in self.base_lrs]
def get_scheduler(optimizer, warmup_steps, lr_decay, last_iter=-1):
    """Factory for the warmup-then-decay LR scheduler."""
    return CombinedScheduler(optimizer, warmup_steps, lr_decay, last_iter)
# Checkpoint utils
def count_model_params(model):
    """Return (total, trainable) parameter counts as comma-grouped strings."""
    total = sum(p.numel() for p in model.parameters())
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    return f"{total:,.0f}", f"{trainable:,.0f}"
def adjust_keys_for_compiled_model(loaded_state_dict):
    """Prefix every key with '_orig_mod.' to match a torch.compile-wrapped model."""
    return {"_orig_mod." + key: value for key, value in loaded_state_dict.items()}
def state_dict_remove_prefix(state_dict, prefix):
    """Strip `prefix` from every key that starts with it; other keys pass through."""
    stripped = {}
    for key, value in state_dict.items():
        new_key = key[len(prefix) :] if key.startswith(prefix) else key
        stripped[new_key] = value
    return stripped
def state_dict_add_prefix(state_dict, prefix):
    """Prepend `prefix` to every key that does not already carry it."""
    return {
        (key if key.startswith(prefix) else prefix + key): value
        for key, value in state_dict.items()
    }
def fix_state_dict(state_dict, is_ddp, compile):
    """Normalize checkpoint keys, then re-apply the prefixes the target model expects.

    Any combination of DDP ('module.') and torch.compile ('_orig_mod.') prefixes
    is stripped first, so checkpoints transfer between wrapped and unwrapped models.
    """
    for old_prefix in ("module.", "_orig_mod.", "module._orig_mod."):
        state_dict = state_dict_remove_prefix(state_dict, old_prefix)
    if is_ddp and compile:
        new_prefix = "module._orig_mod."
    elif is_ddp:
        new_prefix = "module."
    elif compile:
        new_prefix = "_orig_mod."
    else:
        new_prefix = ""
    if new_prefix:
        state_dict = state_dict_add_prefix(state_dict, new_prefix)
    return state_dict
def save_ckpt(model, optimizer, scheduler, iter, path):
    """Serialize model/optimizer/scheduler state plus the iteration counter to `path`."""
    torch.save(
        {
            "model_state_dict": model.state_dict(),
            "optimizer_state_dict": optimizer.state_dict(),
            "scheduler_state_dict": scheduler.state_dict(),
            "iter": iter,
        },
        path,
    )
def load_ckpt(
    path, model=None, optimizer=None, scheduler=None, is_ddp=False, compile=False
):
    """Restore states from `path` if it exists.

    Returns (model, optimizer, scheduler, start_iter); start_iter is 0 when no
    checkpoint is found, otherwise the saved iteration + 1. Objects passed as
    None are left untouched.
    """
    if not os.path.exists(path):
        return model, optimizer, scheduler, 0
    ckpt = torch.load(path, weights_only=False)
    if model is not None:
        # Re-key the weights to match the target's DDP/compile wrapping.
        model.load_state_dict(
            fix_state_dict(ckpt["model_state_dict"], is_ddp, compile), strict=True
        )
    if optimizer is not None:
        optimizer.load_state_dict(ckpt["optimizer_state_dict"])
    if scheduler is not None:
        scheduler.load_state_dict(ckpt["scheduler_state_dict"])
    return model, optimizer, scheduler, ckpt["iter"] + 1
| Python |
2D | kirchhausenlab/Cryosamba | core/utils/utils.py | .py | 3,313 | 137 | import os, glob
from distutils.util import strtobool
from easydict import EasyDict
import sys
import json
import shutil
from loguru import logger
from torch.utils.tensorboard import SummaryWriter
def listify(x):
    """Wrap `x` in a single-element list unless it already is a list."""
    if isinstance(x, list):
        return x
    return [x]
### File utils
def make_dir(path):
    """Create `path` (including parents) if it does not exist.

    Uses exist_ok=True instead of a check-then-create pair, which was racy
    when several DDP ranks created the same directory concurrently.
    """
    os.makedirs(path, exist_ok=True)
def remove_file(path):
    """Delete `path` if it exists; silently ignore a missing file.

    EAFP instead of exists()-then-remove(), which could raise if the file
    disappeared between the check and the removal.
    """
    try:
        os.remove(path)
    except FileNotFoundError:
        pass
def load_json(path):
    """Load a JSON config file into an attribute-accessible EasyDict."""
    with open(path) as handle:
        return EasyDict(json.load(handle))
def save_json(path, cfg):
    """Write `cfg` to `path` as human-readable (indented) JSON."""
    with open(path, "w") as handle:
        json.dump(cfg, handle, indent=4)
### Loguru utils
def logger_info(rank, message):
    """Log `message` only from rank 0, avoiding duplicate lines under DDP."""
    if rank == 0:
        logger.info(message)
def console_filter(record):
    """Loguru filter: show a record on console unless extra['to_console'] is falsy."""
    extra = record["extra"]
    return extra.get("to_console", True)
def set_logger(save_dir):
    """Route loguru output to stdout (console-filtered) and to runtime.log in `save_dir`."""
    log_path = os.path.join(save_dir, "runtime.log")
    fmt = "<green>{time:YYYY/MM/DD HH:mm:ss!UTC}</green> | <level>{level}</level> | <level>{message}</level>"
    logger.remove()
    logger.add(sys.stdout, format=fmt, filter=console_filter)
    logger.add(log_path, format=fmt)
### Tensorboard utils
def set_writer(save_dir, layout):
    """Create a TensorBoard writer in `save_dir` with a custom-scalars layout."""
    tb_writer = SummaryWriter(save_dir)
    tb_writer.add_custom_scalars(layout)
    return tb_writer
def set_writer_layout_train(max_frame_gap):
    """TensorBoard custom-scalars layout: one train-loss curve per frame gap,
    plus the validation loss and learning-rate curves."""
    gap_labels = ["train_loss/gap %d" % gap for gap in range(1, max_frame_gap + 1)]
    return {
        "Plots": {
            "train_loss": ["Multiline", gap_labels],
            "val_loss": ["Multiline", ["val_loss/val"]],
            "learning_rate": ["Multiline", ["learning_rate/lr"]],
        },
    }
def set_writer_train(cfg):
    """TensorBoard writer for training, with the train-loss layout prebuilt."""
    return set_writer(
        cfg.train_dir, set_writer_layout_train(cfg.train_data.max_frame_gap)
    )
### Run utils
def prompt(query):
    """Ask a yes/no question on stdout and return 1 (yes) or 0 (no).

    Re-asks until the answer parses. Accepts the same tokens as
    distutils.util.strtobool (y/yes/t/true/on/1 and n/no/f/false/off/0) but
    no longer depends on distutils, which was removed in Python 3.12.
    """
    while True:
        sys.stdout.write("%s [y/n]:" % query)
        answer = input().strip().lower()
        if answer in ("y", "yes", "t", "true", "on", "1"):
            return 1
        if answer in ("n", "no", "f", "false", "off", "0"):
            return 0
        # BUG FIX: this used to call sys.stdout("please answer with y/n"),
        # which raises TypeError (a stream is not callable) on any invalid
        # answer; also replaces unbounded recursion with a loop.
        sys.stdout.write("please answer with y/n")
def setup_run(cfg, mode):
    """Interactively prepare the output directory for a run, then set up logging.

    `mode` is "training" (uses cfg.train_dir, resumable only if last.pt exists)
    or anything else (treated as inference, uses cfg.inference_dir, always
    resumable). If the directory exists, the user is prompted on stdin to
    overwrite or resume; declining both exits the process. New runs snapshot
    the config to config.json.
    """
    save_dir = cfg.train_dir if mode == "training" else cfg.inference_dir
    # Training can only resume from an existing checkpoint; inference always can.
    can_resume_run = (
        os.path.exists(os.path.join(save_dir, "last.pt"))
        if mode == "training"
        else True
    )
    new_run = True
    if os.path.exists(save_dir):
        # Loop exists only so `break` can fall through after one decision;
        # prompt() itself re-asks until it gets a parseable answer.
        while True:
            if (
                prompt(
                    f"Dir {save_dir} already exists. Would you like to overwrite it?"
                )
                == True
            ):
                shutil.rmtree(save_dir)
            elif can_resume_run:
                if prompt(f"Would you like to resume the existing {mode}?") == True:
                    message = f"Resuming {mode} from {save_dir}"
                    new_run = False
                else:
                    print("Understandable, have a nice day.")
                    sys.exit()
            else:
                print("No checkpoints found.")
                sys.exit()
            break
    if new_run:
        message = f"Starting new {mode} in {save_dir}"
        make_dir(save_dir)
        # Snapshot the config so the run is reproducible later.
        save_json(os.path.join(save_dir, "config.json"), cfg)
    set_logger(save_dir)
    logger.info(message)
2D | kirchhausenlab/Cryosamba | core/utils/nn_utils.py | .py | 4,401 | 163 | import torch
import torch.nn as nn
import torch.nn.functional as F
from core.utils import softsplat
def backwarp(tenInput, tenFlow):
tenHor = (
torch.linspace(
-1.0 + (1.0 / tenFlow.shape[3]),
1.0 - (1.0 / tenFlow.shape[3]),
tenFlow.shape[3],
device=tenFlow.device,
)
.view(1, 1, 1, -1)
.expand(-1, -1, tenFlow.shape[2], -1)
)
tenVer = (
torch.linspace(
-1.0 + (1.0 / tenFlow.shape[2]),
1.0 - (1.0 / tenFlow.shape[2]),
tenFlow.shape[2],
device=tenFlow.device,
)
.view(1, 1, -1, 1)
.expand(-1, -1, -1, tenFlow.shape[3])
)
backwarp_tenGrid = torch.cat([tenHor, tenVer], 1)
backwarp_tenPartial = tenFlow.new_ones(
[tenFlow.shape[0], 1, tenFlow.shape[2], tenFlow.shape[3]]
)
tenFlow = torch.cat(
[
tenFlow[:, 0:1, :, :] / ((tenInput.shape[3] - 1.0) / 2.0),
tenFlow[:, 1:2, :, :] / ((tenInput.shape[2] - 1.0) / 2.0),
],
1,
)
tenInput = torch.cat([tenInput, backwarp_tenPartial], 1)
tenOutput = F.grid_sample(
input=tenInput,
grid=(backwarp_tenGrid + tenFlow).permute(0, 2, 3, 1),
mode="bilinear",
padding_mode="zeros",
align_corners=False,
)
tenMask = tenOutput[:, -1:, :, :]
tenMask[tenMask > 0.999] = 1.0
tenMask[tenMask < 1.0] = 0.0
return tenOutput[:, :-1, :, :] * tenMask
def warp_fn(tenInput, tenFlow, tenMetric=None, strType="average"):
    """Forward-warp (softmax-splat) `tenInput` along `tenFlow`.

    Thin wrapper around softsplat.FunctionSoftsplat; `strType` selects the
    splatting mode (e.g. "average") and `tenMetric` the per-pixel weighting.
    """
    return softsplat.FunctionSoftsplat(
        tenInput=tenInput,
        tenFlow=tenFlow,
        tenMetric=tenMetric,
        strType=strType,
    )
class ConvBlock(nn.Module):
    """A single Conv2d followed by an activation.

    An even `kernel_size` yields a non-overlapping, unpadded downsampling
    convolution (stride == kernel_size); an odd one yields a padded,
    resolution-preserving convolution (stride 1).  `act` selects the
    activation by name ("prelu", "lrelu", anything else -> identity).
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        padding_mode="zeros",
        bias=True,
        act="prelu",
    ):
        super().__init__()
        is_even = kernel_size % 2 == 0
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=kernel_size if is_even else 1,
            padding=0 if is_even else (kernel_size - 1) // 2,
            padding_mode=padding_mode,
            bias=bias,
        )
        # Unrecognized activation names deliberately fall back to a no-op.
        if act == "prelu":
            self.act = nn.PReLU(out_channels)
        elif act == "lrelu":
            self.act = nn.LeakyReLU(inplace=False, negative_slope=0.1)
        else:
            self.act = nn.Identity()

    def forward(self, x):
        """Apply convolution, then activation."""
        return self.act(self.conv(x))
def conv2(in_planes, out_planes, kernel_size=3, stride=2, padding_mode="zeros"):
    """Downsampling stage: one strided ConvBlock plus one refining ConvBlock.

    The first block uses `stride` as an (even) kernel size, so it downsamples;
    the second keeps resolution.
    """
    downsample = ConvBlock(
        in_planes, out_planes, kernel_size=stride, padding_mode=padding_mode
    )
    refine = ConvBlock(
        out_planes, out_planes, kernel_size=kernel_size, padding_mode=padding_mode
    )
    return nn.Sequential(downsample, refine)
def conv4(in_planes, out_planes, kernel_size=3, stride=2, padding_mode="zeros"):
    """Downsampling stage: one strided ConvBlock plus three refining ConvBlocks."""
    layers = [
        ConvBlock(in_planes, out_planes, kernel_size=stride, padding_mode=padding_mode)
    ]
    for _ in range(3):
        layers.append(
            ConvBlock(
                out_planes,
                out_planes,
                kernel_size=kernel_size,
                padding_mode=padding_mode,
            )
        )
    return nn.Sequential(*layers)
def deconv(in_planes, out_planes, kernel_size=3, stride=2, padding_mode="zeros"):
    """Upsampling stage: bilinear upsample by `stride`, then a bias-free ConvBlock."""
    upsample = nn.Upsample(scale_factor=stride, mode="bilinear", align_corners=False)
    conv = ConvBlock(
        in_planes,
        out_planes,
        kernel_size=kernel_size,
        padding_mode=padding_mode,
        bias=False,
    )
    return nn.Sequential(upsample, conv)
def deconv3(in_planes, out_planes, kernel_size=3, stride=2, padding_mode="zeros"):
    """Upsampling stage: bilinear upsample, a bias-free ConvBlock, then two refiners."""
    layers = [
        nn.Upsample(scale_factor=stride, mode="bilinear", align_corners=False),
        ConvBlock(
            in_planes,
            out_planes,
            kernel_size=kernel_size,
            padding_mode=padding_mode,
            bias=False,
        ),
    ]
    for _ in range(2):
        layers.append(
            ConvBlock(
                out_planes,
                out_planes,
                kernel_size=kernel_size,
                padding_mode=padding_mode,
            )
        )
    return nn.Sequential(*layers)
| Python |
2D | kirchhausenlab/Cryosamba | core/utils/data_utils.py | .py | 5,816 | 205 | import glob
import os
import mrcfile
import numpy as np
import tifffile as tif
import torch
import warnings
from core.utils.utils import make_dir
def get_overlap_pad(patch_overlap, device):
    """Build a (1, 3, 2) int tensor of per-axis paddings from a patch overlap.

    Axis 0 (depth) gets no padding; the two in-plane axes are padded by half
    the corresponding overlap on each side.
    """
    half_y = patch_overlap[0] // 2
    half_x = patch_overlap[1] // 2
    pads = [[0, 0], [half_y, half_y], [half_x, half_x]]
    return torch.tensor(pads, device=device).unsqueeze(0).int()
def augment_dataset(imgs):
    """Randomly flip `imgs` along each of its last three axes (p=0.5 each).

    Draws exactly one uniform sample per axis, in the fixed order -1, -2, -3,
    so the consumed RNG sequence is deterministic for a given seed.
    """
    for axis in (-1, -2, -3):
        if torch.rand(1, device=imgs.device) < 0.5:
            imgs = torch.flip(imgs, [axis])
    return imgs
def unpad3D(array, pad):
    """Undo a 3D padding: crop `pad[d][0]` from the front and `pad[d][1]` from
    the back of each of the last three axes.

    The `-hi or None` trick keeps a side intact when its pad width is 0
    (a stop of -0 would otherwise produce an empty slice).
    """
    slices = tuple(slice(lo, -hi or None) for lo, hi in pad)
    return array[(..., *slices)]
def normalize_imgs(imgs, params):
    """Map intensities from [params['min'], params['max']] into [-1, 1]."""
    span = params["max"] - params["min"]
    zero_one = (imgs - params["min"]) / span
    return 2 * zero_one - 1
def denormalize_imgs(imgs, params):
    """Inverse of normalize_imgs: map [-1, 1] back to [min, max]."""
    zero_one = (imgs + 1) / 2
    return zero_one * (params["max"] - params["min"]) + params["min"]
def data_extension_to_format(extension):
    """Map a file extension ('.tif' / '.mrc' / '.rec') to its data-format name.

    Raises NotImplementedError for any other extension.
    """
    formats = {".tif": "tif_file", ".mrc": "mrc_file", ".rec": "rec_file"}
    fmt = formats.get(extension)
    if fmt is None:
        raise NotImplementedError(
            f"File extension {extension} is not currently supported"
        )
    return fmt
def data_format_to_extension(format):
    """Map a data-format name back to its file extension.

    Both "tif_file" and "tif_sequence" map to ".tif"; raises
    NotImplementedError for unknown formats.
    """
    extensions = {
        "tif_file": ".tif",
        "tif_sequence": ".tif",
        "mrc_file": ".mrc",
        "rec_file": ".rec",
    }
    ext = extensions.get(format)
    if ext is None:
        raise NotImplementedError(f"Data format {format} is not currently supported")
    return ext
def get_data_format(path):
    """Infer the data format of `path`.

    A regular file is classified by its extension; a directory counts as a
    tif sequence if it contains at least one .tif file.  Raises ValueError
    for paths that are neither.
    """
    if os.path.isfile(path):
        return data_extension_to_format(os.path.splitext(path)[1])
    if os.path.isdir(path):
        if glob.glob(os.path.join(path, "*.tif")):
            return "tif_sequence"
        raise NotImplementedError(
            f"Only sequences of tif files are currently supported"
        )
    raise ValueError(f"Path {path} is invalid")
class Virtual3DStack:
    """Present a folder of 2D .tif slices as one lazily-read 3D (z, y, x) stack.

    Each slice is opened as a tifffile handle and read back as a memory-map
    on demand, so the full volume is never materialized at once.
    """

    def __init__(self, path):
        # Sorted so z order follows the file names.
        filenames = sorted(glob.glob(os.path.join(path, "*.tif")))
        self.slices = [tif.TiffFile(name) for name in filenames]

    def _memmap(self, z):
        # Memory-mapped 2D view of a single z slice.
        return self.slices[z].asarray(out="memmap")

    def __getitem__(self, index):
        if not isinstance(index, tuple):
            # Plain integer index: one whole slice.
            return self._memmap(index)
        z_slice, y_slice, x_slice = index
        z_range = range(*z_slice.indices(len(self.slices)))
        return np.stack([self._memmap(z)[y_slice, x_slice] for z in z_range])

    @property
    def shape(self):
        rows, cols = self._memmap(0).shape
        return (len(self.slices), rows, cols)

    @property
    def dtype(self):
        return self._memmap(0).dtype

    def min(self):
        return min(self._memmap(z).min() for z in range(len(self.slices)))

    def max(self):
        return max(self._memmap(z).max() for z in range(len(self.slices)))

    def mean(self):
        # NOTE(review): mean of per-slice means — exact only when all slices
        # have the same pixel count, which holds for a homogeneous stack.
        total = sum(self._memmap(z).mean() for z in range(len(self.slices)))
        return total / len(self.slices)
def memmap_data(path, data_format):
    """Open the data at `path` as a memory-mapped array.

    Args:
        path: file (tif/mrc/rec) or folder (tif sequence) to open.
        data_format: one of "tif_file", "mrc_file", "rec_file", "tif_sequence".

    Returns:
        (data, extra_params): `extra_params` is None except for MRC/REC files,
        where it carries the voxel size read from the header.

    Raises:
        NotImplementedError: for an unrecognized `data_format`.  (Previously an
        unknown format fell through every branch and crashed with an unrelated
        NameError on the unbound `data` variable.)
    """
    extra_params = None
    if data_format == "tif_file":
        data = tif.memmap(path)
    elif data_format == "mrc_file" or data_format == "rec_file":
        # Record header warnings instead of letting them print to stderr.
        with warnings.catch_warnings(record=True):
            memmap = mrcfile.mmap(path, mode="r", permissive=False)
            data = memmap.data
            extra_params = {"voxel_size": memmap.voxel_size.copy()}
    elif data_format == "tif_sequence":
        data = Virtual3DStack(path)
    else:
        # Fail loudly instead of leaving `data` unbound; message matches
        # data_format_to_extension for consistency.
        raise NotImplementedError(
            f"Data format {data_format} is not currently supported"
        )
    return data, extra_params
def get_metadata(data, data_format, extra_params=None):
    """Summarize an array: format, shape, dtype and float-cast stats.

    Any `extra_params` (e.g. the MRC voxel size) are merged into the result.
    """
    metadata = {
        "format": data_format,
        "shape": data.shape,
        "dtype": data.dtype,
        "mean": data.mean().astype("float"),
        "min": data.min().astype("float"),
        "max": data.max().astype("float"),
    }
    if extra_params is not None:
        metadata.update(extra_params)
    return metadata
def get_data(path):
    """Memory-map the data at `path` and return (data, metadata)."""
    fmt = get_data_format(path)
    data, extras = memmap_data(path, fmt)
    return data, get_metadata(data, fmt, extras)
def save_data(path, name, data, metadata, output_format="same"):
    """Write `data` under `path`/`name` as a tif/mrc/rec file or tif sequence.

    `output_format == "same"` reuses the format recorded in `metadata`.
    MRC/REC output also restores the original voxel size and refreshes the
    header statistics.
    """
    fmt = metadata["format"] if output_format == "same" else output_format
    extension = data_format_to_extension(fmt)
    if fmt == "tif_sequence":
        # One zero-padded 2D tif per z slice, inside a dedicated folder.
        save_dir = os.path.join(path, name)
        make_dir(save_dir)
        zfill = len(str(data.shape[0]))
        for i in range(data.shape[0]):
            save_path = os.path.join(
                save_dir, f"slice_{str(i).zfill(zfill)}" + extension
            )
            tif.imwrite(save_path, data[i, :, :])
    else:
        save_path = os.path.join(path, name + extension)
        if fmt == "tif_file":
            tif.imwrite(save_path, data)
        elif fmt == "mrc_file" or fmt == "rec_file":
            with mrcfile.new(save_path, overwrite=True) as mrc:
                mrc.set_data(data)
                mrc.voxel_size = metadata["voxel_size"].copy()
                mrc.update_header_from_data()
                mrc.update_header_stats()
| Python |
2D | kirchhausenlab/Cryosamba | automate/cryosamba_setup.py | .py | 4,991 | 143 | import logging
import os
import subprocess
import sys
import streamlit as st
from training_setup import handle_exceptions
from logging_config import logger
def run_command(command, shell=True):
    """Run `command` in a subprocess and return its (stdout, stderr) text.

    Failures (non-zero exit) are reported in the Streamlit UI and logged
    rather than raised.
    """
    proc = subprocess.Popen(
        command,
        shell=shell,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,
    )
    out, err = proc.communicate()
    if proc.returncode != 0:
        message = f"Error executing command: {command}\nError: {err}"
        st.error(message)
        logger.error(message)
    return out, err
def is_conda_installed() -> bool:
    """Return True if a `conda` executable responds to `--version`."""
    try:
        subprocess.run(["conda", "--version"], capture_output=True, check=True)
    except (subprocess.CalledProcessError, FileNotFoundError):
        return False
    return True
def install_conda():
    """Download and non-interactively install Miniconda for the current OS.

    Returns True after an installation attempt, False on an unsupported
    platform.  The app must be restarted afterwards so PATH changes apply.
    NOTE(review): run_command failures are only reported, not checked, so
    True does not guarantee the install actually succeeded.
    """
    st.write("Conda is not installed. Installing conda...")
    if sys.platform.startswith("linux"):
        # Linux: shell installer, batch mode (-b) into $HOME/miniconda3.
        run_command(
            "wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh"
        )
        run_command("chmod +x Miniconda3-latest-Linux-x86_64.sh")
        run_command("bash Miniconda3-latest-Linux-x86_64.sh -b -p $HOME/miniconda3")
        run_command("$HOME/miniconda3/bin/conda init bash")
    elif sys.platform == "darwin":
        # macOS: same flow with the MacOSX installer.
        run_command(
            "wget https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh"
        )
        run_command("chmod +x Miniconda3-latest-MacOSX-x86_64.sh")
        run_command("bash Miniconda3-latest-MacOSX-x86_64.sh -b -p $HOME/miniconda3")
        run_command("$HOME/miniconda3/bin/conda init bash")
    elif sys.platform == "win32":
        # Windows: download via PowerShell, run the .exe in silent mode (/S).
        run_command(
            "powershell -Command \"(New-Object Net.WebClient).DownloadFile('https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe', 'Miniconda3-latest-Windows-x86_64.exe')\""
        )
        run_command(
            'start /wait "" Miniconda3-latest-Windows-x86_64.exe /InstallationType=JustMe /AddToPath=1 /RegisterPython=0 /S /D=%UserProfile%\\Miniconda3'
        )
    else:
        st.error("Unsupported operating system")
        return False
    st.write(
        "Conda installed. Please restart the application for changes to take effect."
    )
    return True
@handle_exceptions
def setup_conda():
    """Ensure conda is available, installing it via install_conda() if needed.

    Returns True when conda is (or becomes) installed, False on failure.
    """
    st.subheader("Conda Installation")
    if is_conda_installed():
        st.write("Conda is already installed.")
        return True
    else:
        if install_conda():
            st.write("Conda installation completed successfully.")
            st.write("Please restart the application for changes to take effect.")
            return True
        else:
            st.error("Failed to install Conda.")
            return False
@handle_exceptions
def setup_environment_for_cryosamba() -> None:
    """Top-level Streamlit page: conda install, env creation, env export.

    Renders, in order: conda status (with an install button when missing),
    an environment-setup button, and an environment-export button.
    """
    st.title("Cryosamba Setup Interface")
    st.write("Welcome to Cryosamba Setup Interface!")
    if not is_conda_installed():
        st.warning(
            "Conda is not installed. You need to install Conda before proceeding."
        )
        if st.button("Install Conda"):
            if setup_conda():
                st.success(
                    "Conda installed successfully. Please restart the application."
                )
            else:
                st.error(
                    "Failed to install Conda. Please try again or install manually."
                )
    else:
        st.success("Conda is installed.")
        if st.button("Setup Environment"):
            # NOTE(review): the text_input only renders after the button is
            # clicked, so the default name is used on that first click.
            env_name = st.text_input("Enter environment name", "cryosamba")
            setup_environment(env_name)
        if st.button("Export Environment"):
            env_name = st.text_input("Enter environment name to export", "cryosamba")
            export_env(env_name)
def setup_environment(env_name):
    """Create the conda env `env_name` (if missing) and pip-install the
    CUDA 11.8 PyTorch wheels plus Cryosamba's Python dependencies.

    NOTE(review): `conda activate` inside a non-interactive shell typically
    requires a prior `conda init`/shell hook; these commands may be no-ops
    in some shells — verify on the target system.
    """
    st.subheader(f"Setting up Conda Environment: {env_name}")
    if is_env_active(env_name):
        st.write(f"Environment '{env_name}' exists.")
    else:
        st.write(f"Creating conda environment: {env_name}")
        run_command(f"conda create --name {env_name} python=3.11 -y")
    st.write(f"Activating conda environment: {env_name}")
    run_command(
        f"conda activate {env_name} && pip3 install torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118"
    )
    run_command(
        f"conda activate {env_name} && pip install tifffile mrcfile easydict loguru tensorboard streamlit pipreqs cupy-cuda11x"
    )
    st.write("Environment setup complete.")
def is_env_active(env_name) -> bool:
    """Return True if `env_name` appears in the `conda env list` output.

    NOTE(review): plain substring matching — a name that is a substring of
    another environment's name also matches.
    """
    listing, _ = run_command("conda env list")
    return f"{env_name}" in listing
def export_env(env_name):
    """Export conda env `env_name` to environment.yml and move it to the repo root."""
    st.subheader("Exporting Conda Environment")
    run_command(f"conda env export -n {env_name} > environment.yml")
    run_command("mv environment.yml ../")
    st.write("Environment exported and moved to root directory.")
if __name__ == "__main__":
setup_environment_for_cryosamba()
| Python |
2D | kirchhausenlab/Cryosamba | automate/file_selector.py | .py | 1,660 | 54 | import os
import streamlit as st
from logging_config import logger
def list_directories_in_directory(directory):
    """Return the names of the immediate subdirectories of `directory`.

    On PermissionError, shows an error in the UI and returns [].
    """
    try:
        with os.scandir(directory) as entries:
            return [entry.name for entry in entries if entry.is_dir()]
    except PermissionError:
        st.error("Permission denied to access this directory.")
        return []
def get_dir():
    """Render a simple directory navigator widget.

    Keeps the chosen path in st.session_state.current_directory (defaulting
    to the process CWD) and lets the user walk up/down the tree.
    """
    # Set default directory to the current folder
    if "current_directory" not in st.session_state:
        st.session_state.current_directory = os.getcwd()
    st.title("Directory Navigator")
    # Display the current directory
    st.write(f"Current Directory: {st.session_state.current_directory}")
    selected_subdir = st.selectbox(
        "Subdirectories:",
        [""] + list_directories_in_directory(st.session_state.current_directory),
    )
    # Buttons to navigate up and down the directory levels
    col1, col2 = st.columns(2)
    with col1:
        if st.button("Go Up"):
            st.session_state.current_directory = os.path.dirname(
                st.session_state.current_directory
            )
    with col2:
        if st.button("Go Down") and selected_subdir:
            st.session_state.current_directory = os.path.join(
                st.session_state.current_directory, selected_subdir
            )
    # NOTE(review): unlike "Go Down", this branch does not guard against an
    # empty selected_subdir; joining with "" only adds a trailing separator,
    # but confirm that is intended.
    if st.button("Select Path to be displayed below by hitting submit"):
        st.session_state.current_directory = os.path.join(
            st.session_state.current_directory, selected_subdir
        )
    # Display the selected directory
    st.write("Selected Directory:")
    st.write(st.session_state.current_directory)
| Python |
2D | kirchhausenlab/Cryosamba | automate/run_inference.py | .py | 3,923 | 101 | import logging
import os
import subprocess
from functools import wraps
from typing import List
import streamlit as st
from file_selector import get_dir, list_directories_in_directory
from training_setup import handle_exceptions
from logging_config import logger
@handle_exceptions
def select_gpus() -> List[str]:
    """Show current GPU utilization (via nvidia-smi) and let the user pick GPUs.

    Returns the selected GPU indices as strings; "0", "1", "2" are
    pre-selected by default.
    """
    st.text("The following GPUs are not in use, select ones you one want to use! ")
    with st.echo():
        command = "nvidia-smi && nvidia-smi --query-gpu=index,utilization.gpu,memory.free,memory.total,memory.used --format=csv"
        res = subprocess.run(command, shell=True, capture_output=True, text=True)
        st.code(res.stdout)
    options = st.multiselect(
        "Select the GPUs here",
        ["0", "1", "2", "3", "4", "5", "6", "7"],
        ["0", "1", "2"],
    )
    st.write("You selected:", options)
    # print(type(options), options)
    return options
@handle_exceptions
def run_experiment(gpus: str, folder_path: str) -> None:
    """Display the torchrun inference command for the chosen GPUs/experiment.

    Args:
        gpus: comma-separated GPU indices, e.g. "0,1".
        folder_path: experiment name under ../runs/.

    The command is only shown for the user to copy into a terminal after
    confirmation; nothing is executed here.
    """
    print(f"{folder_path}")
    # One torchrun process per selected GPU: count the comma-separated list.
    cmd = f"CUDA_VISIBLE_DEVICES={gpus} torchrun --standalone --nproc_per_node=$(echo {gpus} | tr ',' '\n' | wc -l) ../inference.py --config ../runs/{folder_path}/inference_config.json"
    st.text(f"Do you want to run the command: {cmd}?")
    selection = st.radio("Type y/n: ", ["y", "n"], index=None)
    if selection == "n":
        st.write("cancelled")
    elif selection == "y":
        st.write(
            "Dear Reader, copy this command onto your terminal or powershell to train the model, and follow the prompts!"
        )
        st.write(
            "Please open up a new terminal on your machine and navigate to the cryosamba/automate folder. Then run this command"
        )
        st.code(cmd)
        st.code(
            f"SAMPLE COMMAND LOOKS LIKE: \n CUDA_VISIBLE_DEVICES=0,1 torchrun --standalone --nproc_per_node=2 train.py --config configs/your_config_train.json \n: "
        )
@handle_exceptions
def select_experiment() -> None:
    """Prompt for an experiment name and record whether ../runs/<name> exists.

    Sets st.session_state.folder_found (bool) and, on success,
    st.session_state.input_name for the subsequent run step.
    """
    get_dir()
    st.write("Please enter the experiment you want to run: ")
    input_name = st.text_input("Experiment Name", "")
    base_path = f"../runs/{input_name}"
    if st.button("Check folder"):
        if os.path.exists(base_path):
            st.success(f"Folder {base_path} found")
            st.session_state.folder_found = True
            st.session_state.input_name = input_name
        else:
            st.error(f"Folder {base_path} not found")
            # Bug fix: previously wrote st.session_state.folder, a key that
            # nothing reads; callers branch on st.session_state.folder_found.
            st.session_state.folder_found = False
@handle_exceptions
def select_experiment_and_run() -> None:
    """Inference-runner page: pick GPUs, pick an experiment, show the command.

    Requires a CUDA-capable machine; if none is available the page only
    prints instructions for copying Cryosamba to a GPU host.
    """
    st.header("Welcome to the CryoSamba Training Runner")
    st.write(
        "Please note that you *need a GPU to run cryosamba, if you cannot see a graph or a table or GPUs **AFTER YOU CHOOSE YOUR Experiment**, your machine does not support cryosamba.* \
        If you want to run the training on a different machine, please follow the instructions below "
    )
    st.code(
        "# To copy cryosamba, make a zip file of cryosamba and run the following. \n scp cryosamba.zip user_name@remote_server.edu:/path/to/store \n ssh user_name@remote_server.edu && cd /path/to/store \n unzip cryosamba.zip \n \
        \n cd cryosamba/automate \n pip install streamlit \n streamlit run main.py"
    )
    options = select_gpus()
    st.write(
        "If the table shows a list of 0s, you have compatible hardware but NO GPUs. Please connect to a machine that has GPUs. Instructions to ssh into a machine for cryosamba above:"
    )
    # Two-phase flow: first the experiment picker, then (after a rerun with
    # folder_found set) the actual run step.
    if "folder_found" not in st.session_state:
        select_experiment()
    elif st.session_state.folder_found:
        st.write("We will be running training here:")
        if not options or len(options) == 0:
            st.error("you did not select any options!")
            st.stop()
        run_experiment(",".join(options), st.session_state.input_name)
# def main():
# select_experiment_and_run()
# if __name__=="__main__":
# main()
| Python |
2D | kirchhausenlab/Cryosamba | automate/inference_setup.py | .py | 9,424 | 256 | import json
import logging
import os
from functools import wraps
from random import randint
import streamlit as st
from file_selector import get_dir, list_directories_in_directory
from training_setup import handle_exceptions
from logging_config import logger
def folder_exists(folder_path):
    """Return True when `folder_path` exists on disk (file or directory)."""
    return os.path.exists(folder_path)
@handle_exceptions
def make_folder(is_inference=False):
    """Ask for an experiment name and verify its runs/ subfolder exists.

    Checks ../runs/<name>/inference or ../runs/<name>/train depending on
    `is_inference`, then records the outcome in st.session_state
    (folder_found, DEFAULT_NAME, step, inference_dir).
    """
    get_dir()
    if is_inference:
        st.subheader("Inference Folder Check")
        st.write("Enter the name for your experiment:")
        input_name = st.text_input("Experiment Name", "")
        base_path = f"../runs/{input_name}/inference"
    else:
        st.subheader("Experiment Folder Check")
        st.write("Enter the name for your experiment:")
        input_name = st.text_input("Experiment Name", "")
        base_path = f"../runs/{input_name}/train"
    if st.button("Check Folder"):
        if folder_exists(base_path):
            st.success(f"Folder '{base_path}' found.")
            st.session_state.folder_found = True
            st.session_state.DEFAULT_NAME = input_name
            # Drives the setup_inference() state machine to the next screen.
            st.session_state.step = "mandatory_params"
            st.session_state.inference_dir = base_path
        else:
            st.error(f"Folder '{base_path}' not found.")
            st.session_state.folder_found = False
@handle_exceptions
def generate_mandatory_params():
    """Collect the mandatory inference-config fields from the user.

    Stores them in st.session_state.mandatory_params and advances
    st.session_state.step to either "additional_params" or "generate_config".
    """
    get_dir()
    st.subheader("Generate JSON Config")
    st.write("Enter the mandatory details: ")
    with st.container():
        st.markdown("**Training Directory Path**")
        train_dir = st.text_input(
            "train_dir", "/nfs/datasync4/inacio/data/denoising/cryosamba/rota/train/"
        )
        st.markdown(
            "_The name of the folder where the checkpoints were saved (exp-name/train) in your training run._"
        )
        st.markdown("**Data Path**")
        data_path = st.text_input(
            "data_path",
            "/nfs/datasync4/inacio/data/raw_data/cryo/novareconstructions/rotacell_grid1_TS09_ctf_6xBin.rec",
        )
        st.markdown(
            "_Filename (for single 3D file) or folder (for 2D sequence) where the raw data is located._"
        )
        st.markdown("**Inference Directory Path**")
        inference_dir = st.text_input(
            "inference_dir",
            st.session_state.get(
                "inference_dir",
                "/nfs/datasync4/inacio/data/denoising/cryosamba/rota/inference/",
            ),
        )
        st.markdown(
            "_The name of the folder where the denoised stack will be saved (exp-name/inference)._"
        )
        st.markdown("**Maximum Frame Gap**")
        max_frame_gap = st.slider(
            "inference_data.max_frame_gap", min_value=1, max_value=40, value=12
        )
        st.markdown(
            "_The maximum frame gap used for inference (usually two times the value used for training). Explained in the manuscript._"
        )
    if st.button("Next: Add Additional Parameters"):
        st.session_state.mandatory_params = {
            "train_dir": train_dir,
            "data_path": data_path,
            "inference_dir": inference_dir,
            "max_frame_gap": max_frame_gap,
        }
        st.session_state.step = "additional_params"
    elif st.button("Submit"):
        # Skip the optional-parameter screen and go straight to config output.
        st.session_state.mandatory_params = {
            "train_dir": train_dir,
            "data_path": data_path,
            "inference_dir": inference_dir,
            "max_frame_gap": max_frame_gap,
        }
        st.session_state.step = "generate_config"
@handle_exceptions
def generate_additional_params():
    """Optional-parameter screen for the inference config.

    Offers two sections (inference data / inference); each "Save" button
    stashes its values in st.session_state, and "Generate Config" advances
    the state machine to the config-writing step.
    """
    st.subheader("Change Additional Parameters")
    st.markdown("_Select the section you want to update:_")
    with st.container():
        st.button(
            "Inference Data Parameters",
            on_click=lambda: st.session_state.update(
                {"additional_params_section": "inference_data"}
            ),
        )
        st.button(
            "Inference Parameters",
            on_click=lambda: st.session_state.update(
                {"additional_params_section": "inference"}
            ),
        )
    additional_params_section = st.session_state.get("additional_params_section", "")
    if additional_params_section == "inference_data":
        st.write("**Inference Data Parameters**")
        patch_shape_x = st.slider("Patch Shape X", value=256, step=32, max_value=1024)
        patch_shape_y = st.slider("Patch Shape Y", value=256, step=32, max_value=1024)
        patch_overlap_x = st.number_input("Patch Overlap X", min_value=16)
        patch_overlap_y = st.number_input("Patch Overlap Y", min_value=16)
        batch_size = st.number_input("Batch Size", value=32, step=16)
        num_workers = st.number_input("Number of Workers", value=4)
        st.markdown(
            "_X and Y resolution of the patches the model will be trained on. Doesn't need to be square (resolution x = resolution y), but it has to be a multiple of 32._"
        )
        st.markdown(
            "_Number of data points loaded into the GPU at once. Increasing it makes the model train faster (with diminishing returns for large batch size), but requires more GPU memory. Play with it to avoid GPU out of memory errors._"
        )
        if st.button("Save Inference Data Parameters"):
            st.session_state.inference_data_params = {
                "patch_shape": [patch_shape_x, patch_shape_y],
                "patch_overlap": [patch_overlap_x, patch_overlap_y],
                "batch_size": batch_size,
                "num_workers": num_workers,
            }
            st.success("Inference Data Parameters saved")
    if additional_params_section == "inference":
        st.write("**Inference Parameters**")
        output_format = st.radio("Output Format", ["same", "different"])
        load_ckpt_name = st.text_input("Load Checkpoint Name", "")
        pyr_level = st.number_input("Pyr Level", value=3)
        TTA = st.radio("Test-Time Augmentation (TTA)", [True, False])
        compile = st.radio("Compile", [True, False])
        st.markdown(
            "_If true uses test-time augmentation, which makes results slightly better but takes much longer (around 3x) to run._"
        )
        st.markdown(
            "_If true, uses torch.compile for faster training, which is good but takes some minutes to start running the script and it’s somewhat buggy. Recommend using false until you’re comfortable with the code._"
        )
        if st.button("Save Inference Parameters"):
            st.session_state.inference_params = {
                "output_format": output_format,
                "load_ckpt_name": load_ckpt_name,
                "pyr_level": pyr_level,
                "TTA": TTA,
                "compile": compile,
            }
            st.success("Inference Parameters saved")
    if st.button("Generate Config"):
        st.session_state.step = "generate_config"
@handle_exceptions
def generate_config():
    """Merge defaults with user-chosen parameters and write the inference
    config JSON to ../runs/<experiment>/inference_config.json."""
    DEFAULT_NAME = st.session_state.DEFAULT_NAME
    mandatory_params = st.session_state.mandatory_params
    # Setting default values
    inference_data_defaults = {
        "patch_shape": [256, 256],
        "patch_overlap": [16, 16],
        "batch_size": 32,
        "num_workers": 4,
    }
    inference_defaults = {
        "output_format": "same",
        "load_ckpt_name": None,
        "pyr_level": 3,
        "TTA": True,
        "mixed_precision": True,
        "compile": True,
    }
    # User-saved values (if any) override the defaults key-by-key.
    inference_data_params = {
        **inference_data_defaults,
        **st.session_state.get("inference_data_params", {}),
    }
    inference_params = {
        **inference_defaults,
        **st.session_state.get("inference_params", {}),
    }
    base_config = {
        "train_dir": mandatory_params["train_dir"],
        "data_path": [mandatory_params["data_path"]],
        "inference_dir": mandatory_params["inference_dir"],
        "inference_data": {
            "max_frame_gap": mandatory_params["max_frame_gap"],
            **inference_data_params,
        },
        "inference": {**inference_params},
    }
    config_file = f"../runs/{DEFAULT_NAME}/inference_config.json"
    with open(config_file, "w") as f:
        json.dump(base_config, f, indent=4)
    st.success(f"Inference config file generated successfully at {config_file}")
    st.session_state.config_generated = True
@handle_exceptions
def setup_inference() -> None:
    """Entry point for the inference-config wizard.

    Drives a small state machine over st.session_state.step:
    folder check -> mandatory params -> optional params -> config file.
    """
    st.title("Cryosamba Inference Setup Interface")
    st.write("Welcome to Cryosamba Inference Setup Interface!")
    if "folder_found" not in st.session_state:
        make_folder(is_inference=True)
    elif st.session_state.folder_found:
        step = st.session_state.get("step", "mandatory_params")
        if step == "mandatory_params":
            generate_mandatory_params()
        elif step == "additional_params":
            generate_additional_params()
        elif step == "generate_config":
            generate_config()
        else:
            st.success(
                "Configuration already generated. No further modifications allowed."
            )
            if st.button("Exit Setup"):
                st.stop()
    else:
        st.error("Please ensure the folder exists before proceeding.")
        make_folder(is_inference=True)
if __name__ == "__main__":
setup_inference()
| Python |
2D | kirchhausenlab/Cryosamba | automate/test.py | .py | 5,022 | 132 | import logging
import os
import subprocess
import sys
from functools import wraps
import streamlit as st
from training_setup import handle_exceptions
from logging_config import logger
@handle_exceptions
def is_conda_installed() -> bool:
    """Return True if a `conda` executable responds to `--version`."""
    try:
        subprocess.run(
            ["conda", "--version"],
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    except (FileNotFoundError, subprocess.CalledProcessError):
        # Either conda is not on PATH or it exited non-zero.
        return False
    return True
@handle_exceptions
def is_env_active(env_name) -> bool:
    """Return True if `env_name` appears in the `conda env list` output."""
    result = subprocess.run(
        "conda env list", capture_output=True, text=True, shell=True
    )
    return f"{env_name}" in result.stdout
def run_command(command, shell=True):
    """Run `command` in a subprocess, returning its (stdout, stderr) text.

    Non-zero exits are reported in the Streamlit UI and logged, not raised.
    """
    proc = subprocess.Popen(
        command,
        shell=shell,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,
    )
    out, err = proc.communicate()
    if proc.returncode != 0:
        message = f"Error executing command: {command}\nError: {err}"
        st.error(message)
        logger.error(message)
    return out, err
@handle_exceptions
def setup_conda():
    """Install Miniconda if it is not already present.

    Linux/macOS download and run the matching Miniconda shell installer;
    Windows downloads the official .exe and runs it in silent mode.
    NOTE(review): subprocess results are not checked, so success is not
    verified here.
    """
    st.subheader("Conda Installation")
    if is_conda_installed():
        st.write("Conda is already installled.")
        return
    if sys.platform.startswith("linux") or sys.platform == "darwin":
        st.write("Conda is not installed. Installing conda ....")
        # Bug fix: the macOS branch previously downloaded and ran the *Linux*
        # installer; pick the installer matching the platform instead
        # (mirrors install_conda in cryosamba_setup.py).
        installer = (
            "Miniconda3-latest-Linux-x86_64.sh"
            if sys.platform.startswith("linux")
            else "Miniconda3-latest-MacOSX-x86_64.sh"
        )
        subprocess.run(
            f"wget https://repo.anaconda.com/miniconda/{installer}",
            shell=True,
        )
        subprocess.run(f"chmod +x {installer}", shell=True)
        subprocess.run(f"bash {installer}", shell=True)
        subprocess.run("export PATH=~/miniconda3/bin:$PATH", shell=True)
        subprocess.run("source ~/.bashrc", shell=True)
    else:
        # Windows: PowerShell download, then silent install (/S).
        run_command(
            "powershell -Command \"(New-Object Net.WebClient).DownloadFile('https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe', 'Miniconda3-latest-Windows-x86_64.exe')\""
        )
        run_command(
            'start /wait "" Miniconda3-latest-Windows-x86_64.exe /InstallationType=JustMe /AddToPath=1 /RegisterPython=0 /S /D=%UserProfile%\\Miniconda3'
        )
@handle_exceptions
def setup_environment(env_name):
    """Create/activate the conda env `env_name`, then show the pip-install
    commands the user must copy into a fresh terminal themselves."""
    st.subheader(f"Setting up Conda Environment: {env_name}")
    cmd = f"conda init && conda activate {env_name}"
    if is_env_active(env_name):
        st.write(f"Environment '{env_name}' exists.")
        subprocess.run(cmd, shell=True)
    else:
        st.write(f"Creating conda environment: {env_name}")
        subprocess.run(f"conda create --name {env_name} python=3.11 -y", shell=True)
        subprocess.run(cmd, shell=True)
    st.success("Environment has been created", icon="✅")
    st.success("**please copy the command below in the terminal.**", icon="✅")
    st.write(
        "Say you downloaded cryosamba in your downloads folder, open a NEW terminal window and run the following commands:"
    )
    st.code("cd downloads/cryosamba/automate")
    # Full dependency-install one-liner for the user to copy.
    cmd = f"conda init && sleep 3 && source ~/.bashrc && conda activate {env_name} && pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 && pip install tifffile mrcfile easydict loguru tensorboard streamlit pipreqs cupy-cuda11x"
    st.code(cmd)
@handle_exceptions
def export_env():
    """Export the active conda environment to ../environment.yml."""
    st.subheader("Exporting Conda Environment")
    subprocess.run("conda env export > environment.yml", shell=True)
    subprocess.run("mv environment.yml ../", shell=True)
    st.write("Environment exported and moved to root directory.")
@handle_exceptions
def setup_environment_for_cryosamba() -> None:
    """Top-level setup page: instructions plus buttons for each setup step
    (install conda, create the env, optionally export it)."""
    st.title("Cryosamba Setup Interface")
    st.subheader("Welcome to Cryosamba Setup Interface!")
    st.write(
        "Please take some time to read the instructions and in the case of failures refer to the README for the contact information of relevant parties. *Refer to the video for step by step instructions*"
    )
    lst = [
        "|STEP 1| : **Setup Conda** - if already installed, it shows you that its installed",
        "|STEP 2|: **Make an Environment** - Creates an environment and gives you instructions on which commands to copy",
        "|STEP 3|: **OPTIONAL, Export the Environment** - for programmers who want to look at installed packages",
    ]
    for elem in lst:
        st.markdown(elem)
    if st.button("Setup Conda"):
        setup_conda()
    env_name = st.text_input("Enter environment name", "cryosamba")
    if st.button("2) Setup Environment"):
        setup_environment(env_name)
    if st.button("3) (Optional) Export Environment"):
        export_env()
| Python |
2D | kirchhausenlab/Cryosamba | automate/run_training.py | .py | 5,004 | 118 | import logging
import os
import subprocess
import webbrowser
from functools import wraps
from typing import List
import streamlit as st
from file_selector import get_dir, list_directories_in_directory
from training_setup import handle_exceptions
from logging_config import logger
@handle_exceptions
def select_gpus() -> List[str]:
    """Show current GPU utilization (via nvidia-smi) and let the user pick GPUs.

    Returns the selected GPU indices as strings; "0", "1", "2" are
    pre-selected by default.
    """
    st.text("The following GPUs are not in use, select ones you one want to use! ")
    with st.echo():
        command = "nvidia-smi && nvidia-smi --query-gpu=index,utilization.gpu,memory.free,memory.total,memory.used --format=csv"
        res = subprocess.run(command, shell=True, capture_output=True, text=True)
        st.code(res.stdout)
    options = st.multiselect(
        "Select the GPUs here",
        ["0", "1", "2", "3", "4", "5", "6", "7"],
        ["0", "1", "2"],
    )
    st.write("You selected:", options)
    # print(type(options), options)
    return options
@handle_exceptions
def run_experiment(gpus: str, folder_path: str) -> None:
    """Display the torchrun training command and TensorBoard instructions.

    Args:
        gpus: comma-separated GPU indices, e.g. "0,1".
        folder_path: experiment name under ../runs/.

    The training command itself is only shown for the user to copy; the
    "View Training" button does launch a local TensorBoard process.
    """
    print(f"{folder_path}")
    # One torchrun process per selected GPU: count the comma-separated list.
    cmd = f"CUDA_VISIBLE_DEVICES={gpus} torchrun --standalone --nproc_per_node=$(echo {gpus} | tr ',' '\n' | wc -l) ../train.py --config ../runs/{folder_path}/train_config.json"
    st.text(f"Do you want to run the command: {cmd}?")
    selection = st.radio("Type y/n: ", ["y", "n"], index=None)
    if selection == "n":
        st.write("cancelled")
    elif selection == "y":
        st.write(
            "Dear Reader, copy this command onto your terminal or powershell to train the model, and follow the prompts!"
        )
        st.write(
            "Please open up a new terminal on your machine and navigate to the cryosamba/automate folder. Then run this command"
        )
        st.code(cmd)
        st.code(
            f"SAMPLE COMMAND LOOKS LIKE: \n CUDA_VISIBLE_DEVICES=0,1 torchrun --standalone --nproc_per_node=2 train.py --config configs/your_config_train.json \n: "
        )
        st.write(
            "If you want a cool way to monitor the losses, try tensorboard, copy paste these commands in a new terminal WHEN the training is \
            successfully running. Note there is no output, only checkpoints in the training folder. Outputs are produced only for inferences, and can be \
            viewed using Fiji or ImageJ. **Please have the conda environment active for running the training and opening tensorboard"
        )
        st.markdown(
            """
        TensorBoard can be used to monitor the progress of the training losses.
        1. Open a terminal window inside a graphical interface (e.g., XDesk).
        2. Activate the environment and run:
        ```bash
        tensorboard --logdir /path/to/dir/Cryosamba/runs/exp-name/train
        ```
        3. Open localhost:6006 in the browser (Chrome or firefox)
        4. Use the slide under SCALARS to smooth noise plots
        """
        )
        if st.button("View Training"):
            # Launch TensorBoard in the background and open the browser on it.
            cmd = f"tensorboard --logdir ../runs/{folder_path}/train"
            subprocess.Popen(cmd, shell=True)
            webbrowser.open("http://localhost:6006")
@handle_exceptions
def select_experiment() -> None:
    """Prompt for an experiment name and record whether ../runs/<name> exists.

    Sets st.session_state.folder_found (bool) and, on success,
    st.session_state.input_name for the subsequent training-run step.
    """
    get_dir()
    st.write("Please enter the experiment you want to run: ")
    input_name = st.text_input("Experiment Name", "")
    base_path = f"../runs/{input_name}"
    if st.button("Check folder"):
        if os.path.exists(base_path):
            st.success(f"Folder {base_path} found")
            st.session_state.folder_found = True
            st.session_state.input_name = input_name
        else:
            st.error(f"Folder {base_path} not found")
            # Bug fix: previously wrote st.session_state.folder, a key that
            # nothing reads; callers branch on st.session_state.folder_found.
            st.session_state.folder_found = False
@handle_exceptions
def select_experiment_and_run_training():
    """Training-runner page: pick GPUs, pick an experiment, show the command.

    Requires a CUDA-capable machine; otherwise the page only shows
    instructions for copying Cryosamba to a GPU host.
    """
    st.header("Welcome to the CryoSamba Training Runner")
    st.write(
        "Please note that you *need a GPU to run cryosamba, if you cannot see a graph or a table or GPUs **AFTER YOU CHOOSE YOUR Experiment**, your machine does not support cryosamba.* \
        If you want to run the training on a different machine, please follow the instructions below "
    )
    st.code(
        "# To copy cryosamba, make a zip file of cryosamba and run the following. \n scp cryosamba.zip user_name@remote_server.edu:/path/to/store \n ssh user_name@remote_server.edu && cd /path/to/store \n unzip cryosamba.zip \n \
        \n cd cryosamba/automate \n pip install streamlit \n streamlit run main.py"
    )
    options = select_gpus()
    st.write(
        "If the table shows a list of 0s, you have compatible hardware but NO GPUs. Please connect to a machine that has GPUs. Instructions to ssh into a machine for cryosamba above:"
    )
    # Two-phase flow: experiment picker first, then (after a rerun with
    # folder_found set) the actual run step.
    if "folder_found" not in st.session_state:
        select_experiment()
    elif st.session_state.folder_found:
        st.write("We will be running training here:")
        if not options or len(options) == 0:
            st.error("you did not select any options!")
            st.stop()
        run_experiment(",".join(options), st.session_state.input_name)
| Python |
2D | kirchhausenlab/Cryosamba | automate/main.py | .py | 1,747 | 56 | import logging
import os
from test import setup_environment_for_cryosamba
import streamlit as st
from inference_setup import setup_inference
from run_inference import select_experiment_and_run
from run_training import select_experiment_and_run_training
from training_setup import setup_cryosamba_and_training
from logging_config import logger
def main():
    """Entry page of the CryoSamba Streamlit app.

    Renders the sidebar mode selector, dispatches to the page for the chosen
    mode, then draws the step-by-step workflow overview in the sidebar.
    """
    st.sidebar.title("Cryosamba Navigation")
    app_mode = st.sidebar.selectbox(
        "Choose the app mode",
        [
            "Choose your options!",
            "Setup Environment",
            "Setup Training",
            "Run Training",
            "Setup Inference",
            "Run Inference",
        ],
    )
    # Map each actionable mode onto its page-rendering callable.
    dispatch = {
        "Setup Environment": setup_environment_for_cryosamba,
        "Setup Training": setup_cryosamba_and_training,
        "Run Training": select_experiment_and_run_training,
        "Setup Inference": setup_inference,
        "Run Inference": select_experiment_and_run,
    }
    if app_mode == "Choose your options!":
        st.header(
            "Please look at the options from the dropdown to either setup, train or run inferences. CAREFUL, IT WILL ONLY RUN ON A WINDOWS OR A LINUX"
        )
    elif app_mode in dispatch:
        dispatch[app_mode]()
    st.sidebar.title("Workflow Overview")
    st.sidebar.markdown("## Step-by-Step guide")
    workflow_steps = [
        "Setup Environment",
        "Setup Training",
        "Run Training",
        "Setup Inference",
        "Run Inference",
    ]
    # Numbered guide, e.g. "1) Setup Environment".
    for position, label in enumerate(workflow_steps, start=1):
        st.sidebar.write(f"{position}) {label}")
# Entry point: run the Streamlit app; log any uncaught exception with its
# traceback before re-raising so the process still exits non-zero.
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        logger.exception("An error occurred: %s", str(e))
        raise
| Python |
2D | kirchhausenlab/Cryosamba | automate/training_setup.py | .py | 13,235 | 361 | import json
import logging
import os
from functools import wraps
from random import randint
import streamlit as st
from file_selector import get_dir, list_directories_in_directory
from logging_config import logger
def handle_exceptions(input_func):
    """Decorator that logs, surfaces, and re-raises exceptions from UI callbacks.

    Any exception escaping ``input_func`` is logged, shown in the Streamlit UI
    via ``st.error``, and re-raised as a ``RuntimeError`` chained to the
    original exception so the full traceback is preserved.
    """

    @wraps(input_func)
    def wrapper(*args, **kwargs):
        try:
            return input_func(*args, **kwargs)
        except Exception as e:
            # Lazy %-formatting: the message is only built if the record is emitted.
            logger.error(
                "Error in %s: %s| Code: %s ",
                input_func.__name__,
                str(e),
                input_func.__code__,
            )
            st.error(f"An error occurred: {str(e)}")
            # Bug fix: chain with ``from e`` so the original traceback is kept.
            raise RuntimeError(
                f"The function {input_func.__name__} failed with the error {str(e)} | Code: {input_func.__code__}"
            ) from e

    return wrapper
@handle_exceptions
def make_folder():
    """Render the experiment-creation form and build the run directories.

    On click, creates ../runs/<name>/{train,inference} and records the chosen
    name plus the next wizard step in ``st.session_state``.
    """
    get_dir()
    st.subheader("Experiment Folder Creation")
    st.write("Enter the name for your experiment:")
    input_name = st.text_input("Experiment Name", "")
    if not st.button("Create Experiment Folder"):
        return
    # Fall back to a randomly numbered name when the field is left blank.
    exp_name = input_name if input_name else f"TEST_NAME_EXP-{randint(1, 100)}"
    st.write(f"Creating experiment folders for '{exp_name}'...")
    try:
        base_path = f"../runs/{exp_name}"
        for subdir in ("train", "inference"):
            os.makedirs(f"{base_path}/{subdir}", exist_ok=True)
        st.success(f"Experiment folders created successfully.")
        st.session_state.DEFAULT_NAME = exp_name
        st.session_state.step = "mandatory_params"
    except Exception as e:
        st.error(f"Error creating experiment folders: {str(e)}")
@handle_exceptions
def generate_mandatory_params():
    """Collect the mandatory config fields and advance the setup wizard.

    Stores ``train_dir``, ``data_path`` and ``max_frame_gap`` in
    ``st.session_state.mandatory_params`` and sets the next wizard step.
    """
    get_dir()
    st.subheader("Generate JSON Config")
    st.write("Enter the mandatory details: ")
    with st.container():
        st.markdown("**Training Directory Path**")
        train_dir = st.text_input(
            "train_dir", "/nfs/datasync4/inacio/data/denoising/cryosamba/rota/train/"
        )
        st.markdown(
            "_The name of the folder where the checkpoints will be saved (exp-name/train)._"
        )
        st.markdown("**Data Path**")
        data_path = st.text_input(
            "data_path",
            "/nfs/datasync4/inacio/data/raw_data/cryo/novareconstructions/rotacell_grid1_TS09_ctf_3xBin.rec",
        )
        st.markdown(
            "_Filename (for single 3D file) or folder (for 2D sequence) where the raw data is located. This can be a list, in case you want to train on several volumes at the same time._"
        )
        st.markdown("**Maximum Frame Gap**")
        max_frame_gap = st.slider(
            "train_data.max_frame_gap", min_value=1, max_value=20, value=6
        )
        st.markdown(
            "_The maximum frame gap used for training. Explained in the manuscript._"
        )
    # Both buttons store the same payload; only the next step differs.
    mandatory = {
        "train_dir": train_dir,
        "data_path": data_path,
        "max_frame_gap": max_frame_gap,
    }
    if st.button("Next: Add Additional Parameters"):
        st.session_state.mandatory_params = mandatory
        st.session_state.step = "additional_params"
    elif st.button("Submit"):
        st.session_state.mandatory_params = mandatory
        st.session_state.step = "generate_config"
@handle_exceptions
def generate_additional_params():
    """Optional-parameter editor for the training config.

    Each section button records its name in
    ``st.session_state.additional_params_section``; the matching form is then
    rendered and its "Save" button stores the values in a per-section
    session-state dict consumed later by ``generate_config``.
    """
    st.subheader("Change Additional Parameters")
    st.markdown("_Select the section you want to update:_")
    with st.container():
        st.button(
            "Train Data Parameters",
            on_click=lambda: st.session_state.update(
                {"additional_params_section": "train_data"}
            ),
        )
        st.button(
            "Train Parameters",
            on_click=lambda: st.session_state.update(
                {"additional_params_section": "train"}
            ),
        )
        st.button(
            "Optimizer Parameters",
            on_click=lambda: st.session_state.update(
                {"additional_params_section": "optimizer"}
            ),
        )
        st.button(
            "Biflownet Parameters",
            on_click=lambda: st.session_state.update(
                {"additional_params_section": "biflownet"}
            ),
        )
        st.button(
            "Fusionnet Parameters",
            on_click=lambda: st.session_state.update(
                {"additional_params_section": "fusionnet"}
            ),
        )
    additional_params_section = st.session_state.get("additional_params_section", "")
    # --- Train data section: patching / loading parameters ---
    if additional_params_section == "train_data":
        st.write("**Train Data Parameters**")
        patch_shape_x = st.slider("Patch Shape X", value=256, step=32, max_value=1024)
        patch_shape_y = st.slider("Patch Shape Y", value=256, step=32, max_value=1024)
        patch_overlap_x = st.number_input("Patch Overlap X", min_value=16)
        patch_overlap_y = st.number_input("Patch Overlap Y", min_value=16)
        split_ratio = st.number_input("Split Ratio", value=0.95)
        batch_size = st.number_input("Batch Size", value=32, step=16)
        num_workers = st.number_input("Number of Workers", value=4)
        st.markdown(
            "_X and Y resolution of the patches the model will be trained on. Doesn't need to be square (resolution x = resolution y), but it has to be a multiple of 32._"
        )
        st.markdown(
            "_Number of data points loaded into the GPU at once. Increasing it makes the model train faster (with diminishing returns for large batch size), but requires more GPU memory. Play with it to avoid GPU out of memory errors._"
        )
        if st.button("Save Train Data Parameters"):
            st.session_state.train_data_params = {
                "patch_shape": [patch_shape_x, patch_shape_y],
                "patch_overlap": [patch_overlap_x, patch_overlap_y],
                "split_ratio": split_ratio,
                "batch_size": batch_size,
                "num_workers": num_workers,
            }
            st.success("Train Data Parameters saved")
    # --- Train loop section: logging / checkpoint / duration ---
    if additional_params_section == "train":
        st.write("**Train Parameters**")
        print_freq = st.number_input("Print Frequency", value=100)
        save_freq = st.number_input("Save Frequency", value=1000)
        val_freq = st.number_input("Val Frequency", value=1000)
        num_iters = st.number_input("Number of Iterations", value=200000)
        warmup_iters = st.number_input("Warmup Iterations", value=300)
        compile = st.radio("Compile", [True, False])
        st.markdown(
            "_If true, uses torch.compile for faster training, which is good but takes some minutes to start running the script and it’s somewhat buggy. Recommend using false until you’re comfortable with the code._"
        )
        st.markdown(
            "_Length of the training run. The default value (200k) is a very long time, but you can halt the training whenever you feel it’s fine (see Tensorboard below)._"
        )
        if st.button("Save Train Parameters"):
            st.session_state.train_params = {
                "print_freq": print_freq,
                "save_freq": save_freq,
                "val_freq": val_freq,
                "num_iters": num_iters,
                "warmup_iters": warmup_iters,
                "compile": compile,
            }
            st.success("Train Parameters saved")
    # --- Optimizer section (Adam-style hyperparameters) ---
    if additional_params_section == "optimizer":
        st.write("**Optimizer Parameters**")
        lr = st.number_input("Learning Rate", value=2e-4, format="%.6f")
        lr_decay = st.number_input("Learning Rate Decay", value=0.99995, format="%.8f")
        weight_decay = st.number_input("Weight Decay", value=0.0001, format="%.8f")
        epsilon = st.number_input("Epsilon", value=1e-8, format="%.10f")
        beta1 = st.number_input("Beta 1", value=0.9, format="%.5f")
        beta2 = st.number_input("Beta 2", value=0.999, format="%.5f")
        if st.button("Save Optimizer Parameters"):
            st.session_state.optimizer_params = {
                "lr": lr,
                "lr_decay": lr_decay,
                "weight_decay": weight_decay,
                "epsilon": epsilon,
                "betas": [beta1, beta2],
            }
            st.success("Optimizer Parameters saved")
    # --- Biflownet (optical-flow network) section ---
    if additional_params_section == "biflownet":
        st.write("**Biflownet Parameters**")
        pyr_dim = st.number_input("Pyr Dimension", value=24)
        pyr_level = st.number_input("Pyr Level", value=3)
        corr_radius = st.number_input("Correlation Radius", value=4)
        kernel_size = st.number_input("Kernel Size", value=3)
        fix_params = st.radio("Fix Params", [True, False])
        if st.button("Save Biflownet Parameters"):
            st.session_state.biflownet_params = {
                "pyr_dim": pyr_dim,
                "pyr_level": pyr_level,
                "corr_radius": corr_radius,
                "kernel_size": kernel_size,
                "fix_params": fix_params,
            }
            st.success("Biflownet Parameters saved")
    # --- Fusionnet section ---
    if additional_params_section == "fusionnet":
        st.write("**Fusionnet Parameters**")
        num_channels = st.number_input("Number of Channels", value=16)
        if st.button("Save Fusionnet Parameters"):
            st.session_state.fusionnet_params = {"num_channels": num_channels}
            st.success("Fusionnet Parameters saved")
    # Advance the wizard to the config-writing step.
    if st.button("Generate Config"):
        st.session_state.step = "generate_config"
@handle_exceptions
def generate_config():
    """Assemble the final training config and write it to disk.

    Per-section defaults are overridden key-by-key with whatever the user
    saved in ``st.session_state``; the merged result is dumped to
    ``../runs/<experiment>/train_config.json``.
    """
    experiment_name = st.session_state.DEFAULT_NAME
    mandatory_params = st.session_state.mandatory_params
    # Per-section fallback values, keyed by their session-state names.
    section_defaults = {
        "train_data_params": {
            "patch_shape": [256, 256],
            "patch_overlap": [16, 16],
            "split_ratio": 0.95,
            "batch_size": 32,
            "num_workers": 4,
        },
        "train_params": {
            "print_freq": 100,
            "save_freq": 1000,
            "val_freq": 1000,
            "num_iters": 200000,
            "warmup_iters": 300,
            "compile": False,
        },
        "optimizer_params": {
            "lr": 2e-4,
            "lr_decay": 0.99995,
            "weight_decay": 0.0001,
            "epsilon": 1e-8,
            "betas": [0.9, 0.999],
        },
        "biflownet_params": {
            "pyr_dim": 24,
            "pyr_level": 3,
            "corr_radius": 4,
            "kernel_size": 3,
            "fix_params": False,
        },
        "fusionnet_params": {"num_channels": 16},
    }
    # User-saved values win over defaults, section by section.
    merged = {
        key: {**defaults, **st.session_state.get(key, {})}
        for key, defaults in section_defaults.items()
    }
    base_config = {
        "train_dir": mandatory_params["train_dir"],
        "data_path": [mandatory_params["data_path"]],
        "train_data": {
            "max_frame_gap": mandatory_params["max_frame_gap"],
            **merged["train_data_params"],
        },
        "train": {
            **merged["train_params"],
            "load_ckpt_path": None,
            "mixed_precision": True,
        },
        "optimizer": {**merged["optimizer_params"]},
        "biflownet": {
            **merged["biflownet_params"],
            "warp_type": "soft_splat",
            "padding_mode": "reflect",
        },
        "fusionnet": {
            **merged["fusionnet_params"],
            "padding_mode": "reflect",
            "fix_params": False,
        },
    }
    config_file = f"../runs/{experiment_name}/train_config.json"
    with open(config_file, "w") as f:
        json.dump(base_config, f, indent=4)
    st.success(f"Config file generated successfully at {config_file}")
    st.session_state.config_generated = True
@handle_exceptions
def setup_cryosamba_and_training() -> None:
    """Drive the multi-step training-setup wizard.

    The current step lives in ``st.session_state.step``; each rerun renders
    the page for that step until the config has been generated.
    """
    st.title("Cryosamba Setup Interface")
    st.write(
        "Welcome to the training setup for cryosamba. Here you can set the parameters for your machine learning configuration."
    )
    st.write(
        "*Note that you have to hit a button twice to see results. The first click shows you a preview of what will happen and the next click runs it*"
    )
    if "DEFAULT_NAME" not in st.session_state:
        # No experiment yet: show the folder-creation form first.
        make_folder()
        return
    step = st.session_state.get("step", "mandatory_params")
    step_pages = {
        "mandatory_params": generate_mandatory_params,
        "additional_params": generate_additional_params,
        "generate_config": generate_config,
    }
    page = step_pages.get(step)
    if page is not None:
        page()
    else:
        st.success(
            "Configuration already generated. No further modifications allowed."
        )
        if st.button("Exit Setup"):
            st.stop()
# def main():
# setup_cryosamba_and_training()
# if __name__ == "__main__":
# main()
| Python |
2D | kirchhausenlab/Cryosamba | automate/scripts/install_cryosamba.sh | .sh | 1,999 | 70 | #!/bin/bash
# Install Miniconda when no conda binary is already on PATH.
check_conda() {
    if command -v conda &> /dev/null; then
        echo "Conda is already installed"
        return
    fi
    echo "Installing conda, please hit yes and enter to install (this may take 3-4 minutes)"
    # Download and run the Miniconda installer.
    wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
    chmod +x Miniconda3-latest-Linux-x86_64.sh
    bash Miniconda3-latest-Linux-x86_64.sh
    # Make the fresh install visible in this shell and clean up the installer.
    export PATH=~/miniconda3/bin:$PATH
    rm Miniconda3-latest-Linux-x86_64.*
    source ~/.bashrc
    echo "Conda successfully installed"
}
# Function to create and set up the conda environment
# Create the "cryosamba" conda environment unless it already exists.
setup_environment() {
    # Bug fix: the name previously contained a trailing space ("cryosamba "),
    # which broke the grep existence check and disagreed with activate_env.
    env_name="cryosamba"
    # Check if the environment already exists
    if conda env list | grep -q "^$env_name\s"; then
        echo "Conda environment '$env_name' already exists. Skipping creation."
    else
        echo "Creating conda environment: $env_name"
        conda create --name $env_name python=3.11 -y
    fi
}
# Activate the cryosamba environment and install the Python dependencies
# (CUDA 11.8 PyTorch wheels plus the runtime/tooling packages).
activate_env() {
    env_name="cryosamba"
    echo "Activating conda environment: $env_name"
    # Sourcing the activate script works in non-interactive shells where
    # `conda activate` would not.
    source ~/miniconda3/bin/activate $env_name
    echo "Installing PyTorch"
    pip3 install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/cu118
    echo "Installing other dependencies"
    pip install tifffile mrcfile easydict loguru tensorboard cupy-cuda11x streamlit typer
    echo "Environment setup complete"
}
# Snapshot the active environment to environment.yml at the repo root so it
# can be recreated elsewhere (used by update_env.sh).
export_env(){
    conda env export > environment.yml
    mv environment.yml ../../
}
# Orchestrate the full installation: conda, environment, packages, export.
main(){
    # check conda, if it doesnt exist install it
    echo "*** CRYOSAMBA INSTALLATION ***"
    echo "* Installing Conda"
    check_conda
    echo "* Creating the CryoSamba environment *"
    setup_environment
    echo "* Installing required libraries *"
    activate_env
    echo "* Exporting environment file *"
    export_env
}
main
| Shell |
2D | kirchhausenlab/Cryosamba | automate/scripts/install_requirements_txt_for_ref.sh | .sh | 101 | 7 | #!/bin/bash
# Generate a requirements.txt for the automate/ package using pipreqs.
install_requirements(){
    pip3 install pipreqs
    pipreqs ../.
}
install_requirements
| Shell |
2D | kirchhausenlab/Cryosamba | automate/scripts/train_data.sh | .sh | 1,392 | 55 | #!/bin/bash
# Show current GPU utilisation and ask which GPU indices to use.
# Stores the comma-separated reply in the global $gpu_indices.
select_gpus(){
    echo "The following GPU's are currently not in use"
    nvidia-smi && nvidia-smi --query-gpu=index,utilization.gpu,memory.free,memory.total,memory.used --format=csv
    echo "--"
    echo ""
    echo "Enter which GPU you want (seperate using commas, e.g. - 2,3)"
    read -r gpu_indices
}
# Prompt for an experiment name (stored in the global $EXP_NAME) and verify
# both the run folder and its train_config.json exist; exit 1 otherwise.
select_experiment() {
    echo "Enter the name of the experiment you want to run:"
    read -r EXP_NAME
    if [ ! -d "../../runs/$EXP_NAME" ]; then
        echo "Experiment does not exist, please make one! You can run setup_experiment.sh to do so"
        exit 1
    fi
    if [ ! -f "../../runs/$EXP_NAME/train_config.json" ]; then
        echo "../../runs/$EXP_NAME/train_config.json"
        echo "config does not exist, please make one! You can run setup_experiment.sh to do so"
        exit 1
    fi
}
# Build the torchrun training command for the chosen GPUs/experiment and run
# it after user confirmation. Uses the globals $gpu_indices and $EXP_NAME.
command_construct(){
    # Construct the command; nproc_per_node is the number of selected GPUs.
    cmd="CUDA_VISIBLE_DEVICES=$gpu_indices torchrun --standalone --nproc_per_node=$(echo $gpu_indices | tr ',' '\n' | wc -l) ../../train.py --config ../../runs/$EXP_NAME/train_config.json"
    echo "Do you want to run the command $cmd?"
    echo "--"
    echo "Type y/n:"
    read -r selection
    # Bug fix: quote $selection — an empty reply previously made the [ ] test
    # fail with "unary operator expected" instead of being compared.
    if [ "$selection" = "n" ]; then
        echo "cancelled!!"
    else
        echo "running on GPUs, $cmd"
        eval "$cmd"
    fi
}
# Training launcher: pick GPUs, pick the experiment, then run torchrun.
main() {
    # Select the GPUs
    select_gpus
    # Select experiment to run
    select_experiment
    # Eval command
    command_construct
}
main
| Shell |
2D | kirchhausenlab/Cryosamba | automate/scripts/setup_experiment_training.sh | .sh | 3,569 | 140 | #!/bin/bash
# Prompt until the user supplies a non-empty, unused experiment name, then
# store it in the global $DEFAULT_NAME.
make_folder() {
    while true; do
        echo "What do you want to name your experiment, type below: "
        read -r input_name
        if [ -z "$input_name" ]; then
            echo "PLEASE ENTER A NAME, experiment name cannot be empty"
        elif [ -d "../../runs/$input_name" ]; then
            # Bug fix: experiments are created under ../../runs/ (see
            # generate_train_and_test_paths), but the old check inspected
            # ../../$input_name and never detected an existing experiment.
            echo "Experiment already exists, please choose a different name"
        else
            DEFAULT_NAME=$input_name
            echo "$DEFAULT_NAME folder made"
            break
        fi
    done
}
# Make train and inference folders
# Create the train/ and inference/ folders for the experiment named by the
# global $DEFAULT_NAME.
generate_train_and_test_paths(){
    mkdir -p "../../runs/$DEFAULT_NAME/train"
    mkdir -p "../../runs/$DEFAULT_NAME/inference"
}
# Build train_config.json for the experiment in $DEFAULT_NAME: start from a
# baseline config (heredoc below) and merge in the user's answers with jq.
generate_config() {
    base_config=$(cat << EOL
{
    "train_data": {
        "max_frame_gap": 6,
        "patch_overlap": [
            16,
            16
        ],
        "patch_shape":[
            256,
            256
        ],
        "split_ratio": 0.95,
        "num_workers": 4
    },
    "train": {
        "load_ckpt_path": null,
        "print_freq": 100,
        "save_freq": 1000,
        "val_freq": 1000,
        "warmup_iters": 300,
        "mixed_precision": true,
        "compile": false
    },
    "optimizer": {
        "lr": 2e-4,
        "lr_decay": 0.99995,
        "weight_decay": 0.0001,
        "epsilon": 1e-08,
        "betas": [
            0.9,
            0.999
        ]
    },
    "biflownet": {
        "pyr_dim": 24,
        "pyr_level": 3,
        "corr_radius": 4,
        "kernel_size": 3,
        "warp_type": "soft_splat",
        "padding_mode": "reflect",
        "fix_params": false
    },
    "fusionnet": {
        "num_channels": 16,
        "padding_mode": "reflect",
        "fix_params": false
    }
}
EOL
)
    # Re-prompt until a non-empty data path is entered.
    while true; do
        echo "Enter the data path:"
        read -r data_path
        if [ -n "$data_path" ]; then
            break
        else
            echo "Data path cannot be empty. Please enter a valid path."
        fi
    done
    # Remaining answers fall back to defaults when the user just hits Enter.
    echo "Enter the maximum frame gap (press Enter for default: 6):"
    read -r max_frame_gap
    max_frame_gap=${max_frame_gap:-6}
    echo "Enter the number of iterations (press Enter for default: 200000):"
    read -r num_iters
    num_iters=${num_iters:-200000}
    echo "Enter the batch size (press Enter for default: 32):"
    read -r batch_size
    batch_size=${batch_size:-32}
    config_file="../../runs/$DEFAULT_NAME/train_config.json"
    train_dir="../$DEFAULT_NAME/train"
    # Use jq to merge the base config with user inputs
    echo "$base_config" | jq \
        --arg data_path "$data_path" \
        --arg train_dir "$train_dir"\
        --argjson max_frame_gap "$max_frame_gap" \
        --argjson num_iters "$num_iters" \
        --argjson batch_size "$batch_size" \
        '. + {
            "train_dir":$train_dir,
            "data_path": [$data_path],
            "train_data": (.train_data + {
                "max_frame_gap": $max_frame_gap,
                "batch_size": $batch_size
            }),
            "train": (.train + {
                "num_iters": $num_iters
            })
        }' > "$config_file"
    echo "Config file generated at $config_file"
}
# Experiment setup: name the run, create its folders, write its config.
main (){
    # Generate a folder (fallback name, overwritten by make_folder's prompt)
    RAND_NUM=$((1+$RANDOM %100))
    DEFAULT_NAME=TEST_NAME_EXP-$RAND_NUM
    make_folder
    # make the folders for paths
    generate_train_and_test_paths
    # Main script execution
    generate_config
}
main
| Shell |
2D | kirchhausenlab/Cryosamba | automate/scripts/run_inference.sh | .sh | 1,398 | 55 | #!/bin/bash
# Show current GPU utilisation and ask which GPU indices to use.
# Stores the comma-separated reply in the global $gpu_indices.
select_gpus(){
    echo "The following GPU's are currently not in use"
    nvidia-smi && nvidia-smi --query-gpu=index,utilization.gpu,memory.free,memory.total,memory.used --format=csv
    echo "--"
    echo ""
    echo "Enter which GPU you want (seperate using commas, e.g. - 2,3)"
    read -r gpu_indices
}
# Prompt for an experiment name (stored in the global $EXP_NAME) and verify
# both the run folder and its inference_config.json exist; exit 1 otherwise.
select_experiment() {
    echo "Enter the name of the experiment you want to run:"
    read -r EXP_NAME
    if [ ! -d "../../runs/$EXP_NAME" ]; then
        echo "Experiment does not exist, please make one! You can run setup_experiment.sh to do so"
        exit 1
    fi
    if [ ! -f "../../runs/$EXP_NAME/inference_config.json" ]; then
        # Bug fix: report the path that was actually checked
        # (inference_config.json, not config.json).
        echo "../../runs/$EXP_NAME/inference_config.json"
        echo "config does not exist, please make one! You can run setup_experiment.sh to do so"
        exit 1
    fi
}
# Build the torchrun inference command for the chosen GPUs/experiment and run
# it after user confirmation. Uses the globals $gpu_indices and $EXP_NAME.
command_construct(){
    # Construct the command; nproc_per_node is the number of selected GPUs.
    cmd="CUDA_VISIBLE_DEVICES=$gpu_indices torchrun --standalone --nproc_per_node=$(echo $gpu_indices | tr ',' '\n' | wc -l) ../../inference.py --config ../../runs/$EXP_NAME/inference_config.json"
    echo "Do you want to run the command $cmd?"
    echo "--"
    echo "Type y/n:"
    read -r selection
    # Bug fix: quote $selection — an empty reply previously made the [ ] test
    # fail with "unary operator expected" instead of being compared.
    if [ "$selection" = "n" ]; then
        echo "cancelled!!"
    else
        echo "running on GPUs, $cmd"
        eval "$cmd"
    fi
}
# Inference launcher: pick GPUs, pick the experiment, then run torchrun.
main() {
    # Select the GPUs
    select_gpus
    # Select experiment to run
    select_experiment
    # Eval command
    command_construct
}
main
| Shell |
2D | kirchhausenlab/Cryosamba | automate/scripts/setup_inference.sh | .sh | 2,148 | 93 | #!/bin/bash
# Prompt for the experiment whose inference config should be generated and
# verify its run folder exists; exits 1 otherwise. Sets the global $EXP_NAME.
select_experiment_location(){
    echo "Enter the name of the experiment you want to run inference for:"
    read -r EXP_NAME
    if [ ! -d "../../runs/$EXP_NAME" ]; then
        echo "Experiment does not exist, please make one! You can run setup_experiment.sh to do so"
        exit 1
    fi
}
# Build inference_config.json for the experiment in $EXP_NAME: start from a
# baseline config (heredoc below) and merge the user's answers with jq.
generate_config() {
    base_config=$(cat << EOL
{
    "inference_dir": "../$EXP_NAME/inference",
    "inference_data": {
        "patch_shape": [
            256,
            256
        ],
        "patch_overlap": [
            16,
            16
        ],
        "batch_size": 32,
        "num_workers": 4
    },
    "inference": {
        "output_format": "same",
        "load_ckpt_name": null,
        "pyr_level": 3,
        "mixed_precision": true
    }
}
EOL
)
    train_dir=../$EXP_NAME/train
    # Bug fix: prompt and read inside the loop so an empty answer re-prompts;
    # the old test `[ -z "$data_path" = "" ]` was malformed ("too many
    # arguments") and the read was never repeated.
    while true; do
        echo "Enter the data path "
        read -r data_path
        if [ -z "$data_path" ]; then
            echo "Please enter a valid data path"
        else
            break
        fi
    done
    echo "Enter the max frame gap for the inference(usually 2 x value use for training, press Enter for default of 12)"
    read -r max_frame_gap
    max_frame_gap=${max_frame_gap:-12}
    echo "Enter TTA (uses test-time augmentation for slightly better results however is 3x slower to run), press enter for default:true)"
    read -r TTA
    TTA=${TTA:-true}
    compile=false
    config_file="../../runs/$EXP_NAME/inference_config.json"
    # use jq to merge the base config with user inputs
    # Bug fix: removed the trailing comma after max_frame_gap inside the
    # inference_data object — it was a jq syntax error, so the merge failed.
    echo "$base_config" | jq \
        --arg train_dir "$train_dir" \
        --arg data_path "$data_path" \
        --argjson max_frame_gap "$max_frame_gap" \
        --argjson TTA "$TTA" \
        --argjson compile "$compile" \
        '. + {
            "train_dir": $train_dir,
            "data_path": [$data_path],
            "inference_data" : (.inference_data+ {
                "max_frame_gap": $max_frame_gap
            }),
            "inference": (.inference+ {
                "TTA": $TTA,
                "compile": $compile
            })
        }' > "$config_file"
    echo "generated config file!"
}
# Inference setup: pick the experiment, then write its inference config.
main(){
    select_experiment_location
    generate_config
}
main
| Shell |
2D | kirchhausenlab/Cryosamba | automate/scripts/find_vulnerabilities.sh | .sh | 177 | 11 | #!/bin/bash
# Install bandit into the conda environment and scan the automate/ sources
# for medium/high-severity findings (-ll).
# NOTE(review): `conda activate` usually needs `source .../conda.sh` in
# non-interactive scripts, and the env name "cryosamba_env" differs from the
# "cryosamba" env created by install_cryosamba.sh — confirm before relying on it.
install_bandit_and_run_tests(){
    conda activate cryosamba_env
    conda install bandit
    conda install bandit[toml]
    bandit -r ../. -ll
}
install_bandit_and_run_tests
| Shell |
2D | kirchhausenlab/Cryosamba | automate/scripts/additional/setup_cron.sh | .sh | 325 | 14 | #!/bin/bash
## run a cron job every 24 hours to see if dependencies need to be updated or not
CURR_PATH="$(pwd)/update_env.sh"
LOG_FILE="$(pwd)/cron_update.log"
# Bug fixes: `crontabl` was a typo for `crontab`, and the cron line referenced
# the undefined $SCRIPT_PATH instead of $CURR_PATH. Also tolerate an empty
# crontab, where `crontab -l` exits non-zero.
crontab -l > mycron 2>/dev/null || true
echo "0 0 * * * $CURR_PATH >> $LOG_FILE 2>&1" >> mycron
crontab mycron
rm mycron
echo "Cron job has been set up to run $CURR_PATH daily"
2D | kirchhausenlab/Cryosamba | automate/scripts/additional/update_env.sh | .sh | 330 | 10 | #!/bin/bash
## Update the environment in cases when libraries might be getting old or not
# Refreshes the conda env from the exported environment.yml, pruning packages
# that were removed from the file.
# NOTE(review): `conda activate`/`deactivate` typically require sourcing
# conda.sh first when run from cron/non-interactive shells — confirm.
ENV_NAME="cryosamba_env"
YML_PATH="../../environment.yml"
conda deactivate
conda env update --name $ENV_NAME --file $YML_PATH --prune
conda activate $ENV_NAME
echo "$(date): Conda environment has been updated via cron job scheduled daily"
2D | kirchhausenlab/Cryosamba | tests/__init__.py | .py | 0 | 0 | null | Python |
2D | kirchhausenlab/Cryosamba | tests/run_e2e.sh | .sh | 970 | 13 | #!/bin/bash
#The test_sample folder has some sample data produced by running 500 iterations of cryosamba on a DGX A100 GPU with a batch size of
# 16 and max frame gap of 3. The following test recreates that scenario for users and will take around 15 minutes to run (both train and inference)
# Once you have the training and inference done, navigate to the test_rotacell folder, where you will find our sample results. Compare
# what you get by running this test to our sample as a sanity check for cryosamba's validity
#
# Download the ndc10gfp_g7_l1_ts_002_ctf_6xBin.rec file from dropbox and put it in the cryosamba folder before running the tests
echo "Please make sure you change the paths for the data in the test_sample folder"
# Single-GPU end-to-end run: train for 500 iterations, then run inference
# with the config files checked into test_sample/.
CUDA_VISIBLE_DEVICES=0 torchrun --standalone --nproc_per_node=1 train.py --config test_sample/train_config.json
CUDA_VISIBLE_DEVICES=0 torchrun --standalone --nproc_per_node=1 inference.py --config test_sample/inference_config.json
| Shell |
2D | kirchhausenlab/Cryosamba | tests/run_e2e.py | .py | 3,721 | 116 | """
The test_sample folder has some sample data produced by running 500 iterations of cryosamba on a DGX A100 GPU with a batch size of
16 and max frame gap of 3. The following test recreates that scenario for users and will take around 15 minutes to run (both train and inference)
Once you have the training and inference done, navigate to the test_rotacell folder, where you will find our sample results. Compare
what you get by running this test to our sample as a sanity check for cryosamba's validity
Download the ndc10gfp_g7_l1_ts_002_ctf_6xBin.rec file from dropbox and put it in the cryosamba folder before running the tests
"""
import json
import os
import subprocess
import typer
from pathlib import Path
import time
def main():
    """End-to-end smoke test: write train/inference configs and run both stages.

    Generates ``test_sample/train_config.json`` and
    ``test_sample/inference_config.json`` next to this script, then launches
    the single-GPU torchrun training and inference commands.
    """
    # Bug fix: Path(__name__) resolved relative to the CWD ("__main__" is not
    # a file path); __file__ anchors paths to this script's directory.
    curr_path = Path(__file__).resolve().parent
    test_file = f"{curr_path}/ndc10gfp_g7_l1_ts_002_ctf_6xBin.rec"
    train_config = {
        "train_dir": "test_sample/train",
        "data_path": test_file,
        "train_data": {
            "max_frame_gap": 3,
            "patch_overlap": [16, 16],
            "patch_shape": [256, 256],
            "split_ratio": 0.95,
            "batch_size": 16,
            "num_workers": 4,
        },
        "train": {
            "num_iters": 500,
            "load_ckpt_path": None,
            "print_freq": 100,
            "save_freq": 1000,
            "val_freq": 1000,
            "warmup_iters": 300,
            "mixed_precision": True,
            "compile": False,
        },
        "optimizer": {
            "lr": 2e-4,
            "lr_decay": 0.99995,
            "weight_decay": 0.0001,
            "epsilon": 1e-08,
            "betas": [0.9, 0.999],
        },
        "biflownet": {
            "pyr_dim": 24,
            "pyr_level": 3,
            "corr_radius": 4,
            "kernel_size": 3,
            "warp_type": "soft_splat",
            "padding_mode": "reflect",
            "fix_params": False,
        },
        "fusionnet": {
            "num_channels": 16,
            "padding_mode": "reflect",
            "fix_params": False,
        },
    }
    # Generate inference config
    inference_config = {
        "train_dir": "test_sample/train",
        "data_path": test_file,
        "inference_dir": "test_sample/inference",
        "inference_data": {
            "max_frame_gap": 6,
            "patch_shape": [256, 256],
            "patch_overlap": [16, 16],
            "batch_size": 16,
            "num_workers": 4,
        },
        "inference": {
            "output_format": "same",
            "load_ckpt_name": None,
            "pyr_level": 3,
            "mixed_precision": True,
            "tta": True,
            "compile": False,
        },
    }
    # Save train config to JSON
    train_config_path = curr_path / "test_sample" / "train_config.json"
    with open(train_config_path, "w") as f:
        json.dump(train_config, f, indent=4)
    # Save inference config to JSON
    inference_config_path = curr_path / "test_sample" / "inference_config.json"
    with open(inference_config_path, "w") as f:
        json.dump(inference_config, f, indent=4)
    # Bug fix: the old while/flag construct always executed exactly once and
    # its "too slow" branch was unreachable; run the stages directly.
    cmd = f"CUDA_VISIBLE_DEVICES=0 torchrun --standalone --nproc_per_node=1 train.py --config {train_config_path}"
    subprocess.run(cmd, shell=True, text=True)
    print("finished execution!")
    cmd = f"CUDA_VISIBLE_DEVICES=0 torchrun --standalone --nproc_per_node=1 inference.py --config {inference_config_path}"
    subprocess.run(cmd, shell=True, text=True)
# CLI entry point: typer turns main() into a command-line command.
if __name__ == "__main__":
    typer.run(main)
| Python |
2D | kirchhausenlab/Cryosamba | tests/unit/test_run_automatic_train_file_creation.py | .py | 5,460 | 201 | import os
import sys
import subprocess
import unittest
import json
def run_bash_script(script_content):
    """Write *script_content* to a temporary shell script, execute it, and
    return ``(stdout, stderr, returncode)``.

    The temporary file is always removed, even when starting or waiting on
    the process raises.
    """
    script_name = "temp_script.sh"
    # Write the script content to a temporary file
    with open(script_name, "w") as f:
        f.write(script_content)
    # Make the script executable
    os.chmod(script_name, 0o755)
    try:
        # Run the script and capture output
        process = subprocess.Popen(
            ["./temp_script.sh"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
        )
        stdout, stderr = process.communicate()
    finally:
        # Bug fix: clean up the temporary script even if execution fails.
        os.remove(script_name)
    return stdout, stderr, process.returncode
def get_experiment_name(stdout):
    """Return the experiment name announced in *stdout*, or None.

    The setup script prints "<name> folder made"; the first token of the
    first such line is the experiment name.
    """
    announcements = (ln for ln in stdout.split("\n") if "folder made" in ln)
    match = next(announcements, None)
    return match.split()[0] if match is not None else None
class TestRunCreation(unittest.TestCase):
    """End-to-end check of the experiment-setup bash script.

    setUp embeds a non-interactive copy of setup_experiment_training.sh
    (fixed experiment name, fixed data path), runs it once, and the tests
    verify the folders and the generated train_config.json.
    """

    def setUp(self):
        # Your Bash script content here.  NOTE: the string below is executed
        # verbatim by run_bash_script; it requires jq on PATH and writes under
        # ../../runs/ relative to the test's working directory.
        self.bash_script = """#!/bin/bash
make_folder() {
    while true; do
        input_name=exp-random
        if [ -z "$input_name" ]; then
            echo "PLEASE ENTER A NAME, experiment name cannot be empty"
        elif [ -d "../../$input_name" ]; then
            echo "Experiment already exists, please choose a different name"
        else
            DEFAULT_NAME=$input_name
            echo "$DEFAULT_NAME folder made"
            break
        fi
    done
}
# Make train and inference folders
generate_train_and_test_paths(){
    mkdir -p "../../runs/$DEFAULT_NAME/train"
    mkdir -p "../../runs/$DEFAULT_NAME/inference"
}
generate_config() {
    base_config=$(cat << EOL
{
    "train_data": {
        "max_frame_gap": 6,
        "patch_overlap": [
            16,
            16
        ],
        "patch_shape":[
            256,
            256
        ],
        "split_ratio": 0.95,
        "num_workers": 4
    },
    "train": {
        "load_ckpt_path": null,
        "print_freq": 100,
        "save_freq": 1000,
        "val_freq": 1000,
        "warmup_iters": 300,
        "mixed_precision": true,
        "compile": false
    },
    "optimizer": {
        "lr": 2e-4,
        "lr_decay": 0.99995,
        "weight_decay": 0.0001,
        "epsilon": 1e-08,
        "betas": [
            0.9,
            0.999
        ]
    },
    "biflownet": {
        "pyr_dim": 24,
        "pyr_level": 3,
        "corr_radius": 4,
        "kernel_size": 3,
        "warp_type": "soft_splat",
        "padding_mode": "reflect",
        "fix_params": false
    },
    "fusionnet": {
        "num_channels": 16,
        "padding_mode": "reflect",
        "fix_params": false
    }
}
EOL
)
    data_path="/nfs/datasync4/inacio/data/denoising/cryosamba/rota/train/"
    max_frame_gap=${max_frame_gap:-6}
    num_iters=${num_iters:-200000}
    batch_size=${batch_size:-32}
    config_file="../../runs/$DEFAULT_NAME/train_config.json"
    train_dir="../exp-random/train"
    # Use jq to merge the base config with user inputs
    echo "$base_config" | jq \\
        --arg data_path "$data_path" \\
        --arg train_dir "$train_dir"\\
        --argjson max_frame_gap "$max_frame_gap" \\
        --argjson num_iters "$num_iters" \\
        --argjson batch_size "$batch_size" \\
        '. + {
            "train_dir":$train_dir,
            "data_path": [$data_path],
            "train_data": (.train_data + {
                "max_frame_gap": $max_frame_gap,
                "batch_size": $batch_size
            }),
            "train": (.train + {
                "num_iters": $num_iters
            })
        }' > "$config_file"
    echo "Config file generated at $config_file"
}
main (){
    # Generate a folder
    RAND_NUM=$((1+$RANDOM %100))
    DEFAULT_NAME=TEST_NAME_EXP-$RAND_NUM
    make_folder
    # make the folders for paths
    generate_train_and_test_paths
    # Main script execution
    generate_config
}
main
"""
        # Execute once per test; tests then inspect the captured output.
        self.stdout, self.stderr, self.return_code = run_bash_script(self.bash_script)
        self.experiment_name = get_experiment_name(self.stdout)

    def test_script_execution(self):
        # The script must exit cleanly.
        self.assertEqual(
            self.return_code, 0, f"Script failed with error: {self.stderr}"
        )

    def test_folder_creation(self):
        # Both run subfolders must exist for the announced experiment.
        if self.experiment_name:
            self.assertTrue(os.path.exists(f"../../runs/{self.experiment_name}"))
            self.assertTrue(os.path.exists(f"../../runs/{self.experiment_name}/train"))
            self.assertTrue(
                os.path.exists(f"../../runs/{self.experiment_name}/inference")
            )
        else:
            self.fail("Experiment name not found in script output")

    def test_config_file_creation(self):
        # The generated config must exist and contain every expected section.
        if self.experiment_name:
            config_path = f"../../runs/{self.experiment_name}/train_config.json"
            self.assertTrue(os.path.exists(config_path))
            # Validate JSON structure
            with open(config_path, "r") as f:
                config = json.load(f)
            self.assertIn("train_data", config)
            self.assertIn("train", config)
            self.assertIn("optimizer", config)
            self.assertIn("biflownet", config)
            self.assertIn("fusionnet", config)
        else:
            self.fail("Experiment name not found in script output")
if __name__ == "__main__":
unittest.main()
| Python |
2D | kirchhausenlab/Cryosamba | tests/unit/test_trainConfig_gen.py | .py | 5,472 | 140 | import json
import os
import unittest
from pathlib import Path
from logging_config import logger
class TestTrainConfig(unittest.TestCase):
    """Validates the generated ``train_config.json`` for the 'exp-random' experiment.

    The config file is expected to already exist under ``<repo>/runs/exp-random/``
    (presumably produced by the setup/generation script run beforehand —
    confirm test ordering against the script-creation tests).
    """
    def setUp(self):
        """Resolve the path to the experiment's train_config.json."""
        # Fixed experiment name used by the generation scripts.
        self.folder_name = "exp-random"
        self.curr_path = Path(__file__).resolve().parent
        # tests/unit/ -> repo root -> runs/
        self.path_to_experiments = self.curr_path.parent.parent / "runs"
        self.config_path = (
            self.path_to_experiments / self.folder_name / "train_config.json"
        )
    def test_generate_test_config(self):
        """The config file must have been generated on disk."""
        try:
            self.assertTrue(self.config_path.exists(), "Config file was not generated")
        except Exception as e:
            logger.error("❌ Error checking config file: %s", str(e))
            self.fail(f"Config file check failed: {str(e)}")
    def test_verify_config(self):
        """Check that all required sections exist and hold the expected defaults."""
        try:
            with open(self.config_path, "r") as f:
                config = json.load(f)
            # Check for mandatory parameters
            self.assertIn("train_dir", config, "train_dir is missing from config")
            self.assertIn("data_path", config, "data_path is missing from config")
            self.assertIn(
                "train_data", config, "train_data section is missing from config"
            )
            self.assertIn(
                "max_frame_gap",
                config["train_data"],
                "max_frame_gap is missing from config",
            )
            # Check for additional parameters
            self.assertIn("train", config, "train section is missing from config")
            self.assertIn(
                "optimizer", config, "optimizer section is missing from config"
            )
            self.assertIn(
                "biflownet", config, "biflownet section is missing from config"
            )
            self.assertIn(
                "fusionnet", config, "fusionnet section is missing from config"
            )
            # Check specific values
            self.assertEqual(
                config["train_data"]["max_frame_gap"], 6, "Incorrect max_frame_gap"
            )
            self.assertEqual(
                config["train_data"]["patch_shape"], [256, 256], "Incorrect patch_shape"
            )
            self.assertEqual(
                config["train_data"]["patch_overlap"],
                [16, 16],
                "Incorrect patch_overlap",
            )
            self.assertEqual(
                config["train_data"]["split_ratio"], 0.95, "Incorrect split_ratio"
            )
            self.assertEqual(
                config["train_data"]["batch_size"], 32, "Incorrect batch_size"
            )
            self.assertEqual(
                config["train_data"]["num_workers"], 4, "Incorrect num_workers"
            )
            self.assertEqual(
                config["train"]["num_iters"], 200000, "Incorrect num_iters"
            )
            self.assertEqual(
                config["train"]["compile"], False, "Incorrect compile value"
            )
            # lr is a float (2e-4 in the generator), so compare approximately.
            self.assertAlmostEqual(
                config["optimizer"]["lr"],
                0.0002,
                places=6,
                msg="Incorrect learning rate",
            )
            self.assertEqual(config["biflownet"]["pyr_dim"], 24, "Incorrect pyr_dim")
            self.assertEqual(
                config["fusionnet"]["num_channels"], 16, "Incorrect num_channels"
            )
        except Exception as e:
            # NOTE(review): this also catches AssertionError from the checks
            # above and re-raises it via self.fail with a wrapped message.
            logger.error("❌ Error verifying config: %s", str(e))
            self.fail(f"Config verification failed: {str(e)}")
    def test_check_folder_created(self):
        """The experiment folder and its train/inference subfolders must exist."""
        try:
            check_path = (self.path_to_experiments / self.folder_name).exists()
            self.assertTrue(
                check_path, msg=f"Experiment folder '{self.folder_name}' not found"
            )
            train_folder = self.path_to_experiments / self.folder_name / "train"
            inference_folder = self.path_to_experiments / self.folder_name / "inference"
            self.assertTrue(train_folder.exists(), msg="Train folder not found")
            self.assertTrue(inference_folder.exists(), msg="Inference folder not found")
        except Exception as e:
            logger.error("❌ Error checking folder creation: %s", str(e))
            self.fail(f"Folder creation check failed: {str(e)}")
    def test_config_file_permissions(self):
        """The config file must be both readable and writable."""
        try:
            self.assertTrue(
                os.access(self.config_path, os.R_OK), "Config file is not readable"
            )
            self.assertTrue(
                os.access(self.config_path, os.W_OK), "Config file is not writable"
            )
        except Exception as e:
            logger.error("❌ Error checking config file permissions: %s", str(e))
            self.fail(f"Config file permissions check failed: {str(e)}")
    def test_config_file_format(self):
        """The config file must parse as a JSON object (dict)."""
        try:
            with open(self.config_path, "r") as f:
                config = json.load(f)
            self.assertIsInstance(
                config, dict, "Config file is not a valid JSON dictionary"
            )
        except json.JSONDecodeError as e:
            logger.error("❌ Error parsing JSON: %s", str(e))
            self.fail(f"Config file is not a valid JSON: {str(e)}")
        except Exception as e:
            logger.error("❌ Error checking config file format: %s", str(e))
            self.fail(f"Config file format check failed: {str(e)}")
if __name__ == "__main__":
unittest.main()
| Python |
2D | kirchhausenlab/Cryosamba | tests/unit/test_inferenceConfig_generation.py | .py | 4,268 | 124 | import json
import os
import subprocess
import unittest
from pathlib import Path
from logging_config import logger
class TestInferenceConfig(unittest.TestCase):
def setUp(self):
self.folder_name = "exp-random"
self.curr_path = Path(__file__).resolve().parent
self.path_to_experiments = self.curr_path.parent.parent / "runs"
self.config_path = (
self.path_to_experiments / self.folder_name / "inference_config.json"
)
def test_verify_config(self):
try:
with open(self.config_path, "r") as f:
config = json.load(f)
# Check for mandatory parameters
self.assertIn("train_dir", config, "train_dir is missing from config")
self.assertIn("data_path", config, "data_path is missing from config")
self.assertIn(
"inference_dir", config, "inference_dir is missing from config"
)
self.assertIn(
"inference_data",
config,
"inference_data section is missing from config",
)
self.assertIn(
"max_frame_gap",
config["inference_data"],
"max_frame_gap is missing from inference_data",
)
# Check for additional parameters
self.assertIn(
"inference", config, "inference section is missing from config"
)
# Check specific values
self.assertIsInstance(
config["data_path"], list, "data_path should be a list"
)
self.assertGreater(len(config["data_path"]), 0, "data_path list is empty")
# Check inference_data parameters
self.assertEqual(
config["inference_data"]["patch_shape"],
[256, 256],
"Incorrect default patch_shape",
)
self.assertEqual(
config["inference_data"]["patch_overlap"],
[16, 16],
"Incorrect default patch_overlap",
)
self.assertEqual(
config["inference_data"]["batch_size"],
32,
"Incorrect default batch_size",
)
self.assertEqual(
config["inference_data"]["num_workers"],
4,
"Incorrect default num_workers",
)
# Check inference parameters
self.assertIn(
"output_format",
config["inference"],
"output_format is missing from inference section",
)
self.assertIn(
"load_ckpt_name",
config["inference"],
"load_ckpt_name is missing from inference section",
)
self.assertIn(
"pyr_level",
config["inference"],
"pyr_level is missing from inference section",
)
self.assertIn(
"TTA", config["inference"], "TTA is missing from inference section"
)
self.assertIn(
"mixed_precision",
config["inference"],
"mixed_precision is missing from inference section",
)
self.assertIn(
"compile",
config["inference"],
"compile is missing from inference section",
)
# Check default values for inference
self.assertEqual(
config["inference"]["pyr_level"], 3, "Incorrect default pyr_level"
)
self.assertTrue(config["inference"]["TTA"], "Incorrect default TTA value")
self.assertTrue(
config["inference"]["mixed_precision"],
"Incorrect default mixed_precision value",
)
self.assertFalse(
config["inference"]["compile"], "Incorrect default compile value"
)
except Exception as e:
logger.error("❌ Error verifying inference config: %s", str(e))
self.fail("Inference config verification failed: %s", str(e))
if __name__ == "__main__":
unittest.main()
| Python |
2D | kirchhausenlab/Cryosamba | tests/unit/__init__.py | .py | 0 | 0 | null | Python |
2D | kirchhausenlab/Cryosamba | tests/unit/test_setup_and_installation.py | .py | 3,337 | 101 | import os
import shlex
import subprocess
import sys
import unittest
from logging_config import logger
class TestCUDA_ENV(unittest.TestCase):
def run_command(self, command, shell=True):
"""
Run a shell command and return its output and error (if any).
"""
try:
process = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
output, error = process.communicate()
if process.returncode != 0:
logger.error(f"Error executing command: {command}\nError: {error}")
return output, error
except Exception as e:
logger.error(f"💀 Error executing command: {str(e)}")
return None, str(e)
def test_system_type(self):
"""Test if the current system is compatible with cryosamba"""
curr_system = sys.platform.lower()
self.assertIsNotNone(
curr_system,
msg="Checking if the current system is a valid linux, windows or ubuntu machine",
)
self.assertNotEqual(
curr_system, "darwin", msg="System is macOS, which is not CUDA compatible"
)
def test_check_cuda(self):
"""Check if cuda is installed or not"""
if sys.platform.startswith("linux"):
output, error = self.run_command("nvidia-smi")
self.assertIsNotNone(
output, msg="CUDA is not installed or not functioning properly"
)
elif sys.platform.startswith("win"):
output, error = self.run_command("nvidia-smi")
self.assertIsNotNone(
output, msg="CUDA is not installed or not functioning properly"
)
else:
self.skipTest("This test is only applicable on Linux or Windows systems")
def test_conda_installation(self):
output, error = self.run_command("conda --version")
self.assertIsNotNone(
output, msg=f"Conda is not installed or not found in PATH. Error: {error}"
)
logger.info("Conda version: %s", output)
def test_active_env(self, env_name="cryosamba"):
output, error = self.run_command("conda env list")
self.assertIn(env_name, output, msg=f"Environment {env_name} does not exist")
logger.info(f"Environment {env_name} exists")
def test_installed_packages(self, env_name="cryosamba"):
packages_lst = [
"torch",
"torchvision",
"torchaudio",
"tifffile",
"mrcfile",
"easydict",
"loguru",
"streamlit",
]
output, error = self.run_command(
f"conda run -n {env_name} conda list", shell=True
)
self.assertIsNotNone(
output,
msg=f"Failed to list packages in environment {env_name}. Error: {error}",
)
for package in packages_lst:
self.assertIn(
package,
output,
msg=f"Package {package} is not installed in environment {env_name}",
)
logger.info(f"✅ {package} is installed")
if __name__ == "__main__":
unittest.main()
| Python |
2D | kirchhausenlab/Cryosamba | tests/unit/test_run_automatic_inference_file_creation.py | .py | 3,796 | 132 | import json
import os
import subprocess
import sys
import unittest
def run_bash_script(script_content):
    """Write *script_content* to a temporary shell script, execute it, and
    return ``(stdout, stderr, returncode)``.

    The helper script is always removed afterwards, even if spawning the
    process or communicating with it raises (the original left the file
    behind on failure).
    """
    script_path = "temp_script.sh"
    with open(script_path, "w") as f:
        f.write(script_content)
    os.chmod(script_path, 0o755)
    try:
        process = subprocess.Popen(
            ["./" + script_path],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
        )
        stdout, stderr = process.communicate()
    finally:
        # Clean up the helper script even when Popen/communicate raises.
        os.remove(script_path)
    return stdout, stderr, process.returncode
class TestInferenceRunCreation(unittest.TestCase):
    """End-to-end check that the inference-config generation script runs
    successfully and writes a well-formed inference_config.json under
    ../../runs/exp-random (the experiment folder must already exist)."""

    def setUp(self):
        """Run the bash generator script once and capture its outcome."""
        # Bug fix: the jq filter used to contain a trailing comma after
        # "max_frame_gap": $max_frame_gap — a jq syntax error that made the
        # whole pipeline (and thus the script) fail. The comma was removed;
        # the sibling train-config script never had one.
        self.bash_script = """#!/bin/bash
select_experiment_location(){
    EXP_NAME="exp-random"
    if [ ! -d "../../runs/$EXP_NAME" ]; then
        echo "Experiment does not exist, please make one! You can run setup_experiment.sh to do so"
        exit 1
    fi
}
generate_config() {
    base_config=$(cat << EOL
{
    "inference_dir": "../$EXP_NAME/inference",
    "inference_data": {
        "patch_shape": [
            256,
            256
        ],
        "patch_overlap": [
            16,
            16
        ],
        "batch_size": 32,
        "num_workers": 4
    },
    "inference": {
        "output_format": "same",
        "load_ckpt_name": null,
        "pyr_level": 3,
        "mixed_precision": true
    }
}
EOL
)
    train_dir=../$EXP_NAME/train
    data_path=""
    max_frame_gap=${max_frame_gap:-12}
    TTA=${TTA:-true}
    compile=false
    config_file="../../runs/$EXP_NAME/inference_config.json"
    # use jq to merge the base config with user inputs
    echo "$base_config" | jq \
        --arg train_dir "$train_dir" \
        --arg data_path "$data_path" \
        --argjson max_frame_gap "$max_frame_gap" \
        --argjson TTA "$TTA" \
        --argjson compile "$compile" \
        '. + {
            "train_dir": $train_dir,
            "data_path": [$data_path],
            "inference_data" : (.inference_data+ {
                "max_frame_gap": $max_frame_gap
            }),
            "inference": (.inference+ {
                "TTA": $TTA,
                "compile": $compile
            })
        }' > "$config_file"
    echo "generated config file!"
}
main(){
    select_experiment_location
    generate_config
}
main"""
        self.stdout, self.stderr, self.return_code = run_bash_script(self.bash_script)
        # The script hard-codes the experiment name, so mirror it here.
        self.experiment_name = "exp-random"

    def test_script_execution(self):
        """The generator script must exit with status 0."""
        self.assertEqual(
            self.return_code, 0, f"Script failed with error: {self.stderr}"
        )

    def test_config_file_creation(self):
        """inference_config.json must exist and contain the expected keys."""
        if self.experiment_name:
            config_path = f"../../runs/{self.experiment_name}/inference_config.json"
            self.assertTrue(os.path.exists(config_path))
            # Validate JSON structure
            with open(config_path, "r") as f:
                config = json.load(f)
            self.assertIn("inference", config)
            self.assertIn("inference_dir", config)
            self.assertIn("inference_data", config)
            self.assertIn("train_dir", config)
            self.assertIn("data_path", config)
        else:
            self.fail("Experiment name not found in script output")
if __name__ == "__main__":
unittest.main()
| Python |
2D | kirchhausenlab/Cryosamba | tests/integration/__init__.py | .py | 1 | 2 | Python | |
2D | kirchhausenlab/Cryosamba | tests/integration/startup_and_setup.py | .py | 1,543 | 44 | import os
import shutil
import sys
import unittest
from tests.unit.test_inferenceConfig_generation import TestInferenceConfig
from tests.unit.test_run_automatic_inference_file_creation import (
TestInferenceRunCreation,
)
from tests.unit.test_run_automatic_train_file_creation import TestRunCreation
from tests.unit.test_setup_and_installation import TestCUDA_ENV
from tests.unit.test_trainConfig_gen import TestTrainConfig
class IntegrationTest(unittest.TestCase):
    """Drives the individual unit-test suites in a fixed, numbered order."""

    def run_and_verify(self, suite):
        """Execute *suite* with a verbose text runner and assert it passed."""
        outcome = unittest.TextTestRunner(verbosity=2).run(suite)
        self.assertTrue(outcome.wasSuccessful())

    def test_1_setup_and_installation(self):
        """Environment / CUDA / conda sanity checks."""
        loader = unittest.TestLoader()
        self.run_and_verify(loader.loadTestsFromTestCase(TestCUDA_ENV))

    def test_2_train_config_gen(self):
        """Generation of the training config."""
        loader = unittest.TestLoader()
        self.run_and_verify(loader.loadTestsFromTestCase(TestRunCreation))

    def test_3_train_config_validate(self):
        """Validation of the generated training config."""
        loader = unittest.TestLoader()
        self.run_and_verify(loader.loadTestsFromTestCase(TestTrainConfig))

    def test_5_inference_config_validate(self):
        # NOTE(review): the numbers/names of test_4 and test_5 look swapped
        # relative to the suites they load — confirm the intended order.
        loader = unittest.TestLoader()
        self.run_and_verify(loader.loadTestsFromTestCase(TestInferenceRunCreation))

    def test_4_inference_config_gen(self):
        loader = unittest.TestLoader()
        self.run_and_verify(loader.loadTestsFromTestCase(TestInferenceConfig))
if __name__ == "__main__":
unittest.main(verbosity=2)
| Python |
2D | giovannipizzi/SchrPoisson-2DMaterials | solver.ipynb | .ipynb | 22,187 | 536 | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Live demo of schrpoisson-2dmaterials"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This demo showcases the code developed as part of the following research work: \n",
"\n",
"A. Bussy, G. Pizzi, M. Gibertini, *Strain-induced polar discontinuities in 2D materials from combined first-principles and Schrödinger-Poisson simulations*, **Phys. Rev. B 96**, 165438 (2017), [doi:10.1103/PhysRevB.96.165438](http://doi.org/10.1103/PhysRevB.96.165438).\n",
"\n",
    "Open-access: [arXiv:1705.01303](http://arxiv.org/abs/1705.01303)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os, sys\n",
"sys.path.append(os.path.join(os.path.split(os.path.abspath(__name__))[0],'code'))\n",
"import schrpoisson2D as sp2d\n",
"import ipywidgets as widgets\n",
"%matplotlib notebook\n",
"import matplotlib\n",
"import pylab as pl\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"display(widgets.HTML(\"Using the schroedinger-Poisson 2D solver at version: {}<br>Material: SnSe\".format(sp2d.__version__)))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"style = {'description_width': 'initial'}"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def on_in_KbT(event):\n",
" # Same conversion factor used in the code, approximate\n",
" smearing_label_right.value = \"(Smearing in meV, ~ {:6.3f} Ry)\".format(in_KbT.value / 13.6 / 1000.)\n",
"\n",
"in_KbT = widgets.FloatSlider(\n",
" value=100,\n",
" min=0,\n",
" max=700,\n",
" step=10,\n",
" description='KbT:',\n",
" disabled=False,\n",
" continuous_update=False,\n",
" orientation='horizontal',\n",
" readout=True,\n",
" readout_format='5.0f',\n",
" style=style\n",
")\n",
"smearing_label_right = widgets.Label()\n",
"display(widgets.HBox([in_KbT, smearing_label_right]))\n",
"\n",
"in_KbT.observe(on_in_KbT, names='value')\n",
"on_in_KbT(None)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"in_central = widgets.FloatSlider(\n",
" value=15,\n",
" min=1,\n",
" max=40,\n",
" step=0.1,\n",
" description=r'Central region length (angstrom):',\n",
" disabled=False,\n",
" continuous_update=False,\n",
" orientation='horizontal',\n",
" readout=True,\n",
" readout_format='5.1f',\n",
" style=style,\n",
" layout=widgets.Layout(width='50%') \n",
")\n",
"in_central_strain = widgets.Dropdown(\n",
" options=[('0%', 0.), ('0.5%', 0.005), ('1%', 0.01), ('10%', 0.1)],\n",
" value=0.,\n",
" description='Strain:',\n",
" layout=widgets.Layout(width='20%')\n",
")\n",
"display(widgets.HBox([in_central, in_central_strain]))\n",
"\n",
"in_outer = widgets.FloatSlider(\n",
" value=18,\n",
" min=1,\n",
" max=40,\n",
" step=0.1,\n",
" description=r'Outer region length (angstrom):',\n",
" disabled=False,\n",
" continuous_update=False,\n",
" orientation='horizontal',\n",
" readout=True,\n",
" readout_format='5.1f',\n",
" style=style,\n",
" layout=widgets.Layout(width='50%')\n",
")\n",
"in_outer_strain = widgets.Dropdown(\n",
" options=[('0%', 0.), ('0.5%', 0.005), ('1%', 0.01), ('10%', 0.1)],\n",
" value=0.1,\n",
" description='Strain:',\n",
" layout=widgets.Layout(width='20%')\n",
")\n",
"display(widgets.HBox([in_outer, in_outer_strain]))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def create_input(KbT, central_length,outer_length, central_strain, outer_strain):\n",
" return {\n",
" \"calculation\": \"single_point\",\n",
" \"smearing\" : True,\n",
" \"KbT\" : KbT, \n",
" \"delta_x\" : 0.2,\n",
" \"max_iteration\" : 1000,\n",
" \"nb_of_states_per_band\" : 2,\n",
" \"plot_fit\" : False,\n",
" \"out_dir\" : \"single_point_output\",\n",
" \"setup\" : { \"slab1\" : { \"strain\" : central_strain,\n",
" \"width\" : outer_length/2.,\n",
" \"polarization\" : \"positive\",\n",
" },\n",
" \"slab2\" : { \"strain\" : outer_strain,\n",
" \"width\" : central_length,\n",
" \"polarization\" : \"positive\",\n",
" },\n",
" \"slab3\" : { \"strain\" : central_strain,\n",
" \"width\" : outer_length/2.,\n",
" \"polarization\" : \"positive\",\n",
" },\n",
" },\n",
" }\n",
" \n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"material_SnSne = {\n",
" \"0.00\": { \"alpha_xx\": 10.22,\n",
" \"x_lat\": 4.408,\n",
" \"y_lat\": 4.288,\n",
" \"polarization_charge\": 0.0,\n",
" \"vacuum_level\": 3.471695,\n",
" \"valence_gamma\": {\"energy\": -1.508255819,\n",
" \"conf_mass\": 1.755925079,\n",
" \"DOS_mass\": 2.7330924,\n",
" \"degeneracy\": 1\n",
" },\n",
" \"valence_gamma-X\": {\"energy\": -0.8832657543,\n",
" \"conf_mass\": 0.125168454,\n",
" \"DOS_mass\": 0.159070572,\n",
" \"degeneracy\": 2\n",
" },\n",
" \"valence_gamma-Y\": {\"energy\": -1.064317364,\n",
" \"conf_mass\": 0.109554071,\n",
" \"DOS_mass\": 0.159511425,\n",
" \"degeneracy\": 2\n",
" },\n",
" \"conduction_gamma\": {\"energy\": 0.6090962485,\n",
" \"conf_mass\": 2.741100107,\n",
" \"DOS_mass\": 2.994158008,\n",
" \"degeneracy\": 1\n",
" },\n",
" \"conduction_gamma-X\": {\"energy\": 0.0902128713,\n",
" \"conf_mass\": 0.110807373,\n",
" \"DOS_mass\": 0.190387132,\n",
" \"degeneracy\": 2\n",
" },\n",
" \"conduction_gamma-Y\": {\"energy\": 0.05720314215,\n",
" \"conf_mass\": 0.131542031,\n",
" \"DOS_mass\": 0.130378261,\n",
" \"degeneracy\": 2\n",
" }\n",
" },\n",
" \"0.005\": { \"alpha_xx\": 9.85,\n",
" \"polarization_charge\": 0.0296,\n",
" \"vacuum_level\": 3.456272,\n",
" \"valence_gamma\": {\"energy\": -1.53133791,\n",
" \"conf_mass\": 1.663041043,\n",
" \"DOS_mass\": 2.721339909,\n",
" \"degeneracy\": 1\n",
" },\n",
" \"valence_gamma-X\": {\"energy\": -0.9222633399,\n",
" \"conf_mass\": 0.128689309,\n",
" \"DOS_mass\": 0.173073867,\n",
" \"degeneracy\": 2\n",
" },\n",
" \"valence_gamma-Y\": {\"energy\": -1.129934252,\n",
" \"conf_mass\": 0.11396212,\n",
" \"DOS_mass\": 0.169760624,\n",
" \"degeneracy\": 2\n",
" },\n",
" \"conduction_gamma\": {\"energy\": 0.5952654761,\n",
" \"conf_mass\": 2.57832201,\n",
" \"DOS_mass\": 2.822577572,\n",
" \"degeneracy\": 1\n",
" },\n",
" \"conduction_gamma-X\": {\"energy\": 0.07651969791,\n",
" \"conf_mass\": 0.113872233,\n",
" \"DOS_mass\": 0.195572846,\n",
" \"degeneracy\": 2\n",
" },\n",
" \"conduction_gamma-Y\": {\"energy\": 0.03308521035,\n",
" \"conf_mass\": 0.134383779,\n",
" \"DOS_mass\": 0.136358334,\n",
" \"degeneracy\": 2\n",
" }\n",
" },\n",
" \"0.01\": { \"alpha_xx\": 9.43,\n",
" \"polarization_charge\": 0.06357,\n",
" \"vacuum_level\": 3.4408595,\n",
" \"valence_gamma\": {\"energy\": -1.54984677,\n",
" \"conf_mass\": 1.56510041,\n",
" \"DOS_mass\": 2.70985957,\n",
" \"degeneracy\": 1\n",
" },\n",
" \"valence_gamma-X\": {\"energy\": -0.964184483,\n",
" \"conf_mass\": 0.13291259,\n",
" \"DOS_mass\": 0.191355,\n",
" \"degeneracy\": 2\n",
" },\n",
" \"valence_gamma-Y\": {\"energy\": -1.198008621,\n",
" \"conf_mass\": 0.11936355,\n",
" \"DOS_mass\": 0.18232315,\n",
" \"degeneracy\": 2\n",
" },\n",
" \"conduction_gamma\": {\"energy\": 0.5788854169,\n",
" \"conf_mass\": 2.38658757,\n",
" \"DOS_mass\": 2.66126322,\n",
" \"degeneracy\": 1\n",
" },\n",
" \"conduction_gamma-X\": {\"energy\": 0.06282279948,\n",
" \"conf_mass\": 0.117584668,\n",
" \"DOS_mass\": 0.20252247,\n",
" \"degeneracy\": 2\n",
" },\n",
" \"conduction_gamma-Y\": {\"energy\": 0.01621894616,\n",
" \"conf_mass\": 0.137705331,\n",
" \"DOS_mass\": 0.143839918,\n",
" \"degeneracy\": 2\n",
" }\n",
" },\n",
" \"0.10\": {\n",
" \"epsilon_xx\" : 4.390845042,\n",
" \"epsilon_yy\": 4.609306158,\n",
" \"alpha_xx\": 5.40,\n",
" \"alpha_yy\": 5.74 ,\n",
" \"polarization\": -0.1774925,\n",
" \"polarization_mod\": 0.3733465,\n",
" \"polarization_units\": \"C/m^2\",\n",
" \"polarization_charge\": 0.38643,\n",
" \"vacuum_level\": 3.183275,\n",
" \"band_gap\": 1.425,\n",
" \"valence_gamma\": {\"energy\": -1.846872487,\n",
" \"position\": [0.0,0.0],\n",
" \"conf_mass\": 0.804425486,\n",
" \"DOS_mass\": 2.747762669,\n",
" \"degeneracy\": 1\n",
" },\n",
" \"valence_gamma-X\": {\"energy\": -1.639183729,\n",
" \"position\": [0.5126292283,0.0],\n",
" \"conf_mass\": 0.205658144,\n",
" \"DOS_mass\": 1.059071873,\n",
" \"degeneracy\": 2\n",
" },\n",
" \"valence_gamma-Y\": {\"energy\": -2.150722701,\n",
" \"position\": [0.0,0.5741904823],\n",
" \"conf_mass\": 0.271355674,\n",
" \"DOS_mass\": 0.688994731,\n",
" \"degeneracy\": 2\n",
" },\n",
" \"conduction_gamma\": {\"energy\": 0.3081798586,\n",
" \"position\": [0.0,0.0],\n",
" \"conf_mass\": 0.98472771,\n",
" \"DOS_mass\": 1.593144026,\n",
" \"degeneracy\": 1\n",
" },\n",
" \"conduction_gamma-X\": {\"energy\": -0.2144276807,\n",
" \"position\": [0.5231571587,0.0],\n",
" \"conf_mass\": 0.182798895,\n",
" \"DOS_mass\": 0.213362934,\n",
" \"degeneracy\": 2\n",
" },\n",
" \"conduction_gamma-Y\": {\"energy\": -0.205542465 ,\n",
" \"position\": [0.0,0.6099056797],\n",
" \"conf_mass\": 0.205831494,\n",
" \"DOS_mass\": 0.3196511,\n",
" \"degeneracy\": 2\n",
" }\n",
" }\n",
"}\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def onChange(event):\n",
" #pl.get_current_fig_manager().toolbar.mode == '': # Not in Zoom-Mode\n",
" xl = total_free_charge_plot.get_xlim()\n",
" yl = total_free_charge_plot.get_ylim()\n",
" current_aspect = (yl[1] - yl[0]) / (xl[1] - xl[0])\n",
" new_xlim = free_charge_plot.get_xlim()\n",
" total_free_charge_plot.set_xlim(new_xlim)\n",
" total_free_charge_plot.set_ylim([0,(new_xlim[1] - new_xlim[0]) * current_aspect]) \n",
" #with calc_output:\n",
" # print(event, dir(event))\n",
" # print(event.canvas, event.guiEvent, event.name, event.renderer)\n",
"\n",
"\n",
"def show_output(retval):\n",
" global total_free_charge_plot, free_charge_plot\n",
" cmap = matplotlib.cm.RdYlBu\n",
" graphs_output.clear_output()\n",
" graphs2_output.clear_output() \n",
" with graphs_output:\n",
" density_profile = retval['out_files']['density_profile']['data']\n",
" pl.figure()\n",
" free_charge_plot = pl.subplot(2,1,1)\n",
" x = density_profile[:,0]\n",
" pl.title(\"Free charge density profile (you can zoom and pan)\")\n",
" pl.ylabel(\"Charge density (cm$^{-2}$)\")\n",
" pl.plot(x, density_profile[:,1], label=\"Free electrons\", color=cmap(0))\n",
" pl.plot(x, density_profile[:,2], label=\"Free holes\", color=cmap(cmap.N)) \n",
" pl.plot(x, np.zeros(len(x)), color=(0.3,0.3,0.3,1.))\n",
" free_charge_plot.set_xlim([x[0], x[-1]])\n",
" pl.axvline(0., color=(0.9,0.9,0.9,1.))\n",
" pl.axvline(in_outer.value/2., color=(0.7,0.7,0.7,1.))\n",
" pl.axvline(in_outer.value/2. + in_central.value, color=(0.7,0.7,0.7,1.))\n",
" pl.axvline(in_outer.value + in_central.value, color=(0.9,0.9,0.9,1.))\n",
" pl.legend() \n",
" \n",
" total_free_charge_plot = pl.subplot(2,1,2)\n",
" pl.title(\"Net free charge\")\n",
" net_charge = density_profile[:,2] - density_profile[:,1]\n",
" pl.imshow(np.vstack([net_charge] * 2), \n",
" extent = (x[0], x[-1], 0, (x[-1]-x[0])/10.), \n",
" #aspect=10.,\n",
" interpolation='bilinear',\n",
" cmap=cmap,\n",
" )\n",
" total_free_charge_plot.yaxis.set_visible(False)\n",
" total_free_charge_plot.axes.can_zoom = False\n",
" total_free_charge_plot.axes.can_pan = False \n",
" \n",
" pl.axvline(0., color=(0.9,0.9,0.9,1.))\n",
" pl.axvline(in_outer.value/2., color=(0.7,0.7,0.7,1.))\n",
" pl.axvline(in_outer.value/2. + in_central.value, color=(0.7,0.7,0.7,1.))\n",
" pl.axvline(in_outer.value + in_central.value, color=(0.9,0.9,0.9,1.))\n",
"\n",
" pl.xlabel(\"Position ($\\AA$)\")\n",
" \n",
" pl.show()\n",
" pl.connect('draw_event', onChange) \n",
"\n",
" with graphs2_output:\n",
" #line_colors = {\n",
" # 'conduction': (252/255.,141/255.,89/255.,1.),\n",
" # 'valence': (145/255.,191/255.,219/255.,1.),\n",
" #}\n",
" line_colors = {\n",
" 'conduction': ['#fef0d9','#fdd49e','#fdbb84','#fc8d59','#ef6548','#d7301f','#990000'][::-1],\n",
" 'valence': ['#f1eef6','#d0d1e6','#a6bddb','#74a9cf','#3690c0','#0570b0','#034e7b'][::-1]\n",
" }\n",
" band_data = retval['out_files']['band_data']['metadata']\n",
" x = band_data['x']\n",
" pl.figure()\n",
" s = pl.subplot(1,1,1)\n",
" pl.plot(x, [0.] * len(x), color=(171/255.,221/255.,164/255.,1.), linestyle='-', linewidth=0.5) # Fermi energy\n",
"\n",
" for band_type in ['valence', 'conduction']:\n",
" for i, the_band in enumerate(band_data[band_type]):\n",
" color = line_colors[band_type][(i*2)%len(line_colors[band_type])]\n",
" pl.plot(x, np.array(the_band['profile']) - band_data['e_fermi'], color=color, linewidth=2)\n",
" # print(band_type, len(the_band['states']))\n",
" for state in the_band['states']:\n",
" #print(state['energy'] - band_data['e_fermi'])\n",
" pl.plot(x, np.array(state['profile']) - band_data['e_fermi'], color=color, linewidth=0.5)\n",
" pl.plot(x, [state['energy'] - band_data['e_fermi']]*len(x), color=color, linewidth=0.5, linestyle='--') \n",
"\n",
"\n",
" pl.xlabel(\"Position ($\\AA$)\")\n",
" pl.ylabel(\"Band energy (eV)\")\n",
" pl.title(\"Band profiles and eigenstates\")\n",
" s.set_xlim([x[0], x[-1]])\n",
" pl.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def my_callback(**kwargs):\n",
" status_output.value = \"Simulation step {} completed.\".format(kwargs['step'])\n",
"\n",
"def on_run(event): \n",
" calc_output.clear_output()\n",
" graphs_output.clear_output()\n",
" graphs2_output.clear_output() \n",
" run_btn.disabled = True\n",
" with calc_output:\n",
" retval = sp2d.main_run(\n",
" matprop=material_SnSne, \n",
" input_dict=create_input(\n",
" KbT=in_KbT.value / 1000. / 13.6, # The code wants Ry, internally uses this conversion factor \n",
" central_length=in_central.value,\n",
" outer_length=in_outer.value,\n",
" central_strain=in_central_strain.value,\n",
" outer_strain=in_outer_strain.value,\n",
" ),\n",
" callback=my_callback)\n",
" run_btn.disabled = False\n",
" status_output.value += \"<br><strong>Simulation completed!\"\n",
" show_output(retval)\n",
" #print(retval)\n",
"\n",
"run_btn = widgets.Button(\n",
" description='Run simulation',\n",
" disabled=False,\n",
" button_style='success', # 'success', 'info', 'warning', 'danger' or ''\n",
" tooltip='Run with the 2D Schroedinger-Poisson solver',\n",
" icon='check'\n",
")\n",
"run_btn.on_click(on_run)\n",
"display(run_btn)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def on_show_raw(event):\n",
" if len(raw_group.children) > 0:\n",
" raw_group.children = []\n",
" show_raw_btn.description = \"Show raw output\"\n",
" raw_group.box_style = ''\n",
" else:\n",
" raw_group.children = [calc_output]\n",
" show_raw_btn.description = \"Hide raw output\"\n",
" raw_group.box_style = 'warning'\n",
" \n",
"status_output = widgets.HTML()\n",
"display(status_output)\n",
"\n",
"show_raw_btn = widgets.Button(\n",
" description='Show raw output',\n",
" disabled=False,\n",
" button_style='', # 'success', 'info', 'warning', 'danger' or ''\n",
" tooltip='Show raw output',\n",
" icon=''\n",
")\n",
"show_raw_btn.on_click(on_show_raw)\n",
"display(show_raw_btn)\n",
"calc_output = widgets.Output()\n",
"raw_group = widgets.VBox([], box_style='')\n",
"display(raw_group)\n",
"graphs_output = widgets.Output()\n",
"display(graphs_output)\n",
"graphs2_output = widgets.Output()\n",
"display(graphs2_output)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| Unknown |
2D | giovannipizzi/SchrPoisson-2DMaterials | input_examples/create_calc_input.py | .py | 2,037 | 60 | import json
## Change here if needed
#type_of_calc = "single_point"
type_of_calc = "map"
name_of_json_file = "example_calc_input.json"
# Pre-built example inputs, one per supported calculation type.
_single_point_input = {
    "calculation": "single_point",
    "smearing": True,
    "KbT": 0.005,
    "delta_x": 0.5,
    "max_iteration": 1000,
    "nb_of_states_per_band": 2,
    "plot_fit": False,
    "out_dir": "single_point_output",
    "setup": {
        "slab1": {"strain": 0.00,
                  "width": 10.0,
                  "polarization": "positive",
                  },
        "slab2": {"strain": 0.10,
                  "width": 20.0,
                  "polarization": "positive",
                  },
        "slab3": {"strain": 0.00,
                  "width": 10.0,
                  "polarization": "positive",
                  },
    },
}
_map_input = {
    "calculation": "map",
    "smearing": True,
    "KbT": 0.005,
    "max_iteration": 1000,
    "plot_fit": False,
    "nb_of_steps": 100,
    "upper_delta_x_limit": 0.5,
    "out_dir": "map_output",
    "strain": {"min_strain": 0.0,
               "max_strain": 0.1,
               "strain_step": 0.05
               },
    "width": {"min_width": 10.0,
              "max_width": 20.0,
              "width_step": 10.0
              },
}
_examples = {"single_point": _single_point_input, "map": _map_input}
if type_of_calc not in _examples:
    raise ValueError("Invalid value of 'type_of_calc'")
input_dict = _examples[type_of_calc]
# Write the selected example input to disk, pretty-printed.
with open(name_of_json_file, 'w') as f:
    json.dump(input_dict, f, indent=2)
| Python |
2D | giovannipizzi/SchrPoisson-2DMaterials | code/schrpoisson2D.py | .py | 71,997 | 1,569 | """
Main executable of the Schroedinger--Poisson solver for 2D materials.
See PDF documentation for its usage.
You need to pass on the command line the path to two JSON files, the first
to the materials properties, and the second to the calculation input flags:
python modulable_2Dschrpoisson.py {material_props}.json {calc_input}.json
Note that to run the code you need first to compile the fortran code using
'make'.
If you use this code in your work, please cite the following paper:
A. Bussy, G. Pizzi, M. Gibertini, Strain-induced polar discontinuities
in 2D materials from combined first-principles and Schroedinger-Poisson
simulations, Phys. Rev. B 96, 165438 (2017).
This code is released under a MIT license, see LICENSE.txt file in the main
folder of the code repository, hosted on GitHub at
https://github.com/giovannipizzi/schrpoisson-2dmaterials
"""
import numpy as n
import scipy
import scipy.linalg
import time
import scipy.special
import os
from operator import add
import sys
import warnings
import json
try:
import schrpoisson_wire as spw
except ImportError:
raise ImportError("You need to have the schrpoisson_wire module.\n"
"To obtain it, you have to compile the Fortran code using f2py.\n"
"If you have f2py already installed, you will most probably need only to\n"
"run 'make' in the same folder as the code.")
# Python code version
__version__ = "1.1.0"
#hbar^2/m0 in units of eV*ang*ang
HBAR2OVERM0=7.61996163
# periodicity of the structure; should remain True in this case
is_periodic = True
# Small threshold to check for charge neutrality
small_threshold = 1.e-6
# If True, in run_simulation redirect part of the output to /dev/null
reduce_stdout_output = True
class ValidationError(Exception):
    """Raised when the user-supplied JSON input or materials data is not organized as expected."""
    pass
class InternalError(Exception):
    """Raised for errors internal to the code (not caused by user input)."""
    pass
def read_input_materials_properties(matprop):
    """
    Build a suitable dictionary containing the materials properties starting from an external json file.

    matprop: dict parsed from the materials-properties JSON file; it must contain an
        unstrained entry with key "0.00" holding at least x_lat, y_lat, vacuum_level,
        alpha_xx, polarization_charge and subdictionaries whose names contain
        "valence"/"conduction" (each with energy, conf_mass, DOS_mass, degeneracy).

    Returns (suitable_mat_prop, a_lat, b_lat): suitable_mat_prop maps each strain key
    to its processed properties plus two extra "<strain>_n_deltadoping" and
    "<strain>_p_deltadoping" entries; a_lat and b_lat are the in-plane lattice
    parameters of the unstrained entry.

    Raises ValidationError if the input dictionary is not organized as expected.
    """
    suitable_mat_prop = {}
    num_of_mat = len(matprop)  # currently unused
    valence_keys = []
    conduction_keys = []
    try:
        a_lat = matprop["0.00"]["x_lat"]
        b_lat = matprop["0.00"]["y_lat"]
    except KeyError:
        # NOTE(review): json_matprop is not defined in this function; presumably a
        # module-level global holding the input file name -- confirm
        raise ValidationError("Error: lattice parameters not set up correctly in file '%s'" %json_matprop)
    #looping over the first input dict entry to get the keys
    try:
        for key in list(matprop["0.00"].keys()):
            if "valence" in key:
                valence_keys.append(key)
            if "conduction" in key:
                conduction_keys.append(key)
        if len(valence_keys) == 0 or len(conduction_keys) == 0:
            raise KeyError
    except KeyError:
        raise ValidationError("Error: The material properties json file (%s) must contain unstrained data with key '0.00' and subdictionaries must contain valence/conduction in their names" %json_matprop)
    #looping over the different strains
    try:
        for strain in list(matprop.keys()):
            condenergies = []
            valenergies = []
            condmass = []
            valmass = []
            conddosmass = []
            valdosmass = []
            conddegeneracy = []
            valdegeneracy = []
            #generalities
            suitable_mat_prop[strain] = {}
            suitable_mat_prop[strain]["alpha"] = 4.*n.pi* 0.0055263496 * matprop[strain]["alpha_xx"] # 4 pi epsilon0 in units of e/(V*ang)
            suitable_mat_prop[strain]["ndoping"] = 0. #no doping allowed for now
            suitable_mat_prop[strain]["val_offset"] = 0.
            suitable_mat_prop[strain]["pol_charge"] = matprop[strain]["polarization_charge"]*1./b_lat * 1e8 #in units of e/cm
            #energy extrema specifics
            for key in valence_keys:
                valenergies.append(matprop[strain][key]["energy"]-matprop[strain]["vacuum_level"]+matprop["0.00"]["vacuum_level"]) #need to align bands by subtracting the vacuum level
                valmass.append(matprop[strain][key]["conf_mass"])
                valdosmass.append(matprop[strain][key]["DOS_mass"])
                valdegeneracy.append(matprop[strain][key]["degeneracy"])
            for key in conduction_keys:
                condenergies.append(matprop[strain][key]["energy"]-matprop[strain]["vacuum_level"]+matprop["0.00"]["vacuum_level"]) #need to align bands by subtracting the vacuum level
                condmass.append(matprop[strain][key]["conf_mass"])
                conddosmass.append(matprop[strain][key]["DOS_mass"])
                conddegeneracy.append(matprop[strain][key]["degeneracy"])
            #building the dictionary
            suitable_mat_prop[strain]["valenergies"] = valenergies
            suitable_mat_prop[strain]["condenergies"] = condenergies
            suitable_mat_prop[strain]["valmass"] = valmass
            suitable_mat_prop[strain]["condmass"] = condmass
            suitable_mat_prop[strain]["valdosmass"] = valdosmass
            suitable_mat_prop[strain]["conddosmass"] = conddosmass
            suitable_mat_prop[strain]["valdegeneracy"] = valdegeneracy
            suitable_mat_prop[strain]["conddegeneracy"] = conddegeneracy
    except KeyError:
        raise ValidationError("Error: The material properties json file (%s) is not rightly organized: some dictionary keys might be missing" %json_matprop)
    #creating the delta doping layers that will appear at interfaces
    #looping over the strains, add a n and p delta doping for each
    for strain in list(suitable_mat_prop.keys()):
        #creating n_deltadoping
        suitable_mat_prop[strain+"_n_deltadoping"] = {}
        suitable_mat_prop[strain+"_n_deltadoping"]["ndoping"] = suitable_mat_prop[strain]["pol_charge"]
        #creating p_deltadoping
        suitable_mat_prop[strain+"_p_deltadoping"] = {}
        suitable_mat_prop[strain+"_p_deltadoping"]["ndoping"] = -suitable_mat_prop[strain]["pol_charge"]
    return suitable_mat_prop, a_lat, b_lat
def update_mat_prop_for_new_strain(mat_prop, new_strain, plot_fit = False):
    """
    For a given strain, the relevant information such as masses and energies are
    interpolated (polynomial fits vs strain) and the material properties
    dictionary is updated in place.

    mat_prop: dict as built by read_input_materials_properties; for each strain
        key it also contains "<strain>_n_deltadoping" and "<strain>_p_deltadoping"
        entries, which is why the number of actual strain entries is len(mat_prop)//3.
    new_strain: strain value (float) for which interpolated properties are added.
    plot_fit: if True, show the fits interactively with matplotlib.

    Returns None; mat_prop is modified in place (new strain entry plus its two
    delta-doping entries).
    """
    # Matplotlib is imported lazily so the dependency is only needed when plotting
    if plot_fit:
        import matplotlib.pyplot as plt
    # initializing arrays containing valence and conduction properties
    valenergies = n.zeros((len(mat_prop)//3,len(mat_prop["0.00"]["valenergies"]))) # len(mat_prop)/3 because for each strain there are 2 delta doping
    condenergies = n.zeros((len(mat_prop)//3,len(mat_prop["0.00"]["condenergies"])))
    valmass = n.zeros((len(mat_prop)//3,len(mat_prop["0.00"]["valmass"])))
    condmass = n.zeros((len(mat_prop)//3,len(mat_prop["0.00"]["condmass"])))
    valdosmass = n.zeros((len(mat_prop)//3,len(mat_prop["0.00"]["valdosmass"])))
    conddosmass = n.zeros((len(mat_prop)//3,len(mat_prop["0.00"]["conddosmass"])))
    pol_charge = n.zeros(len(mat_prop)//3)
    alpha = n.zeros(len(mat_prop)//3)
    # the strain array vs which every physical quantity will be fitted (not sorted yet)
    strain = n.zeros(len(mat_prop)//3)
    # looping on the material properties versus strain
    i = 0
    for key in list(mat_prop.keys()):
        if "doping" not in key:
            strain[i] = float(key)
            valenergies[i,:] = mat_prop[key]["valenergies"]
            condenergies[i,:] = mat_prop[key]["condenergies"]
            valmass[i,:] = mat_prop[key]["valmass"]
            condmass[i,:] = mat_prop[key]["condmass"]
            valdosmass[i,:] = mat_prop[key]["valdosmass"]
            conddosmass[i,:] = mat_prop[key]["conddosmass"]
            pol_charge[i] = mat_prop[key]["pol_charge"]
            alpha[i] = mat_prop[key]["alpha"]
            i += 1
    # sorting the arrays so that they correspond to strain in increasing order
    order = n.argsort(strain)
    strain = strain[order]
    valenergies = valenergies[order,:]
    condenergies = condenergies[order,:]
    valmass = valmass[order,:]
    condmass = condmass[order,:]
    valdosmass = valdosmass[order,:]
    conddosmass = conddosmass[order,:]
    pol_charge = pol_charge[order]
    alpha = alpha[order]
    # the future dictionary entry for the new strain
    new_strain_prop = {}
    #actually fitting with optional visualization
    #polarization charge (cubic fit in strain)
    p = n.polyfit(strain,pol_charge,3)
    new_strain_prop["pol_charge"] = p[0]*new_strain**3 + p[1]*new_strain**2 + p[2]*new_strain + p[3]
    if plot_fit:
        plt.ion()
        plt.figure(1)
        plt.title("Polarization charge fit")
        plt.plot(strain,pol_charge,'kx')
        x = n.arange(0.,max(max(strain),new_strain)+0.001,0.001)
        y = p[0]*x**3 + p[1]*x**2 + p[2]*x + p[3]
        plt.plot(x,y )
        plt.xlabel("Strain")
    # alpha (cubic fit in strain)
    p = n.polyfit(strain,alpha,3)
    new_strain_prop["alpha"] = p[0]*new_strain**3 + p[1]*new_strain**2 + p[2]*new_strain + p[3]
    if plot_fit:
        plt.figure(2)
        plt.title("Alpha fit")
        plt.plot(strain,alpha,'kx')
        x = n.arange(0.,max(max(strain),new_strain)+0.001,0.001)
        y = p[0]*x**3 + p[1]*x**2 + p[2]*x + p[3]
        plt.plot(x,y )
        plt.xlabel("Strain")
    # valence band energies (quadratic fit for each band)
    new_valenergies = []
    if plot_fit:
        plt.figure(3)
        plt.title("Valence energies fit")
        plt.xlabel("Strain")
    for j in range(len(mat_prop["0.00"]["valenergies"])):
        p = n.polyfit(strain,valenergies[:,j],2)
        new_valenergies.append( p[0]*new_strain**2 + p[1]*new_strain + p[2])
        if plot_fit:
            plt.plot(strain,valenergies[:,j],'kx')
            x = n.arange(0.,max(max(strain),new_strain)+0.001,0.001)
            y = p[0]*x**2 + p[1]*x + p[2]
            plt.plot(x,y)
    new_strain_prop["valenergies"] = new_valenergies
    # conduction band energies (quadratic fit for each band)
    new_condenergies = []
    if plot_fit:
        plt.figure(4)
        plt.title("Conduction energies fit")
        plt.xlabel("Strain")
    for j in range(len(mat_prop["0.00"]["condenergies"])):
        p = n.polyfit(strain,condenergies[:,j],2)
        new_condenergies.append( p[0]*new_strain**2 + p[1]*new_strain + p[2])
        if plot_fit:
            plt.plot(strain,condenergies[:,j],'kx')
            x = n.arange(0.,max(max(strain),new_strain)+0.001,0.001)
            y = p[0]*x**2 + p[1]*x + p[2]
            plt.plot(x,y)
    new_strain_prop["condenergies"] = new_condenergies
    # valence band confinement masses
    # note: the fit is done on the INVERSE mass, then inverted back
    new_valmass = []
    if plot_fit:
        plt.figure(5)
        plt.title("Valence confinement (inverse) mass fit")
        plt.xlabel("Strain")
    for j in range(len(mat_prop["0.00"]["valmass"])):
        p = n.polyfit(strain,1./valmass[:,j],2)
        new_valmass.append( 1./(p[0]*new_strain**2 + p[1]*new_strain + p[2]))
        if plot_fit:
            plt.plot(strain,1./valmass[:,j],'kx')
            x = n.arange(0.,max(max(strain),new_strain)+0.001,0.001)
            y = (p[0]*x**2 + p[1]*x + p[2])
            plt.plot(x,y)
    new_strain_prop["valmass"] = new_valmass
    # conduction band confinement masses (inverse-mass fit as above)
    new_condmass = []
    if plot_fit:
        plt.figure(6)
        plt.title("Conduction confinement (inverse) mass fit")
        plt.xlabel("Strain")
    for j in range(len(mat_prop["0.00"]["condmass"])):
        p = n.polyfit(strain,1./condmass[:,j],2)
        new_condmass.append( 1./(p[0]*new_strain**2 + p[1]*new_strain + p[2]))
        if plot_fit:
            plt.plot(strain,1./condmass[:,j],'kx')
            x = n.arange(0.,max(max(strain),new_strain)+0.001,0.001)
            y = p[0]*x**2 + p[1]*x + p[2]
            plt.plot(x,y)
    new_strain_prop["condmass"] = new_condmass
    # valence band DOS masses (inverse-mass fit as above)
    new_valdosmass = []
    if plot_fit:
        plt.figure(7)
        plt.title("Valence DOS (inverse) mass fit")
        plt.xlabel("Strain")
    for j in range(len(mat_prop["0.00"]["valdosmass"])):
        p = n.polyfit(strain,1./valdosmass[:,j],2)
        new_valdosmass.append( 1./(p[0]*new_strain**2 + p[1]*new_strain + p[2]))
        if plot_fit:
            plt.plot(strain,1./valdosmass[:,j],'kx')
            x = n.arange(0.,max(max(strain),new_strain)+0.001,0.001)
            y = p[0]*x**2 + p[1]*x + p[2]
            plt.plot(x,y)
    new_strain_prop["valdosmass"] = new_valdosmass
    # conduction band DOS masses (inverse-mass fit as above)
    new_conddosmass = []
    if plot_fit:
        plt.figure(8)
        plt.title("Conduction DOS (inverse) mass fit")
        plt.xlabel("Strain")
    for j in range(len(mat_prop["0.00"]["conddosmass"])):
        p = n.polyfit(strain,1./conddosmass[:,j],2)
        new_conddosmass.append(1./(p[0]*new_strain**2 + p[1]*new_strain + p[2]))
        if plot_fit:
            plt.plot(strain,1./conddosmass[:,j],'kx')
            x = n.arange(0.,max(max(strain),new_strain)+0.001,0.001)
            y = p[0]*x**2 + p[1]*x + p[2]
            plt.plot(x,y)
    new_strain_prop["conddosmass"] = new_conddosmass
    new_strain_prop["ndoping"] = 0. #no doping allowed for now
    new_strain_prop["val_offset"] = 0.
    #plotting the fits if need be
    if plot_fit:
        plt.show()
        #print >> sys.stderr, ("Press any key (+ENTER) to close all plots and continue")
        #raw_input()
        #plt.close("all")
        plt.ioff()
    # If the requested strain is already tabulated, reuse the tabulated data directly
    # and discard the fit (it should never happen as it should be tested beforehand)
    for key in list(mat_prop.keys()):
        if "doping" not in key:
            if float(key) == new_strain:
                mat_prop[str(new_strain)] = mat_prop[key]
                mat_prop[str(new_strain)+"_n_deltadoping"] = {}
                mat_prop[str(new_strain)+"_n_deltadoping"]["ndoping"] = mat_prop[key]["pol_charge"]
                mat_prop[str(new_strain)+"_p_deltadoping"] = {}
                mat_prop[str(new_strain)+"_p_deltadoping"]["ndoping"] = -mat_prop[key]["pol_charge"]
                return None
    # updating the dictionary passed as an argument
    mat_prop[str(new_strain)] = new_strain_prop
    # also adding delta dopings
    # creating n_deltadoping
    mat_prop[str(new_strain)+"_n_deltadoping"] = {}
    mat_prop[str(new_strain)+"_n_deltadoping"]["ndoping"] = mat_prop[str(new_strain)]["pol_charge"]
    # creating p_deltadoping
    mat_prop[str(new_strain)+"_p_deltadoping"] = {}
    mat_prop[str(new_strain)+"_p_deltadoping"]["ndoping"] = -mat_prop[str(new_strain)]["pol_charge"]
class Slab(object):
    """
    General class to represent a system composed of multiple stripes.

    Holds the real-space grid, band profiles, masses, doping and the
    electrostatic potential, plus the bookkeeping required by the
    self-consistent convergence loop (see update_V).
    """
    def __init__(self, layers, materials_properties, delta_x, smearing, beta_eV):
        """
        Build the slab from a list of layers.

        layers: sequence of pairs; for each layer l, l[0] is a key into
            materials_properties and l[1] is the layer width in angstrom
            (0. gives a delta-doping layer). The first layer must not be a
            delta-doping layer (checked below, in the doping loop).
        materials_properties: dict of per-material property dicts.
        delta_x: grid spacing in angstrom.
        smearing: if True, occupations use smearing at inverse temperature beta_eV.
        beta_eV: inverse temperature in 1/eV, forwarded to the density routines.
        """
        if len(layers) == 0:
            raise ValueError("layers must have at least one layer")
        # starting (maximum) mixing fraction applied to each potential update;
        # adapted at run time in update_V
        self.max_step_size = 0.8
        self._slope = 0. # in V/ang
        self.delta_x = delta_x
        self.smearing=smearing
        self.beta_eV=beta_eV
        total_length = 0.
        # I create a list where each element is a tuple in the format
        # (full_material_properties, end_x_ang)
        # (the first layer starts at x=0.)
        self._layers_range = []
        xgrid_pieces = []
        materials = []
        for layer_idx, l in enumerate(layers):
            nintervals = int(n.ceil(l[1]/self.delta_x))
            # I want always the same grid spacing, so I possibly increase (slightly) the
            # thickness
            layer_length = nintervals * self.delta_x
            grid_piece = n.linspace(total_length, total_length+layer_length,nintervals+1)
            # Note: the following works also for l[1]==0 (delta dopings), returning a length-1
            # array that then becomes array([]) [i.e., an empty list] when stripping the first
            # point with [1:]
            # The first layer should not be a delta-doping layer: this is checked later.
            if layer_idx == 0:
                # For the first layer I do not remove the first point
                xgrid_pieces.append(grid_piece)
            else:
                # I remove the first point, it is in the previous piece
                xgrid_pieces.append(grid_piece[1:])
            materials.append(materials_properties[l[0]])
            total_length += layer_length
        self._xgrid = n.concatenate(xgrid_pieces)
        # A check that all steps are equal; I calculate the error of each step w.r.t. the
        # expected step delta-x
        steps_error = (self._xgrid[1:] - self._xgrid[:-1]) - self.delta_x
        if abs(steps_error).max() > 1.e-10:
            raise AssertionError("The steps should be all equal to delta_x, but they aren't! "
                                 "max is: {}".format(abs(steps_error).max()))
        # Polarizability (the 'alpha' material property, per grid point)
        self._alpha = n.zeros(len(self._xgrid))
        last_idx = 0
        for mat, grid_piece in zip(materials, xgrid_pieces):
            if len(grid_piece)!=0: # skip delta dopings
                self._alpha[last_idx:last_idx+len(grid_piece)] = mat['alpha']
            last_idx += len(grid_piece)
        # Conduction band and valence band profiles, in eV
        # effective masses and DOS masses, in units of the free electron mass
        # degeneracy is unitless and integer
        # Need the total number of valence and conduction extrema
        self._ncond_min = len(materials[0]['condenergies'])
        self._nval_max = len(materials[0]['valenergies'])
        # since all materials are required to have the same degeneracy for corresponding energy maxima
        self._conddegen = materials[0]['conddegeneracy']
        self._valdegen = materials[0]['valdegeneracy']
        self._condband = n.zeros((self._ncond_min,len(self._xgrid)))
        self._valband = n.zeros((self._nval_max,len(self._xgrid)))
        self._valmass = n.zeros((self._nval_max,len(self._xgrid)))
        self._condmass = n.zeros((self._ncond_min,len(self._xgrid)))
        self._valdosmass = n.zeros((self._nval_max,len(self._xgrid)))
        self._conddosmass = n.zeros((self._ncond_min,len(self._xgrid)))
        # conduction band
        for i in range(self._ncond_min):
            last_idx = 0
            for mat, grid_piece in zip(materials, xgrid_pieces):
                if len(grid_piece)!=0: # skip delta dopings
                    self._condband[i,last_idx:last_idx+len(grid_piece)] = mat['val_offset'] + mat['condenergies'][i]
                    self._condmass[i,last_idx:last_idx+len(grid_piece)] = mat['condmass'][i]
                    self._conddosmass[i,last_idx:last_idx+len(grid_piece)] = mat['conddosmass'][i]
                last_idx += len(grid_piece)
        # valence band
        for j in range(self._nval_max):
            last_idx = 0
            for mat, grid_piece in zip(materials, xgrid_pieces):
                if len(grid_piece)!=0: # skip delta dopings
                    self._valband[j,last_idx:last_idx+len(grid_piece)] = mat['val_offset'] + mat['valenergies'][j]
                    self._valmass[j,last_idx:last_idx+len(grid_piece)] = mat['valmass'][j]
                    self._valdosmass[j,last_idx:last_idx+len(grid_piece)] = mat['valdosmass'][j]
                last_idx += len(grid_piece)
        # Doping; I also count total free holes and free electrons. In e/cm
        self._doping = n.zeros(len(self._xgrid))
        last_idx = 0
        for mat, grid_piece in zip(materials, xgrid_pieces):
            if len(grid_piece)!=0: # Finite thickness layer
                # The doping is distributed over the len(grid_piece) lines
                self._doping[last_idx:last_idx+len(grid_piece)] = mat['ndoping']/len(grid_piece)
            else: # it is a delta doping
                if last_idx == 0:
                    raise ValueError("You cannot put a delta doping layer as the very first layer")
                self._doping[last_idx-1] += mat['ndoping']
            last_idx += len(grid_piece)
        # electrostatic potential in eV
        self._V = n.zeros(len(self._xgrid))
        # memory for converging algorithm
        self._old_V = n.zeros(len(self._xgrid))
        # last two potential variations at the reference point, used to detect oscillations
        self._indicator = n.zeros(2)
        self._counter = 0
        self._subcounter = 0
        # last two Fermi energies, used to detect Fermi-level convergence
        self._Ef = n.zeros(2)
        self._E_count = 0
        self._max_ind = 0
        self._finalV_check = 0.0
        self._finalE_check = 0.0
        # Add an attribute that tells how much time is spent doing different tasks for optimization purposes
        # The tasks of interest are solving the Poisson equation, computing states (i.e. Hamiltonian diagonalization), finding the Fermi energy
        # All times in seconds
        self._time_Poisson = 0.0
        self._time_Fermi = 0.0
        self._time_Hami = 0.0
    def get_computing_times(self):
        """Return the (Poisson, Fermi, Hamiltonian) accumulated times, in seconds."""
        return self._time_Poisson, self._time_Fermi, self._time_Hami
    def update_computing_times(self,process,value):
        """
        Add `value` seconds to the time spent on a numerical process (string):
        either "Fermi", "Hami" or "Poisson". Unknown names only trigger a warning.
        """
        if process == "Fermi":
            self._time_Fermi += value
        elif process == "Poisson":
            self._time_Poisson += value
        elif process == "Hami":
            self._time_Hami += value
        else:
            warnings.warn(process+" is not a valid process for performance monitoring")
    def get_required_net_free_charge(self):
        """
        Return the net free charge needed (in e/cm) to compensate the doping.
        """
        # Minus sign because if I have a n-doping (self._doping > 0), I need a negative
        # charge to compensate
        return -n.sum(self._doping)
    def update_V(self,c_states, v_states, e_fermi, zero_elfield=True):
        """
        Self-consistency step: recompute the charge density from the states and
        the Fermi energy, solve Poisson for a new potential and mix it into
        self._V with an adaptive step to damp oscillations.

        Both free_el_density and free_holes_density should be positive.
        zero_elfield: if True, subtract the average electric field so that the
            potential has the same value at both ends (periodic setup).
        Return True upon convergence.
        """
        self._counter += 1
        self._Ef[self._counter%2] = e_fermi
        max_iteration = 5000
        V_conv_threshold = 2.e-4
        Ef_conv_threshold = 1.e-6 # if fermi energy does not change from one iteration to another, converged
        free_electrons_density = get_electron_density(c_states, e_fermi, self._conddosmass, self.npoints, self._conddegen, smearing=self.smearing, beta_eV=self.beta_eV)
        free_holes_density = get_hole_density(v_states, e_fermi, self._valdosmass, self.npoints, self._valdegen, smearing=self.smearing, beta_eV=self.beta_eV)
        total_charge_density = self._doping - free_electrons_density + free_holes_density
        #updating the time spent solving Poisson
        start_t = time.time()
        if is_periodic:
            new_V = -1.*spw.periodic_recursive_poisson(self._xgrid,total_charge_density,self._alpha,max_iteration)[0] # minus 1 because function returns electrostatic potential, not energy
        else:
            new_V = -1.*spw.nonperiodic_recursive_poisson(self._xgrid,total_charge_density,self._alpha,max_iteration)[0]
        end_t = time.time()
        self._time_Poisson += end_t-start_t
        new_V -= n.mean(new_V)
        if self._counter == 1:
            # reference grid point (potential maximum of the first iteration)
            # used from now on to monitor convergence/oscillations
            self._max_ind = n.argmax(new_V)
        if zero_elfield:
            # in V/ang
            self._slope = (new_V[-1] - new_V[0])/(self._xgrid[-1] - self._xgrid[0])
            new_V -= self._slope*self._xgrid
        else:
            self._slope = 0.
        #we want to avoid oscillations in converging algorithm
        #one has to stock new_V for comparison purposes
        self._indicator[self._counter%2] = new_V[self._max_ind]-self._V[self._max_ind] #need to keep track of oscillations when converging
        if self._indicator[0]*self._indicator[1] < 0:
            #oscillation, take the middle ground
            if not reduce_stdout_output:
                print("OSCILLATION")
            self._subcounter = 0
            self.max_step_size *= 0.1
            if self.max_step_size <= 0.1*V_conv_threshold:
                self.max_step_size = 0.1*V_conv_threshold
        else:
            self._subcounter += 1
            # after 20 oscillation-free updates, cautiously increase the mixing step
            if self._subcounter == 20:
                self.max_step_size *= 1.4
                self._subcounter = 0
        step = new_V - self._V
        current_max_step_size = n.max(n.abs(step))
        #convergence check
        self._over = False
        if current_max_step_size < V_conv_threshold:
            # re-solve Poisson once more to confirm the convergence is genuine
            start_t = time.time()
            if is_periodic:
                check_V = -1.*spw.periodic_recursive_poisson(self._xgrid,total_charge_density,self._alpha,max_iteration)[0] # minus 1 because function returns electrostatic potential, not energy
            else:
                check_V = -1.*spw.nonperiodic_recursive_poisson(self._xgrid,total_charge_density,self._alpha,max_iteration)[0]
            end_t = time.time()
            self._time_Poisson += end_t-start_t
            check_val = n.max(n.abs(check_V-self._V))
            self._indicator[self._counter%2] = check_V[self._max_ind]-self._V[self._max_ind]
            if check_val > 5*V_conv_threshold:
                # false convergence: keep iterating with the check potential
                current_max_step_size = check_val
                step = check_V - self._V
                #self.max_step_size *= 0.5
            else:
                self._over = True
        if not reduce_stdout_output:
            print('convergence param:', current_max_step_size)
        if current_max_step_size != 0 and self._over == False:
            #self._V += step * min(self.max_step_size, current_max_step_size) #/ (current_max_step_size)
            self._V += step * self.max_step_size
            self._old_V = self._V.copy()
        elif current_max_step_size == 0 and self._over == False:
            self._V = new_V
            self._old_V = self._V.copy()
        elif self._over == True:
            self._V = self._old_V
            if not reduce_stdout_output:
                print("Final convergence parameter: ", check_val)
            self._finalV_check = check_val
        if n.abs(self._Ef[0]-self._Ef[1]) <= Ef_conv_threshold and current_max_step_size < 10*V_conv_threshold:
            self._E_count += 1
            if self._E_count == 4:
                if not reduce_stdout_output:
                    print("Convergence of Fermi energy: ", n.abs(self._Ef[0]-self._Ef[1]))
                current_max_step_size = 0.1*V_conv_threshold # forced convergence if that happens 4 times in a row
                if is_periodic:
                    check_V = -1.*spw.periodic_recursive_poisson(self._xgrid,total_charge_density,self._alpha,max_iteration)[0] # minus 1 because function returns electrostatic potential, not energy
                else:
                    check_V = -1.*spw.nonperiodic_recursive_poisson(self._xgrid,total_charge_density,self._alpha,max_iteration)[0]
                check_val = n.max(n.abs(check_V-self._V))
                if not reduce_stdout_output:
                    print("Final convergence parameter: ", check_val)
                self._finalV_check = check_val
        else:
            self._E_count = 0
        self._finalE_check = n.abs(self._Ef[0]-self._Ef[1])
        return current_max_step_size < V_conv_threshold
    def get_V(self):
        """
        Return the electrostatic potential in eV
        """
        return self._V
    def get_xgrid(self):
        """
        Return the x grid, in angstrom
        """
        return self._xgrid
    @property
    def npoints(self):
        """Number of grid points."""
        return len(self._xgrid)
    def get_conduction_profile(self):
        """
        Return the conduction band profile in eV
        (one row per conduction minimum, with the electrostatic potential added).
        """
        # need dimensions to agree
        V = n.zeros((self._ncond_min,len(self._xgrid)))
        for i in range(self._ncond_min):
            V[i,:] = self._V
        return self._condband + V
    def get_valence_profile(self):
        """
        Return the valence band profile in eV
        (one row per valence maximum, with the electrostatic potential added).
        """
        # need dimensions to agree
        V = n.zeros((self._nval_max,len(self._xgrid)))
        for i in range(self._nval_max):
            V[i,:] = self._V
        return self._valband + V
    def get_band_gap(self):
        """
        Scans valence and conduction profiles in order to find the absolute conduction minimum
        and the absolute valence band maximum and returns the difference
        """
        conduction = self.get_conduction_profile()
        valence = self.get_valence_profile()
        cond_min = n.min(conduction)
        val_max = n.max(valence)
        return cond_min - val_max
def MV_smearing(E,beta,mu):
    """
    Marzari-Vanderbilt (cold) smearing occupation function, to be integrated in
    conjunction with the density of states.

    Evaluates 0.5*erf(-x - 1/sqrt(2)) + exp(-(x + 1/sqrt(2))**2)/sqrt(2*pi) + 0.5
    with x = beta*(E - mu). Be careful: units of beta, E and mu must be consistent.
    """
    x = beta * (E - mu)
    inv_sqrt2 = 1. / n.sqrt(2.)
    erf_term = 0.5 * scipy.special.erf(-x - inv_sqrt2)
    gauss_term = n.exp(-(x + inv_sqrt2) ** 2) / n.sqrt(2. * n.pi)
    return erf_term + gauss_term + 0.5
def get_electron_density(c_states, e_fermi, c_mass_array, npoints,degen, smearing, beta_eV, band_contribution = False, avg_eff_mass = False):
    """
    Fill subbands with a 1D dos, at T=0 (for now; T>0 requires numerical integration)

    The first index of c_states must be the state energy in eV
    e_fermi in eV
    c_mass_array is the array with the conduction DOS mass (on the grid)
    in units of the free electron mass
    degen is the array containing the degeneracy of each conduction band minimum
    smearing: if True, occupations follow MV_smearing at inverse temperature beta_eV
    band_contribution: if True, also return the per-minimum contributions
    Return linear electron density, in e/cm
    if avg_eff_mass, an array containing the average effective mass for each state and the state energy is returned
    """
    # The 1D DOS is (including the factor of 2 for the spin):
    # g(E) = sqrt(2 * effmass)/(pi*hbar) * 1/sqrt(E-E0)
    # where effmass is the band effective mass, E0 is the band edge.
    #
    # I rewrite it as g(E) = D * sqrt(meff/m0) / sqrt(E-E0)
    # where (meff/m0) is simply the effective mass in units of the electron free mass,
    # and D=sqrt(2) / pi / sqrt(HBAR2OVERM0) and will be in units of 1/ang/sqrt(eV)
    D = n.sqrt(2.) / n.pi / n.sqrt(HBAR2OVERM0)
    el_density = n.zeros(npoints)
    contrib = n.zeros(len(degen))
    avg_mass = n.zeros((1,3))
    # All the conduction band minima have to be taken into account with the appropriate degeneracy
    for j in range(len(degen)): #number of minima
        deg = degen[j]
        if j > 0 and avg_eff_mass == True:
            avg_mass = n.append(avg_mass,[[0.,0.,0.]],axis=0) # so that bands are separated by a line of zeros
        for state_energy, state in c_states[j]:
            energy_range = 20. # eV, to be very safe
            #if state_energy > e_fermi:
            #    continue
            square_norm = sum((state)**2)
            # I average the inverse of the effective mass
            # Both state and c_mass_array should have the same length
            # NOT SURE: square_norm or sqrt(square_norm) ? AUGU: I'm pretty sure it's square_norm and I changed it
            averaged_eff_mass = 1./(sum(state**2 / c_mass_array[j]) / square_norm)
            if avg_eff_mass == True:
                avg_mass = n.append(avg_mass,[[state_energy,state_energy-e_fermi,averaged_eff_mass]],axis=0)
            if not smearing and state_energy < e_fermi:
                # At T=0, integrating from E0 to Ef the DOS gives
                # D * sqrt(meff) * int_E0^Ef 1/(sqrt(E-E0)) dE =
                # D * sqrt(meff) * 2 * sqrt(Ef-E0) [if Ef>E0, else zero]
                el_density += deg * D * n.sqrt(averaged_eff_mass) * 2. * n.sqrt(e_fermi - state_energy) * (
                    state**2 / square_norm)
                contrib[j] += n.sum(deg * D * n.sqrt(averaged_eff_mass) * 2. * n.sqrt(e_fermi - state_energy) * (
                    state**2 / square_norm))
            elif smearing and state_energy-e_fermi < energy_range: # more than enough margin
                # Need to numerically integrate the density of state times the occupation given by MV_smearing
                # to compute the integral, one uses the trick explained there to avoid singularities: http://math.stackexchange.com/questions/1351734/numerical-integration-of-divergent-function
                n_int = 10000. # number of intervals
                # change of variable E = state_energy + t**2
                dt = n.sqrt(energy_range)/n_int
                t = n.arange(0,n.sqrt(energy_range),dt)
                #plt.figure(1)
                #plt.plot(energy,deg * D * sqrt(averaged_eff_mass) * 1./n.sqrt(energy-state_energy),"b")
                #plt.plot(energy,MV_smearing(energy,beta_eV,e_fermi),"r")
                #plt.plot(energy,g_times_f,"k")
                #plt.title("%s"%e_fermi)
                #plt.show()
                temp_dens = 2*deg * D * n.sqrt(averaged_eff_mass) * n.trapz(MV_smearing(state_energy+t**2,beta_eV,e_fermi),dx=dt) * (state**2 / square_norm)
                el_density += temp_dens
                contrib[j] += n.sum(temp_dens)
    # Up to now, el_density is in 1/ang; we want it in 1/cm
    if band_contribution == False:
        return el_density * 1.e8
    elif band_contribution == True and avg_eff_mass == False:
        return el_density * 1.e8, contrib * 1.e8
    else:
        return el_density * 1.e8, contrib * 1.e8, avg_mass
def update_doping(materials_props,material,doping):
    """
    Set the linear doping density of one material entry, in place.

    materials_props: dictionary containing the materials properties (should be a
        copy of the original one if the original must be preserved).
    material: key (string) of materials_props whose doping must be updated.
    doping: updated doping value in e/cm; must be negative for holes.
    """
    entry = materials_props[material]
    entry['ndoping'] = doping
def get_energy_gap(c_states,v_states,c_degen,v_degen):
    """
    Return the difference between the lowest conduction electron state and the
    highest valence hole state.

    c_states/v_states: per-band lists of (energy, wavefunction) tuples.
    c_degen/v_degen: degeneracy lists; only their lengths are used here, to
        know how many bands to scan.
    """
    # gather all state energies of the scanned conduction/valence bands
    cond_energies = n.array([s[0] for i in range(len(c_degen)) for s in c_states[i]])
    val_energies = n.array([s[0] for j in range(len(v_degen)) for s in v_states[j]])
    return n.min(cond_energies) - n.max(val_energies)
def get_hole_density(v_states, e_fermi, v_mass_array, npoints, degen, smearing, beta_eV, band_contribution = False, avg_eff_mass = False):
    """
    For all documentation and comments, see get_electron_density.

    v_mass_array should contain the DOS mass of holes, i.e., positive
    degen is the array containing the degeneracy of each valence band maximum
    Return a positive number (linear hole density in e/cm)
    """
    D = n.sqrt(2.) / n.pi / n.sqrt(HBAR2OVERM0)
    h_density = n.zeros(npoints)
    avg_mass = n.zeros((1,3))
    contrib = n.zeros(len(degen))
    for j in range(len(degen)):
        deg = degen[j]
        if j > 0 and avg_eff_mass == True:
            avg_mass = n.append(avg_mass,[[0.,0.,0.]],axis=0) # so that bands are separated by a line of zeros
        for state_energy, state in v_states[j]:
            energy_range = 20. # eV to be extra safe
            # Note that here the sign is opposite w.r.t. the conduction case
            #if state_energy < e_fermi:
            #    continue
            square_norm = sum((state)**2)
            averaged_eff_mass = 1./(sum(state**2 / v_mass_array[j]) / square_norm)
            if avg_eff_mass == True:
                avg_mass = n.append(avg_mass,[[state_energy,state_energy-e_fermi,averaged_eff_mass]],axis=0)
            if not smearing and state_energy > e_fermi:
                # T=0 filling, mirror of the electron case (occupied for E > Ef)
                h_density += deg * D * n.sqrt(averaged_eff_mass) * 2. * n.sqrt(state_energy - e_fermi) * (
                    state**2 / square_norm)
                contrib[j] += n.sum(deg * D * n.sqrt(averaged_eff_mass) * 2. * n.sqrt(state_energy - e_fermi) * (
                    state**2 / square_norm))
            elif smearing and e_fermi-state_energy < energy_range:
                n_int = 10000. # number of intervals
                # change of variable E = state_energy + t**2
                # (the smearing argument is mirrored through e_fermi for holes)
                dt = n.sqrt(energy_range)/n_int
                t = n.arange(0,n.sqrt(energy_range),dt)
                temp_dens = 2*deg * D * n.sqrt(averaged_eff_mass) * n.trapz(MV_smearing(2*e_fermi-state_energy+t**2,beta_eV,e_fermi),dx=dt) * (state**2 / square_norm)
                h_density += temp_dens
                contrib[j] += n.sum(temp_dens)
    # to keep a trace of old work
    """
    n_int = 100000. # 500 intervals
    delta_E = energy_range/n_int
    energy = n.arange(state_energy-energy_range,state_energy,delta_E)
    energy -= 1.e-8 # to avoid dividing by zero
    g_times_f = deg * D * sqrt(averaged_eff_mass) * 1./n.sqrt(state_energy-energy) * MV_smearing(2*e_fermi-energy,beta_eV,e_fermi)
    #plt.figure(2)
    #plt.plot(energy,deg * D * sqrt(averaged_eff_mass) * 1./n.sqrt(state_energy-energy),"b")
    #plt.plot(energy,MV_smearing(2*e_fermi-energy,beta_eV,e_fermi),"r")
    #plt.plot(energy,g_times_f,"k")
    #plt.title("%s"%e_fermi)
    #plt.show()
    h_density += n.trapz(g_times_f,dx=delta_E) * (state**2 / square_norm)
    contrib[j] += sum(n.trapz(g_times_f,dx=delta_E) * (state**2 / square_norm))
    """
    if band_contribution == False:
        return h_density * 1.e8
    elif band_contribution == True and avg_eff_mass == False:
        return h_density * 1.e8, contrib * 1.e8
    else:
        return h_density * 1.e8, contrib * 1.e8, avg_mass
def find_efermi(c_states, v_states, c_mass_array, v_mass_array, c_degen, v_degen, npoints, target_net_free_charge, smearing, beta_eV):
    """
    Find the Fermi energy by bisection and return it, in eV.

    Pass the conduction and valence states (and energies),
    the conduction and valence DOS mass arrays,
    and the net charge to be used as a target (positive means that we want more holes than electrons).
    Raises ValueError if the target charge is not bracketed by the initial window.
    """
    more_energy = 1. # in eV; how far to extend the bisection window beyond the extreme states
    # TODO: we may want a precision also on the charge, not only on the energy
    # DONE
    energy_precision = 1.e-8 # eV
    charge_precision = 1. # out of the 10**7 it is still very precise
    all_states_energies = n.zeros(1)
    for i in range(len(c_degen)):
        all_states_energies = n.append(all_states_energies,[s[0] for s in c_states[i]])
    for j in range(len(v_degen)):
        all_states_energies = n.append(all_states_energies, [s[0] for s in v_states[j]])
    # drop the dummy leading zero used to seed the array
    all_states_energies = n.delete(all_states_energies,0)
    # I set the boundaries for the bisection algorithm; I could in principle
    # also extend these ranges
    ef_l = all_states_energies.min()-more_energy
    ef_r = all_states_energies.max()+more_energy
    electrons_l = n.sum(get_electron_density(c_states, ef_l, c_mass_array,npoints,c_degen, smearing=smearing, beta_eV=beta_eV))
    holes_l = n.sum(get_hole_density(v_states, ef_l, v_mass_array, npoints,v_degen, smearing=smearing, beta_eV=beta_eV))
    electrons_r = n.sum(get_electron_density(c_states, ef_r, c_mass_array, npoints,c_degen, smearing=smearing, beta_eV=beta_eV))
    holes_r = n.sum(get_hole_density(v_states, ef_r, v_mass_array,npoints,v_degen, smearing=smearing, beta_eV=beta_eV))
    net_l = holes_l - electrons_l
    net_r = holes_r - electrons_r
    if (net_l - target_net_free_charge) * (
        net_r - target_net_free_charge) > 0:
        raise ValueError("The net charge at the boundary of the bisection algorithm "
                         "range has the same sign! {}, {}, target={}; ef_l={}, ef_r={}".format(
                             net_l, net_r, target_net_free_charge,ef_l, ef_r))
    absdiff = 10*charge_precision
    en_diff = ef_r - ef_l
    while en_diff > energy_precision:
        ef = (ef_l + ef_r)/2.
        electrons = n.sum(get_electron_density(c_states, ef, c_mass_array,npoints,c_degen, smearing=smearing, beta_eV=beta_eV))
        holes = n.sum(get_hole_density(v_states, ef, v_mass_array,npoints,v_degen, smearing=smearing, beta_eV=beta_eV))
        net = holes - electrons
        absdiff = abs(net - target_net_free_charge)
        if (net - target_net_free_charge) * (
            net_r - target_net_free_charge) > 0:
            # midpoint is on the same side as the right edge: shrink from the right
            net_r = net
            ef_r = ef
        else:
            net_l = net
            ef_l = ef
        en_diff = ef_r - ef_l
        #check on the charge precision
        # NOTE(review): if the charge never reaches charge_precision this forces
        # extra iterations and could in principle loop indefinitely -- confirm
        if absdiff > charge_precision:
            #need to go over the loop once more
            en_diff = 10*energy_precision
    return ef #(ef_r + ef_l)/2. it changes everything since the bissection algorithm worked for ef and not for (ef_r + ef_l)/2.
def get_conduction_states_p(slab):
    """
    Diagonalize the conduction-band Hamiltonian(s) of a periodic slab.

    This function diagonalizes the matrix using the fact that the matrix is banded.
    This provides a huge speedup w.r.t. a dense matrix.
    NOTE: _p stands for the periodic version.

    The periodic (cyclic tridiagonal) matrix is turned into a banded one by a
    reordering of the grid points (even points first, odd points reversed), so
    that scipy.linalg.eig_banded can be used.

    BUGFIX: the swap mask was previously wrapped in a list
    (``[j2_list < j1_list]``); indexing with a list containing a boolean array
    is deprecated and removed in modern NumPy. The boolean mask is now used
    directly.

    :param slab: object exposing npoints, delta_x, _ncond_min, _condmass and
        get_conduction_profile() (see the Slab class elsewhere in this file).
    :return: a list with one entry per conduction-band minimum; each entry is a
        tuple of (energy_eV, wavefunction_on_grid) pairs.
    """
    more_bands_energy = 0.2  # How many eV to go above the top of the conduction band
    # The list containing the tuples that will be returned
    res = []
    # Ham matrix in eV; I store only the first two diagonals
    # H[2,:] is the diagonal,
    # H[1,1:] is the first upper diagonal,
    # H[0,2:] is the second upper diagonal
    # Many possible energy minima in the conduction band => slab._ncond_min
    # similar matrices, all contained in the same H
    H = n.zeros((slab._ncond_min, 3, slab.npoints))
    # Array mapping the old ordering of the grid to the new one; used as an
    # index when rebuilding the wavefunctions
    rangearray = n.arange(slab.npoints)
    reordering = n.zeros(slab.npoints, dtype=int)
    reordering[rangearray <= (slab.npoints-1)//2] = (2 * rangearray)[rangearray <= (slab.npoints-1)//2]
    reordering[rangearray > (slab.npoints-1)//2] = (
        2 * slab.npoints - 2 * rangearray - 1)[rangearray > (slab.npoints-1)//2]
    # Given the index i in the not-reordered matrix, for the matrix element (i-1)--(i),
    # return the indices in the 2 upper lines of the reordered, banded matrix.
    # Notes:
    # * the (i-1)--(i) matrix element is the i-th element of the superdiagonal array
    #   defined later
    # * applying reordering, the (i-1)--(i) element should occupy the j1--j2
    #   matrix element; j1, j2 are sorted so that j1 < j2 to fill the upper diagonal
    #   (NOTE! for a complex hermitian matrix the reordering would also require
    #   complex conjugation; here the matrix is real and symmetric)
    # * j2-j1 can only be 1 or 2 if the reordering is correct
    # * if j2-j1==1 fill H[1,:], else H[0,:]: the first index is 2-(j2-j1)
    # * the second index, given how the superdiagonals are stored
    #   (H[0,2:] and H[1,1:], see docs of scipy.linalg.eig_banded), is simply j2
    # * the element 1-N (the one that gives periodicity) is stored in
    #   superdiagonal[0]: for that element j1_list picks reordering[-1], as wanted.
    j1_list = reordering[n.arange(slab.npoints)-1]
    j2_list = reordering[n.arange(slab.npoints)]
    temp_list = j2_list.copy()
    # Boolean mask of the entries where j1/j2 must be exchanged
    indices_to_swap = j2_list < j1_list
    j2_list[indices_to_swap] = j1_list[indices_to_swap]
    j1_list[indices_to_swap] = temp_list[indices_to_swap]
    reordering_superdiagonals = (2 - (j2_list - j1_list), j2_list)
    # A check on the consistency of the reordering
    if any((j2_list - j1_list) < 1) or any((j2_list - j1_list) > 2):
        raise AssertionError("Wrong indices difference in j1/2_lists (cond)!")
    # Loop over all conduction band minima and construct their Hamiltonian
    for i in range(slab._ncond_min):
        # On the diagonal, the potential: sum of the electrostatic potential +
        # conduction band profile, obtained with get_conduction_profile()
        H[i, 2, :][reordering] = slab.get_conduction_profile()[i, :]
        min_energy = H[i, 2, :].min()
        max_energy = H[i, 2, :].max() + more_bands_energy
        # The zero-th element contains the periodicity term
        mass_differences = n.zeros(slab.npoints)
        mass_differences[0] = (slab._condmass[i, 0] + slab._condmass[i, -1])/2.
        mass_differences[1:] = (slab._condmass[i, 1:] + slab._condmass[i, :-1])/2.
        # Finite difference method for 2nd derivatives. With an effective mass the
        # equation is d/dx (1/m(x) d/dx psi(x)), i.e. 1/m(x) goes inside the first
        # derivative. Diagonal coefficients: mass_differences[k] is the average
        # mass between points k-1 and k.
        H[i, 2, :][reordering] += (HBAR2OVERM0/2.) / slab.delta_x**2 / mass_differences
        H[i, 2, :][reordering] += (HBAR2OVERM0/2.) / slab.delta_x**2 / mass_differences[
            (n.arange(slab.npoints)+1) % slab.npoints]
        # NOTE: the matrix is symmetric only if the mesh step is identical everywhere.
        # superdiagonal[0] holds the corner element (n-1, 0) enforcing periodicity.
        superdiagonal = - (HBAR2OVERM0/2.) / slab.delta_x**2 / mass_differences
        H[i][reordering_superdiagonals] = superdiagonal
        w, v = scipy.linalg.eig_banded(H[i], lower=False, eigvals_only=False,
                                       overwrite_a_band=True,  # May enhance performance
                                       select='v', select_range=(min_energy, max_energy),
                                       max_ev=slab.npoints)  # worst case: all eigenvalues
        result_to_reorder = list(zip(w, v.T))
        # Undo the grid reordering on each eigenvector before returning
        res.append(tuple((w, v[reordering]) for w, v in result_to_reorder))
    return res
def get_valence_states_p(slab):
    """
    Diagonalize the valence-band Hamiltonian(s) of a periodic slab.

    See the discussion in get_conduction_states_p; only the differences are
    commented here (negative effective mass, selection window extended *below*
    the band instead of above).
    NOTE: _p stands for the periodic version.

    BUGFIX: the swap mask was previously wrapped in a list
    (``[j2_list < j1_list]``); indexing with a list containing a boolean array
    is deprecated and removed in modern NumPy. The boolean mask is now used
    directly.

    :param slab: object exposing npoints, delta_x, _nval_max, _valmass and
        get_valence_profile().
    :return: a list with one entry per valence-band maximum; each entry is a
        tuple of (energy_eV, wavefunction_on_grid) pairs.
    """
    more_bands_energy = 0.2  # How many eV to go below the bottom of the valence band
    # The list containing the tuples that will be returned
    res = []
    H = n.zeros((slab._nval_max, 3, slab.npoints))
    rangearray = n.arange(slab.npoints)
    reordering = n.zeros(slab.npoints, dtype=int)
    reordering[rangearray <= (slab.npoints-1)//2] = (2 * rangearray)[rangearray <= (slab.npoints-1)//2]
    reordering[rangearray > (slab.npoints-1)//2] = (
        2 * slab.npoints - 2 * rangearray - 1)[rangearray > (slab.npoints-1)//2]
    j1_list = reordering[n.arange(slab.npoints)-1]
    j2_list = reordering[n.arange(slab.npoints)]
    temp_list = j2_list.copy()
    # Boolean mask of the entries where j1/j2 must be exchanged
    indices_to_swap = j2_list < j1_list
    j2_list[indices_to_swap] = j1_list[indices_to_swap]
    j1_list[indices_to_swap] = temp_list[indices_to_swap]
    reordering_superdiagonals = (2 - (j2_list - j1_list), j2_list)
    if any((j2_list - j1_list) < 1) or any((j2_list - j1_list) > 2):
        raise AssertionError("Wrong indices difference in j1/2_lists (valence)!")
    for i in range(slab._nval_max):
        H[i, 2, :][reordering] = slab.get_valence_profile()[i, :]
        # Selection window goes below the band minimum for valence states
        min_energy = H[i, 2, :].min() - more_bands_energy
        max_energy = H[i, 2, :].max()
        # In the valence bands, it is as if the mass is negative
        mass_differences = n.zeros(slab.npoints)
        mass_differences[0] = -(slab._valmass[i, 0] + slab._valmass[i, -1])/2.
        mass_differences[1:] = -(slab._valmass[i, 1:] + slab._valmass[i, :-1])/2.
        H[i, 2, :][reordering] += (HBAR2OVERM0/2.) / slab.delta_x**2 / mass_differences
        H[i, 2, :][reordering] += (HBAR2OVERM0/2.) / slab.delta_x**2 / mass_differences[
            (n.arange(slab.npoints)+1) % slab.npoints]
        superdiagonal = - (HBAR2OVERM0/2.) / slab.delta_x**2 / mass_differences
        H[i][reordering_superdiagonals] = superdiagonal
        w, v = scipy.linalg.eig_banded(H[i], lower=False, eigvals_only=False,
                                       overwrite_a_band=True,  # May enhance performance
                                       select='v', select_range=(min_energy, max_energy),
                                       max_ev=slab.npoints)  # worst case: all eigenvalues
        result_to_reorder = list(zip(w, v.T))
        # Undo the grid reordering on each eigenvector before returning
        res.append(tuple((w, v[reordering]) for w, v in result_to_reorder))
    return res
def get_conduction_states_np(slab):
    """
    Compute the conduction-band eigenstates of a non-periodic slab.

    The Hamiltonian is tridiagonal, so it is diagonalized in banded storage
    with scipy.linalg.eig_banded — a huge speedup w.r.t. a dense matrix.
    NOTE: _np stands for the non-periodic version.

    :param slab: object exposing npoints, delta_x, _ncond_min, _condmass and
        get_conduction_profile().
    :return: a list with one entry per conduction-band minimum; each entry is a
        list of (energy_eV, wavefunction_on_grid) pairs.
    """
    extra_window = 0.2  # eV added above the band maximum when selecting eigenvalues
    states_per_minimum = []
    # Banded Hamiltonians in eV, one per conduction minimum:
    # row 1 is the diagonal, row 0 (from column 1) the first upper diagonal.
    hamiltonians = n.zeros((slab._ncond_min, 2, slab.npoints))
    for band in range(slab._ncond_min):
        # Diagonal: electrostatic potential + conduction band profile,
        # obtained with get_conduction_profile()
        hamiltonians[band, 1, :] = slab.get_conduction_profile()[band]
        e_lo = hamiltonians[band, 1, :].min()
        e_hi = hamiltonians[band, 1, :].max() + extra_window
        # Effective mass averaged between neighbouring grid points
        midpoint_mass = (slab._condmass[band, 1:] + slab._condmass[band, :-1]) / 2.
        kinetic = (HBAR2OVERM0 / 2.) / slab.delta_x**2 / midpoint_mass
        # Finite-difference form of d/dx (1/m(x) d/dx psi): each bond
        # contributes +kinetic to both of its endpoints on the diagonal...
        hamiltonians[band, 1, 1:] += kinetic
        hamiltonians[band, 1, :-1] += kinetic
        # ...and -kinetic on the superdiagonal, stored from column 1 onwards
        # as required by eig_banded (the upper diagonal has one element less).
        # NOTE: the matrix is symmetric only for a uniform mesh step.
        hamiltonians[band, 0, 1:] = -kinetic
        w, v = scipy.linalg.eig_banded(hamiltonians[band], lower=False, eigvals_only=False,
                                       overwrite_a_band=True,  # May enhance performance
                                       select='v', select_range=(e_lo, e_hi),
                                       max_ev=slab.npoints)  # worst case: all eigenvalues
        states_per_minimum.append(list(zip(w, v.T)))
    return states_per_minimum
def get_valence_states_np(slab):
    """
    Compute the valence-band eigenstates of a non-periodic slab.

    See get_conduction_states_np for the banded-matrix construction; the two
    differences are the sign of the effective mass (holes) and the eigenvalue
    window, which extends *below* the band instead of above.
    NOTE: _np stands for the non-periodic version.

    :param slab: object exposing npoints, delta_x, _nval_max, _valmass and
        get_valence_profile().
    :return: a list with one entry per valence-band maximum; each entry is a
        list of (energy_eV, wavefunction_on_grid) pairs.
    """
    extra_window = 0.2  # eV added below the band minimum when selecting eigenvalues
    states_per_maximum = []
    # Banded Hamiltonians in eV: row 1 diagonal, row 0 (from col 1) superdiagonal
    hamiltonians = n.zeros((slab._nval_max, 2, slab.npoints))
    for band in range(slab._nval_max):
        hamiltonians[band, 1, :] = slab.get_valence_profile()[band]
        e_lo = hamiltonians[band, 1, :].min() - extra_window
        e_hi = hamiltonians[band, 1, :].max()
        # In the valence bands, it is as if the mass is negative
        midpoint_mass = -(slab._valmass[band, 1:] + slab._valmass[band, :-1]) / 2.
        kinetic = (HBAR2OVERM0 / 2.) / slab.delta_x**2 / midpoint_mass
        hamiltonians[band, 1, 1:] += kinetic
        hamiltonians[band, 1, :-1] += kinetic
        hamiltonians[band, 0, 1:] = -kinetic
        w, v = scipy.linalg.eig_banded(hamiltonians[band], lower=False, eigvals_only=False,
                                       overwrite_a_band=True,  # May enhance performance
                                       select='v', select_range=(e_lo, e_hi),
                                       max_ev=slab.npoints)  # worst case: all eigenvalues
        states_per_maximum.append(list(zip(w, v.T)))
    return states_per_maximum
def run_simulation(slab, max_steps, nb_states, smearing, beta_eV, b_lat, delta_x, callback=None):
    """
    Launch the self-consistent Schroedinger--Poisson calculation and return all
    the relevant data.

    :param slab: the Slab instance to solve.
    :param max_steps: maximum number of self-consistency iterations.
    :param nb_states: number of states per band to include in the output arrays.
    :param smearing: occupation smearing scheme, forwarded to the density routines.
    :param beta_eV: inverse temperature 1/(kB*T) in 1/eV.
    :param b_lat: in-plane lattice parameter (ang), used to convert densities to e/b.
    :param delta_x: grid step in ang (returned in the summary row).
    :param callback: optional; called as callback(step=..., e_fermi=..., final=False)
        after the Fermi energy of each iteration is found.
    :return: a 3-element list:
        [0] summary row [iterations, V check, E_F check, delta_x, E_F,
            tot electrons (1/cm), tot holes (1/cm), tot electrons (1/b), tot holes (1/b)],
        [1] [matrix of band/state profiles for plotting, bands dictionary],
        [2] [x grid, electron density (1/cm^2), hole density (1/cm^2)].
    :raise InternalError: if the loop ends without convergence.

    NOTE(review): depends on the module-level globals `reduce_stdout_output`
    and `is_periodic` being set elsewhere in this file — confirm before reuse.
    """
    it = 0
    # I don't want the terminal to be flooded with every single step
    converged = False
    # Ctrl-C interrupts the loop but still reports the last completed state.
    # NOTE(review): an interrupt before the first iteration completes would
    # leave c_states/v_states/e_fermi unbound below — confirm intended usage.
    try:
        for iteration in range(max_steps):
            if not reduce_stdout_output:
                print('starting iteration {}...'.format(iteration))
            it += 1
            start_t = time.time()
            # Periodic and non-periodic slabs use different banded solvers
            if is_periodic:
                c_states = get_conduction_states_p(slab)
                v_states = get_valence_states_p(slab)
            else:
                c_states = get_conduction_states_np(slab)
                v_states = get_valence_states_np(slab)
            end_t = time.time()
            slab.update_computing_times("Hami", end_t-start_t)
            start_t = time.time()
            # Bisection on the Fermi level to reach the required net free charge
            e_fermi = find_efermi(c_states, v_states, slab._conddosmass, slab._valdosmass,
                                  slab._conddegen, slab._valdegen, npoints=slab.npoints,
                                  target_net_free_charge=slab.get_required_net_free_charge(),
                                  smearing=smearing, beta_eV=beta_eV)
            end_t = time.time()
            slab.update_computing_times("Fermi", end_t-start_t)
            if not reduce_stdout_output:
                print(iteration, e_fermi)
            if callback is not None:
                callback(step=iteration+1, e_fermi=e_fermi, final=False)
            # For periodic systems the average electric field must vanish
            zero_elfield = is_periodic
            converged = slab.update_V(c_states, v_states, e_fermi, zero_elfield=zero_elfield)
            # slab._slope is in V/ang; the factor to bring it to V/cm
            if not reduce_stdout_output:
                print('Added E field: {} V/cm '.format(slab._slope * 1.e8))
            if converged:
                break
    except KeyboardInterrupt:
        pass
    if not converged:
        raise InternalError("****** ERROR! Calculation not converged ********")
    #should print all results at each step in files
    # Final densities with per-band contributions and average effective masses
    el_density, el_contrib, avg_cond_mass = get_electron_density(c_states, e_fermi, slab._conddosmass, slab.npoints, slab._conddegen, smearing=smearing, beta_eV=beta_eV, band_contribution = True, avg_eff_mass = True)
    hole_density, hole_contrib, avg_val_mass = get_hole_density(v_states, e_fermi, slab._valdosmass, slab.npoints, slab._valdegen, smearing=smearing, beta_eV=beta_eV, band_contribution = True, avg_eff_mass =True)
    tot_el_dens = n.sum(el_density)
    tot_hole_dens = n.sum(hole_density)
    tot_el_dens_2 = tot_el_dens * b_lat*1.e-8  # in units of e/b
    tot_hole_dens_2 = tot_hole_dens * b_lat*1.e-8  # in units of e/b
    # Per-point densities converted to areal densities (1/cm^2)
    el_density_per_cm2 = el_density * 1./(slab.delta_x*1.e-8)
    hole_density_per_cm2 = hole_density * 1./(slab.delta_x*1.e-8)
    # contribution in % (normalized only when the total is nonzero)
    if tot_el_dens != 0:
        el_contrib /= tot_el_dens
    if tot_hole_dens != 0:
        hole_contrib /= tot_hole_dens
    #print "El contrib: ", el_contrib
    #print "Hole contrib: ", hole_contrib
    bands = {
        'x': slab.get_xgrid(),
        'e_fermi': e_fermi,
        'conduction': [],
        'valence': []
    }
    # 'matrix' collects all plottable columns: x grid, Fermi level, then for
    # each band its profile followed by (state profile, state energy) pairs
    matrix = [slab.get_xgrid(), n.ones(slab.npoints) * e_fermi]
    zoom_factor = 10.  # To plot eigenstates
    # adding the potential profile of each band
    for k in range(slab._ncond_min):
        this_edge = {}
        i = 0
        matrix.append(slab.get_conduction_profile()[k])
        this_edge['profile'] = slab.get_conduction_profile()[k]
        this_edge['states'] = []
        for w, v in c_states[k]:
            # only the first nb_states lowest conduction states are reported
            if i >= nb_states:
                break
            this_edge['states'].append(
                {
                    'energy': w,
                    'profile': w + zoom_factor * n.abs(v)**2,
                })
            matrix.append(w + zoom_factor * n.abs(v)**2)
            matrix.append(n.ones(slab.npoints) * w)
            i += 1
        bands['conduction'].append(this_edge)
    for l in range(slab._nval_max):
        this_edge = {}
        j = 0
        matrix.append(slab.get_valence_profile()[l])
        this_edge['profile'] = slab.get_valence_profile()[l]
        this_edge['states'] = []
        # valence states are iterated from the top of the band downwards
        for w, v in v_states[l][::-1]:
            if j >= nb_states:
                break
            # Plot valence bands upside down
            this_edge['states'].append(
                {
                    'energy': w,
                    'profile': w - zoom_factor * n.abs(v)**2,
                })
            matrix.append(w - zoom_factor * n.abs(v)**2)
            matrix.append(n.ones(slab.npoints) * w)
            j += 1
        bands['valence'].append(this_edge)
    # Keeping the user aware of the time spent on each main task
    print("Total time spent solving Poisson equation: ", slab.get_computing_times()[0], " (s)")
    print("Total time spent finding the Fermi level(s): ", slab.get_computing_times()[1], " (s)")
    print("Total time spent computing the electronic states: ", slab.get_computing_times()[2], " (s)")
    return [[it, slab._finalV_check, slab._finalE_check, delta_x, e_fermi, tot_el_dens, tot_hole_dens, tot_el_dens_2, tot_hole_dens_2],
            [matrix, bands],
            [slab.get_xgrid(), el_density_per_cm2, hole_density_per_cm2]]
def main_run(matprop, input_dict, callback=None):
    """
    Main loop to run the code.

    :param matprop: dictionary with the content of the matprop json used in input.
    :param input_dict: dictionary with the content of the input_dict json used in input.
    :param callback: optional callable forwarded to run_simulation, called as
        callback(step=..., e_fermi=..., final=...) during self-consistency.
    :return: {'out_files': {label: {'filename', 'description', 'data', 'header',
        [optionally 'metadata']}}} describing the files to write.
    :raise ValidationError: if the input dictionaries are inconsistent.

    BUGFIXES w.r.t. the previous version:
    * the "at least three entries" error interpolated the global `calc_input`,
      which only exists when running as a script (NameError when used as a library);
    * the final error message advertised 'single-point' while the accepted
      value is 'single_point'.
    """
    out_files = {}
    mat_properties, a_lat, b_lat = read_input_materials_properties(matprop)
    # Check and set smearing
    smearing = input_dict["smearing"]
    KbT = input_dict["KbT"]*13.6  # from Ry to eV, most often this precision is sufficient...
    beta_eV = 1./KbT
    # calculation type
    calculation_type = input_dict["calculation"]
    # max number of steps for each self-consistent cycle
    max_steps = input_dict["max_iteration"]
    # do we plot the fits for new strains?
    plot_fit = input_dict["plot_fit"]
    if calculation_type == "single_point":
        print("\n")
        print("Starting single-point calculation...")
        # updating the mat_properties dict in case there are non-registered strains
        # in the setup; the fit is plotted (at most) only for the first strain
        plotting_the_fit = bool(plot_fit)
        for key in input_dict["setup"]:
            update_mat_prop_for_new_strain(mat_prop = mat_properties, new_strain = input_dict["setup"][key]["strain"], plot_fit = plotting_the_fit)
            plotting_the_fit = False
        # constructing the slab
        if len(input_dict["setup"]) < 3:
            # BUGFIX: do not reference the script-only global 'calc_input' here
            raise ValidationError("Error: There must be at least three entries in the setup subdictionary of the calculation input json file")
        # the first and last layers must be entered manually; each slab is
        # delimited by zero-width delta-doping layers carrying the polar charge
        layers_p = []
        layers_p.append((str(input_dict["setup"]["slab1"]["strain"]), input_dict["setup"]["slab1"]["width"]))
        if input_dict["setup"]["slab1"]["polarization"] == "positive":
            layers_p.append((str(input_dict["setup"]["slab1"]["strain"])+"_p_deltadoping", 0.0))
        else:
            layers_p.append((str(input_dict["setup"]["slab1"]["strain"])+"_n_deltadoping", 0.0))
        for i in range(len(input_dict["setup"])-2):
            slab_key = "slab"+str(i+2)
            # delta doping before the layer (sign opposite to the layer's polarization)
            if input_dict["setup"][slab_key]["polarization"] == "positive":
                layers_p.append((str(input_dict["setup"][slab_key]["strain"])+"_n_deltadoping", 0.0))
            else:
                layers_p.append((str(input_dict["setup"][slab_key]["strain"])+"_p_deltadoping", 0.0))
            # actual layer
            layers_p.append((str(input_dict["setup"][slab_key]["strain"]), input_dict["setup"][slab_key]["width"]))
            # delta doping after the layer
            if input_dict["setup"][slab_key]["polarization"] == "positive":
                layers_p.append((str(input_dict["setup"][slab_key]["strain"])+"_p_deltadoping", 0.0))
            else:
                layers_p.append((str(input_dict["setup"][slab_key]["strain"])+"_n_deltadoping", 0.0))
        # the last slab
        slab_key = "slab"+str(len(input_dict["setup"]))
        if input_dict["setup"][slab_key]["polarization"] == "positive":
            layers_p.append((str(input_dict["setup"][slab_key]["strain"])+"_n_deltadoping", 0.0))
        else:
            layers_p.append((str(input_dict["setup"][slab_key]["strain"])+"_p_deltadoping", 0.0))
        layers_p.append((str(input_dict["setup"][slab_key]["strain"]), input_dict["setup"][slab_key]["width"]))
        delta_x = input_dict["delta_x"]
        slab = Slab(layers_p, materials_properties=mat_properties, delta_x = delta_x,
                    smearing=smearing, beta_eV=beta_eV)
        res = run_simulation(slab = slab, max_steps = max_steps, nb_states = input_dict["nb_of_states_per_band"], smearing=smearing, beta_eV=beta_eV, b_lat=b_lat, delta_x=delta_x, callback=callback)
        print("\n")
        print("Convergence reached after %s iterations." % res[0][0])
        print("Voltage convergence parameter: %s" % res[0][1])
        print("Fermi energy convergence parameter: %s" % res[0][2])
        print("Total number of free electrons: %s (1/cm)" % res[0][5])
        print("Total number of free holes: %s (1/cm)" % res[0][6])
        print("\n")
        # writing results into files
        out_files['general_info'] = {
            'filename': 'general_info.txt',
            'description': "General information",
            'data': n.atleast_2d(res[0]),
            'header': '1: Nb of iterations, 2: Voltage conv param, 3: Fermi energy conv param, 4: delta_x (ang), 5: Fermi energy (eV), 6: Total free electron density (1/cm), 7: Total free holes density (1/cm), 8: Total free electron density (1/b), 9: Total free holes density (1/b)'
        }
        out_files['band_data'] = {
            'filename': 'band_data.txt',
            'description': "Band data",
            'data': n.transpose(res[1][0]),
            'header': "1: position (ang), 2: Fermi energy (eV), the rest is organized as follow for each band:\n First column is the potential profile of the band (in eV). The next pairs of columns are the wave function and the energy (eV) of the band's states",
            'metadata': res[1][1],
        }
        out_files['density_profile'] = {
            'filename': 'density_profile.txt',
            'description': "Free-carrier density",
            'data': n.transpose(res[2]),
            'header': "1: position (ang), 2: Free electrons density (1/cm^2), 3: Free holes density (1/cm^2) "
        }
    elif calculation_type == "map":
        print("\n")
        print("Starting map calculation...")
        print("\n")
        # build arrays containing the strains and the widths (the +0.5*step
        # makes the upper bound inclusive despite float rounding)
        strain_array = n.arange(input_dict["strain"]["min_strain"], input_dict["strain"]["max_strain"]+0.5*input_dict["strain"]["strain_step"], input_dict["strain"]["strain_step"])
        width_array = n.arange(input_dict["width"]["min_width"], input_dict["width"]["max_width"]+0.5*input_dict["width"]["width_step"], input_dict["width"]["width_step"])
        # updating the materials properties for the new strains
        plotting_the_fit = bool(plot_fit)
        for strain in strain_array:
            update_mat_prop_for_new_strain(mat_prop = mat_properties, new_strain = strain, plot_fit = plotting_the_fit)
            plotting_the_fit = False
        # one slab per (strain, width) pair; each run contributes one row
        data = n.zeros((strain_array.size*width_array.size, 12))
        i = 0
        for strain in strain_array:
            for width in width_array:
                layers_p = [("0.00", width/2.), (str(strain)+"_n_deltadoping", 0.0), (str(strain), width*(1.+strain)),
                            (str(strain)+"_p_deltadoping", 0.0), ("0.00", width/2.)]
                # set delta_x so that the same number of steps are taken for each situation
                delta_x = width * (2. + strain)/input_dict["nb_of_steps"]
                if delta_x > input_dict["upper_delta_x_limit"]:
                    delta_x = input_dict["upper_delta_x_limit"]
                slab = Slab(layers_p, materials_properties=mat_properties, delta_x = delta_x,
                            smearing=smearing, beta_eV=beta_eV)
                print("Starting single-point calculation with strain = %s and width = %s ..." % (strain, width))
                res = run_simulation(slab = slab, max_steps = max_steps, nb_states = 10,  # arbitrary nb of states since not of interest here
                                     smearing = smearing, beta_eV=beta_eV, b_lat=b_lat, delta_x=delta_x,
                                     callback=callback)
                print("\n")
                data[i] = [strain, width, res[0][7], res[0][8], width*(1.+strain), res[0][0], res[0][1], res[0][2], res[0][3], res[0][4], res[0][5], res[0][6]]
                i += 1
        # saving the data in a file
        out_files['map_data'] = {
            'filename': 'map_data.txt',
            'description': "Map data",
            'data': data,
            'header': "1: strain, 2: width of unstrained slab (ang), 3: total electron density (1/b), 4: total holes density (1/b) 5: width of the strained slab (ang), 6: nb of iterations, 7: potential conv param, 8: Fermi energy conv param, 9: delta_x (ang), 10: Fermi energy (eV) 11: total electron density (1/cm), 12: total holes density (1/cm)"
        }
    else:
        # BUGFIX: the accepted keyword is 'single_point' (underscore), as tested above
        raise ValidationError("The Calculation must either be 'single_point' or 'map'")
    return {'out_files': out_files}
if __name__ == "__main__":
    # Command-line entry point: read the two JSON inputs, run the calculation,
    # and write every produced output file into the requested folder.
    try:
        json_matprop = sys.argv[1]
        calc_input = sys.argv[2]
    except IndexError:
        print("Pass two parameters, containing the JSON files with the materials properties and the calculation input", file=sys.stderr)
        sys.exit(1)
    try:
        with open(json_matprop) as f:
            matprop = json.load(f)
    except IOError:
        print("Error: The material properties json file (%s) passed as argument does not exist" % json_matprop, file=sys.stderr)
        sys.exit(1)
    except ValueError:
        print("Error: The material properties json file (%s) is probably not a valid JSON file" % json_matprop, file=sys.stderr)
        sys.exit(1)
    try:
        with open(calc_input) as f:
            input_dict = json.load(f)
    except IOError:
        print("Error: The calculation input json file (%s) passed as argument does not exist" % calc_input, file=sys.stderr)
        sys.exit(1)
    except ValueError:
        # BUGFIX: this message previously referred to the *material properties*
        # file (and interpolated json_matprop) although the failure is in the
        # calculation input file
        print("Error: The calculation input json file (%s) is probably not a valid JSON file" % calc_input, file=sys.stderr)
        sys.exit(1)
    try:
        retval = main_run(matprop=matprop, input_dict=input_dict)
    except ValidationError as e:
        print("Validation error: {}".format(e), file=sys.stderr)
        sys.exit(2)
    except InternalError as e:
        print("Error: {}".format(e), file=sys.stderr)
        sys.exit(3)
    out_files = retval['out_files']
    # folder where the output data will be printed
    out_folder = input_dict["out_dir"]
    # BUGFIX: test the path directly instead of scanning os.listdir(os.curdir),
    # which failed for nested or absolute out_dir values
    if not os.path.isdir(out_folder):
        os.mkdir(out_folder)
    # writing results into files ('label' avoids shadowing meaningful names)
    for label in sorted(out_files):
        filedata = out_files[label]
        fname = os.path.join(out_folder, filedata['filename'])
        n.savetxt(fname, filedata['data'],
                  header=filedata['header'])
        # BUGFIX: 'fname' already contains the folder; the old message printed
        # the folder twice (e.g. "out/out/general_info.txt")
        print("{} saved in '{}'".format(
            filedata['description'],
            fname,
        ))
    # Disclaimer
    print("#"*72)
    print("# If you use this code in your work, please cite the following paper:")
    print("# ")
    print("# A. Bussy, G. Pizzi, M. Gibertini, Strain-induced polar discontinuities")
    print("# in 2D materials from combined first-principles and Schroedinger-Poisson")
    print("# simulations, Phys. Rev. B 96, 165438 (2017).")
    print("# DOI: 10.1103/PhysRevB.96.165438")
    print("#"*72)
| Python |
2D | giovannipizzi/SchrPoisson-2DMaterials | code/schrpoisson_wire.f90 | .f90 | 18,289 | 554 | !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!! This file contains the main logic for calculating the potential due to a single
!! wire and to an array of wires. These functions (that are the most expensive part
!! of the code) are then called from python.
!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!
!! If you use this code in your work, please cite the following paper:
!!
!! A. Bussy, G. Pizzi, M. Gibertini, Strain-induced polar discontinuities
!! in 2D materials from combined first-principles and Schroedinger-Poisson
!! simulations, Phys. Rev. B 96, 165438 (2017).
!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!
!! This code is released under a MIT license, see LICENSE.txt file in the main
!! folder of the code repository, hosted on GitHub at
!! https://github.com/giovannipizzi/schrpoisson-2dmaterials
!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!
!! FORTRAN code version: 1.0.0
!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!==================================================================
!==================NONPERIODIC FUNCTIONS===========================
subroutine v_wire_nonperiodic(x,lambda,potential)
  !! Unscreened electrostatic potential (in V) of a single infinite wire
  !! (running along y), evaluated at a distance x from the wire:
  !!   V(x) = -lambda/(2*pi*eps0) * log(x)   (up to an additive constant)
  double precision, intent(in) :: x
  !! distance (ang) between the wire and the evaluation point
  double precision, intent(in) :: lambda
  !! line charge density of the wire in e/cm
  double precision, intent(out) :: potential
  !! resulting potential in V
  double precision :: PI, EPSILON0
  parameter(PI = 3.1415926535897932d0)
  parameter(EPSILON0 = 0.0055263496) ! vacuum permittivity in e/(V*ang)
  double precision :: lambda_per_ang
  !! line charge density converted to e/ang
  ! the formula needs the density per angstrom, not per cm
  lambda_per_ang = lambda*1.E-8
  potential = -lambda_per_ang/(2.*PI*EPSILON0) * LOG(x)
end subroutine v_wire_nonperiodic
subroutine v_array_wire_nonperiodic(coord,x,rho_in,potential,n)
  !! this subroutine computes the potential at a point from a charge density
  !! (array of wires with different charges)
  !!
  !! BUGFIX: 'potential' has intent(out) and is therefore UNDEFINED on entry
  !! per the Fortran standard; the accumulation below used to rely on the
  !! caller having pre-zeroed the actual argument. It is now explicitly
  !! initialized here (callers that still pre-zero it are unaffected).
  double precision, intent(in) :: coord
  !! coordinate of point of interest along x axis (in ang)
  double precision, intent(in), dimension(n) :: x
  !! the array of grid points in ang
  double precision, intent(in), dimension(n) :: rho_in
  !! the charge density in e/cm defined at each grid point
  double precision, intent(out) :: potential
  !! the potential in V at point coord due to the array of wires
  integer, intent(in) :: n
  !! dimension of all arrays
  double precision :: potr
  !! temporary storage of the potential on the right (for integration)
  double precision :: potl
  !! temporary storage of the potential on the left (for integration)
  integer :: i
  potential = 0.d0
  ! numerical integration to get the potential: trapezoidal rule between
  ! consecutive grid points, hence the loop runs up to n-1.
  ! NOTE(review): no *(x(2)-x(1)) weight is applied (it was deliberately
  ! commented out in the original) — rho_in appears to already carry the
  ! grid weight; confirm against the callers.
  do i=1,n-1
    call v_wire_nonperiodic(ABS(coord-x(i)),rho_in(i),potl)
    call v_wire_nonperiodic(ABS(coord-x(i+1)),rho_in(i+1),potr)
    potential = potential + 0.5*(potr+potl)
  end do
end subroutine v_array_wire_nonperiodic
subroutine full_v_array_wire_nonperiodic(x,rho_in,potential,n)
  !! this subroutine computes the potential at each grid point from a charge
  !! density (array of wires with different charges)
  double precision, intent(in), dimension(n) :: x
  !! the array of grid points in ang
  double precision, intent(in), dimension(n) :: rho_in
  !! the charge density in e/cm defined at each grid point
  double precision, intent(out), dimension(n) :: potential
  !! the potential in V at each grid point due to the array of wires
  integer, intent(in) :: n
  !! dimension of all arrays
  double precision :: coord
  !! evaluation coordinate rebuilt for each grid point
  double precision :: pot ! for temporary storage of the potential
  integer :: i
  do i=1,n
    pot = 0.0 ! the called subroutine accumulates onto its 'potential' argument
    ! NOTE(review): the coordinate is rebuilt as (i-1)*dx, which assumes a
    ! uniform grid starting at x(1)=0; using x(i) would be more general —
    ! confirm the grid convention with the callers.
    coord = (i-1)*(x(2)-x(1))
    call v_array_wire_nonperiodic(coord,x,rho_in,pot,n)
    potential(i) = pot
  end do
end subroutine full_v_array_wire_nonperiodic
subroutine nonperiodic_recursive_poisson(x,rho_in,alpha,max_iteration,potential,rho_out,n)
  !! Solves the screened Poisson equation for 2D materials with a
  !! spatially-varying polarizability, by recursively updating the induced
  !! (polarization) charge density until self-consistency.
  !!
  !! Fixes w.r.t. the previous version:
  !!  * 'indicator' was read in the oscillation test before both of its
  !!    entries had ever been written (uninitialized-variable access on the
  !!    first iterations); it is now zero-initialized.
  !!  * the unused local 'diff_old' was removed.
  double precision, intent(in), dimension(n) :: x
  !! the array containing the position of all grid points in ang
  double precision, intent(in), dimension(n) :: rho_in
  !! initial (free) charge density in e/cm defined at each grid point
  double precision, intent(in), dimension(n) :: alpha
  !! the value of the polarizability at each grid point in e/V
  integer, intent(in) :: max_iteration
  !! the maximum amount of allowed iterations for the recursive Poisson algorithm
  double precision, intent(out), dimension(n) :: potential
  !! the solution of recursive Poisson in V defined at each grid point
  double precision, intent(out), dimension(n) :: rho_out
  !! the total charge density including polarization charges in e/cm at each grid point
  integer, intent(in) :: n
  !! dimension of all grid-related arrays
  integer :: i
  integer :: counter, subcounter
  integer, dimension(1) :: max_ind
  double precision :: beta ! mixing factor between old and new induced density
  double precision :: Vleft
  double precision, dimension(2) :: indicator ! last two density changes, to detect oscillations
  double precision :: h ! step size in ang
  double precision :: diff ! relative density change, for the convergence test
  double precision, dimension(n) :: old_rho ! induced charge density before algorithm step
  double precision, dimension(n) :: new_rho
  double precision, dimension(n) :: dV ! the space derivative of the potential at each point
  double precision, dimension(n) :: aldV ! product of alpha and dV at each grid point
  double precision, dimension(n) :: V ! temporary storage of the potential
  logical :: conv
  ! RESOLUTION
  h = x(2)-x(1)
  ! First, the unscreened potential due to rho_in; it is evaluated half a
  ! grid step away from the grid points, mainly to avoid things such as LOG(0)
  call full_v_array_wire_nonperiodic(x-0.5*h,rho_in,V,n)
  ! derivative of V at the grid points by finite differences
  do i=2,n
    dV(i) = 1./h * (V(i)-V(i-1))
  end do
  Vleft = 0.
  call v_array_wire_nonperiodic(-0.5*h,x,rho_in,Vleft,n)
  dV(1)= 1./h * (V(1)-Vleft)
  aldV = alpha*dV
  ! induced charge density at each point, from the divergence of alpha*dV
  do i=2,n-1
    old_rho(i) = 1./(2*h) * (aldV(i+1)-aldV(i-1))
  end do
  old_rho(1) = 1./h * (aldV(2)-aldV(1))
  old_rho(n) = 1./h * (aldV(n)-aldV(n-1))
  ! change it to e/cm
  old_rho = old_rho*h ! e/ang
  old_rho = old_rho*1.E8 ! e/cm
  ! time for the recursive algorithm
  counter = 0
  subcounter = 0
  beta = 0.5
  diff = 10000.
  conv = .TRUE.
  indicator = 0.d0 ! BUGFIX: must be defined before the oscillation test reads it
  max_ind = MAXLOC(old_rho)
  do while (diff > 1.E-4)
    counter = counter +1
    subcounter = subcounter +1
    if (counter > max_iteration) then
      print *, "WARNING: recursive Poisson algorithm does not converge fast enough"
      conv = .FALSE.
      EXIT
    end if
    ! update the potential including the polarization charges
    call full_v_array_wire_nonperiodic(x-0.5*h,rho_in+old_rho,V,n)
    ! same operations as before the loop
    do i=2,n
      dV(i) = 1./h * (V(i)-V(i-1))
    end do
    Vleft=0.
    call v_array_wire_nonperiodic(-0.5*h,x,rho_in+old_rho,Vleft,n)
    dV(1) = 1./h * (V(1)-Vleft)
    aldV = alpha*dV
    do i=2,n-1
      new_rho(i) = 1./(2*h) * (aldV(i+1)-aldV(i-1))
    end do
    new_rho(1) = 1./h * (aldV(2)-aldV(1))
    new_rho(n) = 1./h * (aldV(n)-aldV(n-1))
    ! need rho in e/cm
    new_rho = new_rho*h ! e/ang
    new_rho = new_rho*1.E8 ! e/cm
    ! store the density change at the density maximum, alternating between the
    ! two slots: a sign flip between consecutive steps signals an oscillation
    indicator(MOD(counter, 2)+1) = old_rho(max_ind(1))-new_rho(max_ind(1))
    if (indicator(1)*indicator(2) < 0) then ! oscillation: damp the mixing
      beta = beta/1.5
      new_rho = 0.5*(new_rho+old_rho)
      subcounter = 0
    end if
    if (subcounter > 3) then ! several smooth steps in a row: accelerate
      beta = beta*2
      subcounter = 0
      if (beta>1.) then
        beta = 1.
      end if
    end if
    diff = ABS(MAXVAL(old_rho-new_rho))/ABS(MAXVAL(rho_in))
    old_rho = beta*new_rho + (1.-beta)*old_rho
  end do
  ! The potential is known on the staggered (half-step) grid; linear
  ! interpolation brings it back onto the grid points
  do i=2,n
    potential(i) = 0.5*(V(i)+V(i-1))
  end do
  call v_array_wire_nonperiodic(-0.5*h,x,rho_in+old_rho,potential(1),n)
  potential(1) = 0.5*(potential(1)+V(1))
  rho_out = old_rho + rho_in ! total charge density
  ! convergence check: recompute the induced density from the final potential
  call full_v_array_wire_nonperiodic(x-0.5*h,rho_in+old_rho,V,n)
  do i=2,n
    dV(i) = 1./h * (V(i)-V(i-1))
  end do
  dV(1) =dV(2)
  aldV = alpha*dV
  do i=2,n-1
    new_rho(i) = 1./(2*h) * (aldV(i+1)-aldV(i-1))
  end do
  ! need rho in e/cm
  new_rho = new_rho*h ! e/ang
  new_rho = new_rho*1.E8 ! e/cm
  new_rho(1)=new_rho(2)
  new_rho(n)=new_rho(n-1)
  if (.not. conv) then
    print *, "Recursive Poisson algorithm could not converge after", max_iteration, "steps."
  end if
end subroutine nonperiodic_recursive_poisson
!==================================================================
!====================PERIODIC FUNCTIONS============================
subroutine v_wire_periodic(x,lambda,potential,period)
  !! This subroutine computes the unscreened potential due to a periodic array
  !! of charged wires at a distance x from one of them.
  !! The closed form -lambda/(2*pi*eps0)*LOG(SIN(pi*x/period)) is the summed
  !! potential of the infinite 1D array of identical wires, up to an additive
  !! constant.
  !! NOTE(review): x is assumed to satisfy 0 < x < period, otherwise the
  !! argument of LOG is non-positive — confirm against the callers.
  double precision, intent(in) :: x
  !! x is the distance in ang between the wire and the point of interest
  double precision, intent(in) :: lambda
  !! lambda is the line charge density of the wire in e/cm
  double precision, intent(out) :: potential
  !! in V
  double precision, intent(in) :: period
  !! periodicity of the setup in ang
  double precision :: lamb
  !! lambda in units of e/ang
  double precision :: PI, EPSILON0
  parameter(PI = 3.1415926535897932d0)
  parameter(EPSILON0 = 0.0055263496) ! in e/(V*ang)
  ! first off, need lambda in e/ang
  lamb = lambda*1.E-8
  potential = -lamb/(2.*PI*EPSILON0) * LOG(SIN(PI/period*x))
end subroutine v_wire_periodic
subroutine v_array_wire_periodic(coord,x,rho_in,potential,n)
!! this subroutine computes the potential at a point from a periodic
!! charge density
double precision, intent(in) :: coord
!! coordinate of point of interest along x axis (in ang)
double precision, intent(in), dimension(n) :: x
!! the array of grid points in ang
double precision, intent(in), dimension(n) :: rho_in
!! the charge density in e/cm defined at each grid point
double precision, intent(out) :: potential
!! the potential in V at point coord due to array of wires
integer, intent(in) :: n
!! dimension of all arrays
double precision :: potr
!! temporary storing of potential on the right (for integration)
double precision :: potl
!! temporary storing of potential on the left (for integration)
double precision :: period
!! the period along the x axis in ang
integer :: i
period = x(n)-x(1)
! numerical integration to get potential
do i=1,n-1 ! up to n-1 cuz one uses trapezoidal method for integration
call v_wire_periodic(ABS(coord-x(i)),rho_in(i),potl,period)
call v_wire_periodic(ABS(coord-x(i+1)),rho_in(i+1),potr,period)
potential = potential + 0.5*(potr+potl) !*(x(2)-x(1))
end do
end subroutine v_array_wire_periodic
subroutine full_v_array_wire_periodic(x,rho_in,potential,n)
!! this subroutine computes the potential at each grid point from a
!! charge density (array of wires with diffrent charges)
double precision, intent(in), dimension(n) :: x
!! the array of grid points in ang
double precision, intent(in), dimension(n) :: rho_in
!! the charge density in e/cm defined at each grid point
double precision, intent(out), dimension(n) :: potential
!! the potential in V at each grid point due to array of wires
integer, intent(in) :: n
!! dimension of all arrays
double precision :: coord
double precision :: pot ! for temporary storage of potential
integer :: i
do i=1,n
pot = 0.0 ! because of the += in the v_array_wire_periodic subroutine
coord = (i-1)*(x(2)-x(1))
call v_array_wire_periodic(coord,x,rho_in,pot,n)
potential(i) = pot
end do
end subroutine full_v_array_wire_periodic
subroutine periodic_recursive_poisson(x,rho_in,alpha,max_iteration,potential,rho_out,n)
!! this subroutine solves the screened Poisson equation for 2D materials
!! with diffrent polarizabilities
double precision, intent(in), dimension(n) :: x
!! the array containing the position of all grid points in ang
double precision, intent(in), dimension(n) :: rho_in
!! initial charge density in e/cm defined at each grid point
double precision, intent(in), dimension(n) :: alpha
!! the value of the polarizability at each grid point in e/V
integer, intent(in) :: max_iteration
!! the maximum number of allowed iterations for the recursive Poisson algorithm
double precision, intent(out), dimension(n) :: potential
!! the solution of recursive Poisson in V defined at each grid point
double precision, intent(out), dimension(n) :: rho_out
!! The total charge density including polarization charges in e/cm at each grid point
integer, intent(in) :: n
!! dimension of all grid-related arrays
integer :: i
integer :: counter, subcounter
integer, dimension(1) :: max_ind
double precision :: beta
double precision, dimension(2) :: indicator ! used to check if there are oscillations whilec converging
double precision :: h ! step size in ang
double precision :: diff,diff_old ! allowed difference for convergence tests
double precision, dimension(n) :: old_rho ! induced charge density before algorithm step
double precision, dimension(n) :: new_rho
double precision, dimension(n) :: dV ! the space derivative of the potential at each point
double precision, dimension(n) :: aldV ! product of alpha and dV at each grid point
double precision, dimension(n) :: V !temporar storage of the potential
logical :: conv
h = x(2)-x(1)
!First off, one computes the unscreened potential due to rho_in
!One computes it a half grid point on the right, mainly to avoid things such as LOG(0)
call full_v_array_wire_periodic(x-0.5*h,rho_in,V,n)
V(n)=V(1) !need that because V(n) is not defined (there is a log(0))
! One computes the derivative of V at grid points using central finite difference with periodic BCs
do i=2,n-1
dV(i) = 1./h * (V(i)-V(i-1))
end do
dV(1) = 1./h * (V(1)-V(n-1)) ! because V(n) is out of bounds
dV(n) = dV(1)
aldV = alpha*dV
! now one computes the induced charge density at each point in e/ang, using periodic BCs
do i=2,n-1
old_rho(i) = 1./(2*h) * (aldV(i+1)-aldV(i-1))
end do
old_rho(1)=1./(2*h) * (aldV(2)-aldV(n-1))
old_rho(n)=old_rho(1)
! change it to e/cm
old_rho = old_rho*h ! e/ang
old_rho = old_rho*1.E8 ! e/cm
! we start now the main iterative algorithm
counter = 0
subcounter = 0
beta =0.5
diff = 10000.
conv = .TRUE.
max_ind = MAXLOC(old_rho)
do while (diff > 1.E-4)
counter = counter +1
subcounter = subcounter +1
diff_old = diff
!print *,counter, diff
if (counter > max_iteration) then
print *, "WARNING: recursive Poisson algorithm does not converge fast enough"
conv = .FALSE.
EXIT
end if
! update the potential including polarization charges
call full_v_array_wire_periodic(x-0.5*h,rho_in+old_rho,V,n)
V(n) = V(1)
!doing the same operations as before
do i=2,n-1
dV(i) = 1./h * (V(i)-V(i-1))
end do
dV(1) = 1./h * (V(1)-V(n-1)) ! because V(n) is out of bonds
dV(n) = dV(1)
aldV = alpha*dV
do i=2,n-1
new_rho(i) = 1./(2*h) * (aldV(i+1)-aldV(i-1))
end do
new_rho(1)=1./(2*h) * (aldV(2)-aldV(n-1))
new_rho(n)=new_rho(1)
! need rho in e/cm
new_rho = new_rho*h ! e/ang
new_rho = new_rho*1.E8 ! e/cm
indicator(MOD(counter,2)+1) = old_rho(max_ind(1))-new_rho(max_ind(1))
if (indicator(1)*indicator(2) < 0) then ! there is an oscillation in the convergence
beta = beta/1.5
new_rho = 0.5*(new_rho+old_rho)
subcounter = 0
end if
if (subcounter > 3) then
beta = beta*2
subcounter = 0
if (beta>1.) then
beta = 1.
end if
end if
diff = ABS(MAXVAL(old_rho-new_rho))/ABS(MAXVAL(rho_in))
old_rho = beta*new_rho + (1.-beta)*old_rho
end do
! need to return a value of the potential defined on grid points, use linear interpolation
do i=2,n
potential(i) = 0.5*(V(i)+V(i-1))
end do
potential(1) = potential(n) ! periodic BCs
rho_out = old_rho+rho_in ! total charge density
! convergence check
call full_v_array_wire_periodic(x-0.5*h,rho_in+old_rho,V,n)
V(n) = V(1)
!doing the same operations as before
do i=2,n
dV(i) = 1./h * (V(i)-V(i-1))
end do
dV(1) = 1./h * (V(1)-V(n-1)) ! because V(n) is out of bonds
dV(n) = dV(1)
aldV = alpha*dV
do i=2,n-1
new_rho(i) = 1./(2*h) * (aldV(i+1)-aldV(i-1))
end do
new_rho(1)=1./(2*h) * (aldV(2)-aldV(n-1))
new_rho(n)=new_rho(1)
! need rho in e/cm
new_rho = new_rho*h ! e/ang
new_rho = new_rho*1.E8 ! e/cm
if (conv .eqv. .TRUE.) then
!print *, "Recursive Poisson algorithm converged in", counter, "steps."
!print *, "Convergence check:", ABS(MAXVAL(old_rho-new_rho))/ABS(MAXVAL(rho_in))
else
print *, "Recursive Poisson algorithm could not converge after", max_iteration, "steps."
end if
end subroutine periodic_recursive_poisson
| Fortran |
2D | giovannipizzi/SchrPoisson-2DMaterials | docs/img/create_xsf.py | .py | 701 | 29 | import numpy as np
import ase
import ase.io
x_positions_1 = np.arange(0.0,7.0,3.0)
x_positions_2 = np.arange(10.0,31.0,4.0)
x_positions_3 = np.arange(33.0, 41.0, 3.0)
x_pos = np.append(x_positions_1, x_positions_2)
x_pos = np.append(x_pos,x_positions_3)
cell = ase.Atoms('C'*x_pos.size,
pbc=True,
cell = ( (x_pos[-1],0.,0.),
(0.,3.0,0.),
(0.,0.,15.0)))
cell.set_atomic_numbers([6, 6, 6,
16, 16, 16, 16, 16, 16,
6, 6, 6])
pos = []
for i in range(x_pos.size):
pos.append([x_pos[i],0.0,7.5])
cell.set_positions(pos)
ase.io.write("example.xsf",cell,"xsf")
| Python |
2D | harshaa765/Bilinear-CZM-UMAT | Bilinear_CZM_UMAT.for | .for | 7,183 | 193 | ! Author: @Harshdeep_Sharma
! Affiliation: Indian Institute of Technology, Patna
! Find me @: Material Testing Lab, Block-3
! Lab Incharge: Dr. Akhilendra Singh
! Contact me: harshsharma52@gmail.com
! Desc: Bilinear UMAT (2D/3D Interfacial problems)
! Designed for standard solver and suitable for quasi-static and static problems
!
SUBROUTINE UMAT(STRESS,STATEV,DDSDDE,SSE,SPD,SCD,
1 RPL,DDSDDT,DRPLDE,DRPLDT,
2 STRAN,DSTRAN,TIME,DTIME,TEMP,DTEMP,PREDEF,DPRED,CMNAME,
3 NDI,NSHR,NTENS,NSTATV,PROPS,NPROPS,COORDS,DROT,PNEWDT,
4 CELENT,DFGRD0,DFGRD1,NOEL,NPT,LAYER,KSPT,JSTEP,KINC)
!
INCLUDE 'ABA_PARAM.INC'
!
CHARACTER*80 CMNAME
DIMENSION STRESS(NTENS),STATEV(NSTATV),
1 DDSDDE(NTENS,NTENS),DDSDDT(NTENS),DRPLDE(NTENS),
2 STRAN(NTENS),DSTRAN(NTENS),TIME(2),PREDEF(1),DPRED(1),
3 PROPS(NPROPS),COORDS(3),DROT(3,3),DFGRD0(3,3),DFGRD1(3,3),
4 JSTEP(4)
! ADDITIONAL VECTORS AND TENSORS
DIMENSION DSEC(NTENS,NTENS), DELTAT(NTENS), K_MAT(NTENS,NTENS)
! PARAMETER(MULT_FACTOR=2)
! MATERIAL PROPERTIES FROM INPUT FILE :
A_KN = PROPS(1) !NORMAL PENALTY STIFFNESS
TAU_N = PROPS(2) !NORMAL COHESIVE STRENGTH
TAU_S = PROPS(3) !SHEAR COHESIVE STRENGTH
TAU_T = PROPS(4) !TEAR COHESIVE STRENGTH
G_NC = PROPS(5) !NORMAL FRACTURE TOUGHNESS
G_SC = PROPS(6) !SHEAR FRACTURE TOUGHNESS
G_TC = PROPS(7) !TEAR FRACTURE TOUGHNESS
ETA = PROPS(8) !BK PARAMETER (ENERGY RELEASE RATE)
! SHEAR PENALTY STIFFNESS
A_KS = (G_NC/G_SC)*((TAU_S/TAU_N)**2)*A_KN
IF(NTENS .EQ. 3) THEN
A_KT = (G_NC/G_TC)*((TAU_T/TAU_N)**2)*A_KN
ENDIF
! CONSTRUCT K_MAT
K_MAT(:,:) = 0.
K_MAT(1,1) = A_KN
K_MAT(2,2) = A_KS
IF(NTENS .EQ. 3) THEN
K_MAT(3,3) = A_KS
ENDIF
! RETRIEVING STATE VARIABLES
DMG_OLD = STATEV(1)
! SEPARATION @ {N+1}
Do I = 1, NTENS
DELTAT(I) = STRAN(I) + DSTRAN(I)
Enddo
! NORMAL AND SHEAR SEPARATION @ {N+1}
DELTA_N = DELTAT(1)
DELTA_S = DELTAT(2)
IF(NTENS .EQ. 3) THEN !TEAR SEPARATION @ {N+1}
DELTA_T = DELTAT(3)
ENDIF
! DISCRIMINATE
! CASE I: PURE COMPRESSIVE STATE
IF((NTENS .LT. 3 .AND. DELTA_N .LT. 0. .AND. ABS(DELTA_S) .LE.
& 1.E-8 ) .OR. (NTENS .EQ. 3 .AND. DELTA_N .LT. 0. .AND.
& ABS(DELTA_S) .LE. 1.E-8 .AND. ABS(DELTA_T) .LE. 1.E-8)) THEN
DO I=1,NTENS
STRESS(I) = K_MAT(I,I)*DELTAT(I)
ENDDO
DDSDDE(:,:) = K_MAT(:,:)
ELSE
! CASE II: DELTA_EQ WILL BE GREATER THAN 0
! MIXED-MODE RATIO (ENERGY)
G_SHEAR = A_KS*DELTA_S**2.
G_N = A_KN*MAX(DELTA_N,0.)**2
IF(NTENS .EQ. 3) THEN
G_SHEAR = G_SHEAR + A_KT*DELTA_T**2
ENDIF
G_TOTAL = G_N + G_SHEAR
B = G_S/G_TOTAL
! MIXED-MODE ONSET SEPARATION VALUE
DELTA_NC = TAU_N/A_KN
DELTA_SC = TAU_S/A_KS
A_KM = A_KN*(1.-B) + (A_KS)*B
DELTA_MC = SQRT((A_KN*DELTA_NC**2 + (A_KS*DELTA_SC**2-
& A_KN*DELTA_NC**2)*(B**ETA))/A_KM)
IF(NTENS .EQ. 3) THEN
DELTA_TC = TAU_T/A_KT
A_KM = A_KN*(1.-B) + (A_KS+A_KT)*B
DELTA_MC = SQRT((A_KN*DELTA_NC**2 + (A_KS*DELTA_SC**2+A_KT*
& DELTA_TC**2- A_KN*DELTA_NC**2)*(B**ETA))/A_KM)
ENDIF
! MIXED-MODE FINAL SEPARATION VALUE
DELTA_NF = 2.*G_NC/(A_KN*DELTA_NC)
DELTA_SF = 2.*G_SC/(A_KS*DELTA_SC)
DELTA_MF = ((A_KN*DELTA_NC*DELTA_NF)+((A_KS*DELTA_SC*
& DELTA_SF)-(A_KN*DELTA_NC*DELTA_NF))*(B**ETA))/(A_KM*DELTA_MC)
IF(NTENS .EQ. 3) THEN
DELTA_TF = 2.*G_TC/(A_KT*DELTA_TC)
DELTA_MF = ((A_KN*DELTA_NC*DELTA_NF)+((A_KS*DELTA_SC*DELTA_SF+
& A_KT*DELTA_TC*DELTA_TF)-(A_KN*DELTA_NC*DELTA_NF))*
& (B**ETA))/(A_KM*DELTA_MC)
ENDIF
! MIXED-MODE SEPARATION VALUE AT THE CURRENT INCREMENT
! DELTA_M = SQRT(DELTA_N**2. + DELTA_S**2.)
A_NUM = A_KN*MAX(DELTA_N,0.)**2 + A_KS*DELTA_S**2
A_DENOM = A_KN**2*MAX(DELTA_N,0.)**2 + A_KS**2*DELTA_S**2
IF(NTENS .EQ. 3) THEN
A_NUM = A_KN*MAX(DELTA_N,0.)**2 + A_KS*DELTA_S**2 + A_KT*
& DELTA_T**2
A_DENOM = A_KN**2*MAX(DELTA_N,0.)**2 + A_KS**2*DELTA_S**2+
& A_KT**2*DELTA_T**2
ENDIF
DELTA_M = A_NUM/SQRT(A_DENOM)
! DAMAGE CALCULATION AT THE CURRENT INCREMENT
RT_OLD =(DELTA_MC*DELTA_MF)/(DELTA_MF-DMG_OLD*(DELTA_MF-DELTA_MC))
! RT = MAX(RT_OLD,DELTA_M)
IF(DELTA_M .GT. RT_OLD) THEN
DMG =(DELTA_MF*(DELTA_M-DELTA_MC))/(DELTA_M*(DELTA_MF-DELTA_MC))
IF(DMG .GE. 1.) DMG = 1.
ELSE
DMG = DMG_OLD
ENDIF
! STORING THE STATE VARIABLES FOR THE NEXT INCREMENT
STATEV(1) = DMG
! CALCULATION OF SECANT STIFFNESS MATRIX
DSEC(:,:) = 0.
DSEC(:,:) = (1.-DMG)*K_MAT
! CALCULATION OF TRACTION FOR THE CURRENT INCREMENT (BASED ON SECANT STIFFNESS)
Do I = 1, NTENS
STRESS(I) = 0.
Do J = 1, NTENS
STRESS(I) = STRESS(I) + DSEC(I,J) * DELTAT(J)
Enddo
Enddo
! CALCULATION OF TANGENT STIFFNESS MATRIX
DDSDDE = 0.
IF(DELTA_M .GT. RT_OLD .AND. DELTA_M .LT. DELTA_MF ) THEN
COEFF1 = ((DELTA_MF*DELTA_MC)/((DELTA_MF-DELTA_MC)*DELTA_M**2))
FIRST_TERM = 2.*A_KS*DELTA_S/SQRT(A_DENOM)
SECOND_TERM=-1.*A_NUM*A_KS**2*DELTA_S/(SQRT(A_DENOM)*A_DENOM)
DELTAD_DELTAS = COEFF1*(FIRST_TERM+SECOND_TERM)
IF(NTENS .EQ. 3) THEN
FIRST_TERM = 2.*A_KT*DELTA_T/SQRT(A_DENOM)
SECOND_TERM=-1.*A_NUM*A_KT**2*DELTA_T/(SQRT(A_DENOM)*
& A_DENOM)
DELTAD_DELTAT = COEFF1*(FIRST_TERM+SECOND_TERM)
ENDIF
DDSDDE(2,2) = DSEC(2,2) - A_KS*DELTA_S*DELTAD_DELTAS
IF(NTENS .EQ. 3) THEN
DDSDDE(3,2) = DSEC(3,2) - A_KT*DELTA_T*DELTAD_DELTAS
DDSDDE(2,3) = DSEC(2,3) - A_KS*DELTA_S*DELTAD_DELTAT
DDSDDE(3,3) = DSEC(3,3) - A_KT*DELTA_T*DELTAD_DELTAT
ENDIF
IF(DELTA_N.LT.0.) THEN
DDSDDE(:,1) = DSEC(:,1)
DDSDDE(1,:) = DSEC(1,:)
ELSE
FIRST_TERM = 2.*A_KN*DELTA_N/SQRT(A_DENOM)
SECOND_TERM=-1.*A_NUM*A_KN**2*DELTA_N/(SQRT(A_DENOM)*A_DENOM)
DELTAD_DELTAN = COEFF1*(FIRST_TERM+SECOND_TERM)
DDSDDE(1,1) = DSEC(1,1) - A_KN*DELTA_N*DELTAD_DELTAN
DDSDDE(2,1) = DSEC(2,1) - A_KS*DELTA_S*DELTAD_DELTAN
DDSDDE(1,2) = DSEC(1,2) - A_KN*DELTA_N*DELTAD_DELTAS
IF(NTENS .EQ. 3) THEN
DDSDDE(3,1) = DSEC(3,1) - A_KT*DELTA_T*DELTAD_DELTAN
DDSDDE(1,3) = DSEC(1,3) - A_KN*DELTA_N*DELTAD_DELTAT
ENDIF
ENDIF
ELSE
DDSDDE(:,:) = DSEC(:,:)
ENDIF
ENDIF
RETURN
END
| Unknown |
2D | romankempt/hetbuilder | setup.py | .py | 2,187 | 79 | #!/usr/bin/env python
import re
from setuptools import find_packages
from pathlib import Path
import os
import sys
VERSIONFILE = "hetbuilder/__init__.py"
verstrline = open(VERSIONFILE, "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
version = mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
try:
import skbuild
from skbuild import setup
except ImportError:
print(
"Please update pip, you need pip 10 or greater,\n"
" or you need to install the PEP 518 requirements in pyproject.toml yourself",
file=sys.stderr,
)
raise
print("Scikit-build version:", skbuild.__version__)
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="hetbuilder",
version=version,
author="Roman Kempt",
author_email="roman.kempt@tu-dresden.de",
description="A tool to build heterostructure interfaces based on coincidence lattice theory.",
long_description=long_description,
license="MIT",
url="https://github.com/romankempt/hetbuilder.git",
download_url="https://github.com/romankempt/hetbuilder.git",
packages=find_packages(),
package_data={"": ["*.xyz", "CMakeLists.txt"]},
# package_dir={"": ""},
cmake_install_dir="hetbuilder",
include_package_data=True,
# scripts=["./bin/hetbuilder"],
install_requires=[
"numpy",
"scipy",
"spglib",
"matplotlib",
"ase",
"networkx",
"pretty_errors",
"rich",
"typer",
# "pybind11",
],
classifiers=[
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.11",
"Programming Language :: C++",
"Topic :: Scientific/Engineering :: Chemistry",
"Topic :: Scientific/Engineering :: Physics",
],
cmake_args=[
"-DCMAKE_BUILD_TYPE={}".format("RELEASE"), # not used on MSVC, but no harm,
],
zip_safe=False,
)
| Python |
2D | romankempt/hetbuilder | app/app.py | .py | 1,019 | 53 | from flask import (
Flask,
request,
Response,
redirect,
url_for,
render_template,
jsonify,
send_from_directory,
flash,
)
from flask_cors import CORS
import random
import sys
import json
from hetbuilder.atom_checks import check_atoms
app = Flask(__name__)
app = Flask(__name__, static_url_path="", static_folder="client/public/")
# Path for our main Svelte page
@app.route("/")
def index():
return app.send_static_file("index.html")
# Path for all the static files (compiled JS/CSS, etc.)
@app.route("/<path:path>")
def home(path):
return send_from_directory("client/public", path)
@app.route("/rand")
def hello():
return str(random.randint(0, 100))
@app.route("/post", methods=["POST"])
def post():
if request.method == "POST":
f1 = request.files.get("lower")
f2 = request.files.get("upper")
print(f1, f2)
# data = jsonify(request.get_json())
return '{"foo": 1}'
if __name__ == "__main__":
app.run(debug=True)
| Python |
2D | romankempt/hetbuilder | backend/helper_classes.h | .h | 3,025 | 108 | #pragma once
#include "math_functions.h"
#include <iostream>
class CoincidencePairs
{
public:
int2dvec_t M;
int2dvec_t N;
CoincidencePairs(int2dvec_t &cM, int2dvec_t &cN)
{
M = cM;
N = cN;
};
int score() const;
};
// Two coincidence matrices are qualitatively the same if they have yield the same area.
inline bool operator==(const CoincidencePairs &M1, const CoincidencePairs &M2)
{
int2dvec_t m1 = M1.M;
int2dvec_t m2 = M2.M;
// I have to be careful to only compare M and M, not M and N
int detm1 = m1[0][0] * m1[1][1] - m1[0][1] * m1[1][0];
int detm2 = m2[0][0] * m2[1][1] - m2[0][1] * m2[1][0];
bool same_area = std::abs(detm1 - detm2) < 1e-4;
return same_area;
}
inline bool operator>(const CoincidencePairs &M1, const CoincidencePairs &M2)
{
int score1 = M1.score();
int score2 = M2.score();
return (score1 < score2);
}
inline bool operator<(const CoincidencePairs &M1, const CoincidencePairs &M2)
{
int score1 = M1.score();
int score2 = M2.score();
return (score1 > score2);
}
class pBar
{
public:
double neededProgress;
std::string firstPartOfpBar;
std::string lastPartOfpBar = "]",
pBarFiller = "=",
pBarUpdater = ">";
bool show = false;
pBar(double cneededProgress, std::string cfirstPartOfpBar, bool cshow)
{
neededProgress = cneededProgress;
firstPartOfpBar = cfirstPartOfpBar;
show = cshow;
};
pBar()
{
neededProgress = 100;
firstPartOfpBar = "";
show = false;
};
void update(double newProgress)
{
currentProgress += newProgress;
amountOfFiller = (int)((currentProgress / neededProgress) * (double)pBarLength);
}
void print()
{
if (show)
{
currUpdateVal %= pBarUpdater.length();
std::cout << "\r" // Bring cursor to start of line
<< firstPartOfpBar; // Print out first part of pBar
for (int a = 0; a < amountOfFiller; a++)
{ // Print out current progress
std::cout << pBarFiller;
}
std::cout << pBarUpdater[currUpdateVal];
for (int b = 0; b < pBarLength - amountOfFiller; b++)
{ // Print out spaces
std::cout << " ";
}
std::cout << lastPartOfpBar // Print out last part of progress bar
<< " (" << (int)(100 * (currentProgress / neededProgress)) << "%)" // This just prints out the percent
<< std::flush;
currUpdateVal += 1;
}
}
private:
int amountOfFiller,
pBarLength = 50, // I would recommend NOT changing this
currUpdateVal = 0; // Do not change
double currentProgress = 0; // Do not change
// neededProgress = 100; // I would recommend NOT changing this
}; | Unknown |
2D | romankempt/hetbuilder | backend/helper_classes.cpp | .cpp | 528 | 18 | #include "helper_classes.h"
/**
* Yields a score to allow for sorting coincidence pairs by how "arbitrarily nice" they are.
*
* Prefers coincidence matrices that are symmetric and positive.
*/
int CoincidencePairs::score() const
{
int sum = this->M[0][0] + this->M[0][1] + this->M[1][0] + this->M[1][1];
int positivity = sum > 0;
int offdiagsymm = (this->M[0][1] == this->M[1][0]);
int diagsymm = (this->M[0][0] == this->M[1][1]);
int score = positivity + offdiagsymm + diagsymm;
return score;
}; | C++ |
2D | romankempt/hetbuilder | backend/interface_class.h | .h | 2,414 | 89 | #pragma once
#include "atom_class.h"
/**
* Container class for an interface after the coincidence lattice search.
*
* Contains the bottom layer and top layer in supercell form, as well as the
* stack thereof.
*
* Comparison operators are defined to allow for ordering and comparisons in a set.
*/
class Interface
{
public:
Atoms bottomLayer;
Atoms topLayer;
Atoms stack;
double angle;
int2dvec_t M;
int2dvec_t N;
int spaceGroup;
friend bool operator==(const Interface &c1, const Interface &c2);
friend bool operator>(const Interface &c1, const Interface &c2);
friend bool operator<(const Interface &c1, const Interface &c2);
Interface(Atoms cBottomLayer,
Atoms cTopLayer,
Atoms cStack,
double cAngle,
int2dvec_t cMatrixM,
int2dvec_t cMatrixN,
int cspaceGroup)
{
bottomLayer = cBottomLayer;
topLayer = cTopLayer;
stack = cStack;
angle = cAngle;
M = cMatrixM;
N = cMatrixN;
spaceGroup = cspaceGroup;
};
void set_stack(Atoms &stack)
{
this->stack = stack;
}
void set_spacegroup(int &sg)
{
this->spaceGroup = sg;
}
};
inline bool operator==(const Interface &c1, const Interface &c2)
{
// bool spgmatch = (c1.spaceGroup == c2.spaceGroup);
bool nummatch = (c1.stack.numAtom == c2.stack.numAtom);
// bool anglematch = std::abs(c1.angle - c2.angle) < 1e-4;
double area1 = std::abs(c1.stack.lattice[0][0] * c1.stack.lattice[1][1] - c1.stack.lattice[0][1] * c1.stack.lattice[1][0]);
double area2 = std::abs(c2.stack.lattice[0][0] * c2.stack.lattice[1][1] - c2.stack.lattice[0][1] * c2.stack.lattice[1][0]);
bool areamatch = std::abs(area1 - area2) < 1e-6;
bool equals = (nummatch && areamatch);
bool match = false;
if (equals)
{
Atoms a1 = c1.stack;
Atoms a2 = c2.stack;
bool match = a1.xtalcomp_compare(a2);
}
return (equals && match);
}
inline bool operator==(Interface &c1, Interface &c2)
{
bool match = c1.stack.xtalcomp_compare(c2.stack);
return match;
}
inline bool operator>(const Interface &c1, const Interface &c2)
{
return (c1.stack.numAtom > c2.stack.numAtom);
}
inline bool operator<(const Interface &c1, const Interface &c2)
{
return (c1.stack.numAtom < c2.stack.numAtom);
}
| Unknown |
2D | romankempt/hetbuilder | backend/logging_functions.cpp | .cpp | 2,961 | 121 | #include <vector>
#include <iostream>
#include <map>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "math_functions.h"
#include "logging_functions.h"
typedef std::vector<int> int1dvec_t;
typedef std::vector<double> double1dvec_t;
typedef std::vector<std::vector<int>> int2dvec_t;
typedef std::vector<std::vector<double>> double2dvec_t;
// Prints 1d vector.
template <typename T>
void print_1d_vector(std::vector<T> &vec)
{
for (int i = 0; i < vec.size(); i++)
{
std::cout << "\t" << vec[i] << ' ';
}
std::cout << std::endl;
};
template void print_1d_vector<int>(int1dvec_t &vec);
template void print_1d_vector<double>(double1dvec_t &vec);
// Prints 2d vector.
template <typename T>
void print_2d_vector(const std::vector<std::vector<T>> &vec)
{
for (int i = 0; i < vec.size(); i++)
{
for (int j = 0; j < vec[i].size(); j++)
{
std::cout << "\t" << vec[i][j] << " ";
}
std::cout << std::endl;
}
};
template <typename T>
void print_2d_vector(std::vector<std::vector<T>> &vec)
{
for (int i = 0; i < vec.size(); i++)
{
for (int j = 0; j < vec[i].size(); j++)
{
std::cout << "\t" << vec[i][j] << ' ';
}
std::cout << std::endl;
}
};
template void print_2d_vector<int>(const int2dvec_t &vec);
template void print_2d_vector<double>(const double2dvec_t &vec);
template void print_2d_vector<int>(int2dvec_t &vec);
template void print_2d_vector<double>(double2dvec_t &vec);
// Prints number of OpenMP threads.
void log_number_of_threads()
{
#ifdef _OPENMP
int nthreads, tid;
/* Fork a team of threads giving them their own copies of variables */
#pragma omp parallel private(nthreads, tid)
{
/* Obtain thread number */
tid = omp_get_thread_num();
/* Only master thread does this */
if (tid == 0)
{
nthreads = omp_get_num_threads();
printf("Using %d OpenMP threads.\n", nthreads);
}
} /* All threads join master thread and disband */
#endif
};
// Get number of OpenMP threads.
int get_number_of_threads()
{
int nthreads = 1;
#ifdef _OPENMP
int tid;
/* Fork a team of threads giving them their own copies of variables */
#pragma omp parallel shared(nthreads, tid)
{
/* Obtain thread number */
tid = omp_get_thread_num();
/* Only master thread does this */
if (tid == 0)
{
nthreads = omp_get_num_threads();
}
} /* All threads join master thread and disband */
#endif
return nthreads;
};
// Prints map of single-valued double key with a 2d vector of ints.
void print_map_key_2d_vector(std::map<double, int2dvec_t> const &m)
{
for (auto i = m.begin(); i != m.end(); ++i)
{
std::cout << "Key :" << (*i).first << std::endl;
int2dvec_t matrix = (*i).second;
print_2d_vector<int>(matrix);
std::cout << std::endl;
};
};
| C++ |
2D | romankempt/hetbuilder | backend/math_functions.h | .h | 1,367 | 40 | #pragma once
#include <vector>
#include <array>
#include <cmath>
typedef std::vector<int> int1dvec_t;
typedef std::vector<double> double1dvec_t;
typedef std::vector<std::vector<int>> int2dvec_t;
typedef std::vector<std::vector<double>> double2dvec_t;
template <typename T1, typename T2>
std::vector<T1> basis_2x2_dot_2d_vector(const std::vector<std::vector<T1>> &basis, std::vector<T2> &vec);
template <typename T>
double1dvec_t rotate_2d_vector(std::vector<T> &vec, const double &theta);
template <typename T>
double get_distance(std::vector<T> &Am, std::vector<T> &RBn);
int get_gcd(int a, int b);
int find_gcd(std::vector<int> &arr, int n);
template <typename T1, typename T2>
std::vector<T1> vec1x3_dot_3x3_matrix(std::vector<T1> &a, std::vector<std::vector<T2>> &matrix);
template <typename T1, typename T2>
std::vector<T1> matrix3x3_dot_vec3x1(std::vector<std::vector<T2>> &matrix, std::vector<T1> &a);
template <typename T>
double get_3x3_matrix_determinant(std::vector<std::vector<T>> &mat);
template <typename T>
double2dvec_t invert_3x3_matrix(std::vector<std::vector<T>> &mat);
template <typename T1, typename T2>
std::vector<std::vector<T2>> matrix3x3_dot_matrix3x3(std::vector<std::vector<T1>> &mat1, std::vector<std::vector<T2>> &mat2);
template <typename T>
std::vector<std::vector<T>> transpose_matrix3x3(std::vector<std::vector<T>> &mat); | Unknown |
2D | romankempt/hetbuilder | backend/interface_class.cpp | .cpp | 86 | 6 | #include "interface_class.h"
/**
* score function was moved to CoincidencePairs
*/
| C++ |
2D | romankempt/hetbuilder | backend/atom_class.cpp | .cpp | 8,540 | 256 | #include "logging_functions.h"
#include "math_functions.h"
#include "atom_class.h"
#include "spglib.h"
#include "xtalcomp.h"
#include <algorithm>
// Prints lattice, positions, scaled positions and atomic numbers of atoms object.
void Atoms::print()
{
std::cout << "Lattice: " << std::endl;
print_2d_vector(this->lattice);
std::cout << "Positions: " << std::endl;
print_2d_vector(this->positions);
std::cout << "Scaled Positions: " << std::endl;
double2dvec_t scalpos = get_scaled_positions();
print_2d_vector(scalpos);
std::cout << "Magnetic Moments: '" << std::endl;
for (auto j : this->magmoms)
{
std::cout << "\t" << j << " ";
}
std::cout << std::endl;
std::cout << "Atomic Numbers: " << std::endl;
for (auto j : this->atomic_numbers)
{
std::cout << "\t" << j << " ";
}
std::cout << std::endl;
};
// Returns the index mapping. Mostly useful for debugging the supercell to see which atoms were moved where.
int1dvec_t Atoms::get_index_mapping()
{
return this->indices;
};
// Returns fractional coordinates.
double2dvec_t Atoms::get_scaled_positions()
{
double2dvec_t cell = this->lattice;
double2dvec_t icell = invert_3x3_matrix(cell);
double2dvec_t icellT = transpose_matrix3x3(icell);
double2dvec_t scaled_positions;
for (int row = 0; row < this->positions.size(); row++)
{
double1dvec_t subvec = matrix3x3_dot_vec3x1(icellT, this->positions[row]);
scaled_positions.push_back(subvec);
}
return scaled_positions;
};
// Converts fractional coordinates to cartesian coordinates.
double2dvec_t Atoms::scaled_positions_to_cartesian(double2dvec_t &scalpos)
{
double2dvec_t cell = this->lattice;
double2dvec_t cart_pos;
for (int row = 0; row < scalpos.size(); row++)
{
double1dvec_t subvec = vec1x3_dot_3x3_matrix(scalpos[row], cell);
cart_pos.push_back(subvec);
}
return cart_pos;
};
// Scales cell of atoms object to size of newcell and adjusts cartesian atomic positions.
void Atoms::scale_cell(double2dvec_t &newcell)
{
double2dvec_t scal_pos = this->get_scaled_positions();
this->lattice = newcell;
double2dvec_t cart_pos = this->scaled_positions_to_cartesian(scal_pos);
this->positions = cart_pos;
};
// Overload + operator to add two Atoms objects.
Atoms Atoms::operator+(const Atoms &b)
{
double2dvec_t pos1 = this->positions;
double2dvec_t cell1 = this->lattice;
int1dvec_t numbers1 = this->atomic_numbers;
int1dvec_t indices1 = this->indices;
double1dvec_t magmoms1 = this->magmoms;
for (int row = 0; row < b.numAtom; row++)
{
pos1.push_back(b.positions[row]);
numbers1.push_back(b.atomic_numbers[row]);
indices1.push_back(b.indices[row]);
magmoms1.push_back(b.magmoms[row]);
}
Atoms newAtoms(cell1, pos1, numbers1, indices1, magmoms1);
return newAtoms;
};
// Helper function to convert double2dvec_t lattice to double array for spglib, which is transposed.
void Atoms::lattice_to_spglib_array(double arr[3][3])
{
// transposition
for (unsigned i = 0; (i < 3); i++)
{
for (unsigned j = 0; (j < 3); j++)
{
arr[j][i] = this->lattice[i][j];
}
}
};
// Helper function to convert double2dvec_t positions to double array for spglib.
void Atoms::positions_to_spglib_array(double arr[][3])
{
double2dvec_t scalpos = this->get_scaled_positions();
for (unsigned i = 0; i < scalpos.size(); i++)
{
for (unsigned j = 0; (j < 3); j++)
{
arr[i][j] = scalpos[i][j];
}
}
};
// Helper function to convert int1dvec_t atomic numbers to int array for spglib.
void Atoms::atomic_numbers_to_spglib_types(int arr[])
{
for (unsigned i = 0; (i < this->numAtom); i++)
{
arr[i] = this->atomic_numbers[i];
}
};
// Standardizes unit cell and positions via spglib. Returns spacegroup if successful, otherwise returns 0.
// Does not support magnetic moments. Magnetic moments are lost here.
int Atoms::standardize(int to_primitive, int no_idealize, double symprec, double angle_tolerance)
{
double spglibPos[this->numAtom][3];
positions_to_spglib_array(spglibPos);
double spglibBasis[3][3];
lattice_to_spglib_array(spglibBasis);
int spglibTypes[this->numAtom];
atomic_numbers_to_spglib_types(spglibTypes);
int newNumAtoms = spgat_standardize_cell(spglibBasis,
spglibPos,
spglibTypes,
this->numAtom,
to_primitive,
no_idealize,
symprec,
angle_tolerance);
int spaceGroup;
char symbol[11];
spaceGroup = spgat_get_international(symbol,
spglibBasis,
spglibPos,
spglibTypes,
this->numAtom,
symprec,
angle_tolerance);
if (newNumAtoms != 0)
{
// transposition
for (unsigned i = 0; (i < 3); i++)
{
for (unsigned j = 0; (j < 3); j++)
{
this->lattice[j][i] = spglibBasis[i][j];
}
}
this->numAtom = newNumAtoms;
double2dvec_t spglibScalPos;
int1dvec_t spglibNewTypes(newNumAtoms, 0);
double1dvec_t newMagneticMoments(newNumAtoms, 0.00);
for (unsigned i = 0; (i < newNumAtoms); i++)
{
double1dvec_t subvec = {spglibPos[i][0], spglibPos[i][1], spglibPos[i][2]};
spglibScalPos.push_back(subvec);
spglibNewTypes[i] = spglibTypes[i];
newMagneticMoments[i] = 0.00;
}
double2dvec_t cart_pos = scaled_positions_to_cartesian(spglibScalPos);
this->positions = cart_pos;
this->atomic_numbers = spglibNewTypes;
this->magmoms = newMagneticMoments;
}
return spaceGroup;
}
// Helper function to convert double2dvec_t lattice to single vector for XtalComp.
XcMatrix Atoms::lattice_to_xtalcomp_cell()
{
double arr[3][3];
for (unsigned i = 0; (i < 3); i++)
{
for (unsigned j = 0; (j < 3); j++)
{
arr[i][j] = this->lattice[i][j];
}
}
XcMatrix xtalcomp_cell(arr);
return xtalcomp_cell;
};
// Helper function to convert int1dvec_t atomic numbers to int vector for XtalComp.
std::vector<unsigned int> Atoms::atomic_numbers_to_xtalcomp_types()
{
std::vector<unsigned int> xtalcomp_types(std::begin(this->atomic_numbers), std::end(this->atomic_numbers));
return xtalcomp_types;
};
// Converts the scaled (fractional) positions into XcVector objects for XtalComp.
std::vector<XcVector> Atoms::positions_to_xtalcomp_positions()
{
    double2dvec_t fractional = this->get_scaled_positions();
    std::vector<XcVector> converted;
    converted.reserve(fractional.size());
    for (const auto &coords : fractional)
    {
        XcVector point;
        point.set(coords[0], coords[1], coords[2]);
        converted.push_back(point);
    }
    return converted;
}
// Wrapper around the XtalComp comparison algorithm with an additional
// magnetic-moment check: two structures can only match if their magnetic
// moments agree as multisets (compared after sorting).
bool Atoms::xtalcomp_compare(Atoms &other)
{
    // Cheap pre-check: sorted magmoms must be identical before the geometric
    // comparison is attempted.
    double1dvec_t sortedMagmoms1 = this->magmoms;
    double1dvec_t sortedMagmoms2 = other.magmoms;
    std::sort(sortedMagmoms1.begin(), sortedMagmoms1.end());
    std::sort(sortedMagmoms2.begin(), sortedMagmoms2.end());
    if (sortedMagmoms1 != sortedMagmoms2)
    {
        return false;
    }
    XcMatrix cell1 = this->lattice_to_xtalcomp_cell();
    XcMatrix cell2 = other.lattice_to_xtalcomp_cell();
    std::vector<XcVector> pos1 = this->positions_to_xtalcomp_positions();
    std::vector<XcVector> pos2 = other.positions_to_xtalcomp_positions();
    std::vector<unsigned int> types1 = this->atomic_numbers_to_xtalcomp_types();
    std::vector<unsigned int> types2 = other.atomic_numbers_to_xtalcomp_types();
    return XtalComp::compare(cell1, types1, pos1,
                             cell2, types2, pos2,
                             NULL, 0.05, 0.25, false);
}
2D | romankempt/hetbuilder | backend/coincidence_algorithm.cpp | .cpp | 11,789 | 355 | #include <set>
// include <chrono>
// include <ctime>
#include <algorithm>
#include "logging_functions.h"
#include "math_functions.h"
#include "atom_class.h"
#include "atom_functions.h"
#include "helper_classes.h"
#include "interface_class.h"
#include "coincidence_algorithm.h"
#ifdef _OPENMP
#include <omp.h>
#endif
typedef std::map<double, std::vector<CoincidencePairs>> angle_dict_t;
/**
 * Solves the equation |Am - R(theta)Bn| < tolerance for a given angle theta.
 *
 * A and B are the 2x2 lattice bases of the two layers; (m1, m2) and (n1, n2)
 * run over all integers with Nmin <= |index| <= Nmax. The results are stored
 * in a 2d vector of integers containing m1, m2, n1, n2.
 * OpenMP is employed to distribute the nested loops on threads, but an ordered
 * construct has to be used to push back the vector for thread safety.
 * NOTE: collapse(4) requires the four loops to stay perfectly nested.
 *
 * The case of m1 = m2 = n1 = n2 is already removed, including the null vector.
 */
int2dvec_t CoincidenceAlgorithm::find_coincidences(double2dvec_t &A, double2dvec_t &B, double &theta, int &Nmin, int &Nmax, double &tolerance)
{
    int2dvec_t coincidences;
#pragma omp parallel for default(none) shared(A, B, theta, Nmin, Nmax, tolerance, coincidences) schedule(static) ordered collapse(4)
    for (int i = -Nmax; i < (Nmax + 1); i++)
    {
        for (int j = -Nmax; j < (Nmax + 1); j++)
        {
            for (int k = -Nmax; k < (Nmax + 1); k++)
            {
                for (int l = -Nmax; l < (Nmax + 1); l++)
                {
                    if ((std::abs(i) >= Nmin) && (std::abs(j) >= Nmin) && (std::abs(k) >= Nmin) && (std::abs(l) >= Nmin))
                    {
                        int1dvec_t vecM = {i, j};
                        int1dvec_t vecN = {k, l};
                        double1dvec_t Am;
                        double1dvec_t Bn;
                        double1dvec_t RBn;
                        double norm;
                        int match;
                        bool all_equal;
                        // Am = A * m, Bn = B * n, RBn = R(theta) * Bn
                        Am = basis_2x2_dot_2d_vector<double, int>(A, vecM);
                        Bn = basis_2x2_dot_2d_vector<double, int>(B, vecN);
                        RBn = rotate_2d_vector<double>(Bn, theta);
                        norm = get_distance<double>(Am, RBn);
                        match = norm < tolerance;
                        all_equal = (i == j) && (j == k) && (k == l);
                        if (match && !all_equal)
                        {
                            int1dvec_t row = {i, j, k, l};
// ordered: serializes the push_back so the shared vector is not corrupted
#pragma omp ordered
                            coincidences.push_back(row);
                        }
                    }
                }
            }
        }
    }
    if (coincidences.size() > 0)
    {
        return coincidences;
    }
    else
    {
        return {};
    };
};
/**
 * Constructs the independent pairs (m1,m2,m3,m4) and (n1,n2,n3,n4).
 *
 * Every ordered combination of two coincidence rows is considered; only
 * combinations where both 2x2 determinants det(M) and det(N) are positive
 * are kept. I'm not a 100 % sure if I should iterate over all i j or only
 * j > i.
 *
 * All pairs with an absolute greatest common divisor different from 1 are
 * removed, because they correspond to scalar multiples of other smaller
 * super cells.
 */
int2dvec_t CoincidenceAlgorithm::find_unique_pairs(int2dvec_t &coincidences)
{
    int2dvec_t uniquePairs;
#pragma omp parallel for shared(uniquePairs, coincidences) schedule(static) ordered collapse(2)
    for (int i = 0; i < coincidences.size(); i++)
    {
        for (int j = 0; j < coincidences.size(); j++)
        {
            int m1 = coincidences[i][0];
            int m2 = coincidences[i][1];
            int n1 = coincidences[i][2];
            int n2 = coincidences[i][3];
            int m3 = coincidences[j][0];
            int m4 = coincidences[j][1];
            int n3 = coincidences[j][2];
            int n4 = coincidences[j][3];
            int detM = m1 * m4 - m2 * m3;
            int detN = n1 * n4 - n2 * n3;
            if ((detM > 0) && (detN > 0))
            {
                int1dvec_t subvec{m1, m2, m3, m4, n1, n2, n3, n4};
                // gcd over all eight entries; |gcd| != 1 means the pair is a
                // scalar multiple of a smaller supercell
                int gcd = find_gcd(subvec, 8);
                if (abs(gcd) == 1)
                {
// ordered: serializes the push_back for thread safety
#pragma omp ordered
                    uniquePairs.push_back(subvec);
                };
            };
        };
    };
    // return {} if no unique pairs were found
    if (uniquePairs.size() > 0)
    {
        return uniquePairs;
    }
    else
    {
        return {};
    }
};
/**
 * Reduces the number of unique pairs per angle by removing duplicates.
 *
 * For every angle, the integer rows (m1..m4, n1..n4) are wrapped into 3x3
 * supercell matrices M and N; a std::set of CoincidencePairs then drops
 * duplicates according to CoincidencePairs' ordering, which prefers pairs
 * with positive, symmetric entries. The progress bar is advanced once per
 * angle.
 */
angle_dict_t CoincidenceAlgorithm::reduce_unique_pairs(std::map<double, int2dvec_t> &AnglesMN, pBar &bar)
{
    angle_dict_t reduced;
    for (const auto &entry : AnglesMN)
    {
        double theta = entry.first;
        const int2dvec_t &rows = entry.second;
        std::vector<CoincidencePairs> candidates;
        candidates.reserve(rows.size());
        for (const int1dvec_t &row : rows)
        {
            // embed the 2x2 blocks into 3x3 matrices acting on the 2d lattice
            int2dvec_t M = {{row[0], row[1], 0},
                            {row[2], row[3], 0},
                            {0, 0, 1}};
            int2dvec_t N = {{row[4], row[5], 0},
                            {row[6], row[7], 0},
                            {0, 0, 1}};
            candidates.push_back(CoincidencePairs(M, N));
        }
        std::set<CoincidencePairs> unique(candidates.begin(), candidates.end());
        reduced.insert(std::make_pair(theta, std::vector<CoincidencePairs>(unique.begin(), unique.end())));
        bar.update(1);
        bar.print();
    }
    return reduced;
};
/**
 * Builds all supercells, applying the supercell matrices M and N and the Rotation R(theta).
 *
 * The unit cell of the stack (interface) is given by C = A + weight * (B - A).
 * The loop over the supercell generation is OpenMP parallel; an ordered
 * construct serializes the push_back into the shared result vector.
 * The progress bar is advanced once per angle.
 *
 * Returns a vector of interfaces.
 */
std::vector<Interface> CoincidenceAlgorithm::build_all_supercells(Atoms &bottom, Atoms &top, angle_dict_t &AnglesMN,
                                                                  double &weight, double &distance, pBar &bar)
{
    std::vector<Interface> stacks;
    for (auto i = AnglesMN.begin(); i != AnglesMN.end(); ++i)
    {
        double theta = (*i).first;
        std::vector<CoincidencePairs> pairs = (*i).second;
#pragma omp parallel for shared(bottom, top, stacks, AnglesMN, theta, pairs) schedule(static) ordered collapse(1)
        for (int j = 0; j < pairs.size(); j++)
        {
            CoincidencePairs row = pairs[j];
            int2dvec_t M = row.M;
            int2dvec_t N = row.N;
            // bottom supercell via M, top supercell via N rotated by theta
            Atoms bottomLayer = make_supercell(bottom, M);
            Atoms topLayer = make_supercell(top, N);
            Atoms topLayerRot = rotate_atoms_around_z(topLayer, theta);
            Atoms interface = stack_atoms(bottomLayer, topLayerRot, weight, distance);
            // spacegroup placeholder 1; set later if standardization runs
            Interface stack(bottomLayer, topLayerRot, interface, theta, M, N, 1);
#pragma omp ordered
            stacks.push_back(stack);
        };
        bar.update(1);
        bar.print();
    };
    return stacks;
};
/**
 * Filters duplicate interfaces.
 *
 * Deduplication happens through a std::set of Interface, whose ordering
 * treats interfaces as equal when their spacegroup, area and number of atoms
 * match (falling back to an XtalComp equivalence check). The progress bar is
 * advanced once per input stack.
 *
 * Returns the deduplicated vector of interfaces.
 */
std::vector<Interface> CoincidenceAlgorithm::filter_supercells(std::vector<Interface> &stacks, pBar &bar)
{
    std::set<Interface> unique;
    for (const Interface &candidate : stacks)
    {
        unique.insert(candidate);
        bar.update(1);
        bar.print();
    }
    return std::vector<Interface>(unique.begin(), unique.end());
};
/**
 * Executes the coincidence lattice search algorithm for given parameters.
 *
 * Pipeline: (1) for every angle, find coincidence vectors and build unique
 * supercell matrix pairs; (2) reduce duplicates per angle; (3) build all
 * interface supercells; (4) remove duplicate stacks; (5) optionally
 * standardize each stack via spglib and record its space group.
 * Progress bars / messages are printed for verbose > 0 (counts for
 * verbose > 1). Returns an empty vector if no coincidences were found.
 */
std::vector<Interface> CoincidenceAlgorithm::run(int Nmax,
                                                 int Nmin,
                                                 double1dvec_t angles,
                                                 double tolerance,
                                                 double weight,
                                                 double distance,
                                                 bool standardize,
                                                 int no_idealize,
                                                 double symprec,
                                                 double angle_tolerance,
                                                 int verbose)
{
    int2dvec_t coincidences;
    std::map<double, int2dvec_t> AnglesMN;
    // basis is transposed: lattice rows are basis vectors, the 2x2 bases
    // here are built column-wise from the first two lattice vectors
    double2dvec_t basisA = {{this->primitive_bottom.lattice[0][0], this->primitive_bottom.lattice[1][0]}, {this->primitive_bottom.lattice[0][1], this->primitive_bottom.lattice[1][1]}};
    double2dvec_t basisB = {{this->primitive_top.lattice[0][0], this->primitive_top.lattice[1][0]}, {this->primitive_top.lattice[0][1], this->primitive_top.lattice[1][1]}};
    std::vector<Interface> stacks;
    pBar bar;
    std::string info;
    if (verbose > 0)
    {
        info = "\t Angle Search \t \t [";
        bar = pBar(angles.size(), info, true);
    }
    // step 1: coincidences and unique pairs per angle
    for (int i = 0; i < angles.size(); i++)
    {
        double theta = angles[i];
        coincidences = find_coincidences(basisA, basisB, theta, Nmin, Nmax, tolerance);
        if (coincidences.size() > 0)
        {
            int2dvec_t uniquePairs;
            uniquePairs = find_unique_pairs(coincidences);
            if (uniquePairs.size() > 0)
            {
                AnglesMN.insert(std::make_pair(theta, uniquePairs));
            }
        };
        if (verbose > 0)
        {
            bar.update(1);
            bar.print();
        }
    };
    if (verbose > 0)
        std::cout << std::endl;
    if (AnglesMN.size() > 0)
    {
        angle_dict_t fAnglesMN;
        if (verbose > 0)
        {
            info = "\t Coincidence Pairs \t [";
            bar = pBar(AnglesMN.size(), info, true);
        }
        // step 2: drop duplicate supercell matrix pairs per angle
        fAnglesMN = reduce_unique_pairs(AnglesMN, bar);
        if (verbose > 0)
            std::cout << std::endl;
        if (verbose > 0)
        {
            info = "\t Building Supercells \t [";
            bar = pBar(fAnglesMN.size(), info, true);
        }
        // step 3: build one interface per remaining (theta, M, N)
        stacks = build_all_supercells(this->primitive_bottom,
                                      this->primitive_top,
                                      fAnglesMN,
                                      weight,
                                      distance,
                                      bar);
        if (verbose > 0)
            std::cout << std::endl;
    }
    else
    {
        // no coincidences at any angle
        return {};
    }
    if (stacks.size() > 0)
    {
        if (verbose > 0)
        {
            info = "\t Removing Duplicates \t [";
            bar = pBar(stacks.size(), info, true);
        }
        // step 4: deduplicate the generated interfaces
        int startsize = stacks.size();
        stacks = filter_supercells(stacks, bar);
        if (verbose > 0)
            std::cout << std::endl;
        int endsize = stacks.size();
        if (verbose > 1)
            std::cout << "\t Removed " << startsize - endsize << " duplicates from " << startsize << " stacks." << std::endl;
    }
    if (standardize)
    {
        if (verbose > 0)
        {
            info = "\t Standardizing \t \t [";
            bar = pBar(stacks.size(), info, true);
        }
        // step 5: spglib standardization; records the space group per stack
        for (int i = 0; i < stacks.size(); i++)
        {
            Interface interface = stacks[i];
            Atoms stack = interface.stack;
            int sg = stack.standardize(1, no_idealize, symprec, angle_tolerance);
            interface.set_stack(stack);
            interface.set_spacegroup(sg);
            stacks[i] = interface;
            bar.update(1);
            bar.print();
        }
        if (verbose > 0)
            std::cout << std::endl;
    };
    return stacks;
};
2D | romankempt/hetbuilder | backend/coincidence_algorithm.h | .h | 1,768 | 51 | #pragma once
#include "logging_functions.h"
#include "math_functions.h"
#include "atom_class.h"
#include "atom_functions.h"
#include "helper_classes.h"
#include "interface_class.h"
typedef std::map<double, std::vector<CoincidencePairs>> angle_dict_t;
/**
 * Class definition of the lattice coincidence algorithm.
 *
 * Holds the two primitive layers and, via run(), searches rotation angles and
 * supercell matrix pairs (M, N) such that the two lattices coincide within a
 * tolerance, returning the resulting interfaces.
 *
 * Executed by the run() method.
 */
class CoincidenceAlgorithm
{
public:
    // primitive unit cells of the two layers to be stacked
    Atoms primitive_bottom;
    Atoms primitive_top;
    CoincidenceAlgorithm(Atoms cPrimitiveBottom,
                         Atoms cPrimitiveTop)
    {
        primitive_bottom = cPrimitiveBottom;
        primitive_top = cPrimitiveTop;
    };
    // solves |Am - R(theta)Bn| < tolerance for one angle
    int2dvec_t find_coincidences(double2dvec_t &A, double2dvec_t &B, double &theta, int &Nmin, int &Nmax, double &tolerance);
    // combines coincidence rows into independent (M, N) pairs with gcd 1
    int2dvec_t find_unique_pairs(int2dvec_t &coincidences);
    // removes duplicate (M, N) pairs per angle
    angle_dict_t reduce_unique_pairs(std::map<double, int2dvec_t> &AnglesMN, pBar &bar);
    // builds all interface supercells from the reduced pairs
    std::vector<Interface> build_all_supercells(Atoms &bottom, Atoms &top, angle_dict_t &AnglesMN, double &weight, double &distance, pBar &bar);
    // deduplicates the generated interfaces
    std::vector<Interface> filter_supercells(std::vector<Interface> &stacks, pBar &bar);
    // full pipeline; see coincidence_algorithm.cpp for details
    std::vector<Interface> run(int cNmax = 10,
                               int cNmin = 0,
                               double1dvec_t cAngles = {0.0, 30.0, 60.0, 90.0},
                               double cTolerance = 0.01,
                               double cWeight = 0.5,
                               double cDistance = 4.0,
                               bool cStandardize = true,
                               int cNoIdealize = 0,
                               double cSymPrec = 1e-5,
                               double cAngleTolerance = 5.0,
                               int verbose = 0);
};
| Unknown |
2D | romankempt/hetbuilder | backend/atom_functions.cpp | .cpp | 9,021 | 288 | #include <cmath>
#include "math_functions.h"
#include "logging_functions.h"
#include "atom_class.h"
#include "atom_functions.h"
using std::sin, std::cos, std::sqrt, std::pow, std::abs;
/**
 * Find all lattice points contained in a supercell.
 *
 * Takes a 3x3 integer supercell matrix and returns the fractional coordinates
 * (with respect to the supercell) of every primitive lattice point inside it.
 * The number of returned points must equal |det(superCellMatrix)|; otherwise
 * an error is printed and an empty vector is returned.
 *
 Adapted from pymatgen, which is available under MIT license:
 The MIT License (MIT) Copyright (c) 2011-2012 MIT & The Regents of the
 University of California, through Lawrence Berkeley National Laboratory
 */
double2dvec_t lattice_points_in_supercell(int2dvec_t &superCellMatrix)
{
    // corners of the unit cube, mapped through the supercell matrix to find
    // the bounding box of the supercell in primitive lattice coordinates
    int2dvec_t diagonals = {{0, 0, 0},
                            {0, 0, 1},
                            {0, 1, 0},
                            {0, 1, 1},
                            {1, 0, 0},
                            {1, 0, 1},
                            {1, 1, 0},
                            {1, 1, 1}};
    int2dvec_t dpoints;
    int1dvec_t dotproduct;
    for (int row = 0; row < diagonals.size(); row++)
    {
        dotproduct = vec1x3_dot_3x3_matrix<int, int>(diagonals[row], superCellMatrix);
        dpoints.push_back(dotproduct);
    }
    // component-wise bounding box of the transformed corners
    int1dvec_t mins = {0, 0, 0};
    int1dvec_t maxes = {0, 0, 0};
    for (int row = 0; row < dpoints.size(); row++)
    {
        for (int j = 0; j < 3; j++)
        {
            if (dpoints[row][j] < mins[j])
            {
                mins[j] = dpoints[row][j];
            }
            if (dpoints[row][j] > maxes[j])
            {
                maxes[j] = dpoints[row][j];
            }
        }
    }
    maxes = {maxes[0] + 1, maxes[1] + 1, maxes[2] + 1};
    // candidate integer translations along each axis
    int2dvec_t ar, br, cr;
    int1dvec_t subvec(3, 0);
    for (int a = mins[0]; a < maxes[0]; a++)
    {
        subvec = {a, 0, 0};
        ar.push_back(subvec);
    }
    for (int b = mins[1]; b < maxes[1]; b++)
    {
        subvec = {0, b, 0};
        br.push_back(subvec);
    }
    for (int c = mins[2]; c < maxes[2]; c++)
    {
        subvec = {0, 0, c};
        cr.push_back(subvec);
    }
    // all integer lattice points inside the bounding box
    int2dvec_t allpoints;
    for (int i = 0; i < ar.size(); i++)
    {
        for (int j = 0; j < br.size(); j++)
        {
            for (int k = 0; k < cr.size(); k++)
            {
                subvec[0] = ar[i][0] + br[j][0] + cr[k][0];
                subvec[1] = ar[i][1] + br[j][1] + cr[k][1];
                subvec[2] = ar[i][2] + br[j][2] + cr[k][2];
                allpoints.push_back(subvec);
            }
        }
    }
    // convert integer matrix to doubles
    double2dvec_t allpoints_double;
    for (int row = 0; row < allpoints.size(); row++)
    {
        double1dvec_t doubleVec(allpoints[row].begin(), allpoints[row].end());
        allpoints_double.push_back(doubleVec);
    };
    double determinant = get_3x3_matrix_determinant<int>(superCellMatrix);
    double2dvec_t invSuperCellMatrix = invert_3x3_matrix<int>(superCellMatrix);
    // fractional coordinates with respect to the supercell
    double2dvec_t fracpoints;
    std::vector<double> dp;
    for (int row = 0; row < allpoints.size(); row++)
    {
        dp = vec1x3_dot_3x3_matrix<double, double>(allpoints_double[row], invSuperCellMatrix);
        fracpoints.push_back(dp);
    }
    // keep points in the half-open cell [0, 1) with a small tolerance
    double2dvec_t tvects;
    double fa, fb, fc;
    double1dvec_t fvec;
    for (int row = 0; row < fracpoints.size(); row++)
    {
        fa = fracpoints[row][0];
        fb = fracpoints[row][1];
        fc = fracpoints[row][2];
        if ((fa <= (1 - 1e-10) && (fa >= (-1e-10))) && (fb <= (1 - 1e-10) && (fb >= (-1e-10))) && (fc <= (1 - 1e-10) && (fc >= (-1e-10))))
        {
            fvec = {fa, fb, fc};
            tvects.push_back(fvec);
        }
    }
    try
    {
        // Sanity check: the number of interior lattice points must equal the
        // magnitude of the determinant. Round instead of truncating: a plain
        // (int) cast turns e.g. 3.9999999 into 3 and would spuriously fail;
        // std::abs additionally accepts orientation-reversing (negative
        // determinant) matrices, matching pymatgen's round(abs(det)) check.
        int detsize = (int)std::lround(std::abs(determinant));
        if (detsize != tvects.size())
        {
            throw "Determinant of supercell does not match number of lattice points.";
        }
    }
    catch (const char *msg)
    {
        std::cout << msg << std::endl;
        tvects = {};
    }
    return tvects;
};
/**
 * Generate a supercell by applying superCellMatrix to the primitive
 * configuration prim.
 *
 * The result keeps the primitive atoms first, followed by one shifted replica
 * per atom and non-zero lattice point. Indices of the supercell atoms map to
 * the indices of the primitive cell for later use; magnetic moments are
 * carried over per atom.
 */
Atoms make_supercell(Atoms &prim, int2dvec_t &superCellMatrix)
{
    double2dvec_t fracpoints = lattice_points_in_supercell(superCellMatrix);
    double2dvec_t supercell = matrix3x3_dot_matrix3x3<int, double>(superCellMatrix, prim.lattice);
    // cartesian translation vectors of every lattice point in the supercell
    double2dvec_t cartShifts;
    cartShifts.reserve(fracpoints.size());
    for (double1dvec_t &fp : fracpoints)
    {
        cartShifts.push_back(vec1x3_dot_3x3_matrix<double, double>(fp, supercell));
    }
    // start from the primitive atoms, then append each shifted replica
    double2dvec_t newPositions = prim.positions;
    int1dvec_t newNumbers = prim.atomic_numbers;
    int1dvec_t indexMap = prim.indices;
    double1dvec_t newMagmoms = prim.magmoms;
    for (int atom = 0; atom < prim.numAtom; atom++)
    {
        for (const double1dvec_t &shift : cartShifts)
        {
            // the (near-)zero shift corresponds to the primitive atom itself,
            // which is already present
            if ((std::abs(shift[0]) + std::abs(shift[1]) + std::abs(shift[2])) > 1e-6)
            {
                double1dvec_t moved = {shift[0] + prim.positions[atom][0],
                                       shift[1] + prim.positions[atom][1],
                                       shift[2] + prim.positions[atom][2]};
                newPositions.push_back(moved);
                newNumbers.push_back(prim.atomic_numbers[atom]);
                indexMap.push_back(prim.indices[atom]);
                newMagmoms.push_back(prim.magmoms[atom]);
            }
        }
    }
    return Atoms(supercell, newPositions, newNumbers, indexMap, newMagmoms);
};
// Returns a copy of atoms with both the cell and the atomic positions rotated
// around the z-axis by the angle theta (in degrees).
Atoms rotate_atoms_around_z(Atoms &atoms, double &theta)
{
    const double rad = theta * M_PI / 180.0;
    const double c = std::cos(rad);
    const double s = std::sin(rad);
    double2dvec_t R = {{c, -s, 0}, {s, c, 0}, {0, 0, 1}};
    // rotate every position: p' = R * p
    double2dvec_t rotPositions;
    rotPositions.reserve(atoms.positions.size());
    for (double1dvec_t &pos : atoms.positions)
    {
        rotPositions.push_back(matrix3x3_dot_vec3x1<double, double>(R, pos));
    }
    // the lattice stores basis vectors as rows, so rotate its transpose
    double2dvec_t cellT = transpose_matrix3x3<double>(atoms.lattice);
    double2dvec_t rotCellT = matrix3x3_dot_matrix3x3(R, cellT);
    double2dvec_t rotCell = transpose_matrix3x3<double>(rotCellT);
    return Atoms(rotCell, rotPositions, atoms.atomic_numbers, atoms.indices, atoms.magmoms);
};
// Shifts all atomic positions of atoms along z by the given amount (in place).
void translate_atoms_z(Atoms &atoms, double shift)
{
    double2dvec_t shifted;
    shifted.reserve(atoms.numAtom);
    for (int i = 0; i < atoms.numAtom; i++)
    {
        shifted.push_back({atoms.positions[i][0],
                           atoms.positions[i][1],
                           atoms.positions[i][2] + shift});
    }
    atoms.positions = shifted;
};
// Returns the (minimum, maximum) z coordinate over all atomic positions.
std::tuple<double, double> get_min_max_z(Atoms &atoms)
{
    double lowest = atoms.positions[0][2];
    double highest = lowest;
    for (int i = 1; i < atoms.numAtom; i++)
    {
        const double z = atoms.positions[i][2];
        if (z < lowest)
        {
            lowest = z;
        }
        if (z > highest)
        {
            highest = z;
        }
    }
    return std::make_tuple(lowest, highest);
};
/**
 * Stacks two Atoms objects on top of each other with the given interlayer
 * distance.
 *
 * Both layers are taken by value and modified locally: the bottom layer is
 * shifted so its lowest atom sits at z = 0, the top layer is lifted so the
 * gap between the layers equals distance. The combined unit cell is
 * C = A + weight * (B - A), keeping the bottom layer's c-axis length before
 * scaling; the final c length is padded by 50 Angstrom of vacuum.
 *
 * Returns a new Atoms object.
 */
Atoms stack_atoms(Atoms bottom, Atoms top, double &weight, double &distance)
{
    // need to make sure that both cells have the same initial c length (probably from python)
    auto [bottomMin, bottomMax] = get_min_max_z(bottom);
    auto [topMin, topMax] = get_min_max_z(top);
    translate_atoms_z(bottom, -bottomMin);
    double bottomThickness = bottomMax - bottomMin;
    double topThickness = topMax - topMin;
    translate_atoms_z(top, -topMin + bottomThickness + distance);
    // interpolate the in-plane cell between the two lattices
    double2dvec_t mixedCell(3, std::vector<double>(3, 0));
    for (int i = 0; i < 3; i++)
    {
        for (int j = 0; j < 3; j++)
        {
            mixedCell[i][j] = bottom.lattice[i][j] + weight * (top.lattice[i][j] - bottom.lattice[i][j]);
        }
    }
    mixedCell[2][2] = bottom.lattice[2][2];
    bottom.scale_cell(mixedCell);
    top.scale_cell(mixedCell);
    Atoms combined = bottom + top;
    combined.lattice[2][2] = bottomThickness + topThickness + distance + 50.0;
    return combined;
};
2D | romankempt/hetbuilder | backend/math_functions.cpp | .cpp | 6,428 | 189 | #include <cmath>
#include <vector>
#include <array>
#include <set>
#include <map>
#include "math_functions.h"
using std::sin, std::cos, std::sqrt, std::pow, std::abs;
typedef std::vector<int> int1dvec_t;
typedef std::vector<double> double1dvec_t;
typedef std::vector<std::vector<int>> int2dvec_t;
typedef std::vector<std::vector<double>> double2dvec_t;
// Matrix-vector product of a 2x2 basis with a 2-vector:
// result[i] = basis[i][0] * vec[0] + basis[i][1] * vec[1].
template <typename T1, typename T2>
std::vector<T1> basis_2x2_dot_2d_vector(const std::vector<std::vector<T1>> &basis, std::vector<T2> &vec)
{
    std::vector<T1> product(2);
    product[0] = basis[0][0] * vec[0] + basis[0][1] * vec[1];
    product[1] = basis[1][0] * vec[0] + basis[1][1] * vec[1];
    return product;
};
template int1dvec_t basis_2x2_dot_2d_vector<int, int>(const int2dvec_t &basis, int1dvec_t &vec);
template double1dvec_t basis_2x2_dot_2d_vector<double, int>(const double2dvec_t &basis, int1dvec_t &vec);
template double1dvec_t basis_2x2_dot_2d_vector<double, double>(const double2dvec_t &basis, double1dvec_t &vec);
// Rotates a 2-vector counter-clockwise by theta degrees.
template <typename T>
double1dvec_t rotate_2d_vector(std::vector<T> &vec, const double &theta)
{
    const double rad = theta * M_PI / 180.0;
    const double c = cos(rad);
    const double s = sin(rad);
    // standard 2d rotation: (x, y) -> (c*x - s*y, s*x + c*y)
    return {c * vec[0] - s * vec[1], s * vec[0] + c * vec[1]};
};
template double1dvec_t rotate_2d_vector<int>(int1dvec_t &vec, const double &theta);
template double1dvec_t rotate_2d_vector<double>(double1dvec_t &vec, const double &theta);
// Returns the Euclidean distance |Am - RBn| of two 2d points.
template <typename T>
double get_distance(std::vector<T> &Am, std::vector<T> &RBn)
{
    const double dx = Am[0] - RBn[0];
    const double dy = Am[1] - RBn[1];
    return sqrt(dx * dx + dy * dy);
};
template double get_distance<int>(int1dvec_t &Am, int1dvec_t &RBn);
template double get_distance<double>(double1dvec_t &Am, double1dvec_t &RBn);
// Greatest common divisor of a and b (iterative Euclidean algorithm).
int get_gcd(int a, int b)
{
    while (a != 0)
    {
        int remainder = b % a;
        b = a;
        a = remainder;
    }
    return b;
}
// Greatest common divisor of the first n entries of arr.
// Returns early as soon as the running gcd collapses to 1.
int find_gcd(int1dvec_t &arr, int n)
{
    int running = arr[0];
    for (int idx = 1; idx < n; idx++)
    {
        running = get_gcd(arr[idx], running);
        if (running == 1)
        {
            return 1;
        }
    }
    return running;
}
// Row-vector times matrix product: b[i] = sum_j a[j] * matrix[j][i],
// for a 1x3 row vector a and a 3x3 matrix.
template <typename T1, typename T2>
std::vector<T1> vec1x3_dot_3x3_matrix(std::vector<T1> &a, std::vector<std::vector<T2>> &matrix)
{
    std::vector<T1> b(3, 0);
    for (std::size_t col = 0; col < a.size(); col++)
    {
        b[col] = a[0] * matrix[0][col] + a[1] * matrix[1][col] + a[2] * matrix[2][col];
    }
    return b;
};
template int1dvec_t vec1x3_dot_3x3_matrix<int, int>(int1dvec_t &a, int2dvec_t &matrix);
template double1dvec_t vec1x3_dot_3x3_matrix<double, int>(double1dvec_t &a, int2dvec_t &matrix);
template double1dvec_t vec1x3_dot_3x3_matrix<double, double>(double1dvec_t &a, double2dvec_t &matrix);
// Matrix times column-vector product: b[i] = sum_j matrix[i][j] * a[j],
// for a 3x3 matrix and a 3x1 column vector a.
template <typename T1, typename T2>
std::vector<T1> matrix3x3_dot_vec3x1(std::vector<std::vector<T2>> &matrix, std::vector<T1> &a)
{
    std::vector<T1> b(3, 0);
    for (std::size_t row = 0; row < a.size(); row++)
    {
        b[row] = a[0] * matrix[row][0] + a[1] * matrix[row][1] + a[2] * matrix[row][2];
    }
    return b;
};
template int1dvec_t matrix3x3_dot_vec3x1<int, int>(int2dvec_t &matrix, int1dvec_t &a);
template double1dvec_t matrix3x3_dot_vec3x1<double, int>(int2dvec_t &matrix, double1dvec_t &a);
template double1dvec_t matrix3x3_dot_vec3x1<double, double>(double2dvec_t &matrix, double1dvec_t &a);
// Determinant of a 3x3 matrix via Laplace expansion along the first row.
template <typename T>
double get_3x3_matrix_determinant(std::vector<std::vector<T>> &mat)
{
    double det = 0;
    det = det + (mat[0][0] * (mat[1][1] * mat[2][2] - mat[1][2] * mat[2][1]));
    det = det + (mat[0][1] * (mat[1][2] * mat[2][0] - mat[1][0] * mat[2][2]));
    det = det + (mat[0][2] * (mat[1][0] * mat[2][1] - mat[1][1] * mat[2][0]));
    return det;
};
template double get_3x3_matrix_determinant<int>(int2dvec_t &mat);
template double get_3x3_matrix_determinant<double>(double2dvec_t &mat);
// Inverse of a 3x3 matrix via the adjugate divided by the determinant.
// NOTE(review): there is no check for a singular matrix -- a zero determinant
// produces inf/nan entries; callers presumably pass invertible matrices.
template <typename T>
double2dvec_t invert_3x3_matrix(std::vector<std::vector<T>> &mat)
{
    double determinant = get_3x3_matrix_determinant(mat);
    double2dvec_t minv(3, std::vector<double>(3, 0)); // inverse of matrix m
    for (int i = 0; i < 3; i++)
    {
        // cofactor expansion; the (j, i) index swap performs the transpose
        for (int j = 0; j < 3; j++)
            minv[i][j] = ((mat[(j + 1) % 3][(i + 1) % 3] * mat[(j + 2) % 3][(i + 2) % 3]) - (mat[(j + 1) % 3][(i + 2) % 3] * mat[(j + 2) % 3][(i + 1) % 3])) / determinant;
    }
    return minv;
}
template double2dvec_t invert_3x3_matrix<int>(int2dvec_t &mat);
template double2dvec_t invert_3x3_matrix<double>(double2dvec_t &mat);
/**
 * Multiplies two 3x3 matrices and returns the 3x3 product mat1 * mat2.
 */
template <typename T1, typename T2>
std::vector<std::vector<T2>> matrix3x3_dot_matrix3x3(std::vector<std::vector<T1>> &mat1, std::vector<std::vector<T2>> &mat2)
{
    std::vector<std::vector<T2>> product(3, std::vector<T2>(3, 0));
    for (int row = 0; row < 3; row++)
    {
        for (int col = 0; col < 3; col++)
        {
            // accumulate over the contracted index
            for (int k = 0; k < 3; k++)
            {
                product[row][col] += mat1[row][k] * mat2[k][col];
            }
        }
    }
    return product;
};
template int2dvec_t matrix3x3_dot_matrix3x3<int, int>(int2dvec_t &mat1, int2dvec_t &mat2);
template double2dvec_t matrix3x3_dot_matrix3x3<int, double>(int2dvec_t &mat1, double2dvec_t &mat2);
template double2dvec_t matrix3x3_dot_matrix3x3<double, double>(double2dvec_t &mat1, double2dvec_t &mat2);
// Returns the transpose of matrix mat (shape follows the input's row/column sizes).
template <typename T>
std::vector<std::vector<T>> transpose_matrix3x3(std::vector<std::vector<T>> &mat)
{
    const std::size_t nrows = mat.size();
    const std::size_t ncols = mat[0].size();
    std::vector<std::vector<T>> transposed(ncols, std::vector<T>(nrows));
    for (std::size_t r = 0; r < nrows; r++)
    {
        for (std::size_t c = 0; c < ncols; c++)
        {
            transposed[c][r] = mat[r][c];
        }
    }
    return transposed;
};
template int2dvec_t transpose_matrix3x3(int2dvec_t &mat);
template double2dvec_t transpose_matrix3x3(double2dvec_t &mat);
2D | romankempt/hetbuilder | backend/atom_class.h | .h | 2,862 | 110 | #pragma once
#include "math_functions.h"
#include "xtalcomp.h"
// Container for an atomic configuration: lattice (rows = basis vectors),
// cartesian positions, atomic numbers, per-atom indices (mapping back to a
// primitive cell) and magnetic moments. Provides conversions to the spglib
// and XtalComp data layouts.
class Atoms
{
public:
    double2dvec_t lattice;        // 3x3, basis vectors as rows
    double2dvec_t positions;      // cartesian coordinates, one row per atom
    int1dvec_t atomic_numbers;    // one atomic number per atom
    int numAtom;                  // number of atoms (size of atomic_numbers)
    int1dvec_t indices;           // mapping to primitive-cell atom indices
    double1dvec_t magmoms;        // magnetic moment per atom
    // constructor if neither indices nor magmoms were provided;
    // indices default to 0..N-1 and magmoms to 0.0
    Atoms(double2dvec_t cLattice,
          double2dvec_t cPositions,
          int1dvec_t cAtomicNumbers)
    {
        lattice = cLattice;
        positions = cPositions;
        atomic_numbers = cAtomicNumbers;
        numAtom = atomic_numbers.size();
        for (int i = 0; i < numAtom; i++)
        {
            indices.push_back(i);
            magmoms.push_back(0.0);
        }
    };
    // constructor if indices were not provided; indices default to 0..N-1
    Atoms(double2dvec_t cLattice,
          double2dvec_t cPositions,
          int1dvec_t cAtomicNumbers,
          double1dvec_t cMagmoms)
    {
        lattice = cLattice;
        positions = cPositions;
        atomic_numbers = cAtomicNumbers;
        magmoms = cMagmoms;
        numAtom = atomic_numbers.size();
        for (int i = 0; i < numAtom; i++)
            indices.push_back(i);
    };
    // constructor if magmoms were not provided; magmoms default to 0.0
    Atoms(double2dvec_t cLattice,
          double2dvec_t cPositions,
          int1dvec_t cAtomicNumbers,
          int1dvec_t cIndices)
    {
        lattice = cLattice;
        positions = cPositions;
        atomic_numbers = cAtomicNumbers;
        numAtom = atomic_numbers.size();
        indices = cIndices;
        for (int i = 0; i < numAtom; i++)
        {
            magmoms.push_back(0.0);
        }
    };
    // constructor if magmoms and indices were provided
    Atoms(double2dvec_t cLattice,
          double2dvec_t cPositions,
          int1dvec_t cAtomicNumbers,
          int1dvec_t cIndices,
          double1dvec_t cMagmoms)
    {
        lattice = cLattice;
        positions = cPositions;
        atomic_numbers = cAtomicNumbers;
        numAtom = atomic_numbers.size();
        indices = cIndices;
        magmoms = cMagmoms;
    };
    // empty constructor
    Atoms(){};
    // prints lattice, positions and atom data to stdout
    void print(void);
    // returns the supercell-to-primitive index mapping
    int1dvec_t get_index_mapping(void);
    // cartesian <-> fractional coordinate conversions
    double2dvec_t get_scaled_positions(void);
    double2dvec_t scaled_positions_to_cartesian(double2dvec_t &scalpos);
    // replaces the cell, rescaling the atomic positions accordingly
    void scale_cell(double2dvec_t &newcell);
    // concatenates two configurations (same cell assumed)
    Atoms operator+(const Atoms &b);
    // conversions to the plain C arrays spglib expects
    void lattice_to_spglib_array(double arr[3][3]);
    void positions_to_spglib_array(double arr[][3]);
    void atomic_numbers_to_spglib_types(int arr[]);
    // spglib cell standardization; returns the space group number
    int standardize(int to_primitive = 1, int no_idealize = 0, double symprec = 1e-5, double angle_tolerance = 5.0);
    // conversions to the XtalComp data layout
    XcMatrix lattice_to_xtalcomp_cell();
    std::vector<unsigned int> atomic_numbers_to_xtalcomp_types();
    std::vector<XcVector> positions_to_xtalcomp_positions();
    // XtalComp equivalence check including magnetic moments
    bool xtalcomp_compare(Atoms &other);
};
| Unknown |
2D | romankempt/hetbuilder | backend/pybindings.cpp | .cpp | 3,467 | 77 | #include <Python.h>
#include <pybind11/pybind11.h>
#include <pybind11/stl_bind.h>
#include <pybind11/iostream.h>
#include "logging_functions.h"
#include "math_functions.h"
#include "atom_class.h"
#include "atom_functions.h"
#include "interface_class.h"
#include "coincidence_algorithm.h"
namespace py = pybind11;
typedef std::vector<int> int1dvec_t;
typedef std::vector<double> double1dvec_t;
typedef std::vector<std::vector<int>> int2dvec_t;
typedef std::vector<std::vector<double>> double2dvec_t;
PYBIND11_MAKE_OPAQUE(int1dvec_t);
PYBIND11_MAKE_OPAQUE(int2dvec_t);
PYBIND11_MAKE_OPAQUE(double1dvec_t);
PYBIND11_MAKE_OPAQUE(double2dvec_t);
// pybind11 module definition: exposes the C++ Atoms, Interface and
// CoincidenceAlgorithm classes plus a few free functions to Python.
PYBIND11_MODULE(hetbuilder_backend, m)
{
    m.doc() = "C++ implementation of the coincidence algorithm."; // optional module docstring
    // datatype casting: opaque vector types bound as Python classes
    py::bind_vector<int1dvec_t>(m, "int1dVector");
    py::bind_vector<int2dvec_t>(m, "int2dVector");
    py::bind_vector<double1dvec_t>(m, "double1dVector");
    py::bind_vector<double2dvec_t>(m, "double2dVector");
    // output stream: lets Python redirect C++ std::cout/std::cerr
    py::add_ostream_redirect(m, "ostream_redirect");
    // combined datatype castings
    py::bind_vector<std::vector<Interface>>(m, "Interfaces");
    // class bindings
    py::class_<Atoms>(m, "CppAtomsClass")
        .def(py::init<double2dvec_t &, double2dvec_t &, int1dvec_t &, int1dvec_t &, double1dvec_t &>())
        .def_readwrite("lattice", &Atoms::lattice)
        .def_readwrite("positions", &Atoms::positions)
        .def_readwrite("atomic_numbers", &Atoms::atomic_numbers)
        .def_readwrite("indices", &Atoms::indices)
        .def_readwrite("magmoms", &Atoms::magmoms)
        .def("standardize", &Atoms::standardize, "Spglib standardization.")
        .def("print", &Atoms::print, py::call_guard<py::scoped_ostream_redirect, py::scoped_estream_redirect>(), "Print information.")
        .def("scale_cell", &Atoms::scale_cell, "Scales cell.")
        .def("compare", &Atoms::xtalcomp_compare, "Performs XtalComp check if two Atoms are equivalent.")
        .def("get_index_mapping", &Atoms::get_index_mapping, "Returns index mapping in supercell.");
    // Interface members are read-only from Python
    py::class_<Interface>(m, "CppInterfaceClass")
        .def(py::init<Atoms &, Atoms &, Atoms &, double &, int2dvec_t &, int2dvec_t &, int &>())
        .def_readonly("bottom", &Interface::bottomLayer)
        .def_readonly("top", &Interface::topLayer)
        .def_readonly("stack", &Interface::stack)
        .def_readonly("angle", &Interface::angle)
        .def_readonly("M", &Interface::M)
        .def_readonly("N", &Interface::N)
        .def_readonly("spacegroup", &Interface::spaceGroup);
    py::class_<CoincidenceAlgorithm>(m, "CppCoincidenceAlgorithmClass")
        .def(py::init<Atoms &, Atoms &>())
        .def("run", &CoincidenceAlgorithm::run, "Runs the coincidence algorithm.");
    // function definitions
    m.def("get_number_of_omp_threads", &get_number_of_threads, "Returns number of available OMP threads.");
    m.def("cpp_make_supercell", &make_supercell, "C++ implementation to make supercell");
    m.def("cpp_lattice_points_in_supercell", &lattice_points_in_supercell, py::call_guard<py::scoped_ostream_redirect, py::scoped_estream_redirect>(), "C++ implementation to find lattice points in supercell matrix.");
    m.def("cpp_rotate_atoms_around_z", &rotate_atoms_around_z, py::call_guard<py::scoped_ostream_redirect, py::scoped_estream_redirect>(), "C++ implementation to rotate cell and atomic positions.");
}
2D | romankempt/hetbuilder | backend/atom_functions.h | .h | 306 | 10 | #pragma once
#include <vector>
// Fractional coordinates of all primitive lattice points inside the supercell
// described by superCellMatrix (adapted from pymatgen).
double2dvec_t lattice_points_in_supercell(int2dvec_t &superCellMatrix);
// Builds the supercell of prim defined by the 3x3 integer matrix superCellMatrix.
Atoms make_supercell(Atoms &prim, int2dvec_t &superCellMatrix);
// Returns a copy of atoms with cell and positions rotated by theta (degrees) around z.
Atoms rotate_atoms_around_z(Atoms &atoms, double &theta);
// Stacks top onto bottom with the given interlayer distance; the combined
// cell is C = A + weight * (B - A).
Atoms stack_atoms(Atoms bottom, Atoms top, double &weight, double &distance);
2D | romankempt/hetbuilder | backend/logging_functions.h | .h | 613 | 24 | #pragma once
#include <vector>
#include <iostream>
#include <map>
// Shared vector aliases used throughout the backend.
typedef std::vector<int> int1dvec_t;
typedef std::vector<double> double1dvec_t;
typedef std::vector<std::vector<int>> int2dvec_t;
typedef std::vector<std::vector<double>> double2dvec_t;
// Prints a 1d vector to stdout.
template <typename T>
void print_1d_vector(std::vector<T> &vec);
// Prints a 2d vector to stdout (const and non-const overloads).
template <typename T>
void print_2d_vector(const std::vector<std::vector<T>> &vec);
template <typename T>
void print_2d_vector(std::vector<std::vector<T>> &vec);
// Logs / returns the number of available OpenMP threads.
void log_number_of_threads();
int get_number_of_threads();
// Prints the keys and 2d-vector values of a map.
void print_map_key_2d_vector(std::map<double, int2dvec_t> const &m);
2D | romankempt/hetbuilder | hetbuilder/backend_test.py | .py | 5,100 | 177 | """ Test functions for local testing, by copying the shared library hetbuilder_backend.so to hetbuilder/ """
from dataclasses import dataclass
from hetbuilder.algorithm import CoincidenceAlgorithm
from hetbuilder.plotting import InteractivePlot
from pathlib import Path
import ase.io
from ase.build import mx2
import numpy as np
from ase.utils.structure_comparator import SymmetryEquivalenceCheck
from ase.geometry import cell_to_cellpar, cellpar_to_cell
from ase.build import make_supercell
from ase.atoms import Atoms
from hetbuilder.algorithm import ase_atoms_to_cpp_atoms
from timeit import default_timer as time
from itertools import islice
from random import randint
def test_algorithm():
    """Runs the coincidence algorithm for graphene on WS2 and shows the results."""
    cell = cellpar_to_cell(np.array([2.46, 2.46, 100.0, 90.0, 90.0, 120]))
    positions = np.array([[0.0, 1.42028166, -1.6775], [0.0, 0.0, -1.6775]])
    atoms1 = Atoms(
        symbols=["C", "C"], positions=positions, cell=cell, pbc=True
    )
    atoms2 = mx2("WS2")
    atoms2.pbc = True
    atoms2.cell[2, 2] = 100
    alg = CoincidenceAlgorithm(atoms1, atoms2)
    results = alg.run(
        tolerance=0.1, Nmax=10, angle_limits=(0, 30), angle_stepsize=0.01, verbosity=2
    )
    if results is None:
        print("nope")
    else:
        ip = InteractivePlot(atoms1, atoms2, results, 0.5)
        ip.plot_results()
def test_scaling_ase(M=4, N=5):
    """Benchmarks ASE's SymmetryEquivalenceCheck-based deduplication.

    Builds N perturbed copies of m x m MoS2 supercells for m in [1, M) and
    times how long duplicate removal takes, printing an average per size.
    """
    atoms1 = mx2("MoS2")
    atoms1.pbc = True
    atoms1.cell[2, 2] = 100
    atoms2 = mx2("MoS2")
    atoms2.pbc = True
    atoms2.cell[2, 2] = 100
    atoms2.set_cell(atoms2.cell * 1.1, scale_atoms=True)
    for m in range(1, M):
        atoms = make_supercell(atoms1, m * np.eye(3))
        # Use independent copies: ``[atoms] * N`` aliased one object N times,
        # so every rotate/rattle below hit the same structure repeatedly.
        atoms_list = [atoms.copy() for _ in range(N)]
        for j in atoms_list:
            if randint(0, 10) > 0:
                j.rotate(randint(0, 90), "z", rotate_cell=True)
                j.rattle()
        # Index into the list being modified, not into the primitive cell.
        atoms_list[randint(0, len(atoms_list) - 1)] = atoms_list[0]
        atoms_list[randint(0, len(atoms_list) - 1)].translate([0, 0, 100])
        atoms_list[randint(0, len(atoms_list) - 1)] = atoms2

        def compare(a1, other):
            # other can be list
            comp = SymmetryEquivalenceCheck()
            return comp.compare(a1, other)

        def del_dups_ase(lst):
            """O(n**2) algorithm, O(1) in memory"""
            pos = 0
            for item in lst:
                if not compare(item, islice(lst, pos)):
                    # we haven't seen `item` yet
                    lst[pos] = item
                    pos += 1
            del lst[pos:]

        ase_timings = []
        # NOTE(review): the first call mutates atoms_list in place, so later
        # repetitions time an already-deduplicated list — confirm intended.
        for k in range(10):
            t1 = time()
            del_dups_ase(atoms_list)
            t2 = time()
            ase_timings.append(t2 - t1)
        # default_timer returns seconds; multiply by 1000 to report ms. The
        # old code divided the mean by len() again and by 1000.
        ase_time = np.average(ase_timings) * 1000
        print(f"M={m} N={N} \t ASE {ase_time:e} ms")
def test_scaling_cpp(M=4, N=5):
    """Benchmarks the C++ compare-based deduplication for comparison.

    Mirrors :func:`test_scaling_ase` but deduplicates CppAtomsClass objects.
    """
    atoms1 = mx2("MoS2")
    atoms1.pbc = True
    atoms1.cell[2, 2] = 100
    atoms2 = mx2("MoS2")
    atoms2.pbc = True
    atoms2.cell[2, 2] = 100
    atoms2.set_cell(atoms2.cell * 1.1, scale_atoms=True)
    for m in range(1, M):
        atoms = make_supercell(atoms1, m * np.eye(3))
        # Independent copies; ``[atoms] * N`` aliased one object N times.
        atoms_list = [atoms.copy() for _ in range(N)]
        for j in atoms_list:
            if randint(0, 10) > 0:
                j.rotate(randint(0, 90), "z", rotate_cell=True)
                j.rattle()
        # Index into the list being modified, not into the primitive cell.
        atoms_list[randint(0, len(atoms_list) - 1)] = atoms_list[0]
        atoms_list[randint(0, len(atoms_list) - 1)].translate([0, 0, 100])
        atoms_list[randint(0, len(atoms_list) - 1)] = atoms2
        cpp1 = [ase_atoms_to_cpp_atoms(j) for j in atoms_list]

        def del_dups_cpp(lst):
            """O(n**2) algorithm, O(1) in memory"""
            pos = 0
            for item in lst:
                # NOTE(review): ``all(...)`` keeps `item` when at least one
                # earlier entry does NOT compare equal, which differs from the
                # ASE variant's "keep when none matched" — confirm the
                # intended semantics of CppAtomsClass.compare.
                if not all([(item.compare(item2)) for item2 in islice(lst, pos)]):
                    # we haven't seen `item` yet
                    lst[pos] = item
                    pos += 1
            del lst[pos:]

        cpp_timings = []
        for k in range(10):
            t1 = time()
            del_dups_cpp(cpp1)
            t2 = time()
            cpp_timings.append(t2 - t1)
        # default_timer returns seconds; multiply by 1000 to report ms. The
        # old code divided the mean by len() again and by 1000.
        cpp_time = np.average(cpp_timings) * 1000
        print(f"M={m} N={N} \t CPP {cpp_time:e} ms")
def test_xtalcomp():
    """Sanity-checks CppAtomsClass.compare on graphene vs WS2 and a rotated copy."""
    cell = cellpar_to_cell(np.array([2.46, 2.46, 100.0, 90.0, 90.0, 120]))
    positions = np.array([[0.0, 1.42028166, -1.6775], [0.0, 0.0, -1.6775]])
    graphene = Atoms(
        symbols=["C", "C"], positions=positions, cell=cell, pbc=True
    )
    ws2 = mx2("WS2")
    ws2.pbc = True
    ws2.cell[2, 2] = 100
    rotated = graphene.copy()
    rotated.rotate(30, "z", rotate_cell=True)
    a1 = ase_atoms_to_cpp_atoms(graphene)
    a2 = ase_atoms_to_cpp_atoms(ws2)
    a3 = ase_atoms_to_cpp_atoms(rotated)
    for label in ("a1 -----", "a2 -----", "a3 -----"):
        print(label)
    # compare a rotated copy (expected equivalent) and a different material
    print(a1.compare(a2))
    print(a1.compare(a3))
if __name__ == "__main__":
    # test_algorithm()
    # Only the C++ structure-comparison check runs when executed as a script.
    test_xtalcomp()
| Python |
2D | romankempt/hetbuilder | hetbuilder/__init__.py | .py | 460 | 18 | """hetbuilder"""
import sys
# NOTE(review): LooseVersion appears unused in this module — confirm before removing.
from distutils.version import LooseVersion
from pathlib import Path
# Refuse to run under Python 2.
if sys.version_info[0] == 2:
    raise ImportError("Requires Python3. This is Python2.")
__version__ = "0.8.1"
# Absolute path of the installed hetbuilder package directory.
PROJECT_ROOT_DIR = Path(__file__).absolute().parent
from hetbuilder.algorithm import Interface, CoincidenceAlgorithm
from hetbuilder.plotting import InteractivePlot
# Public API of the package.
__all__ = ["__version__", "Interface", "CoincidenceAlgorithm", "InteractivePlot"]
2D | romankempt/hetbuilder | hetbuilder/algorithm.py | .py | 14,116 | 385 | import ase.io
from ase.atoms import Atoms
from ase.spacegroup import Spacegroup
from ase.geometry import permute_axes
from ase.geometry.analysis import Analysis
from itertools import combinations_with_replacement
from dataclasses import dataclass
import numpy as np
from scipy.linalg import polar
from hetbuilder.log import *
from hetbuilder.atom_checks import check_atoms, recenter
from ase.neighborlist import (
NeighborList,
natural_cutoffs,
NewPrimitiveNeighborList,
find_mic,
)
import sys
from hetbuilder.hetbuilder_backend import (
double2dVector,
double1dVector,
int1dVector,
int2dVector,
CppAtomsClass,
CppCoincidenceAlgorithmClass,
CppInterfaceClass,
get_number_of_omp_threads,
)
def ase_atoms_to_cpp_atoms(atoms: "ase.atoms.Atoms") -> "CppAtomsClass":
    """Converts :class:`~ase.atoms.Atoms` to the C++ CppAtomsClass."""
    # Copy cell and positions so the backend never aliases ASE's arrays.
    cell = double2dVector([double1dVector(row) for row in atoms.cell.copy()])
    coords = double2dVector([double1dVector(row) for row in atoms.positions.copy()])
    numbers = int1dVector(list(atoms.numbers))
    indices = int1dVector([atom.index for atom in atoms])
    moments = double1dVector(list(atoms.get_initial_magnetic_moments()))
    return CppAtomsClass(cell, coords, numbers, indices, moments)
def cpp_atoms_to_ase_atoms(cppatoms: "CppAtomsClass") -> "ase.atoms.Atoms":
    """Converts the C++ CppAtomsClass to :class:`~ase.atoms.Atoms`"""
    # Unpack the C++ vector-of-vector containers into plain Python lists.
    cell = [list(row) for row in cppatoms.lattice]
    coords = [list(row) for row in cppatoms.positions]
    numbers = list(cppatoms.atomic_numbers)
    moments = list(cppatoms.magmoms)
    return Atoms(
        numbers=numbers,
        positions=coords,
        cell=cell,
        pbc=[True, True, True],
        magmoms=moments,
    )
def check_angles(
    angle_stepsize: float = 1, angle_limits: tuple = (0, 180), angles: list = None
) -> list:
    """Helper function to assert correct input of angles.

    Args:
        angle_stepsize (float): Increment between generated angles in degree.
        angle_limits (tuple): Lower and upper bound of the generated angles.
        angles (list): Explicit angles in degree; takes precedence over the
            limits and stepsize when non-empty.

    Returns:
        list: Angles in degree to search.
    """
    # Avoid the mutable-default-argument trap; treat None like an empty list.
    angles = [] if angles is None else list(angles)
    if not angles:
        a1, a2 = angle_limits
        assert a2 > a1, "Second angle must be larger than first one."
        assert angle_stepsize > 0, "Angle stepsize must be larger than zero."
        # The old message claimed "larger", contradicting the check itself.
        assert angle_stepsize < abs(
            a2 - a1
        ), "Angle stepsize must be smaller than the difference between the angles."
        # Include the upper bound explicitly since np.arange excludes it.
        angles = list(np.arange(a1, a2, step=angle_stepsize)) + [a2]
        logger.info(
            "Searching {:d} angles between {:.1f} and {:.1f} degree with a stepsize of {:.1f} degree.".format(
                len(angles), a1, a2, angle_stepsize
            )
        )
        return angles
    msg = ", ".join(str(k) for k in angles)
    logger.info("Calculating the following angles: {} in degree.".format(msg))
    return angles
def get_bond_data(atoms: "ase.atoms.Atoms", return_bonds=True) -> tuple:
    """ Returns a tuple holding bond indices and average bond values.

    If the input structure is larger than 1000 atoms, the neighborlist is not computed and not all bonds are determined.
    Only a subsample is queried to determine the strain.
    """
    atoms = atoms.copy()
    symbs = set(atoms.get_chemical_symbols())
    bonds = None
    if len(atoms) > 1000 or not return_bonds:
        # not computing neighborlists for all bonds here, too slow
        from scipy.spatial import KDTree

        return_bonds = False
        s1 = set(atoms.get_chemical_symbols())
        s2 = set()
        i = 1
        p = atoms.positions
        tree = KDTree(atoms.positions)
        middle = np.mean(p, axis=0)
        middle[2] = np.min(p[:, 2])  # so we are not searching in vacuum
        # Grow the query radius until the subsample contains every species.
        while s1 != s2 and i < 4:
            sample = tree.query_ball_point(middle, r=i * 10)
            subatoms = atoms[sample]
            s2 = set(subatoms.get_chemical_symbols())
            i += 1
        # Raise only if the largest radius still missed a species. The old
        # in-loop ``if i == 3: raise`` fired even when the second query had
        # already found all species, and never tried the third radius.
        if s1 != s2:
            raise Exception(
                "Could not find all species in the subquery. This should not happen."
            )
        atoms = subatoms
    pairs = list(combinations_with_replacement(symbs, 2))
    # Slightly inflated covalent radii to catch stretched bonds.
    cutoffs = np.array(natural_cutoffs(atoms)) * 1.25
    nl = NeighborList(cutoffs, skin=0.0, primitive=NewPrimitiveNeighborList)
    nl.update(atoms)
    ana = Analysis(atoms, nl=nl)
    if return_bonds:
        # Collect index pairs of all non-degenerate neighbor contacts.
        bonds = []
        for a in range(len(atoms)):
            indices, offsets = nl.get_neighbors(a)
            for i, offset in zip(indices, offsets):
                startvector = atoms.positions[a]
                endvector = atoms.positions[i] + offset @ atoms.get_cell()
                if np.sum((endvector - startvector) ** 2) > 0:
                    bonds.append([a, i])
    # Average bond length per species pair.
    unique_bonds = {}
    for p in pairs:
        anabonds = ana.get_bonds(*p)
        if anabonds == [[]]:
            continue
        bond_values = ana.get_values(anabonds)
        avg = np.average(bond_values)
        unique_bonds[p] = avg
    return bonds, unique_bonds
@dataclass
class Interface:
    """Exposes the C++ implementation of the CppInterfaceClass.

    Attributes:
        bottom (ase.atoms.Atoms): Lower layer as supercell.
        top (ase.atoms.Atoms): Upper layer as supercell.
        stack (ase.atoms.Atoms): Combined lower and upper layer as supercell.
        M (numpy.ndarray): Supercell matrix M.
        N (numpy.ndarray): Supercell matrix N.
        angle (float): Twist angle in degree.
        stress (float): Stress measure of the unit cell.
        strain (float): Strain measure of the bond lengths.
    """

    def __init__(
        self, interface: "CppInterfaceClass" = None, weight=0.5, **kwargs
    ) -> None:
        """Converts a C++ interface result into ASE structures and bond data.

        Args:
            interface: CppInterfaceClass result from the backend.
            weight (float): Mixing factor of the coincidence cell C = A + weight * (B - A).
            **kwargs: Optionally ``bottom_bond_lengths`` and ``top_bond_lengths``
                (average bond lengths of the primitives, used by measure_strain).
        """
        bottom = cpp_atoms_to_ase_atoms(interface.bottom)
        top = cpp_atoms_to_ase_atoms(interface.top)
        stack = cpp_atoms_to_ase_atoms(interface.stack)
        self.bottom = recenter(bottom)
        self.top = recenter(top)
        self.stack = recenter(stack)
        # Copy the 2x2 C++ matrices into plain nested lists.
        self.M = [[j for j in k] for k in interface.M]
        self.N = [[j for j in k] for k in interface.N]
        self.angle = interface.angle
        self._weight = weight
        self._stress = None
        self.bbl = kwargs.get("bottom_bond_lengths", None)
        self.tbl = kwargs.get("top_bond_lengths", None)
        self._bonds, self._bond_lengths = get_bond_data(self.stack)

    def __repr__(self):
        return "{}(M={}, N={}, angle={:.1f}, stress={:.1f})".format(
            self.__class__.__name__, self.M, self.N, self.angle, self.stress,
        )

    @property
    def stress(self) -> float:
        """Returns the stress measure."""
        return self.measure_stress()

    @property
    def strain(self) -> float:
        """Returns the strain measure."""
        return self.measure_strain()

    def measure_stress(self) -> float:
        """Measures the stress on both unit cells.

        Returns:
            float: Sum of the strain measures of the transformations taking
            each layer's cell into the common coincidence cell.
        """
        A = self.bottom.cell.copy()[:2, :2]
        B = self.top.cell.copy()[:2, :2]
        # Coincidence cell interpolated between the two layer cells.
        C = A + self._weight * (B - A)
        T1 = C @ np.linalg.inv(A)
        T2 = C @ np.linalg.inv(B)

        def measure(P):
            # Scalar strain measure built from the symmetric part P - I.
            eps = P - np.identity(2)
            meps = np.sqrt(
                (
                    eps[0, 0] ** 2
                    + eps[1, 1] ** 2
                    + eps[0, 0] * eps[1, 1]
                    + eps[1, 0] ** 2
                )
                / 4
            )
            return meps

        U1, P1 = polar(T1)  # this one goes counterclockwise
        U2, P2 = polar(T2)  # this one goes clockwise
        # u is rotation, p is strain
        meps1 = measure(P1)
        meps2 = measure(P2)
        stress = meps1 + meps2
        # return (stress, P1 - np.identity(2), P2 - np.identity(2))
        return stress

    def measure_strain(self) -> float:
        """Measures the average strain on bond lengths on both substructures.

        Compares each average bond length of the stack against the matching
        species pair in the primitive bottom/top references (in percent).
        """
        bond_lengths = self.bond_lengths
        bottom_strain = []
        top_strain = []
        for (k1, b1) in bond_lengths.items():
            # Species pairs are unordered: match (A, B) against (B, A) too.
            for k3, b3 in self.bbl.items():
                if (k3 == k1) or (k3[::-1] == k1):
                    d = np.abs((b3 - b1)) / b1 * 100
                    bottom_strain.append(d)
            for k3, b3 in self.tbl.items():
                if (k3 == k1) or (k3[::-1] == k1):
                    d = np.abs((b3 - b1)) / b1 * 100
                    top_strain.append(d)
        strain = np.average(bottom_strain) + np.average(top_strain)
        return strain

    @property
    def bonds(self):
        # Bond index pairs of the stack, as computed by get_bond_data.
        return self._bonds

    @property
    def bond_lengths(self):
        # Average bond length per species pair of the stack.
        return self._bond_lengths
class CoincidenceAlgorithm:
    """Exposes the C++ implementation of the CppCoincidenceAlgorithmClass.

    Args:
        bottom (ase.atoms.Atoms): Lower layer, needs to be two-dimensional.
        top (ase.atoms.Atoms): Upper layer, needs to be two-dimensional.
    """

    def __init__(self, bottom: "ase.atoms.Atoms", top: "ase.atoms.Atoms") -> None:
        # Validate/reorient both layers; keep primitive bond data for the
        # strain measure of the resulting interfaces.
        self.bottom = check_atoms(bottom)
        self.top = check_atoms(top)
        # NOTE(review): bond data is taken from the raw inputs rather than the
        # checked/recentered self.bottom/self.top — confirm this is intended.
        _, self.bdl = get_bond_data(bottom)
        _, self.tbl = get_bond_data(top)

    def __repr__(self):
        return "{}(bottom={}, top={})".format(
            self.__class__.__name__, self.bottom, self.top
        )

    def run(
        self,
        Nmax: int = 10,
        Nmin: int = 0,
        angles: list = [],
        angle_limits: tuple = (0, 90),
        angle_stepsize: float = 1.0,
        tolerance: float = 0.1,
        weight: float = 0.5,
        distance: float = 4,
        standardize: bool = False,
        no_idealize: bool = False,
        symprec: float = 1e-5,
        angle_tolerance: float = 5,
        verbosity: int = 0,
    ) -> list:
        """Executes the coincidence lattice algorithm.

        Args:
            Nmax (int): Maximum number of translations. Defaults to 10.
            Nmin (int): Minimum number of translations. Defaults to 0.
            angles (list): List of angles in degree to search. Takes precedence over angle_limits and angle_stepsize.
            angle_limits (tuple): Lower and upper bound of angles too look through with given step size by angle_stepsize. Defaults to (0, 90) degree.
            angle_stepsize (float): Increment of angles to look through. Defaults to 1.0 degree.
            tolerance (float): Tolerance criterion to accept lattice match. Corresponds to a distance in Angström. Defaults to 0.1.
            weight (float): The coincidence unit cell is C = A + weight * (B-A). Defaults to 0.5.
            distance (float): Interlayer distance of the stacks. Defaults to 4.0 Angström.
            standardize (bool): Perform spglib standardization. Defaults to False.
            no_idealize (bool): Does not idealize unit cell parameters in the spglib standardization routine. Defaults to False.
            symprec (float): Symmetry precision for spglib. Defaults to 1e-5 Angström.
            angle_tolerance (float): Angle tolerance for the spglib `spgat` routines. Defaults to 5.
            verbosity (int): Debug level for printout of Coincidence Algorithm. Defaults to 0.

        Returns:
            list : A list of :class:`~hetbuilder.algorithm.Interface`, or None
            when no coincidence pair was found.
        """
        bottom = ase_atoms_to_cpp_atoms(self.bottom)
        top = ase_atoms_to_cpp_atoms(self.top)
        angles = check_angles(
            angle_limits=angle_limits, angle_stepsize=angle_stepsize, angles=angles
        )
        # Identical layers at 0° would match at every lattice point.
        if (self.bottom == self.top) and (0 in angles):
            logger.warning("The bottom and top structure seem to be identical.")
            logger.warning(
                "Removing the angle 0° from the search because all lattice points would match."
            )
            angles = [k for k in angles if abs(k) > 1e-4]
        assert len(angles) > 0, "List of angles contains no values."
        assert Nmin < Nmax, "Nmin must be smaller than Nmax."
        assert Nmin >= 0, "Nmin must be larger than or equal 0."
        assert Nmax > 0, "Nmax must be larger than 0."
        assert distance > 0, "Interlayer distance must be larger than zero."
        assert tolerance > 0, "Tolerance must be larger than zero."
        assert (
            angle_tolerance >= 0
        ), "Angle tolerance must be larger than or equal zero."
        assert (symprec) > 0, "Symmetry precision must be larger than zero."
        assert (weight >= 0) and (weight <= 1), "Weight factor must be between 0 and 1."
        assert verbosity in [0, 1, 2], "Verbose must be 0, 1, or 2."
        angles = double1dVector(angles)
        no_idealize = int(no_idealize)
        # Size of the search grid: all (m1..m4, n1..n4) combinations per angle.
        ncombinations = ((2 * (Nmax - Nmin)) ** 4) * len(angles)
        nthreads = get_number_of_omp_threads()
        logger.info("Using {:d} OpenMP threads.".format(nthreads))
        logger.info("Running through {:d} grid points...".format(ncombinations))
        alg = CppCoincidenceAlgorithmClass(bottom, top)
        results = alg.run(
            Nmax,
            Nmin,
            angles,
            tolerance,
            weight,
            distance,
            standardize,
            no_idealize,
            symprec,
            angle_tolerance,
            verbosity,
        )
        if len(results) == 0:
            logger.error("Could not find any coincidence pairs for these parameters.")
            return None
        elif len(results) > 0:
            if len(results) == 1:
                logger.info("Found 1 result.")
            else:
                logger.info("Found {:d} results.".format(len(results)))
            # Wrap every C++ result together with the primitive bond data so
            # Interface.measure_strain can compare against the references.
            interfaces = [
                Interface(
                    k,
                    weight=weight,
                    bottom_bond_lengths=self.bdl,
                    top_bond_lengths=self.tbl,
                )
                for k in results
            ]
            return interfaces
| Python |
2D | romankempt/hetbuilder | hetbuilder/plotting.py | .py | 10,810 | 366 | from dataclasses import dataclass, astuple
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
import matplotlib.cm as cm
from matplotlib.colors import ListedColormap, Normalize, LinearSegmentedColormap
from matplotlib import path, patches
import numpy as np
from itertools import product
from hetbuilder.log import *
from hetbuilder.algorithm import Interface
from collections import namedtuple
from ase.neighborlist import natural_cutoffs, NeighborList
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
import tkinter as tk
mutedblack = "#1a1a1a"
from hetbuilder.utils import plot_atoms
# from ase.visualize.plot import plot_atoms
def plot_stack(
    stack: "ase.atoms.Atoms" = None, supercell_data: "namedtuple" = None, bonds=None,
):
    """Wrapper of the :func:~`ase.visualize.plot.plot_atoms` function.

    Redraws the current axes with the stacked structure and a title built
    from the supercell data (index, atom count, deformation, twist angle).
    """
    fig = plt.gcf()
    axes = plt.gca()
    canvas = fig.canvas
    # Wipe the previous selection's drawing before rendering the new one.
    axes.clear()
    axes.set_yticks([])
    axes.set_xticks([])
    axes.set_xlabel("")
    axes.set_ylabel("")
    description = r"#{:d}, {:d} atoms, {:.1f} % deformation, $\theta=${:.2f}°".format(
        supercell_data.index,
        supercell_data.natoms,
        supercell_data.stress + supercell_data.strain,
        supercell_data.angle,
    )
    axes.set_title(description, fontsize=12)
    plot_atoms(stack, axes, radii=0.3, scale=1, bonds=bonds)
    axes.set_frame_on(False)
    canvas.draw()
def plot_grid(
    basis: "numpy.ndarray" = None,
    supercell_matrix: "numpy.ndarray" = None,
    Nmax: int = None,
    **kwargs,
):
    """Plots lattice points of a unit cell.

    Draws the two families of lattice lines spanned by the first two basis
    vectors on the current axes; kwargs are forwarded to ``axline``.
    NOTE(review): supercell_matrix is accepted but unused here — confirm.
    """
    axes = plt.gca()
    basis = basis[:2, :2].copy()
    a1 = basis[0, :].copy()
    a2 = basis[1, :].copy()
    # p = product(range(-Nmax, Nmax + 1), range(-Nmax, Nmax + 1))
    # points = np.array([n[0] * a1 + n[1] * a2 for n in p])
    # axes.scatter(points[:, 0], points[:, 1], **kwargs)
    # Lines through the origin along each basis vector...
    axes.axline(0 * a1, 1 * a1, **kwargs)
    axes.axline(0 * a2, 1 * a2, **kwargs)
    # ...and the parallel lines offset by n cells in each direction.
    for n in range(-Nmax, Nmax + 1):
        if n != 0:
            axes.axline(n * a1 + 0 * a2, n * a1 + n * a2, **kwargs)
            axes.axline(0 * a1 + n * a2, n * a1 + n * a2, **kwargs)
def plot_unit_cell_patch(cell: "numpy.ndarray", **kwargs):
    """Plots a face patch for a unit cell.

    Expects a 2x2 cell (rows are the in-plane lattice vectors); draws the
    parallelogram spanned by them and fits the axes limits around it.
    """
    axes = plt.gca()
    cell = cell.copy()
    # Closed parallelogram: origin -> a -> a+b -> b -> origin.
    path1 = [
        (0, 0),
        (cell[0, :]),
        (cell[0, :] + cell[1, :]),
        (cell[1, :]),
        (0, 0),
    ]
    path1 = path.Path(path1)
    patch = patches.PathPatch(path1, **kwargs)
    axes.add_patch(patch)
    # Pad the view by 4 units around the patch.
    path2 = np.array(path1.vertices)
    xlim = (np.min(path2[:, 0]) - 4, np.max(path2[:, 0] + 4))
    ylim = (np.min(path2[:, 1]) - 4, np.max(path2[:, 1] + 4))
    axes.axis("equal")
    axes.set_xlim(xlim)
    axes.set_ylim(ylim)
def plot_lattice_points(
    basis1: "numpy.ndarray" = None,
    basis2: "numpy.ndarray" = None,
    supercell_data: "namedtuple" = None,
    weight: float = None,
):
    """Plots lattice points of both bases on top of each other, as well as the coincidence supercell.

    basis1/basis2 are the 2x2 in-plane cells of the (rotated) layers;
    supercell_data supplies the matrix entries (m1..m4, n1..n4).
    """
    fig = plt.gcf()
    axes = plt.gca()
    canvas = fig.canvas
    # Clear the previous selection's drawing.
    axes.clear()
    axes.set_yticks([])
    axes.set_xticks([])
    axes.set_xlabel("")
    axes.set_ylabel("")
    (
        natoms,
        m1,
        m2,
        m3,
        m4,
        n1,
        n2,
        n3,
        n4,
        angle,
        stress,
        strain,
        index,
    ) = supercell_data
    sc1 = np.array([[m1, m2], [m3, m4]])
    sc2 = np.array([[n1, n2], [n3, n4]])
    # Enough lattice lines to comfortably cover the supercell extents.
    Nmax = max(abs(j) for j in [m1, m2, m3, m4, n1, n2, n3, n4]) * 2
    # first cell
    plot_grid(
        basis=basis1,
        supercell_matrix=sc1,
        color="tab:red",
        # facecolor="tab:red",
        # edgecolor="tab:red",
        alpha=0.25,
        # s=2,
        lw=1,
        Nmax=Nmax,
    )
    A = sc1 @ basis1
    # second cell
    plot_grid(
        basis=basis2,
        supercell_matrix=sc2,
        color="tab:blue",
        # facecolor="tab:blue",
        # edgecolor="tab:blue",
        alpha=0.25,
        # s=2,
        lw=1,
        Nmax=Nmax,
    )
    B = sc2 @ basis2
    # supercell lattice points
    C = A + weight * (B - A)
    plot_unit_cell_patch(
        C,
        facecolor="tab:purple",
        alpha=0.5,
        edgecolor=mutedblack,
        linewidth=1,
        linestyle="--",
    )
    axes.set_frame_on(False)
    scdata = """M = ({: 2d}, {: 2d}, {: 2d}, {: 2d})\nN = ({: 2d}, {: 2d}, {: 2d}, {: 2d})""".format(
        m1, m2, m3, m4, n1, n2, n3, n4
    )
    axes.set_title(scdata, fontsize=12)
    canvas.draw()
def rand_jitter(arr, jitter):
    """Adds Gaussian noise to *arr*, scaled by its value range (plus a 0.01 floor)."""
    spread = max(arr) - min(arr)
    noise = np.random.randn(len(arr)) * (jitter * spread + 0.01)
    return arr + noise
@dataclass
class SuperCellData:
    """Flat record describing one coincidence supercell match.

    Holds the atom count, the entries of the two supercell matrices M and N,
    the twist angle, the stress/strain measures, and the result index.
    """

    natoms: int
    m1: int
    m2: int
    m3: int
    m4: int
    n1: int
    n2: int
    n3: int
    n4: int
    angle: float
    stress: float
    strain: float
    index: int

    def __iter__(self):
        # Yield all field values in declaration order (supports tuple unpacking).
        yield from astuple(self)
class InteractivePlot:
    """ Interactive visualization of the results via matplotlib.

    Args:
        bottom (ase.atoms.Atoms): Lower layer as primitive.
        top (ase.atoms.Atoms): Upper layer as primitive.
        results (list): List of :class:`~hetbuilder.algorithm.Interface` returned from the coincidence lattice search.
        weight (float, optional): Weight of the supercell.
    """

    def __init__(
        self,
        bottom: "ase.atoms.Atoms" = None,
        top: "ase.atoms.Atoms" = None,
        results: list = None,
        weight: float = 0.5,
    ) -> None:
        self.bottom = bottom
        self.top = top
        self.results = results
        self._weight = weight

    def __repr__(self):
        return "{}(nresults={})".format(self.__class__.__name__, len(self.results))

    def plot_results(self):
        """ Plots results interactively.

        Generates a matplotlib interface that allows to select the reconstructed stacks and save them to a file.
        """
        results = self.results
        # x: total deformation (stress + strain), y: number of atoms.
        data = np.array(
            [[i.stress + i.strain, len(i.stack)] for i in results], dtype=float
        )
        # Color-code points by twist angle (0-90°).
        color = [i.angle for i in results]
        norm = Normalize(vmin=0, vmax=90, clip=True)
        cmap = LinearSegmentedColormap.from_list(
            "",
            [
                "darkgreen",
                "tab:green",
                "lightgreen",
                "lightblue",
                "tab:blue",
                "royalblue",
            ],
        )
        mapper = cm.ScalarMappable(norm=norm, cmap=cmap)
        color = [mapper.to_rgba(v) for v in color]
        # ax1: selectable scatter; ax2: lattice view; ax3: stack view.
        fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=([6.4 * 3, 6.4]))
        ax1.scatter(
            data[:, 0], data[:, 1], color=color, alpha=0.75, picker=3.5, marker=".",
        )
        clb = plt.colorbar(
            cm.ScalarMappable(norm=norm, cmap=cmap), ax=ax1, ticks=[0, 30, 60, 90]
        )
        clb.set_label(
            r"Twist angle $\theta$ [°]", rotation=270, labelpad=12, fontsize=12
        )
        clb.ax.set_yticklabels(["0", "30", "60", "90"])
        ax1.set_xlim(0.00, np.max(data[:, 0]) * 1.1)
        # NOTE(review): the next line overrides the lower y-limit set above —
        # confirm this is intended.
        ax1.set_ylim(np.min(data[:, 1]) * 0.95, np.max(data[:, 1]) * 1.05)
        ax1.set_ylim(0, ax1.get_ylim()[1] + 10)
        ax1.set_xlabel(r"deformation [%]", fontsize=12)
        ax1.set_ylabel("Number of atoms", fontsize=14)
        ax1.set_title("Click a point to select a structure.", fontsize=12)
        ax1.grid(axis="both", color="lightgray", linestyle="-", linewidth=1, alpha=0.2)
        ax2.set_yticks([])
        ax2.set_xticks([])
        ax2.set_xlabel("")
        ax2.set_ylabel("")
        ax2.set_frame_on(False)
        ax3.set_yticks([])
        ax3.set_xticks([])
        ax3.set_xlabel("")
        ax3.set_ylabel("")
        ax3.set_frame_on(False)
        axbutton = plt.axes([0.8, 0.05, 0.1, 0.05])
        # Clicking a scatter point updates current_stack/current_scdata.
        fig.canvas.mpl_connect("pick_event", self.__onpick)

        def __save(stack):
            # Writes the currently selected stack to an FHI-aims geometry file.
            try:
                sc = self.current_scdata
                form = self.current_stack.get_chemical_formula()
                info_string = []
                info_string.append(form)
                M = [sc.m1, sc.m2, sc.m3, sc.m4]
                N = [sc.n1, sc.n2, sc.n3, sc.n4]
                a = sc.angle
                s1 = sc.stress
                s2 = sc.strain
                info_string.append("M = [{} {} // {} {}]".format(*M))
                info_string.append("N = [{} {} // {} {}]".format(*N))
                info_string.append(f"angle = {a:.4f} [degree]")
                info_string.append(f"stress = {s1:.4f} [%]")
                info_string.append(f"strain = {s2:.4f} [%]")
                index = sc.index
                name = "{}_{:.2f}_degree_{}.in".format(form, a, index)
                stack.write(name, info_str=info_string, format="aims")
                logger.info("Saved structure to {}".format(name))
            except Exception as excpt:
                # NOTE(review): any write error is reported as "no selection";
                # confirm this catch-all is intended.
                logger.error("You need to select a point first.")

        save = Button(axbutton, " Save this structure. ")
        save.on_clicked(lambda x: __save(self.current_stack))
        plt.show()

    def __onpick(self, event):
        """Pick handler: stores the selected result and redraws ax2/ax3."""
        point = event.artist
        mouseevent = event.mouseevent
        index = event.ind[0]
        fig = point.properties()["figure"]
        axes = fig.axes
        stack = self.results[index].stack.copy()
        M = np.array(self.results[index].M)
        N = np.array(self.results[index].N)
        angle = self.results[index].angle
        stress = self.results[index].stress
        strain = self.results[index].strain
        m1, m2, m3, m4 = M[0, 0], M[0, 1], M[1, 0], M[1, 1]
        n1, n2, n3, n4 = N[0, 0], N[0, 1], N[1, 0], N[1, 1]
        scdata = SuperCellData(
            natoms=int(len(stack)),
            m1=int(m1),
            m2=int(m2),
            m3=int(m3),
            m4=int(m4),
            n1=int(n1),
            n2=int(n2),
            n3=int(n3),
            n4=int(n4),
            angle=float(angle),
            stress=float(stress),
            strain=float(strain),
            index=int(index),
        )
        # Remember the selection for the save button.
        self.current_scdata = scdata
        self.current_stack = stack
        lower = self.bottom.copy()
        upper = self.top.copy()
        # Rotate the top primitive so the lattice view matches the twist angle.
        upper.rotate(angle, v="z", rotate_cell=True)
        basis1 = lower.cell.copy()[:2, :2]
        basis2 = upper.cell.copy()[:2, :2]
        plt.sca(fig.axes[1])
        plot_lattice_points(
            basis1=basis1, basis2=basis2, supercell_data=scdata, weight=self._weight,
        )
        plt.sca(fig.axes[2])
        plot_stack(
            stack=stack, supercell_data=scdata, bonds=self.results[index].bonds,
        )
| Python |
2D | romankempt/hetbuilder | hetbuilder/atom_checks.py | .py | 7,875 | 242 | import ase.io
from ase.geometry import permute_axes
from ase import neighborlist
from ase.data import covalent_radii
from ase.build import make_supercell
from ase.neighborlist import NeighborList, NewPrimitiveNeighborList
from spglib import find_primitive
import networkx as nx
import numpy as np
from collections import namedtuple
from hetbuilder.log import *
def find_fragments(atoms, scale=1.0) -> list:
    """Finds unconnected structural fragments by constructing
    the first-neighbor topology matrix and the resulting graph
    of connected vertices.

    Args:
        atoms: :class:`~ase.atoms.Atoms` or :class:`~aimstools.structuretools.structure.Structure`.
        scale: Scaling factor for covalent radii.

    Note:
        Requires networkx library.

    Returns:
        list: NamedTuple with indices and atoms object.
    """
    radii = scale * covalent_radii[atoms.get_atomic_numbers()]
    nl = NeighborList(
        radii,
        bothways=True,
        self_interaction=False,
        skin=0.0,
        primitive=NewPrimitiveNeighborList,
    )
    nl.update(atoms)
    connectivity_matrix = nl.get_connectivity_matrix(sparse=False)
    # NOTE(review): atoms without any neighbor never appear in the edge list
    # and thus in no fragment — confirm isolated atoms cannot occur here.
    edges = np.argwhere(connectivity_matrix == 1)
    graph = nx.from_edgelist(edges)  # converting to a graph
    con_tuples = list(
        nx.connected_components(graph)
    )  # graph theory can be pretty handy
    fragments = [
        atoms[list(i)] for i in con_tuples
    ]  # the fragments are not always layers
    fragments_dict = {}
    i = 0
    for tup, atom in zip(con_tuples, fragments):
        fragment = namedtuple("fragment", ["indices", "atoms"])
        indices = []
        for entry in tup:
            indices.append(entry)
        indices = set(indices)
        fragments_dict[i] = fragment(indices, atom)
        i += 1
    # Sort fragments bottom-up by their average z position.
    fragments_dict = [
        v
        for k, v in sorted(
            fragments_dict.items(),
            key=lambda item: np.average(item[1][1].get_positions()[:, 2]),
        )
    ]
    return fragments_dict
def find_periodic_axes(atoms: "ase.atoms.Atoms") -> dict:
    """Evaluates if given structure is qualitatively periodic along certain lattice directions.

    Args:
        atoms: ase.atoms.Atoms object.

    Note:
        A criterion is a vacuum space of more than 25.0 Angström.

    Returns:
        dict: Axis : Bool pairs.
    """
    atoms = atoms.copy()
    # Work on a 2x2x2 supercell so fragment gaps across the boundary show up.
    sc = make_supercell(atoms, 2 * np.identity(3), wrap=True)
    fragments = find_fragments(sc, scale=1.5)
    crit1 = True if len(fragments) > 1 else False
    pbc = dict(zip([0, 1, 2], [True, True, True]))
    if crit1:
        for axes in (0, 1, 2):
            # Span of each fragment along this axis.
            spans = []
            for tup in fragments:
                start = np.min(tup.atoms.get_positions()[:, axes])
                end = np.max(tup.atoms.get_positions()[:, axes])
                spans.append((start, end))
            spans = list(set(spans))
            spans = sorted(spans, key=lambda x: x[0])
            if len(spans) > 1:
                for k, l in zip(spans[:-1], spans[1:]):
                    d1 = abs(k[1] - l[0])
                    d2 = abs(
                        k[1] - l[0] - sc.cell.lengths()[axes]
                    )  # check if fragments are separated by a simple translation
                    nd = np.min([d1, d2])
                    # A gap of >= 25 Å marks the axis as non-periodic.
                    if nd >= 25.0:
                        pbc[axes] = False
                        break
    return pbc
def recenter(atoms: "ase.atoms.Atoms") -> "ase.atoms.Atoms":
    """Recenters atoms to be in the unit cell, with vacuum on both sides.

    The unit cell length c is always chosen such that it is larger than a and b.

    Returns:
        atoms : modified atoms object.

    Note:
        The ase.atoms.center() method is supposed to do that, but sometimes separates the layers. I didn't find a good way to circumvene that.
    """
    # have to think about the viewing directions here
    atoms = atoms.copy()
    atoms.wrap(pretty_translation=True)
    atoms.center(axis=(2))
    # Shift positions so the center of mass sits at the cell center along z.
    mp = atoms.get_center_of_mass(scaled=False)
    cp = (atoms.cell[0] + atoms.cell[1] + atoms.cell[2]) / 2
    pos = atoms.get_positions(wrap=False)
    pos[:, 2] += np.abs((mp - cp))[2]
    # Fold z coordinates back into [0, c).
    for z in range(pos.shape[0]):
        lz = atoms.cell.lengths()[2]
        if pos[z, 2] >= lz:
            pos[z, 2] -= lz
        if pos[z, 2] < 0:
            pos[z, 2] += lz
    atoms.set_positions(pos)
    newcell, newpos, newscal, numbers = (
        atoms.get_cell(),
        atoms.get_positions(wrap=False),
        atoms.get_scaled_positions(wrap=False),
        atoms.numbers,
    )
    # Rebuild the cell: orthogonal c axis with 100 Å of vacuum around the slab.
    z_pos = newpos[:, 2]
    span = np.max(z_pos) - np.min(z_pos)
    newcell[0, 2] = newcell[1, 2] = newcell[2, 0] = newcell[2, 1] = 0.0
    newcell[2, 2] = span + 100.0
    # Grow c until it is the longest lattice vector (|a| <= |b| <= |c| or
    # |b| <= |a| <= |c|).
    axes = [0, 1, 2]
    lengths = np.linalg.norm(newcell, axis=1)
    order = [x for x, y in sorted(zip(axes, lengths), key=lambda pair: pair[1])]
    while True:
        if (order == [0, 1, 2]) or (order == [1, 0, 2]):
            break
        newcell[2, 2] += 10.0
        lengths = np.linalg.norm(newcell, axis=1)
        order = [x for x, y in sorted(zip(axes, lengths), key=lambda pair: pair[1])]
    # Re-derive cartesian positions from scaled ones, keeping the original z.
    newpos = newscal @ newcell
    newpos[:, 2] = z_pos
    atoms = ase.Atoms(positions=newpos, numbers=numbers, cell=newcell, pbc=atoms.pbc)
    return atoms
def check_if_2d(atoms: "ase.atoms.Atoms") -> bool:
    """Evaluates if structure is qualitatively two-dimensional.

    Note:
        A structure is considered 2D if only one axis is non-periodic.

    Returns:
        bool: 2D or not to 2D, that is the question.
    """
    periodic = find_periodic_axes(atoms)
    return sum(periodic.values()) == 2
def check_if_primitive(atoms: "ase.atoms.Atoms") -> None:
    """ Checks if input configuration is primitive via spglib.

    A warning is raised if not.
    """
    cell = (atoms.cell, atoms.get_scaled_positions(), atoms.numbers)
    lattice, scaled_positions, numbers = find_primitive(cell, symprec=1e-5)
    # NOTE(review): spglib's find_primitive can return None on failure, which
    # would raise a TypeError below — confirm inputs are always valid cells.
    # The input is primitive if spglib's primitive lattice matches the input.
    is_primitive = (np.abs(lattice - atoms.cell) < 1e-4).all()
    if not is_primitive:
        logger.warning("It seems that the structure {} is not primitive.".format(atoms))
        logger.warning("This might lead to unexpected results.")
def check_atoms(atoms: "ase.atoms.Atoms") -> "ase.atoms.Atoms":
    """ Runs a series of checks on the input configuration.

    This should assert that the input atoms are 2d, oriented in the xy plane, and centered in the middle of the unit cell.

    Returns:
        ase.atoms.Atoms: Validated, reoriented and recentered structure.

    Raises:
        Exception: If a 3D input does not appear to be two-dimensional.
    """
    cell = atoms.cell.copy()
    # Lattice rows that are all zero mark non-periodic directions.
    zerovecs = np.where(~cell.any(axis=1))[0]
    is_2d = False
    is_3d = False
    # NOTE(review): the 0D/1D branches only warn and fall through with both
    # flags False — confirm that silently continuing is intended.
    if len(zerovecs) == 3:
        logger.warning("You cannot specify 0D molecules as structure input.")
    elif len(zerovecs) == 2:
        logger.warning("You cannot specify 1D chains as structure input.")
    elif len(zerovecs) == 1:
        is_2d = True
    elif len(zerovecs) == 0:
        is_3d = True
    # Fill in zero lattice vectors so downstream code has a complete cell.
    atoms.cell = atoms.cell.complete()
    check_if_primitive(atoms)
    # check that cell is oriented in xy
    if is_2d:
        non_pbc_axis = zerovecs[0]
        # Permute axes so the non-periodic direction becomes z.
        if non_pbc_axis != 2:
            old = list(set([0, 1, 2]) - set([non_pbc_axis]))
            new = old + [non_pbc_axis]
            atoms = permute_axes(atoms, new)
        atoms = recenter(atoms)
    # more expensive checks to see if structure is suitably 2d
    if is_3d:
        is_2d = check_if_2d(atoms)
        if not is_2d:
            logger.error(
                "It seems that the structure {} is not two-dimensional.".format(atoms)
            )
            logger.error(
                "Consider setting one of the lattice vectors to zero or to a suitably large value."
            )
            raise Exception("Structure does not appear to be 2d.")
        else:
            atoms = recenter(atoms)
    return atoms
| Python |
2D | romankempt/hetbuilder | hetbuilder/log.py | .py | 1,806 | 64 | import logging
import pretty_errors
from rich.logging import RichHandler
pretty_errors.configure(
separator_character="*",
filename_display=pretty_errors.FILENAME_EXTENDED,
line_number_first=True,
display_link=True,
lines_before=2,
lines_after=1,
line_color=pretty_errors.RED + "> " + pretty_errors.default_config.line_color,
code_color=" " + pretty_errors.default_config.line_color,
truncate_code=True,
display_locals=True,
)
pretty_errors.blacklist("c:/python")
class DuplicateFilter(logging.Filter):
    """Suppresses consecutively repeated log records."""

    def filter(self, record):
        # add other fields if you need more granular comparison, depends on your app
        signature = (record.module, record.levelno, record.msg)
        previous = getattr(self, "last_log", None)
        if signature == previous:
            return False
        self.last_log = signature
        return True
def setup_custom_logger(name):
    """Create and return a logger wired to a rich console handler.

    The handler renders markup and rich tracebacks; a DuplicateFilter
    suppresses immediately repeated messages.
    """
    custom_logger = logging.getLogger(name)
    custom_logger.setLevel(logging.INFO)
    rich_handler = RichHandler(
        show_time=False, markup=True, rich_tracebacks=True, show_path=False
    )
    rich_handler.setFormatter(logging.Formatter(fmt="{message:s}", style="{"))
    custom_logger.addHandler(rich_handler)
    custom_logger.addFilter(DuplicateFilter())
    return custom_logger
# Package-wide logger shared by all modules. Note: the name "root" here is a
# plain named logger, distinct from logging's actual root logger.
logger = setup_custom_logger("root")
def set_verbosity_level(verbosity):
    """Map a CLI verbosity count to a log level on the package logger.

    Args:
        verbosity: 0 -> CRITICAL, 1 -> INFO, 2 or more -> DEBUG.

    Bug fix: the original appended a brand-new StreamHandler on every call,
    so repeated calls (e.g. the ``match`` command calls this several times)
    emitted each log line multiple times. The detailed-format handler is now
    installed at most once and merely reconfigured afterwards.
    """
    logger = logging.getLogger("root")
    if verbosity == 0:
        level = "CRITICAL"
    elif verbosity == 1:
        level = "INFO"
    else:
        level = "DEBUG"
    formatter = logging.Formatter(
        fmt="{levelname:8s} {module:20s} {funcName:20s} |\n {message:s}", style="{"
    )
    # Reuse the plain StreamHandler installed by a previous call, if any.
    # type() (not isinstance) is deliberate so subclasses such as the rich
    # console handler added in setup_custom_logger are not touched.
    for handler in logger.handlers:
        if type(handler) is logging.StreamHandler:
            handler.setFormatter(formatter)
            break
    else:
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    logger.setLevel(level)
| Python |
2D | romankempt/hetbuilder | hetbuilder/utils.py | .py | 11,306 | 367 | from gettext import find
import matplotlib.pyplot as plt
import numpy as np
from math import sqrt
from itertools import islice
from ase.io.formats import string2index
from ase.utils import rotate
from ase.data import covalent_radii, atomic_numbers
from ase.data.colors import jmol_colors
from hetbuilder.algorithm import ase_atoms_to_cpp_atoms, cpp_atoms_to_ase_atoms
from hetbuilder.hetbuilder_backend import cpp_make_supercell, int1dVector, int2dVector
from ase.neighborlist import find_mic
def cpp_make_supercell(atoms, N):
    """C++ implementation of making a supercell.

    Also returns a list of indices holding equivalent atoms.

    Args:
        atoms: ASE Atoms object for the primitive cell.
        N: 2x2 integer supercell matrix (nested sequence).

    Returns:
        Tuple of (supercell as ASE Atoms, list mapping each supercell atom
        to its equivalent atom index in the primitive cell).

    Bug fix: this def shadows the identically named C++ binding imported at
    module top (``from hetbuilder.hetbuilder_backend import
    cpp_make_supercell``), so the call below used to resolve to this very
    wrapper and recurse forever. Re-import the backend symbol under an alias
    so the call reaches the C++ function.
    """
    from hetbuilder.hetbuilder_backend import (
        cpp_make_supercell as _backend_make_supercell,
    )

    cppatoms = ase_atoms_to_cpp_atoms(atoms)
    N = int2dVector([int1dVector(k) for k in N])
    sc = _backend_make_supercell(cppatoms, N)
    index_map = sc.get_index_mapping()
    index_map = [int(k) for k in index_map]
    atoms = cpp_atoms_to_ase_atoms(sc)
    return atoms, index_map
class PlottingVariables:
    """ Modified from https://gitlab.com/ase/ase/-/blob/master/ase/visualize/plot.py to add bonds."""

    # removed writer - self
    def __init__(
        self,
        atoms,
        rotation="",
        show_unit_cell=2,
        radii=None,
        bbox=None,
        colors=None,
        scale=20,
        maxwidth=500,
        extra_offset=(0.0, 0.0),
        bonds=None,
    ):
        """Precompute geometry, colors and canvas layout for drawing *atoms*.

        Args:
            atoms: ASE Atoms object to render.
            rotation: Rotation string such as '10x,20y,30z' (or a matrix).
            show_unit_cell: 0 = hide cell, 1 = draw it, 2 = draw it and
                grow the bounding box so the cell is fully visible.
            radii: Per-atom radii, a single scale factor on covalent radii,
                or None for plain covalent radii.
            bbox: Explicit bounding box (x1, y1, x2, y2); autocomputed if None.
            colors: Per-atom colors; defaults to jmol colors by element.
            scale: Canvas units per Angstrom.
            maxwidth: Cap on the canvas width.
            extra_offset: Additional (x, y) shift of the drawing.
            bonds: Iterable of (i, k) atom-index pairs to draw as bonds,
                or None to disable bond drawing.
        """
        self.numbers = atoms.get_atomic_numbers()
        self.colors = colors
        self.offset = None
        # NOTE(review): "bonds is None" would be the idiomatic test here, and
        # show_bonds is never read again in this constructor (dead local).
        show_bonds = True
        if bonds == None:
            show_bonds = False
        self.bonds = bonds
        if colors is None:
            # Default to jmol element colors, clipping exotic elements to the
            # last entry of the color table.
            ncolors = len(jmol_colors)
            self.colors = jmol_colors[self.numbers.clip(max=ncolors - 1)]
        if radii is None:
            radii = covalent_radii[self.numbers]
        elif isinstance(radii, float):
            # A single float scales the covalent radii uniformly.
            radii = covalent_radii[self.numbers] * radii
        else:
            radii = np.array(radii)
        natoms = len(atoms)
        if isinstance(rotation, str):
            # Convert a rotation string like '10x,20y,30z' into a 3x3 matrix.
            rotation = rotate(rotation)
        cell = atoms.get_cell()
        disp = atoms.get_celldisp().flatten()
        if show_unit_cell > 0:
            # L: dashed-segment positions, T: axis tag per segment,
            # D: half-segment vector per axis (see cell_to_lines).
            L, T, D = cell_to_lines(self, cell)
            # Eight cell corners, then rotated into the viewing frame.
            cell_vertices = np.empty((2, 2, 2, 3))
            for c1 in range(2):
                for c2 in range(2):
                    for c3 in range(2):
                        cell_vertices[c1, c2, c3] = np.dot([c1, c2, c3], cell) + disp
            cell_vertices.shape = (8, 3)
            cell_vertices = np.dot(cell_vertices, rotation)
        else:
            L = np.empty((0, 3))
            T = None
            D = None
            cell_vertices = None
        nlines = len(L)
        # Atom positions first, cell-line segment positions appended after.
        positions = np.empty((natoms + nlines, 3))
        R = atoms.get_positions()
        positions[:natoms] = R
        positions[natoms:] = L
        r2 = radii ** 2
        for n in range(nlines):
            # Mark cell segments that lie entirely inside some atom sphere
            # with T = -1 so make_patch_list skips them.
            d = D[T[n]]
            if (
                (((R - L[n] - d) ** 2).sum(1) < r2)
                & (((R - L[n] + d) ** 2).sum(1) < r2)
            ).any():
                T[n] = -1
        positions = np.dot(positions, rotation)
        R = positions[:natoms]
        if bbox is None:
            # Autocompute the bounding box from atom extents (plus cell
            # corners when they must stay visible), then derive canvas
            # width/height and the drawing offset.
            X1 = (R - radii[:, None]).min(0)
            X2 = (R + radii[:, None]).max(0)
            if show_unit_cell == 2:
                X1 = np.minimum(X1, cell_vertices.min(0))
                X2 = np.maximum(X2, cell_vertices.max(0))
            M = (X1 + X2) / 2
            S = 1.05 * (X2 - X1)
            w = scale * S[0]
            if w > maxwidth:
                # Shrink the scale so the canvas never exceeds maxwidth.
                w = maxwidth
                scale = w / S[0]
            h = scale * S[1]
            offset = np.array([scale * M[0] - w / 2, scale * M[1] - h / 2, 0])
        else:
            w = (bbox[2] - bbox[0]) * scale
            h = (bbox[3] - bbox[1]) * scale
            offset = np.array([bbox[0], bbox[1], 0]) * scale
        offset[0] = offset[0] - extra_offset[0]
        offset[1] = offset[1] - extra_offset[1]
        self.w = w + extra_offset[0]
        self.h = h + extra_offset[1]
        # Shift everything into canvas coordinates.
        positions *= scale
        positions -= offset
        if nlines > 0:
            D = np.dot(D, rotation)[:, :2] * scale
        if cell_vertices is not None:
            cell_vertices *= scale
            cell_vertices -= offset
        cell = np.dot(cell, rotation)
        cell *= scale
        self.cell = cell
        self.positions = positions
        self.D = D
        self.T = T
        self.cell_vertices = cell_vertices
        self.natoms = natoms
        self.d = 2 * scale * radii
        self.constraints = atoms.constraints
        # extension for partial occupancies
        self.frac_occ = False
        self.tags = None
        self.occs = None
        try:
            # Fractional site occupancies stored in atoms.info enable the
            # pie-chart style atom rendering in make_patch_list.
            self.occs = atoms.info["occupancy"]
            self.tags = atoms.get_tags()
            self.frac_occ = True
        except KeyError:
            pass
class Matplotlib(PlottingVariables):
    """ Modified from https://gitlab.com/ase/ase/-/blob/master/ase/visualize/plot.py to add bonds."""

    def __init__(
        self,
        atoms,
        ax,
        rotation="",
        radii=None,
        colors=None,
        scale=1,
        offset=(0, 0),
        bonds=None,
        **parameters,
    ):
        """Bind the precomputed plotting data to a matplotlib axes.

        Args:
            atoms: ASE Atoms object to render.
            ax: Matplotlib axes to draw into.
            rotation, radii, colors, scale, offset, bonds, **parameters:
                forwarded to PlottingVariables (offset maps to extra_offset).
        """
        PlottingVariables.__init__(
            self,
            atoms,
            rotation=rotation,
            radii=radii,
            colors=colors,
            scale=scale,
            extra_offset=offset,
            bonds=bonds,
            **parameters,
        )
        self.atoms = atoms
        self.ax = ax
        self.figure = ax.figure
        self.ax.set_aspect("equal")
        show_bonds = True if bonds is not None else False
        if show_bonds:
            self.show_bonds()

    def write(self):
        """Draw the atom/cell patches and fit the axes to the canvas size."""
        self.write_body()
        self.ax.set_xlim(0, self.w)
        self.ax.set_ylim(0, self.h)

    def write_body(self):
        """Add one matplotlib patch per atom / visible cell-line segment."""
        patch_list = make_patch_list(self)
        for patch in patch_list:
            self.ax.add_patch(patch)

    def show_bonds(self):
        """Draw each bond as two half-segments, colored by atom height.

        The bond vector is taken under the minimum-image convention; bonds
        whose (canvas-coordinate) length is 2.5 or more are skipped.
        """
        from matplotlib import cm, colors

        # Two-tone map: gray for low-z atoms, black for high-z.
        # NOTE(review): cm is imported but unused here.
        cmap = colors.ListedColormap(["gray", "black"])
        pmin = np.min(self.atoms.positions[:, 2])
        pmax = np.max(self.atoms.positions[:, 2])
        for i, k in self.bonds:
            p1 = self.positions[i]
            p2 = self.positions[k]
            # Minimum-image bond vector dr and its length l.
            dr, l = find_mic(p2 - p1, self.cell)
            # Normalize atom k's z coordinate into [0, 1] to pick a color.
            # NOTE(review): both halves below use atom k's color; presumably
            # the first half should use atom i's — confirm against upstream.
            c = self.atoms.positions[k][2]
            c = (c - pmin) / (pmax - pmin)
            c = cmap(c)
            if l < 2.5:
                e1 = p1 + dr / 2
                e2 = p2 - dr / 2
                x1, y1, _ = list(zip(p1, e1))
                x2, y2, _ = list(zip(p2, e2))
                self.ax.plot(x1, y1, color=c, zorder=0)
                self.ax.plot(x2, y2, color=c, zorder=0)
def cell_to_lines(writer, cell):
    """Sample each cell vector into dashed-line segments for plotting.

    Taken from https://gitlab.com/ase/ase/-/blob/master/ase/visualize/plot.py.

    Args:
        writer: Unused; kept for signature compatibility with upstream ASE.
        cell: 3x3 array of cell vectors (rows).

    Returns:
        Tuple (positions, T, D) where positions holds the segment midpoints
        for all three axes and all four parallel cell edges, T tags each
        segment with its axis index, and D holds the per-axis half-segment
        vector.

    XXX this needs to be updated for cell vectors that are zero.
    """
    # One segment per ~0.3 length units, but at least two per axis.
    segments = [
        max(2, int(sqrt((cell[axis] ** 2).sum()) / 0.3)) for axis in range(3)
    ]
    total = 4 * sum(segments)
    positions = np.empty((total, 3))
    T = np.empty(total, int)
    D = np.zeros((3, 3))
    start = 0
    for axis, n in enumerate(segments):
        dd = cell[axis] / (4 * n - 2)
        D[axis] = dd
        P = np.arange(1, 4 * n + 1, 4)[:, None] * dd
        T[start:] = axis
        # Four parallel edges of the cell for this axis.
        for i, j in [(0, 0), (0, 1), (1, 0), (1, 1)]:
            positions[start:start + n] = P + i * cell[axis - 2] + j * cell[axis - 1]
            start += n
    return positions, T, D
def make_patch_list(writer):
    """ Taken from https://gitlab.com/ase/ase/-/blob/master/ase/visualize/plot.py."""
    from matplotlib.path import Path
    from matplotlib.patches import Circle, PathPatch, Wedge

    # Paint back-to-front (ascending z) so nearer atoms cover farther ones.
    indices = writer.positions[:, 2].argsort()
    patch_list = []
    for a in indices:
        xy = writer.positions[a, :2]
        if a < writer.natoms:
            # Entries below natoms are atoms; the rest are cell-line points.
            r = writer.d[a] / 2
            if writer.frac_occ:
                # Partially occupied site: draw a pie chart of species.
                site_occ = writer.occs[str(writer.tags[a])]
                # first an empty circle if a site is not fully occupied
                if (np.sum([v for v in site_occ.values()])) < 1.0:
                    # fill with white
                    fill = "#ffffff"
                    patch = Circle(xy, r, facecolor=fill, edgecolor="black")
                    patch_list.append(patch)
                start = 0
                # start with the dominant species
                for sym, occ in sorted(
                    site_occ.items(), key=lambda x: x[1], reverse=True
                ):
                    if np.round(occ, decimals=4) == 1.0:
                        patch = Circle(
                            xy, r, facecolor=writer.colors[a], edgecolor="black"
                        )
                        patch_list.append(patch)
                    else:
                        # jmol colors for the moment
                        extent = 360.0 * occ
                        patch = Wedge(
                            xy,
                            r,
                            start,
                            start + extent,
                            facecolor=jmol_colors[atomic_numbers[sym]],
                            edgecolor="black",
                        )
                        patch_list.append(patch)
                        start += extent
            else:
                # Fully occupied site: plain circle, emitted only when it
                # intersects the visible canvas (w x h).
                if (
                    (xy[1] + r > 0)
                    and (xy[1] - r < writer.h)
                    and (xy[0] + r > 0)
                    and (xy[0] - r < writer.w)
                ):
                    patch = Circle(xy, r, facecolor=writer.colors[a], edgecolor="black")
                    patch_list.append(patch)
        else:
            # Unit-cell line segment; T == -1 marks segments hidden behind
            # atoms (see PlottingVariables), which are skipped.
            a -= writer.natoms
            c = writer.T[a]
            if c != -1:
                hxy = writer.D[c]
                patch = PathPatch(Path((xy + hxy, xy - hxy)))
                patch_list.append(patch)
    return patch_list
def plot_atoms(atoms, ax=None, **parameters):
    """Render an atoms object into a matplotlib axes.

    Taken from https://gitlab.com/ase/ase/-/blob/master/ase/visualize/plot.py.

    Args:
        atoms: ASE Atoms object; a length-one list is unwrapped.
        ax: Matplotlib axes to draw into; defaults to the current axes.
        **parameters: Forwarded to the Matplotlib writer. Supported keys
            include rotation (e.g. '10x,20y,30z'), show_unit_cell
            (0 = hide, 1 = draw, 2 = draw and keep visible), radii,
            colors (one per atom), scale, and offset.

    Returns:
        The axes the structure was drawn into.
    """
    if isinstance(atoms, list):
        assert len(atoms) == 1
        atoms = atoms[0]

    import matplotlib.pyplot as plt

    if ax is None:
        ax = plt.gca()
    Matplotlib(atoms, ax, **parameters).write()
    return ax
| Python |
2D | romankempt/hetbuilder | hetbuilder/cli.py | .py | 8,237 | 262 | #!/usr/bin/env python
from matplotlib.pyplot import step
import typer
from typer.params import Option, Argument
from typing import Optional, Tuple, List
from hetbuilder import __version__, CoincidenceAlgorithm, Interface, InteractivePlot
from hetbuilder.log import logger, set_verbosity_level
from hetbuilder.atom_checks import check_atoms
from pathlib import Path
import ase.io
import numpy as np
app = typer.Typer(add_completion=True)
def version_callback(value: bool):
    """Eager typer callback for --version: print the version and stop."""
    if not value:
        return
    typer.echo(f"Hetbuilder Version: {__version__}")
    raise typer.Exit()
@app.callback(
    help=typer.style(
        """Builds 2D heterostructure interfaces via coincidence lattice theory.\n
    Github repository: https://github.com/romankempt/hetbuilder\n
    Documentation: https://hetbuilder.readthedocs.io/en/latest\n
    Available under the MIT License. Please cite 10.5281/zenodo.4721346.""",
        fg=typer.colors.GREEN,
        bold=False,
    )
)
def callback(
    version: Optional[bool] = typer.Option(
        None,
        "--version",
        callback=version_callback,
        is_eager=True,
        help="Show version and exit.",
    )
):
    # Body intentionally empty: the eager --version option does all the work;
    # this callback only exists so typer treats the app as a command group.
    pass
@app.command(
    context_settings={"allow_extra_args": False, "ignore_unknown_options": False},
    help=typer.style(
        """Build interfaces and show results interactively.""",
        fg=typer.colors.GREEN,
        bold=False,
    ),
)
def build(
    ctx: typer.Context,
    lower: Path = typer.Argument(..., help="Path to lower layer structure file."),
    upper: Path = typer.Argument(..., help="Path to upper layer structure file."),
    Nmax: int = typer.Option(
        10, "-N", "--Nmax", help="Maximum number of translations."
    ),
    Nmin: int = typer.Option(0, "--Nmin", help="Minimum number of translations."),
    angle_stepsize: float = typer.Option(
        1, "-as", "--angle_stepsize", help="Increment of angles to look through."
    ),
    angle_limits: Tuple[float, float] = typer.Option(
        (0, 90),
        "-al",
        "--angle_limits",
        # Fixed help-text typo: "too look" -> "to look".
        help="Lower and upper bound of angles to look through with given step size.",
    ),
    angles: List[float] = typer.Option(
        [],
        "-a",
        "--angle",
        # Fixed help-text typo: "Explicitely" -> "Explicitly".
        help="Explicitly set angle to look for. Can be called multiple times.",
    ),
    tolerance: float = typer.Option(
        0.1,
        "-t",
        "--tolerance",
        help="Tolerance criterion to accept matching lattice points in Angström.",
    ),
    weight: float = typer.Option(
        0.5,
        "-w",
        "--weight",
        help="Weight of the coincidence unit cell, given by C=A+weight*(B-A).",
    ),
    distance: float = typer.Option(
        4, "-d", "--distance", help="Interlayer distance of the heterostructure."
    ),
    no_idealize: bool = typer.Option(
        False, "--no_idealize", help="Disable idealize lattice parameters via spglib."
    ),
    symprec: float = typer.Option(
        1e-5, "-sp", "--symprec", help="Symmetry precision for spglib."
    ),
    angle_tolerance: float = typer.Option(
        5, "--angle_tolerance", help="Angle tolerance for spglib."
    ),
    verbosity: int = typer.Option(
        1, "--verbosity", "-v", count=True, help="Set verbosity level."
    ),
) -> None:
    """Builds heterostructure interface for given choice of parameters.

    Runs the coincidence-lattice search once with the given parameters and,
    if any matches were found, opens the interactive matplotlib viewer.

    Example:
        hetbuilder build graphene.xyz MoS2.xyz -N 10 -al 0 30 -as 0.1
    """
    set_verbosity_level(verbosity)
    bottom = ase.io.read(lower)
    top = ase.io.read(upper)
    logger.info(
        "Building heterostructures from {} and {}.".format(
            bottom.get_chemical_formula(), top.get_chemical_formula()
        )
    )
    alg = CoincidenceAlgorithm(bottom, top)
    # NOTE(review): unlike the `match` command, `weight` is not forwarded to
    # alg.run() here; it only enters via the interactive plot below — confirm
    # this asymmetry is intended.
    results = alg.run(
        Nmax=Nmax,
        Nmin=Nmin,
        angles=angles,
        angle_limits=angle_limits,
        angle_stepsize=angle_stepsize,
        tolerance=tolerance,
        distance=distance,
        no_idealize=no_idealize,
        symprec=symprec,
        angle_tolerance=angle_tolerance,
        verbosity=verbosity,
    )
    # alg.run returns None when no coincidence cell was found.
    if results is not None:
        ip = InteractivePlot(bottom, top, results, weight)
        ip.plot_results()
@app.command(
    context_settings={"allow_extra_args": False, "ignore_unknown_options": False},
    help=typer.style(
        """Find lowest-stress coincidence unit cell.""",
        fg=typer.colors.GREEN,
        bold=False,
    ),
)
def match(
    lower: Path = typer.Argument(..., help="Path to lower layer structure file."),
    upper: Path = typer.Argument(..., help="Path to upper layer structure file."),
    Nmax: int = typer.Option(
        10, "-N", "--Nmax", help="Maximum number of translations."
    ),
    Nmin: int = typer.Option(0, "--Nmin", help="Minimum number of translations."),
    angles: List[float] = typer.Option(
        [],
        "-a",
        "--angle",
        # Fixed help-text typo: "Explicitely" -> "Explicitly".
        help="Explicitly set angle to look for. Can be called multiple times.",
    ),
    weight: float = typer.Option(
        0.5,
        "-w",
        "--weight",
        help="Weight of the coincidence unit cell, given by C=A+weight*(B-A).",
    ),
    distance: float = typer.Option(
        4, "-d", "--distance", help="Interlayer distance of the heterostructure."
    ),
    no_idealize: bool = typer.Option(
        False, "--no_idealize", help="Disable idealize lattice parameters via spglib."
    ),
    symprec: float = typer.Option(
        1e-5, "-sp", "--symprec", help="Symmetry precision for spglib."
    ),
    angle_tolerance: float = typer.Option(
        5, "--angle_tolerance", help="Angle tolerance for spglib."
    ),
    verbosity: int = typer.Option(
        1, "--verbosity", "-v", count=True, help="Set verbosity level."
    ),
):
    """Matches two structures to find lowest-stress coincidence lattice.

    Automatically checks increasing tolerance values, keeps the first
    tolerance that yields any result, and writes the lowest-stress stack
    to an .xyz file.

    Example:
        hetbuilder match graphene.xyz MoS2.xyz
    """
    bottom = ase.io.read(lower)
    top = ase.io.read(upper)
    logger.info(
        "Building heterostructure from {} and {}.".format(
            bottom.get_chemical_formula(), top.get_chemical_formula()
        )
    )
    alg = CoincidenceAlgorithm(bottom, top)
    set_verbosity_level(verbosity)
    if not angles:
        # Default: scan every whole degree from 0 to 90.
        angles = list(range(0, 91, 1))

    def circle_loop(
        tolerance_stepsize=0.05,
        max_tolerance=0.2,
        distance=distance,
        angles=angles,
        no_idealize=no_idealize,
        symprec=symprec,
        angle_tolerance=angle_tolerance,
        weight=weight,
    ):
        """Raise the tolerance stepwise until the algorithm finds a match,
        then return the lowest-stress interface of that run (or None)."""
        interfaces = []
        # outer loop over different tolerances
        for j, t in enumerate(
            np.arange(
                tolerance_stepsize,
                max_tolerance + tolerance_stepsize,
                tolerance_stepsize,
            )
        ):
            logger.info("Checking for tolerance {:.2f} ...".format(t))
            r = alg.run(
                Nmax=Nmax,
                Nmin=Nmin,
                angles=angles,
                tolerance=t,
                distance=distance,
                no_idealize=no_idealize,
                symprec=symprec,
                angle_tolerance=angle_tolerance,
                weight=weight,
                verbosity=verbosity,
            )
            if r is not None:
                # Keep only the lowest-stress result and stop scanning.
                stresses = [k.stress for k in r]
                idx = stresses.index(min(stresses))
                interfaces.append(r[idx])
                break
        if len(interfaces) > 0:
            stresses = [k.stress for k in interfaces]
            idx = stresses.index(min(stresses))
            intf = interfaces[idx]
            set_verbosity_level(1)
            logger.info("Found coincidence structure: {}".format(intf))
            return intf
        else:
            logger.critical("Could not find any matching unit cells.")
            return None

    interface = circle_loop()
    # Bug fix: circle_loop() returns None when nothing matched; the original
    # code then crashed with an AttributeError on interface.stack. Exit with
    # a non-zero status instead (the critical message was already logged).
    if interface is None:
        raise typer.Exit(code=1)
    atoms = interface.stack.copy()
    name = atoms.get_chemical_formula() + "_angle{:.2f}_stress{:.2f}.xyz".format(
        interface.angle, interface.stress
    )
    logger.info(f"Writing structure to {name} ...")
    atoms.write(name)
# Allow invoking the CLI directly as a script (python cli.py), in addition
# to the installed `hetbuilder` entry point.
if __name__ == "__main__":
    app()
| Python |
2D | romankempt/hetbuilder | docs/intro.md | .md | 2,783 | 93 | # Hetbuilder - builds heterostructure interfaces
[](https://zenodo.org/badge/latestdoi/358881237)
[](https://hetbuilder.readthedocs.io/en/latest/?badge=latest)
[](https://badge.fury.io/py/hetbuilder)
Builds 2D heterostructure interfaces via coincidence lattice theory.
## Installation
### Build-time dependencies
Requires a C++17 compiler and [cmake](https://cmake.org/).
It is also recommended to preinstall [spglib](https://atztogo.github.io/spglib/python-spglib.html) and [pybind11](https://github.com/pybind/pybind11).
Otherwise, these will be built during the installation from the submodules.
#### Installing with Anaconda
Create a clean conda environment:
```bash
conda create -n hetbuilder python=3.9
```
Then install the build-time dependencies first:
```bash
conda install -c conda-forge cxx-compiler git pip cmake spglib pybind11
```
Then, you can install the project from pip:
```bash
pip install hetbuilder
```
If that does not work, try directly installing from git:
```bash
pip install git+https://github.com/romankempt/hetbuilder.git
```
#### Installing with pip
PyPI does not provide the library files of [spglib](https://atztogo.github.io/spglib/python-spglib.html). These will be built from the submodules at installation time, which might be time-consuming.
On Unix, you can install a `cxx-compiler` with:
```bash
sudo apt install build-essential
```
## First steps
The installation exposes a multi-level [typer](https://github.com/tiangolo/typer) CLI utility called `hetbuilder`:
```bash
hetbuilder --help
```
The `build` utility exposes the results interactively via a matplotlib GUI.
You can use any ASE-readable structure format to specify the lower and upper layer. They should be recognizable as two-dimensional, e.g., by having a zero vector in the *z*-direction.
```bash
hetbuilder build graphene.xyz MoS2.cif
```
This should open a [matplotlib](https://matplotlib.org/) interface looking like this:

## Documentation
Documentation is available at [Read the Docs](https://hetbuilder.readthedocs.io/en/latest/index.html).
## Testing
Tests can be run in the project directory with
```bash
pytest -v tests
```
## Citing
If you use this tool, please cite 10.5281/zenodo.4721346.
## Requirements
- [Atomic Simulation Environment](https://wiki.fysik.dtu.dk/ase/)
- [Space Group Libary](https://atztogo.github.io/spglib/python-spglib.html)
- [SciPy](https://www.scipy.org/)
- [matplotlib](https://matplotlib.org/)
- [pybind11](https://github.com/pybind/pybind11)
- [typer](https://github.com/tiangolo/typer)
| Markdown |
2D | romankempt/hetbuilder | docs/python_interface.md | .md | 1,083 | 36 | # Python Interface
The algorithm can be directly executed from python. This is useful to incorporate the algorithm into other workflows.
```python
from hetbuilder.algorithm import CoincidenceAlgorithm
from hetbuilder.plotting import InteractivePlot
# we read in the structure files via the ASE
import ase.io
bottom = ase.io.read("lower_layer.xyz")
top = ase.io.read("upper_layer.xyz")
# we set up the algorithm class
alg = CoincidenceAlgorithm(bottom, top)
# we run the algorithm for a choice of parameters
results = alg.run(Nmax = 10, Nmin = 0, angles = [0, 10, 15, 20], tolerance = 0.1, weight = 0.5)
```
If the search was not successful, `None` is returned. Otherwise, we can inspect the results like this:
```python
for j in results:
print(j)
```
This allows to filter the results, e.g., if we only want a certain angle:
```python
k = [j for j in results if j.angle == 0.0]
print(k)
```
We can also parse these results to the matplotlib plotting interface:
```python
iplot = InteractivePlot(bottom=bottom, top=top, results=results, weight=0.5)
iplot.plot_results()
``` | Markdown |
2D | romankempt/hetbuilder | docs/cli.md | .md | 3,202 | 78 | # Command-line interface
The options of the CLI utility `hetbuilder` can be shown via:
```bash
hetbuilder --help
```
In the following, a couple of examples are shown to illustrate different parameter choices.
## The `build` utility
The `build` utility runs the coincidence lattice algorithm once for a given choice of parameters and visualizes the results interactively.
```bash
hetbuilder build --help
```
### Searching specific angles
Specific angles can be passed explicitly with the `-a` option and can be called multiple times:
```bash
hetbuilder build a.xyz b.xyz -a 0.15 -a 20.4 -a 30.1
```
This takes precedence over the specification of angles via the angle limits and the angle stepsize.
### Reducing the angle range
If both layers are highly symmetric, e.g., both have a hexagonal $C_6$ rotation axis, then it does not make sense to search through the entire range between 0 and 90°. It is sufficient in this case to look for angles in the range between 0 and 30°, e.g., with a stepsize of 0.5°:
```bash
hetbuilder build a.xyz b.xyz -al 0 30 -as 0.5
```
### Changing the number of translations
The maximum and minimum number of translations $N_{max}$ and $N_{min}$ are the most performance-relevant parameters. Choosing a large number of translations (e.g., 100) is possible but leads to longer run times. In this case, it might help to make more OpenMP threads available by setting the environment variable `OMP_NUM_THREADS` to a larger value.
Usually, one is interested in the smallest coincidence cell possible with small allowed strain. For most practical purposes, $N_{max}$ is set to $10$ because of that.
Choosing large values for both is useful if one wants to look for very large supercells. For example:
```bash
hetbuilder build a.xyz b.xyz --Nmin 100 --Nmax 125
```
But other parameters might need adjustment for that purpose as well, such as the tolerance. Accessing these large structures via the matplotlib interface might be problematic, so it is recommended to search for these large supercells via the python interface.

### Changing the tolerance
The tolerance corresponds to a distance between lattice points in Angström. Choosing a larger tolerance accepts more lattice points and generates more supercells, but these might have a larger total lattice mismatch.
```bash
hetbuilder build a.xyz b.xyz -t 0.2
```
### Changing the weight factor
Changing the weight factor $w$ only affects the final coincidence supercells, not the results of the matching algorithm itself. A weight factor $w=0$ means that the coincidence unit cell is given only by the supercell of the lower layer. A weight factor $w=1$ means that the coincidence unit cell is given only by the supercell of the upper layer. Correspondingly, this allows removing stresses from one of the layers at the expense of the other.
```bash
hetbuilder build a.xyz b.xyz -w 0
```
## The `match` utility
The `match` utility only looks for the smallest stress result from the algorithm and writes it to a file.
```bash
hetbuilder match --help
```
The syntax is similar to the `build` utility. The algorithm is executed automatically for different tolerance values.
| Markdown |
2D | romankempt/hetbuilder | docs/implementation.md | .md | 6,349 | 146 | # Theoretical Background
Coincidence lattices are determined with the algorithm outlined by Schwalbe-Koda ([1]).
[1]: https://doi.org/10.1021/acs.jpcc.6b01496 ". Phys. Chem. C 2016, 120, 20, 10895-10908"
Two 2D lattice bases (lattice vectors are given as column vectors) are given by:
```math
\mathbf{A} = \begin{pmatrix} a_{11} & a_{21} \\ a_{12} & a_{22} \end{pmatrix}
```
```math
\mathbf{B} = \begin{pmatrix} b_{11} & b_{21} \\ b_{12} & b_{22} \end{pmatrix}
```
Each point in the 2D plane is given by the coefficients:
```math
P(m_1, m_2) = m_1 \vec{a}_1 + m_2 \vec{a}_2
```
```math
P(n_1, n_2) = n_1 \vec{b}_1 + n_2 \vec{b}_2
```
The two bases can be rotated with respect to each other:
```math
\mathbf{R}(\theta) = \begin{pmatrix} \cos(\theta) & -\sin(\theta) \\ \sin(\theta) & \cos(\theta) \end{pmatrix}
```
Two lattice points of the two bases coincide under the following condition:
```math
\begin{pmatrix} \vec{a}_1 & \vec{a}_2 \end{pmatrix} \begin{pmatrix} m_1 \\ m_2 \end{pmatrix}
= \mathbf{R}(\theta) \begin{pmatrix} \vec{b}_1 & \vec{b}_2 \end{pmatrix}
\begin{pmatrix} n_1 \\ n_2 \end{pmatrix} \\
```
```math
\mathbf{A} \vec{m} = \mathbf{R}(\theta) \mathbf{B} \vec{n}
```
As a tolerance criterion, coincidence is accepted if the distance between the coinciding lattice points is smaller than a threshold:
```math
| \mathbf{A} \vec{m} - \mathbf{R}(\theta) \mathbf{B} \vec{n} | \leq tolerance
```
Solving this system of linear equations yields a set of associated vectors for each angle:
```math
s(\theta) = \{ (\vec{m_1}, \vec{n_1}), (\vec{m_2}, \vec{n_2}), ..., (\vec{m_s}, \vec{n_s}), ..., (\vec{m_k}, \vec{n_k}) \} \\
```
From any pair of these associated vectors that is linearly independent, one can construct supercell matrices from the row vectors:
```math
\mathbf{M} = \begin{pmatrix} m_{s1} & m_{s2} \\ m_{k1} & m_{k2} \end{pmatrix}~~~~
\mathbf{N} = \begin{pmatrix} n_{s1} & n_{s2} \\ n_{k1} & n_{k2} \end{pmatrix}
```
This yields a set $S(\theta)=\{(\mathbf{M}_i, \mathbf{N}_i)\}$ of supercell matrices.
# Implementation Details
The four coefficients $m_{s1}$, $m_{s2}$, $m_{k1}$ and $m_{k2}$ are determined by a grid search. Therefore, one has to iterate through all possible combinations for all given angles $\theta_i$
```math
(-N_{max} \leq s \leq -N_{min} ~\text{ or }~ N_{min} \leq s \leq N_{max}) \\
\text{ and } (-N_{max} \leq k \leq -N_{min} ~\text{ or }~ N_{min} \leq k \leq N_{max}) ~\forall~\theta_i ~,
```
where $N_{min}$ and $N_{max}$ are the minimum and maximum number of translations, respectively. This yields $(2 \cdot (N_{max} - N_{min}))^4 \cdot N_{angles}$ grid points to search through, which is done in C++ employing OpenMP parallelism.
This results in a set $S(\theta)$ that typically is very large. Before the supercells are generated, it is reduced by practical criteria.
1. All unit cell multiples are removed by ensuring that their absolute greatest common divisor $\text{gcd}$ equals 1:
```math
\text{abs}(\text{gcd}(m_{s1}, m_{s2}, m_{k1}, m_{k2}, n_{s1}, n_{s2}, n_{k1}, n_{k2})) \overset{!}{=} 1
```
2. Two supercell matrices yield the same area if they have the same determinant for a given angle. For all supercell matrices with the same determinant, the ones with positive and symmetric entries are preferred:
```math
\mathbf{M}_i = \mathbf{M}_j ~~\text{if det}(\mathbf{M}_i) =~\text{det}(\mathbf{M}_j)
```
After the set $S(\theta)$ is reduced, the atomic supercell configurations are generated. This may still be a larger number, so further reduction steps are necessary.
3. The lower and upper supercells are given by the product of the supercell matrices $\mathbf{M}_i$ and $\mathbf{N}_i$ with the primitive bases $\mathbf{A}$ and $\mathbf{B}$, respectively. The common supercell $\mathbf{C}_i$ can be built by a linear combination of the two:
```math
\mathbf{M}_i \mathbf{A} \approx \mathbf{N}_i \mathbf{R}(\theta_i) \mathbf{B}
```
```math
\mathbf{C}_i = \mathbf{M}_i \mathbf{A} + w \cdot ( \mathbf{N}_i \mathbf{R}(\theta_i) \mathbf{B} - \mathbf{M}_i \mathbf{A})
```
Where the weight factor $w$ ranges from 0 to 1 and determines if the common unit cell $\mathbf{C}_i$ is either completely given by the lattice of $\mathbf{A}$ or $\mathbf{B}$ or in between. This yields a set of atomic configurations for all angles $P = \{\mathbf{(C}_i,\theta_i)\}$.
4. The set $P$ can further be reduced by removing symmetry-equivalencies. This is achieved via the [XtalComp](https://github.com/allisonvacanti/XtalComp) algorithm.
# Definition of the deformation measure
Dropping the index $i$, one can define the target transformation matrices to measure the stress on the unit cells:
```math
\mathbf{T_A} \mathbf{MA} = \mathbf{C} ~~~~ \mathbf{T_B} \mathbf{N} \mathbf{R}(\theta) \mathbf{B} = \mathbf{C}
```
These are two-dimensional and can be polarly decomposed:
```math
\mathbf{T}_A = \mathbf{U}_A(\phi)\mathbf{P}_A
```
```math
\mathbf{T}_B = \mathbf{U}_B(-\phi)\mathbf{P}_B
```
In two dimensions, this decomposition is unique. We interpret the matrix $\mathbf{P}$ as strain on the lattice vectors with small rotations being removed by the rotation $\mathbf{U}(\phi)$. This allows us to define a stress tensor $\varepsilon$ by subtracting unity:
```math
\mathbf{\varepsilon}_A = \mathbf{P}_A - \mathbb{I}
```
```math
\mathbf{\varepsilon}_B = \mathbf{P}_B - \mathbb{I}
```
As an average value, we calculate the stress measure $\bar{\varepsilon}$:
```math
\bar{\varepsilon} = \sqrt{\frac{\varepsilon_{xx}^2 + \varepsilon_{yy}^2 +\varepsilon_{xy}^2 + \varepsilon_{xx} \cdot \varepsilon_{yy}}{4}}
```
And a total stress measure on both unit cells:
```math
\bar{\varepsilon}_{tot} = \bar{\varepsilon}_A + \bar{\varepsilon}_B
```
However, this stress measure becomes small for large unit cells by definition. A more meaningful measure is to look at how much the average bond lengths $\bar{|b^{ij}|}$ change given the two species $i$ and $j$ compared to the original cell.
This strain measure describes how much the bonds are strained or compressed in the new unit cell. We define an arbitrary deformation measure then as:
```math
\text{deformation} = \bar{\varepsilon}_A + \bar{\varepsilon}_B + \text{avg}\left( \frac{\Delta \bar{|b_{A}^{ij}|}}{\bar{|b_{A}^{ij}|}} \right) + \text{avg}\left( \frac{\Delta \bar{|b_{B}^{ij}|}}{\bar{|b_{B}^{ij}|}} \right)
``` | Markdown |
2D | romankempt/hetbuilder | docs/conf.py | .py | 4,407 | 141 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_rtd_theme
from ase.utils.sphinx import mol_role
sys.path.insert(0, os.path.abspath("../.."))
sys.path.insert(0, os.path.abspath("../hetbuilder"))
sys.path.insert(0, os.path.abspath(".."))
import re
# Parse __version__ straight out of the package source so the docs build
# does not have to import hetbuilder (which needs the compiled backend).
VERSIONFILE = "../hetbuilder/__init__.py"
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
# Bug fix: the original used open(...).read() without closing the handle,
# leaking the file descriptor (ResourceWarning under -W error).
with open(VERSIONFILE, "rt") as fh:
    verstrline = fh.read()
mo = re.search(VSRE, verstrline, re.M)
if mo:
    version = mo.group(1)
else:
    raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
import recommonmark
from recommonmark.transform import AutoStructify
# -- Project information -----------------------------------------------------
project = "hetbuilder"
copyright = "2021, Roman Kempt"
author = "Roman Kempt"
# The full version, including alpha/beta/rc tags
# (taken from the __version__ string parsed above).
release = version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Cross-reference targets so :py:...: roles can link into the ASE and
# Python standard-library documentation.
intersphinx_mapping = {
    "ase": ("https://wiki.fysik.dtu.dk/ase", None),
    "python": ("https://docs.python.org/3.7", None),
}
extensions = [
    # "nbsphinx",
    "sphinx.ext.autodoc",
    # "sphinx_markdown_tables",
    "sphinx.ext.todo",
    "sphinx.ext.githubpages",
    "recommonmark",
    "sphinx.ext.napoleon",
    "sphinx.ext.viewcode",
    "sphinx.ext.mathjax",
    "sphinx.ext.intersphinx",
]
# The compiled C++ extension cannot be imported on the docs build host,
# so autodoc mocks it instead of importing it.
autodoc_mock_imports = ["hetbuilder_backend"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "misc", "**.ipynb_checkpoints"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# html_static_path = ["_static"]
pygments_style = "sphinx"
master_doc = "index"
# -- Extension configuration -------------------------------------------------
# Map file extensions to their parsers; .md is handled by recommonmark.
source_suffix = {
    ".rst": "restructuredtext",
    ".txt": "restructuredtext",
    ".md": "markdown",
}
# Napoleon settings (Google- and NumPy-style docstring parsing).
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = True
napoleon_use_admonition_for_notes = True
napoleon_use_admonition_for_references = False
napoleon_use_ivar = True
napoleon_use_param = True
napoleon_use_rtype = False
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# At the bottom of conf.py
# Base URL used by recommonmark's url_resolver in setup() below.
github_doc_root = "https://github.com/rtfd/recommonmark/tree/master/doc/"
# dummy variables to avoid errors
nbsphinx_allow_errors = True
def setup(app):
    """Sphinx entry point: register recommonmark's AutoStructify transform.

    Args:
        app: The Sphinx application object passed in by the builder.
    """
    recommonmark_settings = {
        # Resolve relative links in markdown files against the GitHub docs tree.
        "url_resolver": lambda url: github_doc_root + url,
        "auto_toc_tree_section": "Contents",
    }
    # Third argument True: changing this value triggers a full rebuild.
    app.add_config_value("recommonmark_config", recommonmark_settings, True)
    app.add_transform(AutoStructify)
| Python |
2D | romankempt/hetbuilder | tests/performance_plot.py | .py | 993 | 43 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick

# Wall-clock times in seconds measured for the coincidence lattice search
# (see tests/test_performance.py for how these were produced).
timings = [
    0.04170293807983398,
    0.15635199546813966,
    0.22000808715820314,
    0.3506165027618408,
    0.5720492362976074,
    1.0751613140106202,
    4.725947093963623,
    7.02956256866455,
    103.4705885887146,
]
# Number of grid-point combinations searched per run,
# i.e. (2 * (Nmax - Nmin))**4 * N_angles.
ncombs = [
    10000,
    160000,
    810000,
    2560000,
    6250000,
    12960000,
    65610000,
    100000000,
    1600000000,
]

fig, axes = plt.subplots(1, 1, figsize=(5, 5))
axes.scatter(ncombs, timings, color="tab:red")
axes.plot(ncombs, timings, color="tab:blue")
# axes.xaxis.set_major_formatter(mtick.FormatStrFormatter("%.1e"))
axes.set_xlim(min(ncombs) * 0.95, max(ncombs) * 1.05)
axes.set_ylim(-1, max(timings) * 1.05)
axes.set_xscale("log")
# Balanced LaTeX for the combination count (the original label had
# mismatched parentheses).
axes.set_xlabel(r"$(2 \cdot (N_{max} - N_{min}))^4 \cdot N_{angles}$")
# The y-axis shows raw timings in seconds, not a normalized scaling factor
# (the previously computed but unused `scaling` list has been removed).
axes.set_ylabel("Time [s]")
axes.set_title("Scaling with respect to grid points")
plt.show()
| Python |
2D | romankempt/hetbuilder | tests/test_backend.py | .py | 2,295 | 76 | import ase.io
from ase.atoms import Atoms
from ase.build import make_supercell
from pathlib import Path
from hetbuilder import PROJECT_ROOT_DIR
from hetbuilder.algorithm import cpp_atoms_to_ase_atoms, ase_atoms_to_cpp_atoms
from hetbuilder.hetbuilder_backend import (
double2dVector,
double1dVector,
int1dVector,
int2dVector,
CppAtomsClass,
CppCoincidenceAlgorithmClass,
CppInterfaceClass,
get_number_of_omp_threads,
cpp_make_supercell,
)
import spglib
from ase.utils.structure_comparator import SymmetryEquivalenceCheck
def test_backend_supercell():
    """Verify the C++ backend builds the same supercell as ase.build.make_supercell."""
    transformation = [[3, 1, 0], [-1, 2, 0], [0, 0, 1]]
    structures = (
        "tests/MoS2_2H_1l.xyz",
        "tests/WS2_2H_1l.xyz",
        "tests/graphene.xyz",
    )
    for relpath in structures:
        primitive = ase.io.read(PROJECT_ROOT_DIR.joinpath(relpath))
        # Convert the primitive cell to the backend representation first,
        # then build the supercell independently on both sides.
        backend_atoms = ase_atoms_to_cpp_atoms(primitive)
        reference = make_supercell(primitive, transformation)
        cpp_matrix = int2dVector([int1dVector(row) for row in transformation])
        candidate = cpp_atoms_to_ase_atoms(cpp_make_supercell(backend_atoms, cpp_matrix))
        checker = SymmetryEquivalenceCheck()
        assert checker.compare(
            reference, candidate
        ), "Supercell generation in backend and from ASE do not yield same result."
def test_spglib_standardize():
    """Verify the backend's cell standardization agrees with spglib's."""
    transformation = [[3, 1, 0], [-1, 2, 0], [0, 0, 1]]
    structures = (
        "tests/MoS2_2H_1l.xyz",
        "tests/WS2_2H_1l.xyz",
        "tests/graphene.xyz",
    )
    for relpath in structures:
        primitive = ase.io.read(PROJECT_ROOT_DIR.joinpath(relpath))
        supercell = make_supercell(primitive, transformation)

        # Backend path: standardize(to_primitive, no_idealize, symprec, angle_tolerance).
        backend_atoms = ase_atoms_to_cpp_atoms(supercell)
        backend_atoms.standardize(1, 0, 1e-5, 5)
        candidate = cpp_atoms_to_ase_atoms(backend_atoms)

        # Reference path: same standardization done directly with spglib.
        cell = (supercell.cell, supercell.get_scaled_positions(), supercell.numbers)
        spgcell = spglib.standardize_cell(
            cell, to_primitive=True, no_idealize=False, symprec=1e-5, angle_tolerance=5
        )
        reference = Atoms(
            cell=spgcell[0],
            scaled_positions=spgcell[1],
            numbers=spgcell[2],
            pbc=[True, True, True],
        )

        checker = SymmetryEquivalenceCheck()
        assert checker.compare(
            reference, candidate
        ), "Standardization in backend and from spglib do not yield same result."
| Python |
2D | romankempt/hetbuilder | tests/test_performance.py | .py | 927 | 32 | import ase.io
from pathlib import Path
from hetbuilder import PROJECT_ROOT_DIR
from hetbuilder.algorithm import CoincidenceAlgorithm
from hetbuilder.plotting import InteractivePlot
import numpy as np
import time
def test_scaling_performance():
    """Time the coincidence search for increasing grid sizes.

    Runs the algorithm for Nmax in (5, 10, 15) with a single angle, averaging
    five repetitions per grid size.

    Returns:
        tuple: (timings, ncombs) — mean wall-clock time in seconds per run and
        the corresponding number of grid-point combinations searched.
    """
    # BUG FIX: joinpath("/tests/...") passed an absolute component, which
    # makes pathlib discard PROJECT_ROOT_DIR entirely. Use relative paths,
    # consistent with tests/test_backend.py.
    bottom = ase.io.read(PROJECT_ROOT_DIR.joinpath("tests/MoS2_2H_1l.xyz"))
    top = ase.io.read(PROJECT_ROOT_DIR.joinpath("tests/WS2_2H_1l.xyz"))
    alg = CoincidenceAlgorithm(bottom, top)
    timings = []
    ncombs = []
    for i in [5, 10, 15]:
        subtimings = []
        # Combinations searched: (2 * (Nmax - Nmin))**4 * N_angles, Nmin = 0.
        ncombinations = ((2 * (i)) ** 4) * 1
        ncombs.append(ncombinations)
        for j in range(5):
            start = time.time()
            alg.run(Nmin=0, Nmax=i, angles=[0], tolerance=0.1)
            end = time.time()
            subtimings.append(end - start)
        # Mean over the five repetitions for this grid size.
        subtime = sum(subtimings) / len(subtimings)
        timings.append(subtime)
    return (timings, ncombs)
| Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.