# NOTE: The three lines below are Hugging Face Hub page residue (uploader,
# commit message, commit hash) accidentally captured with the file; they are
# not Python and are preserved here as comments so the module parses.
# ravimohan19's picture
# Upload models/gp_model.py with huggingface_hub
# 814f98a verified
"""GPyTorch-based Gaussian Process models with physics-informed priors."""
from typing import Callable, Optional, Tuple
import torch
from torch import Tensor
import gpytorch
from gpytorch.models import ExactGP
from gpytorch.means import ConstantMean, ZeroMean
from gpytorch.kernels import ScaleKernel, RBFKernel, MaternKernel
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.distributions import MultivariateNormal
from gpytorch.mlls import ExactMarginalLogLikelihood
from botorch.models.gpytorch import GPyTorchModel
from botorch.posteriors.gpytorch import GPyTorchPosterior
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Standardize
from physics_informed_bo.models.base import SurrogateModel
from physics_informed_bo.models.physics_model import PhysicsMeanFunction
class _ExactGPModel(ExactGP, GPyTorchModel):
    """Core GPyTorch ExactGP model with BoTorch compatibility.

    Pairs a configurable mean module (constant by default) with a scaled
    RBF or Matern-5/2 covariance, optionally with ARD lengthscales.
    """

    # BoTorch requires models to advertise their output dimensionality.
    _num_outputs = 1

    def __init__(
        self,
        train_X: Tensor,
        train_y: Tensor,
        likelihood: GaussianLikelihood,
        mean_module: Optional[gpytorch.means.Mean] = None,
        kernel: str = "matern",
        ard_num_dims: Optional[int] = None,
    ):
        """Build the ExactGP.

        Args:
            train_X: Training inputs, shape (n, d).
            train_y: Training targets; a trailing singleton dim is dropped
                because ExactGP expects 1-D targets.
            likelihood: Gaussian observation likelihood.
            mean_module: Optional prior mean; defaults to ConstantMean.
            kernel: Either "rbf" or "matern" (Matern-5/2).
            ard_num_dims: Number of input dims for ARD; None disables ARD.

        Raises:
            ValueError: If ``kernel`` is not one of the supported names.
        """
        super().__init__(train_X, train_y.squeeze(-1), likelihood)
        self.mean_module = mean_module if mean_module is not None else ConstantMean()
        kernel_factories = {
            "rbf": lambda: RBFKernel(ard_num_dims=ard_num_dims),
            "matern": lambda: MaternKernel(nu=2.5, ard_num_dims=ard_num_dims),
        }
        try:
            base_kernel = kernel_factories[kernel]()
        except KeyError:
            raise ValueError(f"Unknown kernel: {kernel}. Use 'rbf' or 'matern'.") from None
        self.covar_module = ScaleKernel(base_kernel)

    def forward(self, X: Tensor) -> MultivariateNormal:
        """Return the prior multivariate normal at the query points X."""
        return MultivariateNormal(self.mean_module(X), self.covar_module(X))
class StandardGP(SurrogateModel):
    """Standard Gaussian Process model (no physics, pure data-driven).

    Uses GPyTorch for the GP and is BoTorch-compatible for optimization.
    Optionally normalizes inputs to the unit cube and standardizes outputs
    to zero mean / unit variance before fitting; ``predict`` and
    ``posterior`` always report results in the original output scale.
    """

    def __init__(
        self,
        kernel: str = "matern",
        noise_variance: float = 0.01,
        learn_noise: bool = True,
        normalize_inputs: bool = True,
        standardize_outputs: bool = True,
        device: str = "cpu",
        dtype: torch.dtype = torch.float64,
    ):
        """Configure the GP.

        Args:
            kernel: "rbf" or "matern" (validated in _ExactGPModel).
            noise_variance: Fixed observation-noise variance, used only
                when ``learn_noise`` is False.
            learn_noise: If True, the noise level is learned via MLE.
            normalize_inputs: If True, min-max normalize X to [0, 1]^d.
            standardize_outputs: If True, standardize y to zero mean /
                unit variance for training.
            device: Torch device string.
            dtype: Torch dtype used for all tensors.
        """
        self.kernel = kernel
        self.noise_variance = noise_variance
        self.learn_noise = learn_noise
        self.normalize_inputs = normalize_inputs
        self.standardize_outputs = standardize_outputs
        self.device = torch.device(device)
        self.dtype = dtype
        self._model = None
        self._likelihood = None
        # Learned transforms (populated in fit); None when disabled.
        self._input_transform = None
        self._outcome_transform = None

    def fit(
        self,
        X: Tensor,
        y: Tensor,
        training_iterations: int = 100,
        lr: float = 0.1,
    ) -> None:
        """Fit the GP model by optimizing the marginal log likelihood.

        Args:
            X: Training inputs, shape (n, d).
            y: Training targets, shape (n,) or (n, 1).
            training_iterations: Number of Adam steps for type-II MLE.
            lr: Adam learning rate.
        """
        X = X.to(device=self.device, dtype=self.dtype)
        y = y.to(device=self.device, dtype=self.dtype)
        if y.dim() == 1:
            y = y.unsqueeze(-1)
        # BUGFIX: normalize_inputs / standardize_outputs were previously
        # accepted but silently ignored. The BoTorch Normalize/Standardize
        # transforms (already imported at module top) are now applied.
        if self.normalize_inputs:
            self._input_transform = Normalize(d=X.shape[-1])
            # Forward pass in train mode learns the min/range bounds.
            X_train = self._input_transform(X)
            self._input_transform.eval()
        else:
            self._input_transform = None
            X_train = X
        if self.standardize_outputs:
            self._outcome_transform = Standardize(m=1)
            # Forward pass in train mode learns the mean/std coefficients.
            y_train, _ = self._outcome_transform(y)
            self._outcome_transform.eval()
        else:
            self._outcome_transform = None
            y_train = y
        self._likelihood = GaussianLikelihood()
        if not self.learn_noise:
            # Pin the noise at the configured value and freeze it.
            self._likelihood.noise = self.noise_variance
            self._likelihood.noise_covar.raw_noise.requires_grad_(False)
        self._model = _ExactGPModel(
            train_X=X_train,
            train_y=y_train,
            likelihood=self._likelihood,
            kernel=self.kernel,
            ard_num_dims=X_train.shape[-1],
        ).to(device=self.device, dtype=self.dtype)
        self._optimize_hyperparameters(X_train, y_train, training_iterations, lr)

    def _optimize_hyperparameters(
        self, X: Tensor, y: Tensor, n_iter: int, lr: float
    ) -> None:
        """Optimize GP hyperparameters via type-II MLE (Adam on -MLL)."""
        self._model.train()
        self._likelihood.train()
        optimizer = torch.optim.Adam(self._model.parameters(), lr=lr)
        mll = ExactMarginalLogLikelihood(self._likelihood, self._model)
        for _ in range(n_iter):
            optimizer.zero_grad()
            output = self._model(X)
            loss = -mll(output, y.squeeze(-1))
            loss.backward()
            optimizer.step()
        self._model.eval()
        self._likelihood.eval()

    def predict(self, X: Tensor) -> Tuple[Tensor, Tensor]:
        """Return predictive mean and variance (original output scale).

        Args:
            X: Query inputs, shape (m, d).

        Returns:
            Tuple of (mean, variance), each shape (m, 1). The variance
            includes observation noise (likelihood is applied).
        """
        X = X.to(device=self.device, dtype=self.dtype)
        if self._input_transform is not None:
            X = self._input_transform(X)
        self._model.eval()
        self._likelihood.eval()
        with torch.no_grad(), gpytorch.settings.fast_pred_var():
            posterior = self._likelihood(self._model(X))
            mean = posterior.mean.unsqueeze(-1)
            variance = posterior.variance.unsqueeze(-1)
        if self._outcome_transform is not None:
            # Map predictions back from standardized to original scale.
            mean, variance = self._outcome_transform.untransform(mean, variance)
        return mean, variance

    def posterior(self, X: Tensor):
        """Return the BoTorch posterior at X (original output scale)."""
        self._model.eval()
        self._likelihood.eval()
        X = X.to(device=self.device, dtype=self.dtype)
        if self._input_transform is not None:
            X = self._input_transform(X)
        post = self._model.posterior(X)
        if self._outcome_transform is not None:
            post = self._outcome_transform.untransform_posterior(post)
        return post

    @property
    def model(self):
        """Access the underlying BoTorch-compatible GP model."""
        return self._model
class PhysicsInformedGP(SurrogateModel):
    """GP with a physics model as the mean function.

    The GP prior mean is set to the physics model predictions, so the GP
    learns the residual (discrepancy) between the physics model and reality.
    This is the core model of the platform.

    Architecture:
        f(x) = physics(x) + GP_residual(x)
    where GP_residual ~ GP(0, k(x, x'))
    """

    def __init__(
        self,
        physics_fn: Callable[[Tensor], Tensor],
        kernel: str = "matern",
        physics_output_scale: float = 1.0,
        learnable_physics_scale: bool = True,
        noise_variance: float = 0.01,
        learn_noise: bool = True,
        device: str = "cpu",
        dtype: torch.dtype = torch.float64,
    ):
        """Configure the physics-informed GP.

        Args:
            physics_fn: Callable mapping inputs (n, d) to physics
                predictions used as the GP prior mean.
            kernel: "rbf" or "matern" (validated in _ExactGPModel).
            physics_output_scale: Initial multiplicative scale applied to
                the physics predictions.
            learnable_physics_scale: If True, the scale is learned jointly
                with the GP hyperparameters.
            noise_variance: Fixed observation-noise variance, used only
                when ``learn_noise`` is False.
            learn_noise: If True, the noise level is learned via MLE.
            device: Torch device string.
            dtype: Torch dtype used for all tensors.
        """
        self.physics_fn = physics_fn
        self.kernel = kernel
        self.physics_output_scale = physics_output_scale
        self.learnable_physics_scale = learnable_physics_scale
        self.noise_variance = noise_variance
        self.learn_noise = learn_noise
        self.device = torch.device(device)
        self.dtype = dtype
        self._model = None
        self._likelihood = None

    def fit(
        self,
        X: Tensor,
        y: Tensor,
        training_iterations: int = 200,
        lr: float = 0.05,
    ) -> None:
        """Fit the physics-informed GP model.

        Args:
            X: Training inputs, shape (n, d).
            y: Training targets, shape (n,) or (n, 1).
            training_iterations: Number of Adam steps for type-II MLE.
            lr: Adam learning rate.
        """
        X = X.to(device=self.device, dtype=self.dtype)
        y = y.to(device=self.device, dtype=self.dtype)
        if y.dim() == 1:
            y = y.unsqueeze(-1)
        self._likelihood = GaussianLikelihood()
        if not self.learn_noise:
            # Pin the noise at the configured value and freeze it.
            self._likelihood.noise = self.noise_variance
            self._likelihood.noise_covar.raw_noise.requires_grad_(False)
        # Physics model becomes the GP prior mean; the GP then only has to
        # learn the residual between physics and data.
        physics_mean = PhysicsMeanFunction(
            physics_fn=self.physics_fn,
            output_scale=self.physics_output_scale,
            learnable_scale=self.learnable_physics_scale,
        )
        self._model = _ExactGPModel(
            train_X=X,
            train_y=y,
            likelihood=self._likelihood,
            mean_module=physics_mean,
            kernel=self.kernel,
            ard_num_dims=X.shape[-1],
        ).to(device=self.device, dtype=self.dtype)
        self._optimize_hyperparameters(X, y, training_iterations, lr)

    def _optimize_hyperparameters(
        self, X: Tensor, y: Tensor, n_iter: int, lr: float
    ) -> None:
        """Optimize GP hyperparameters via type-II MLE (Adam on -MLL)."""
        self._model.train()
        self._likelihood.train()
        optimizer = torch.optim.Adam(self._model.parameters(), lr=lr)
        mll = ExactMarginalLogLikelihood(self._likelihood, self._model)
        for _ in range(n_iter):
            optimizer.zero_grad()
            output = self._model(X)
            loss = -mll(output, y.squeeze(-1))
            loss.backward()
            optimizer.step()
        self._model.eval()
        self._likelihood.eval()

    def predict(self, X: Tensor) -> Tuple[Tensor, Tensor]:
        """Return predictive mean and variance at X.

        Returns:
            Tuple of (mean, variance), each shape (m, 1). The variance
            includes observation noise (likelihood is applied).
        """
        X = X.to(device=self.device, dtype=self.dtype)
        self._model.eval()
        self._likelihood.eval()
        with torch.no_grad(), gpytorch.settings.fast_pred_var():
            posterior = self._likelihood(self._model(X))
            mean = posterior.mean.unsqueeze(-1)
            variance = posterior.variance.unsqueeze(-1)
        return mean, variance

    def posterior(self, X: Tensor):
        """Return the BoTorch posterior at X."""
        self._model.eval()
        self._likelihood.eval()
        return self._model.posterior(X)

    @property
    def model(self):
        """Access the underlying BoTorch-compatible GP model."""
        return self._model

    def get_residuals(self, X: Tensor, y: Tensor) -> Tensor:
        """Compute residuals between observations and physics predictions.

        Args:
            X: Inputs, shape (n, d).
            y: Observations, shape (n,) or (n, 1).

        Returns:
            Tensor of residuals ``y - physics_fn(X)``, shape (n,)
            (assuming physics_fn returns shape (n,) — TODO confirm).
        """
        # BUGFIX: cast to the model's device/dtype (matching fit/predict),
        # and squeeze only the trailing output dim: a bare y.squeeze()
        # would also collapse a batch dimension of size 1.
        X = X.to(device=self.device, dtype=self.dtype)
        y = y.to(device=self.device, dtype=self.dtype)
        if y.dim() > 1:
            y = y.squeeze(-1)
        with torch.no_grad():
            physics_pred = self.physics_fn(X)
        return y - physics_pred