# Uploaded by ravimohan19 via huggingface_hub (commit 7189af3, verified)
"""
Example: Physics-Informed Bayesian Optimization for Polymer Design
This example demonstrates optimizing a polymer formulation where:
- A physics model (simplified Flory-Huggins + Arrhenius kinetics) provides
prior knowledge about how composition and temperature affect properties.
- Initial experimental data provides a warm start.
- The BO loop efficiently explores the design space, leveraging both
physics and data to minimize the number of experiments needed.
Objective: Maximize polymer recyclability metric (higher is better).
Parameters:
- monomer_ratio: Ratio of monomer A to B (0.1 to 0.9)
- temperature: Reaction temperature in Kelvin (350 to 500)
- catalyst_loading: Catalyst weight percent (0.5 to 5.0)
"""
import torch
from torch import Tensor
from physics_informed_bo.experiment.parameter_space import ParameterSpace
from physics_informed_bo.experiment.campaign import OptimizationCampaign
from physics_informed_bo.config import OptimizationConfig, AcquisitionType
# ============================================================================
# 1. Define the physics model (simplified polymer recyclability model)
# ============================================================================
def polymer_physics_model(X: Tensor) -> Tensor:
    """Physics-based prior for polymer recyclability.

    Combines three simplified physical effects:
      * Flory-Huggins mixing thermodynamics (ideal entropy + interaction term),
      * Arrhenius temperature dependence of the reaction rate,
      * an empirical saturating catalyst-efficiency curve.

    Args:
        X: Tensor of shape (n, 3) whose columns are
            [monomer_ratio, temperature (K), catalyst_loading (wt%)].

    Returns:
        Tensor of shape (n,) with the predicted recyclability metric
        (higher values are better).
    """
    monomer_ratio = X[:, 0]
    temperature = X[:, 1]
    loading = X[:, 2]

    # Flory-Huggins interaction parameter; peaks at a 50:50 monomer split.
    interaction = 0.5 - 0.3 * (monomer_ratio - 0.5) ** 2
    # Ideal mixing entropy; the 1e-8 offsets guard log(0) at the boundaries.
    entropy = (
        -monomer_ratio * torch.log(monomer_ratio + 1e-8)
        - (1 - monomer_ratio) * torch.log(1 - monomer_ratio + 1e-8)
    )
    free_energy = entropy - interaction * monomer_ratio * (1 - monomer_ratio)

    # Arrhenius kinetics: rate = exp(-Ea / (R * T)).
    activation_energy = 50.0  # kJ/mol
    gas_constant = 8.314e-3  # kJ/(mol·K)
    arrhenius_rate = torch.exp(-activation_energy / (gas_constant * temperature))

    # Catalyst efficiency saturates with loading (diminishing returns).
    efficiency = 1 - torch.exp(-0.8 * loading)

    # Combined recyclability metric.
    return 5.0 * free_energy * arrhenius_rate * efficiency + 2.0
# ============================================================================
# 2. Define the "true" function (simulates real experiments with noise)
# ============================================================================
def true_recyclability(params: dict) -> float:
"""Simulate running an actual experiment (physics + discrepancy + noise)."""
X = torch.tensor(
[[params["monomer_ratio"], params["temperature"], params["catalyst_loading"]]],
dtype=torch.float64,
)
# Physics prediction
physics = polymer_physics_model(X).item()
# Add model discrepancy (physics doesn't capture everything)
ratio = params["monomer_ratio"]
temp = params["temperature"]
discrepancy = 0.3 * torch.sin(torch.tensor(10.0 * ratio)).item() \
+ 0.1 * (temp - 400) / 100
# Add measurement noise
noise = 0.05 * torch.randn(1).item()
return physics + discrepancy + noise
# ============================================================================
# 3. Set up the optimization campaign
# ============================================================================
def main():
    """Run the polymer-recyclability optimization campaign end to end.

    Builds the parameter space, generates a warm-start dataset, runs the
    physics-informed BO loop, then prints and saves the results.
    """
    # Define parameter space
    space = ParameterSpace()
    space.add_continuous("monomer_ratio", 0.1, 0.9, units="ratio")
    space.add_continuous("temperature", 350.0, 500.0, units="K")
    space.add_continuous("catalyst_loading", 0.5, 5.0, units="wt%")

    # Generate some initial experimental data.  Single named constant keeps
    # the sample count and the optimizer config in sync (was hard-coded 5
    # in three places).
    n_initial = 5
    torch.manual_seed(42)
    X_init = space.sample_latin_hypercube(n_initial)
    # Convert once up front; the original called space.to_dict(X_init)
    # inside the comprehension, redoing the full conversion per experiment.
    init_params = space.to_dict(X_init)
    y_init = torch.tensor(
        [true_recyclability(p) for p in init_params],
        dtype=torch.float64,
    ).unsqueeze(-1)

    print("=== Initial Data ===")
    for i, (params, y_val) in enumerate(zip(init_params, y_init)):
        print(f" Exp {i+1}: {params} -> {y_val.item():.4f}")

    # Configure optimization
    config = OptimizationConfig(
        acquisition_type=AcquisitionType.PHYSICS_INFORMED_EI,
        n_initial_samples=n_initial,
        max_iterations=20,
        use_physics_mean=True,
        noise_variance=0.01,
    )

    # Create campaign
    campaign = OptimizationCampaign(
        name="polymer_recyclability",
        parameter_space=space,
        physics_fn=polymer_physics_model,
        initial_data=(X_init, y_init),
        config=config,
        maximize=True,
    )

    print("\n=== Running Optimization ===")

    def callback(iteration, best):
        """Log the running best objective after each BO iteration."""
        print(f" Iteration {iteration}: best = {best['objective']:.4f}")

    # Run automated optimization.
    # NOTE(review): config.max_iterations is 20 but the run here is capped
    # at 15 — confirm which limit is intended and unify the two values.
    results_df = campaign.run_automated(
        objective_fn=true_recyclability,
        max_iterations=15,
        callback=callback,
    )

    # Report results
    best = campaign.get_best()
    print("\n=== Best Result ===")
    print(f" Parameters: {best['parameters']}")
    print(f" Objective: {best['objective']:.4f}")

    print("\n=== Campaign Summary ===")
    summary = campaign.summary()
    print(f" Total experiments: {summary['n_experiments']}")
    print(f" Physics model R²: {summary['model_summary'].get('model_quality', {}).get('r2', 'N/A')}")

    # Persist the campaign state and the per-experiment results table.
    campaign.save("polymer_campaign.json")
    results_df.to_csv("polymer_results.csv", index=False)
    print("\nResults saved to polymer_campaign.json and polymer_results.csv")
if __name__ == "__main__":
main()