"""
Example: Physics-Informed Bayesian Optimization for Polymer Design
This example demonstrates optimizing a polymer formulation where:
- A physics model (simplified Flory-Huggins + Arrhenius kinetics) provides
prior knowledge about how composition and temperature affect properties.
- Initial experimental data provides a warm start.
- The BO loop efficiently explores the design space, leveraging both
physics and data to minimize the number of experiments needed.
Objective: Maximize polymer recyclability metric (higher is better).
Parameters:
- monomer_ratio: Ratio of monomer A to B (0.1 to 0.9)
- temperature: Reaction temperature in Kelvin (350 to 500)
- catalyst_loading: Catalyst weight percent (0.5 to 5.0)
"""
import torch
from torch import Tensor
from physics_informed_bo.experiment.parameter_space import ParameterSpace
from physics_informed_bo.experiment.campaign import OptimizationCampaign
from physics_informed_bo.config import OptimizationConfig, AcquisitionType
# ============================================================================
# 1. Define the physics model (simplified polymer recyclability model)
# ============================================================================
def polymer_physics_model(X: Tensor) -> Tensor:
    """Toy physics prior for polymer recyclability.

    Combines three simplified effects:
    - Flory-Huggins mixing thermodynamics (entropy + chi interaction term),
    - Arrhenius temperature dependence of the reaction rate,
    - an empirical saturating catalyst-efficiency curve.

    Args:
        X: Tensor of shape (n, 3); columns are
            [monomer_ratio, temperature (K), catalyst_loading (wt%)].

    Returns:
        Tensor of shape (n,) with the predicted recyclability metric
        (higher = better).
    """
    phi = X[:, 0]        # monomer A fraction
    T = X[:, 1]          # reaction temperature (K)
    cat_wt = X[:, 2]     # catalyst loading (wt%)

    # Flory-Huggins: interaction parameter peaks at a 50:50 ratio;
    # 1e-8 guards the logs against phi == 0 or phi == 1.
    interaction = 0.5 - 0.3 * (phi - 0.5) ** 2
    entropy = -phi * torch.log(phi + 1e-8) - (1 - phi) * torch.log(1 - phi + 1e-8)
    delta_g_mix = entropy - interaction * phi * (1 - phi)

    # Arrhenius factor: Ea = 50 kJ/mol, R in kJ/(mol·K).
    activation_energy = 50.0
    gas_constant = 8.314e-3
    arrhenius = torch.exp(-activation_energy / (gas_constant * T))

    # Catalyst efficiency saturates (diminishing returns with loading).
    efficiency = 1 - torch.exp(-0.8 * cat_wt)

    # Weighted product plus a constant offset gives the final metric.
    return 5.0 * delta_g_mix * arrhenius * efficiency + 2.0
# ============================================================================
# 2. Define the "true" function (simulates real experiments with noise)
# ============================================================================
def true_recyclability(params: dict) -> float:
    """Simulate one real experiment: physics prediction + discrepancy + noise.

    Args:
        params: Dict with keys "monomer_ratio", "temperature",
            "catalyst_loading".

    Returns:
        A noisy scalar recyclability measurement.
    """
    row = [[params["monomer_ratio"], params["temperature"], params["catalyst_loading"]]]
    features = torch.tensor(row, dtype=torch.float64)

    # What the physics model alone would predict.
    predicted = polymer_physics_model(features).item()

    # Model discrepancy: the physics model does not capture everything,
    # so add a smooth, deterministic deviation in ratio and temperature.
    r = params["monomer_ratio"]
    t = params["temperature"]
    model_gap = 0.3 * torch.sin(torch.tensor(10.0 * r)).item() \
        + 0.1 * (t - 400) / 100

    # Gaussian measurement noise on top.
    measurement_noise = 0.05 * torch.randn(1).item()
    return predicted + model_gap + measurement_noise
# ============================================================================
# 3. Set up the optimization campaign
# ============================================================================
def main():
    """Run the polymer recyclability optimization campaign end to end.

    Builds the parameter space, generates Latin-hypercube warm-start data,
    runs the physics-informed BO loop against the simulated experiment,
    then reports the best result and saves the campaign artifacts.
    """
    # Define parameter space
    space = ParameterSpace()
    space.add_continuous("monomer_ratio", 0.1, 0.9, units="ratio")
    space.add_continuous("temperature", 350.0, 500.0, units="K")
    space.add_continuous("catalyst_loading", 0.5, 5.0, units="wt%")

    # Generate some initial experimental data. Fixed seed keeps the
    # warm-start design (and the noisy measurements) reproducible.
    torch.manual_seed(42)
    X_init = space.sample_latin_hypercube(5)
    # Convert once and reuse: the original recomputed space.to_dict(X_init)
    # on every loop iteration and again for printing below.
    init_params = space.to_dict(X_init)
    y_init = torch.tensor(
        [true_recyclability(init_params[i]) for i in range(5)],
        dtype=torch.float64,
    ).unsqueeze(-1)

    print("=== Initial Data ===")
    for i, (params, y_val) in enumerate(zip(init_params, y_init)):
        print(f" Exp {i+1}: {params} -> {y_val.item():.4f}")

    # Configure optimization
    config = OptimizationConfig(
        acquisition_type=AcquisitionType.PHYSICS_INFORMED_EI,
        n_initial_samples=5,
        max_iterations=20,
        use_physics_mean=True,
        noise_variance=0.01,
    )

    # Create campaign
    campaign = OptimizationCampaign(
        name="polymer_recyclability",
        parameter_space=space,
        physics_fn=polymer_physics_model,
        initial_data=(X_init, y_init),
        config=config,
        maximize=True,
    )

    print("\n=== Running Optimization ===")

    def callback(iteration, best):
        # Progress reporting only; no effect on the optimization itself.
        print(f" Iteration {iteration}: best = {best['objective']:.4f}")

    # Run automated optimization.
    # NOTE(review): max_iterations=15 here overrides the config's 20 —
    # confirm the override is intentional.
    results_df = campaign.run_automated(
        objective_fn=true_recyclability,
        max_iterations=15,
        callback=callback,
    )

    # Report results (plain strings — the originals were f-strings with no
    # placeholders, flagged by ruff F541).
    best = campaign.get_best()
    print("\n=== Best Result ===")
    print(f" Parameters: {best['parameters']}")
    print(f" Objective: {best['objective']:.4f}")
    print("\n=== Campaign Summary ===")
    summary = campaign.summary()
    print(f" Total experiments: {summary['n_experiments']}")
    print(f" Physics model R²: {summary['model_summary'].get('model_quality', {}).get('r2', 'N/A')}")

    # Save results
    campaign.save("polymer_campaign.json")
    results_df.to_csv("polymer_results.csv", index=False)
    print("\nResults saved to polymer_campaign.json and polymer_results.csv")


if __name__ == "__main__":
    main()