repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
smt | smt-master/smt/examples/__init__.py | 0 | 0 | 0 | py | |
smt | smt-master/smt/examples/b777_engine/run_b777_engine_rmtc.py | from smt.surrogate_models import RMTC
from smt.examples.b777_engine.b777_engine import get_b777_engine, plot_b777_engine
xt, yt, dyt_dxt, xlimits = get_b777_engine()
interp = RMTC(
num_elements=6,
xlimits=xlimits,
nonlinear_maxiter=20,
approx_order=2,
energy_weight=0.0,
regularization_weight=0.0,
extrapolate=True,
)
interp.set_training_values(xt, yt)
interp.set_training_derivatives(xt, dyt_dxt[:, :, 0], 0)
interp.set_training_derivatives(xt, dyt_dxt[:, :, 1], 1)
interp.set_training_derivatives(xt, dyt_dxt[:, :, 2], 2)
interp.train()
plot_b777_engine(xt, yt, xlimits, interp)
| 612 | 26.863636 | 82 | py |
smt | smt-master/smt/examples/b777_engine/run_b777_engine_rmtb.py | from smt.surrogate_models import RMTB
from smt.examples.b777_engine.b777_engine import get_b777_engine, plot_b777_engine
xt, yt, dyt_dxt, xlimits = get_b777_engine()
interp = RMTB(
num_ctrl_pts=15,
xlimits=xlimits,
nonlinear_maxiter=20,
approx_order=2,
energy_weight=0e-14,
regularization_weight=0e-18,
extrapolate=True,
)
interp.set_training_values(xt, yt)
interp.set_training_derivatives(xt, dyt_dxt[:, :, 0], 0)
interp.set_training_derivatives(xt, dyt_dxt[:, :, 1], 1)
interp.set_training_derivatives(xt, dyt_dxt[:, :, 2], 2)
interp.train()
plot_b777_engine(xt, yt, xlimits, interp)
| 617 | 27.090909 | 82 | py |
smt | smt-master/smt/examples/b777_engine/b777_engine.py | import numpy as np
import os
def get_b777_engine():
this_dir = os.path.split(__file__)[0]
nt = 12 * 11 * 8
xt = np.loadtxt(os.path.join(this_dir, "b777_engine_inputs.dat")).reshape((nt, 3))
yt = np.loadtxt(os.path.join(this_dir, "b777_engine_outputs.dat")).reshape((nt, 2))
dyt_dxt = np.loadtxt(os.path.join(this_dir, "b777_engine_derivs.dat")).reshape(
(nt, 2, 3)
)
xlimits = np.array([[0, 0.9], [0, 15], [0, 1.0]])
return xt, yt, dyt_dxt, xlimits
def plot_b777_engine(xt, yt, limits, interp):
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
val_M = np.array(
[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.75, 0.8, 0.85, 0.9]
) # 12
val_h = np.array(
[0.0, 0.6096, 1.524, 3.048, 4.572, 6.096, 7.62, 9.144, 10.668, 11.8872, 13.1064]
) # 11
val_t = np.array([0.05, 0.2, 0.3, 0.4, 0.6, 0.8, 0.9, 1.0]) # 8
def get_pts(xt, yt, iy, ind_M=None, ind_h=None, ind_t=None):
eps = 1e-5
if ind_M is not None:
M = val_M[ind_M]
keep = abs(xt[:, 0] - M) < eps
xt = xt[keep, :]
yt = yt[keep, :]
if ind_h is not None:
h = val_h[ind_h]
keep = abs(xt[:, 1] - h) < eps
xt = xt[keep, :]
yt = yt[keep, :]
if ind_t is not None:
t = val_t[ind_t]
keep = abs(xt[:, 2] - t) < eps
xt = xt[keep, :]
yt = yt[keep, :]
if ind_M is None:
data = xt[:, 0], yt[:, iy]
elif ind_h is None:
data = xt[:, 1], yt[:, iy]
elif ind_t is None:
data = xt[:, 2], yt[:, iy]
if iy == 0:
data = data[0], data[1] / 1e6
elif iy == 1:
data = data[0], data[1] / 1e-4
return data
num = 100
x = np.zeros((num, 3))
lins_M = np.linspace(0.0, 0.9, num)
lins_h = np.linspace(0.0, 13.1064, num)
lins_t = np.linspace(0.05, 1.0, num)
def get_x(ind_M=None, ind_h=None, ind_t=None):
x = np.zeros((num, 3))
x[:, 0] = lins_M
x[:, 1] = lins_h
x[:, 2] = lins_t
if ind_M:
x[:, 0] = val_M[ind_M]
if ind_h:
x[:, 1] = val_h[ind_h]
if ind_t:
x[:, 2] = val_t[ind_t]
return x
nrow = 6
ncol = 2
ind_M_1 = -2
ind_M_2 = -5
ind_t_1 = 1
ind_t_2 = -1
plt.close()
# --------------------
fig, axs = plt.subplots(6, 2, gridspec_kw={"hspace": 0.5}, figsize=(15, 25))
axs[0, 0].set_title("M={}".format(val_M[ind_M_1]))
axs[0, 0].set(xlabel="throttle", ylabel="thrust (x 1e6 N)")
axs[0, 1].set_title("M={}".format(val_M[ind_M_1]))
axs[0, 1].set(xlabel="throttle", ylabel="SFC (x 1e-3 N/N/s)")
axs[1, 0].set_title("M={}".format(val_M[ind_M_2]))
axs[1, 0].set(xlabel="throttle", ylabel="thrust (x 1e6 N)")
axs[1, 1].set_title("M={}".format(val_M[ind_M_2]))
axs[1, 1].set(xlabel="throttle", ylabel="SFC (x 1e-3 N/N/s)")
# --------------------
axs[2, 0].set_title("throttle={}".format(val_t[ind_t_1]))
axs[2, 0].set(xlabel="altitude (km)", ylabel="thrust (x 1e6 N)")
axs[2, 1].set_title("throttle={}".format(val_t[ind_t_1]))
axs[2, 1].set(xlabel="altitude (km)", ylabel="SFC (x 1e-3 N/N/s)")
axs[3, 0].set_title("throttle={}".format(val_t[ind_t_2]))
axs[3, 0].set(xlabel="altitude (km)", ylabel="thrust (x 1e6 N)")
axs[3, 1].set_title("throttle={}".format(val_t[ind_t_2]))
axs[3, 1].set(xlabel="altitude (km)", ylabel="SFC (x 1e-3 N/N/s)")
# --------------------
axs[4, 0].set_title("throttle={}".format(val_t[ind_t_1]))
axs[4, 0].set(xlabel="Mach number", ylabel="thrust (x 1e6 N)")
axs[4, 1].set_title("throttle={}".format(val_t[ind_t_1]))
axs[4, 1].set(xlabel="Mach number", ylabel="SFC (x 1e-3 N/N/s)")
axs[5, 0].set_title("throttle={}".format(val_t[ind_t_2]))
axs[5, 0].set(xlabel="Mach number", ylabel="thrust (x 1e6 N)")
axs[5, 1].set_title("throttle={}".format(val_t[ind_t_2]))
axs[5, 1].set(xlabel="Mach number", ylabel="SFC (x 1e-3 N/N/s)")
ind_h_list = [0, 4, 7, 10]
ind_h_list = [4, 7, 10]
ind_M_list = [0, 3, 6, 11]
ind_M_list = [3, 6, 11]
colors = ["b", "r", "g", "c", "m"]
# -----------------------------------------------------------------------------
# Throttle slices
for k, ind_h in enumerate(ind_h_list):
ind_M = ind_M_1
x = get_x(ind_M=ind_M, ind_h=ind_h)
y = interp.predict_values(x)
xt_, yt_ = get_pts(xt, yt, 0, ind_M=ind_M, ind_h=ind_h)
axs[0, 0].plot(xt_, yt_, "o" + colors[k])
axs[0, 0].plot(lins_t, y[:, 0] / 1e6, colors[k])
xt_, yt_ = get_pts(xt, yt, 1, ind_M=ind_M, ind_h=ind_h)
axs[0, 1].plot(xt_, yt_, "o" + colors[k])
axs[0, 1].plot(lins_t, y[:, 1] / 1e-4, colors[k])
ind_M = ind_M_2
x = get_x(ind_M=ind_M, ind_h=ind_h)
y = interp.predict_values(x)
xt_, yt_ = get_pts(xt, yt, 0, ind_M=ind_M, ind_h=ind_h)
axs[1, 0].plot(xt_, yt_, "o" + colors[k])
axs[1, 0].plot(lins_t, y[:, 0] / 1e6, colors[k])
xt_, yt_ = get_pts(xt, yt, 1, ind_M=ind_M, ind_h=ind_h)
axs[1, 1].plot(xt_, yt_, "o" + colors[k])
axs[1, 1].plot(lins_t, y[:, 1] / 1e-4, colors[k])
# -----------------------------------------------------------------------------
# Altitude slices
for k, ind_M in enumerate(ind_M_list):
ind_t = ind_t_1
x = get_x(ind_M=ind_M, ind_t=ind_t)
y = interp.predict_values(x)
xt_, yt_ = get_pts(xt, yt, 0, ind_M=ind_M, ind_t=ind_t)
axs[2, 0].plot(xt_, yt_, "o" + colors[k])
axs[2, 0].plot(lins_h, y[:, 0] / 1e6, colors[k])
xt_, yt_ = get_pts(xt, yt, 1, ind_M=ind_M, ind_t=ind_t)
axs[2, 1].plot(xt_, yt_, "o" + colors[k])
axs[2, 1].plot(lins_h, y[:, 1] / 1e-4, colors[k])
ind_t = ind_t_2
x = get_x(ind_M=ind_M, ind_t=ind_t)
y = interp.predict_values(x)
xt_, yt_ = get_pts(xt, yt, 0, ind_M=ind_M, ind_t=ind_t)
axs[3, 0].plot(xt_, yt_, "o" + colors[k])
axs[3, 0].plot(lins_h, y[:, 0] / 1e6, colors[k])
xt_, yt_ = get_pts(xt, yt, 1, ind_M=ind_M, ind_t=ind_t)
axs[3, 1].plot(xt_, yt_, "o" + colors[k])
axs[3, 1].plot(lins_h, y[:, 1] / 1e-4, colors[k])
# -----------------------------------------------------------------------------
# Mach number slices
for k, ind_h in enumerate(ind_h_list):
ind_t = ind_t_1
x = get_x(ind_t=ind_t, ind_h=ind_h)
y = interp.predict_values(x)
xt_, yt_ = get_pts(xt, yt, 0, ind_h=ind_h, ind_t=ind_t)
axs[4, 0].plot(xt_, yt_, "o" + colors[k])
axs[4, 0].plot(lins_M, y[:, 0] / 1e6, colors[k])
xt_, yt_ = get_pts(xt, yt, 1, ind_h=ind_h, ind_t=ind_t)
axs[4, 1].plot(xt_, yt_, "o" + colors[k])
axs[4, 1].plot(lins_M, y[:, 1] / 1e-4, colors[k])
ind_t = ind_t_2
x = get_x(ind_t=ind_t, ind_h=ind_h)
y = interp.predict_values(x)
xt_, yt_ = get_pts(xt, yt, 0, ind_h=ind_h, ind_t=ind_t)
axs[5, 0].plot(xt_, yt_, "o" + colors[k])
axs[5, 0].plot(lins_M, y[:, 0] / 1e6, colors[k])
xt_, yt_ = get_pts(xt, yt, 1, ind_h=ind_h, ind_t=ind_t)
axs[5, 1].plot(xt_, yt_, "o" + colors[k])
axs[5, 1].plot(lins_M, y[:, 1] / 1e-4, colors[k])
# -----------------------------------------------------------------------------
for k in range(2):
legend_entries = []
for ind_h in ind_h_list:
legend_entries.append("h={}".format(val_h[ind_h]))
legend_entries.append("")
axs[k, 0].legend(legend_entries)
axs[k, 1].legend(legend_entries)
axs[k + 4, 0].legend(legend_entries)
axs[k + 4, 1].legend(legend_entries)
legend_entries = []
for ind_M in ind_M_list:
legend_entries.append("M={}".format(val_M[ind_M]))
legend_entries.append("")
axs[k + 2, 0].legend(legend_entries)
axs[k + 2, 1].legend(legend_entries)
plt.show()
| 8,217 | 30.976654 | 88 | py |
smt | smt-master/smt/examples/b777_engine/__init__.py | 0 | 0 | 0 | py | |
smt | smt-master/smt/examples/b777_engine/tests/test_b777_engine.py | import unittest
import matplotlib
matplotlib.use("Agg")
try:
from smt.surrogate_models import RMTB, RMTC
compiled_available = True
except:
compiled_available = False
class Test(unittest.TestCase):
@unittest.skipIf(not compiled_available, "C compilation failed")
def test_rmtb(self):
from smt.examples.b777_engine import run_b777_engine_rmtb
@unittest.skipIf(not compiled_available, "C compilation failed")
def test_rmtc(self):
from smt.examples.b777_engine import run_b777_engine_rmtc
if __name__ == "__main__":
import matplotlib.pyplot as plt
Test().test_rmtc()
plt.savefig("test.pdf")
| 653 | 20.8 | 68 | py |
smt | smt-master/smt/examples/b777_engine/tests/__init__.py | 0 | 0 | 0 | py | |
smt | smt-master/smt/examples/multi_modal/run_genn_demo.py | """
G R A D I E N T - E N H A N C E D N E U R A L N E T W O R K S (G E N N)
Description: This program uses the two dimensional Rastrigin function to demonstrate GENN,
which is an egg-crate-looking function that can be challenging to fit because
of its multi-modality.
Author: Steven H. Berguin <steven.berguin@gtri.gatech.edu>
This package is distributed under New BSD license.
"""
from smt.surrogate_models.genn import GENN, load_smt_data
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from pyDOE2 import fullfact
SEED = 101
def get_practice_data(random=False):
"""
Return practice data for two-dimensional Rastrigin function
:param: random -- boolean, True = random sampling, False = full-factorial sampling
:return: (X, Y, J) -- np arrays of shapes (n_x, m), (n_y, m), (n_y, n_x, m) where n_x = 2 and n_y = 1 and m = 15^2
"""
# Response (N-dimensional Rastrigin)
f = lambda x: np.sum(x**2 - 10 * np.cos(2 * np.pi * x) + 10, axis=1)
df = lambda x, j: 2 * x[:, j] + 20 * np.pi * np.sin(2 * np.pi * x[:, j])
# Domain
lb = -1.0 # minimum bound (same for all dimensions)
ub = 1.5 # maximum bound (same for all dimensions)
# Design of experiment (full factorial)
n_x = 2 # number of dimensions
n_y = 1 # number of responses
L = 12 # number of levels per dimension
m = L**n_x # number of training examples that will be generated
if random:
doe = np.random.rand(m, n_x)
else:
levels = [L] * n_x
doe = fullfact(levels)
doe = (doe - 0.0) / (L - 1.0) # values normalized such that 0 < doe < 1
assert doe.shape == (m, n_x)
# Apply bounds
X = lb + (ub - lb) * doe
# Evaluate response
Y = f(X).reshape((m, 1))
# Evaluate partials
J = np.zeros((m, n_x, n_y))
for j in range(0, n_x):
J[:, j, :] = df(X, j).reshape((m, 1))
return X.T, Y.T, J.T
def contour_plot(genn, title="GENN"):
"""Make contour plots of 2D Rastrigin function and compare to Neural Net prediction"""
model = genn.model
X_train, _, _ = model.training_data
# Domain
lb = -1.0
ub = 1.5
m = 100
x1 = np.linspace(lb, ub, m)
x2 = np.linspace(lb, ub, m)
X1, X2 = np.meshgrid(x1, x2)
# True response
pi = np.pi
Y_true = (
np.power(X1, 2)
- 10 * np.cos(2 * pi * X1)
+ 10
+ np.power(X2, 2)
- 10 * np.cos(2 * pi * X2)
+ 10
)
# Predicted response
Y_pred = np.zeros((m, m))
for i in range(0, m):
for j in range(0, m):
Y_pred[i, j] = model.evaluate(np.array([X1[i, j], X2[i, j]]).reshape(2, 1))
# Prepare to plot
fig = plt.figure(figsize=(6, 3))
spec = gridspec.GridSpec(ncols=2, nrows=1, wspace=0)
# Plot Truth model
ax1 = fig.add_subplot(spec[0, 0])
ax1.contour(X1, X2, Y_true, 20, cmap="RdGy")
anno_opts = dict(
xy=(0.5, 1.075), xycoords="axes fraction", va="center", ha="center"
)
ax1.annotate("True", **anno_opts)
anno_opts = dict(
xy=(-0.075, 0.5), xycoords="axes fraction", va="center", ha="center"
)
ax1.annotate("X2", **anno_opts)
anno_opts = dict(
xy=(0.5, -0.05), xycoords="axes fraction", va="center", ha="center"
)
ax1.annotate("X1", **anno_opts)
ax1.set_xticks([])
ax1.set_yticks([])
ax1.scatter(X_train[0, :], X_train[1, :], s=5)
ax1.set_xlim(lb, ub)
ax1.set_ylim(lb, ub)
# Plot prediction with gradient enhancement
ax2 = fig.add_subplot(spec[0, 1])
ax2.contour(X1, X2, Y_pred, 20, cmap="RdGy")
anno_opts = dict(
xy=(0.5, 1.075), xycoords="axes fraction", va="center", ha="center"
)
ax2.annotate(title, **anno_opts)
anno_opts = dict(
xy=(0.5, -0.05), xycoords="axes fraction", va="center", ha="center"
)
ax2.annotate("X1", **anno_opts)
ax2.set_xticks([])
ax2.set_yticks([])
plt.show()
def run_demo_2d(
alpha=0.1,
beta1=0.9,
beta2=0.99,
lambd=0.1,
gamma=1.0,
deep=3,
wide=6,
mini_batch_size=None,
iterations=30,
epochs=100,
):
"""
Predict Rastrigin function using neural net and compare against truth model. Provided with proper training data,
the only hyperparameters the user needs to tune are:
:param alpha = learning rate
:param beta1 = adam optimizer parameter
:param beta2 = adam optimizer parameter
:param lambd = regularization coefficient
:param gamma = gradient enhancement coefficient
:param deep = neural net depth
:param wide = neural net width
This restricted list is intentional. The goal was to provide a simple interface for common regression tasks
with the bare necessary tuning parameters. More advanced prediction tasks should consider tensorflow or other
deep learning frameworks. Hopefully, the simplicity of this interface will address a common use case in aerospace
engineering, namely: predicting smooth functions using computational design of experiments.
"""
if gamma > 0.0:
title = "GENN"
else:
title = "NN"
# Practice data
X_train, Y_train, J_train = get_practice_data(random=False)
X_test, Y_test, J_test = get_practice_data(random=True)
# Convert training data to SMT format
xt = X_train.T
yt = Y_train.T
dyt_dxt = J_train[
0
].T # SMT format doesn't handle more than one output at a time, hence J[0]
# Convert test data to SMT format
xv = X_test.T
yv = Y_test.T
dyv_dxv = J_test[
0
].T # SMT format doesn't handle more than one output at a time, hence J[0]
# Initialize GENN object
genn = GENN()
genn.options["alpha"] = alpha
genn.options["beta1"] = beta1
genn.options["beta2"] = beta2
genn.options["lambd"] = lambd
genn.options["gamma"] = gamma
genn.options["deep"] = deep
genn.options["wide"] = wide
genn.options["mini_batch_size"] = mini_batch_size
genn.options["num_epochs"] = epochs
genn.options["num_iterations"] = iterations
genn.options["seed"] = SEED
genn.options["is_print"] = True
# Load data
load_smt_data(
genn, xt, yt, dyt_dxt
) # convenience function that uses SurrogateModel.set_training_values(), etc.
# Train
genn.train()
genn.plot_training_history()
genn.goodness_of_fit(xv, yv, dyv_dxv)
# Contour plot
contour_plot(genn, title=title)
def run_demo_1D(is_gradient_enhancement=True): # pragma: no cover
"""Test and demonstrate GENN using a 1D example"""
# Test function
f = lambda x: x * np.sin(x)
df_dx = lambda x: np.sin(x) + x * np.cos(x)
# Domain
lb = -np.pi
ub = np.pi
# Training data
m = 4
xt = np.linspace(lb, ub, m)
yt = f(xt)
dyt_dxt = df_dx(xt)
# Validation data
xv = lb + np.random.rand(30, 1) * (ub - lb)
yv = f(xv)
dyv_dxv = df_dx(xv)
# Initialize GENN object
genn = GENN()
genn.options["alpha"] = 0.05
genn.options["beta1"] = 0.9
genn.options["beta2"] = 0.99
genn.options["lambd"] = 0.05
genn.options["gamma"] = int(is_gradient_enhancement)
genn.options["deep"] = 2
genn.options["wide"] = 6
genn.options["mini_batch_size"] = 64
genn.options["num_epochs"] = 25
genn.options["num_iterations"] = 100
genn.options["seed"] = SEED
genn.options["is_print"] = True
# Load data
load_smt_data(genn, xt, yt, dyt_dxt)
# Train
genn.train()
genn.plot_training_history()
genn.goodness_of_fit(xv, yv, dyv_dxv)
# Plot comparison
if genn.options["gamma"] == 1.0:
title = "with gradient enhancement"
else:
title = "without gradient enhancement"
x = np.arange(lb, ub, 0.01)
y = f(x)
y_pred = genn.predict_values(x)
fig, ax = plt.subplots()
ax.plot(x, y_pred)
ax.plot(x, y, "k--")
ax.plot(xv, yv, "ro")
ax.plot(xt, yt, "k+", mew=3, ms=10)
ax.set(xlabel="x", ylabel="y", title=title)
ax.legend(["Predicted", "True", "Test", "Train"])
plt.show()
if __name__ == "__main__":
# 1D example: compare with and without gradient enhancement
run_demo_1D(is_gradient_enhancement=False)
run_demo_1D(is_gradient_enhancement=True)
# 2D example: Rastrigin function
run_demo_2d(
alpha=0.1,
beta1=0.9,
beta2=0.99,
lambd=0.1,
gamma=1.0,
deep=3, # 3,
wide=12, # 6,
mini_batch_size=32,
iterations=30,
epochs=25,
)
| 8,592 | 27.453642 | 118 | py |
smt | smt-master/smt/examples/multi_modal/__init__.py | 0 | 0 | 0 | py | |
smt | smt-master/smt/examples/one_D_step/run_one_D_step_rmtb.py | from smt.surrogate_models import RMTB
from smt.examples.one_D_step.one_D_step import get_one_d_step, plot_one_d_step
xt, yt, xlimits = get_one_d_step()
interp = RMTB(
num_ctrl_pts=100,
xlimits=xlimits,
nonlinear_maxiter=20,
solver_tolerance=1e-16,
energy_weight=1e-14,
regularization_weight=0.0,
)
interp.set_training_values(xt, yt)
interp.train()
plot_one_d_step(xt, yt, xlimits, interp)
| 416 | 22.166667 | 78 | py |
smt | smt-master/smt/examples/one_D_step/run_one_D_step_rmtc.py | from smt.surrogate_models import RMTC
from smt.examples.one_D_step.one_D_step import get_one_d_step, plot_one_d_step
xt, yt, xlimits = get_one_d_step()
interp = RMTC(
num_elements=40,
xlimits=xlimits,
nonlinear_maxiter=20,
solver_tolerance=1e-16,
energy_weight=1e-14,
regularization_weight=0.0,
)
interp.set_training_values(xt, yt)
interp.train()
plot_one_d_step(xt, yt, xlimits, interp)
| 415 | 22.111111 | 78 | py |
smt | smt-master/smt/examples/one_D_step/__init__.py | 0 | 0 | 0 | py | |
smt | smt-master/smt/examples/one_D_step/one_D_step.py | import numpy as np
def get_one_d_step():
xt = np.array(
[
0.0000,
0.4000,
0.6000,
0.7000,
0.7500,
0.7750,
0.8000,
0.8500,
0.8750,
0.9000,
0.9250,
0.9500,
0.9750,
1.0000,
1.0250,
1.0500,
1.1000,
1.2000,
1.3000,
1.4000,
1.6000,
1.8000,
2.0000,
],
dtype=np.float64,
)
yt = np.array(
[
0.0130,
0.0130,
0.0130,
0.0130,
0.0130,
0.0130,
0.0130,
0.0132,
0.0135,
0.0140,
0.0162,
0.0230,
0.0275,
0.0310,
0.0344,
0.0366,
0.0396,
0.0410,
0.0403,
0.0390,
0.0360,
0.0350,
0.0345,
],
dtype=np.float64,
)
xlimits = np.array([[0.0, 2.0]])
return xt, yt, xlimits
def plot_one_d_step(xt, yt, limits, interp):
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
num = 500
x = np.linspace(0.0, 2.0, num)
y = interp.predict_values(x)[:, 0]
plt.plot(x, y)
plt.plot(xt, yt, "o")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
| 1,518 | 17.301205 | 44 | py |
smt | smt-master/smt/examples/one_D_step/tests/test_one_D_step.py | import unittest
import matplotlib
matplotlib.use("Agg")
try:
from smt.surrogate_models import RMTB, RMTC
compiled_available = True
except:
compiled_available = False
class Test(unittest.TestCase):
@unittest.skipIf(not compiled_available, "C compilation failed")
def test_rmtb(self):
from smt.examples.one_D_step import run_one_D_step_rmtb
@unittest.skipIf(not compiled_available, "C compilation failed")
def test_rmtc(self):
from smt.examples.one_D_step import run_one_D_step_rmtc
| 532 | 22.173913 | 68 | py |
smt | smt-master/smt/examples/one_D_step/tests/__init__.py | 0 | 0 | 0 | py | |
smt | smt-master/smt/examples/rans_crm_wing/run_rans_crm_wing_rmtb.py | from smt.surrogate_models import RMTB
from smt.examples.rans_crm_wing.rans_crm_wing import (
get_rans_crm_wing,
plot_rans_crm_wing,
)
xt, yt, xlimits = get_rans_crm_wing()
interp = RMTB(
num_ctrl_pts=20, xlimits=xlimits, nonlinear_maxiter=100, energy_weight=1e-12
)
interp.set_training_values(xt, yt)
interp.train()
plot_rans_crm_wing(xt, yt, xlimits, interp)
| 375 | 22.5 | 80 | py |
smt | smt-master/smt/examples/rans_crm_wing/run_rans_crm_wing_rmtc.py | from smt.surrogate_models import RMTC
from smt.examples.rans_crm_wing.rans_crm_wing import (
get_rans_crm_wing,
plot_rans_crm_wing,
)
xt, yt, xlimits = get_rans_crm_wing()
interp = RMTC(
num_elements=20, xlimits=xlimits, nonlinear_maxiter=100, energy_weight=1e-10
)
interp.set_training_values(xt, yt)
interp.train()
plot_rans_crm_wing(xt, yt, xlimits, interp)
| 375 | 22.5 | 80 | py |
smt | smt-master/smt/examples/rans_crm_wing/rans_crm_wing.py | import numpy as np
raw = np.array(
[
[
2.000000000000000000e00,
4.500000000000000111e-01,
1.536799999999999972e-02,
3.674239999999999728e-01,
5.592279999999999474e-01,
-1.258039999999999992e-01,
-1.248699999999999984e-02,
],
[
3.500000000000000000e00,
4.500000000000000111e-01,
1.985100000000000059e-02,
4.904470000000000218e-01,
7.574600000000000222e-01,
-1.615260000000000029e-01,
8.987000000000000197e-03,
],
[
5.000000000000000000e00,
4.500000000000000111e-01,
2.571000000000000021e-02,
6.109189999999999898e-01,
9.497949999999999449e-01,
-1.954619999999999969e-01,
4.090900000000000092e-02,
],
[
6.500000000000000000e00,
4.500000000000000111e-01,
3.304200000000000192e-02,
7.266120000000000356e-01,
1.131138999999999895e00,
-2.255890000000000117e-01,
8.185399999999999621e-02,
],
[
8.000000000000000000e00,
4.500000000000000111e-01,
4.318999999999999923e-02,
8.247250000000000414e-01,
1.271487000000000034e00,
-2.397040000000000004e-01,
1.217659999999999992e-01,
],
[
0.000000000000000000e00,
5.799999999999999600e-01,
1.136200000000000057e-02,
2.048760000000000026e-01,
2.950280000000000125e-01,
-7.882100000000000217e-02,
-2.280099999999999835e-02,
],
[
1.500000000000000000e00,
5.799999999999999600e-01,
1.426000000000000011e-02,
3.375619999999999732e-01,
5.114130000000000065e-01,
-1.189420000000000061e-01,
-1.588200000000000028e-02,
],
[
3.000000000000000000e00,
5.799999999999999600e-01,
1.866400000000000003e-02,
4.687450000000000228e-01,
7.240400000000000169e-01,
-1.577669999999999906e-01,
3.099999999999999891e-03,
],
[
4.500000000000000000e00,
5.799999999999999600e-01,
2.461999999999999952e-02,
5.976639999999999731e-01,
9.311709999999999710e-01,
-1.944160000000000055e-01,
3.357500000000000068e-02,
],
[
6.000000000000000000e00,
5.799999999999999600e-01,
3.280700000000000283e-02,
7.142249999999999988e-01,
1.111707999999999918e00,
-2.205870000000000053e-01,
7.151699999999999724e-02,
],
[
0.000000000000000000e00,
6.800000000000000488e-01,
1.138800000000000055e-02,
2.099310000000000065e-01,
3.032230000000000203e-01,
-8.187899999999999345e-02,
-2.172699999999999979e-02,
],
[
1.500000000000000000e00,
6.800000000000000488e-01,
1.458699999999999927e-02,
3.518569999999999753e-01,
5.356630000000000003e-01,
-1.257649999999999879e-01,
-1.444800000000000077e-02,
],
[
3.000000000000000000e00,
6.800000000000000488e-01,
1.952800000000000022e-02,
4.924879999999999813e-01,
7.644769999999999621e-01,
-1.678040000000000087e-01,
6.023999999999999841e-03,
],
[
4.500000000000000000e00,
6.800000000000000488e-01,
2.666699999999999973e-02,
6.270339999999999803e-01,
9.801630000000000065e-01,
-2.035240000000000105e-01,
3.810000000000000192e-02,
],
[
6.000000000000000000e00,
6.800000000000000488e-01,
3.891800000000000120e-02,
7.172730000000000494e-01,
1.097855999999999943e00,
-2.014620000000000022e-01,
6.640000000000000069e-02,
],
[
0.000000000000000000e00,
7.500000000000000000e-01,
1.150699999999999987e-02,
2.149069999999999869e-01,
3.115740000000000176e-01,
-8.498999999999999611e-02,
-2.057700000000000154e-02,
],
[
1.250000000000000000e00,
7.500000000000000000e-01,
1.432600000000000019e-02,
3.415969999999999840e-01,
5.199390000000000400e-01,
-1.251009999999999900e-01,
-1.515400000000000080e-02,
],
[
2.500000000000000000e00,
7.500000000000000000e-01,
1.856000000000000011e-02,
4.677589999999999804e-01,
7.262499999999999512e-01,
-1.635169999999999957e-01,
3.989999999999999949e-04,
],
[
3.750000000000000000e00,
7.500000000000000000e-01,
2.472399999999999945e-02,
5.911459999999999493e-01,
9.254930000000000101e-01,
-1.966150000000000120e-01,
2.524900000000000061e-02,
],
[
5.000000000000000000e00,
7.500000000000000000e-01,
3.506800000000000195e-02,
7.047809999999999908e-01,
1.097736000000000045e00,
-2.143069999999999975e-01,
5.321300000000000335e-02,
],
[
0.000000000000000000e00,
8.000000000000000444e-01,
1.168499999999999921e-02,
2.196390000000000009e-01,
3.197160000000000002e-01,
-8.798200000000000465e-02,
-1.926999999999999894e-02,
],
[
1.250000000000000000e00,
8.000000000000000444e-01,
1.481599999999999931e-02,
3.553939999999999877e-01,
5.435950000000000504e-01,
-1.317419999999999980e-01,
-1.345599999999999921e-02,
],
[
2.500000000000000000e00,
8.000000000000000444e-01,
1.968999999999999917e-02,
4.918299999999999894e-01,
7.669930000000000359e-01,
-1.728079999999999894e-01,
3.756999999999999923e-03,
],
[
3.750000000000000000e00,
8.000000000000000444e-01,
2.785599999999999882e-02,
6.324319999999999942e-01,
9.919249999999999456e-01,
-2.077100000000000057e-01,
3.159800000000000109e-02,
],
[
5.000000000000000000e00,
8.000000000000000444e-01,
4.394300000000000289e-02,
7.650689999999999991e-01,
1.188355999999999968e00,
-2.332680000000000031e-01,
5.645000000000000018e-02,
],
[
0.000000000000000000e00,
8.299999999999999600e-01,
1.186100000000000002e-02,
2.232899999999999885e-01,
3.261100000000000110e-01,
-9.028400000000000314e-02,
-1.806500000000000120e-02,
],
[
1.000000000000000000e00,
8.299999999999999600e-01,
1.444900000000000004e-02,
3.383419999999999761e-01,
5.161710000000000464e-01,
-1.279530000000000112e-01,
-1.402400000000000001e-02,
],
[
2.000000000000000000e00,
8.299999999999999600e-01,
1.836799999999999891e-02,
4.554270000000000262e-01,
7.082190000000000429e-01,
-1.642339999999999911e-01,
-1.793000000000000106e-03,
],
[
3.000000000000000000e00,
8.299999999999999600e-01,
2.466899999999999996e-02,
5.798410000000000508e-01,
9.088819999999999677e-01,
-2.004589999999999983e-01,
1.892900000000000138e-02,
],
[
4.000000000000000000e00,
8.299999999999999600e-01,
3.700400000000000217e-02,
7.012720000000000065e-01,
1.097366000000000064e00,
-2.362420000000000075e-01,
3.750699999999999867e-02,
],
[
0.000000000000000000e00,
8.599999999999999867e-01,
1.224300000000000041e-02,
2.278100000000000125e-01,
3.342720000000000136e-01,
-9.307600000000000595e-02,
-1.608400000000000107e-02,
],
[
1.000000000000000000e00,
8.599999999999999867e-01,
1.540700000000000056e-02,
3.551839999999999997e-01,
5.433130000000000459e-01,
-1.364730000000000110e-01,
-1.162200000000000039e-02,
],
[
2.000000000000000000e00,
8.599999999999999867e-01,
2.122699999999999934e-02,
4.854620000000000046e-01,
7.552919999999999634e-01,
-1.817850000000000021e-01,
1.070999999999999903e-03,
],
[
3.000000000000000000e00,
8.599999999999999867e-01,
3.178899999999999781e-02,
6.081849999999999756e-01,
9.510380000000000500e-01,
-2.252020000000000133e-01,
1.540799999999999982e-02,
],
[
4.000000000000000000e00,
8.599999999999999867e-01,
4.744199999999999806e-02,
6.846989999999999466e-01,
1.042564000000000046e00,
-2.333600000000000119e-01,
2.035400000000000056e-02,
],
]
)
def get_rans_crm_wing():
# data structure:
# alpha, mach, cd, cl, cmx, cmy, cmz
deg2rad = np.pi / 180.0
xt = np.array(raw[:, 0:2])
yt = np.array(raw[:, 2:4])
xlimits = np.array([[-3.0, 10.0], [0.4, 0.90]])
xt[:, 0] *= deg2rad
xlimits[0, :] *= deg2rad
return xt, yt, xlimits
def plot_rans_crm_wing(xt, yt, limits, interp):
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
rad2deg = 180.0 / np.pi
num = 500
num_a = 50
num_M = 50
x = np.zeros((num, 2))
colors = ["b", "g", "r", "c", "m", "k", "y"]
nrow = 3
ncol = 2
plt.close()
fig, axs = plt.subplots(3, 2, figsize=(15, 15))
# -----------------------------------------------------------------------------
mach_numbers = [0.45, 0.68, 0.80, 0.86]
legend_entries = []
alpha_sweep = np.linspace(0.0, 8.0, num)
for ind, mach in enumerate(mach_numbers):
x[:, 0] = alpha_sweep / rad2deg
x[:, 1] = mach
CD = interp.predict_values(x)[:, 0]
CL = interp.predict_values(x)[:, 1]
mask = np.abs(xt[:, 1] - mach) < 1e-10
axs[0, 0].plot(xt[mask, 0] * rad2deg, yt[mask, 0], "o" + colors[ind])
axs[0, 0].plot(alpha_sweep, CD, colors[ind])
mask = np.abs(xt[:, 1] - mach) < 1e-10
axs[0, 1].plot(xt[mask, 0] * rad2deg, yt[mask, 1], "o" + colors[ind])
axs[0, 1].plot(alpha_sweep, CL, colors[ind])
legend_entries.append("M={}".format(mach))
legend_entries.append("exact")
axs[0, 0].set(xlabel="alpha (deg)", ylabel="CD")
axs[0, 0].legend(legend_entries)
axs[0, 1].set(xlabel="alpha (deg)", ylabel="CL")
axs[0, 1].legend(legend_entries)
# -----------------------------------------------------------------------------
alphas = [2.0, 4.0, 6.0]
legend_entries = []
mach_sweep = np.linspace(0.45, 0.86, num)
for ind, alpha in enumerate(alphas):
x[:, 0] = alpha / rad2deg
x[:, 1] = mach_sweep
CD = interp.predict_values(x)[:, 0]
CL = interp.predict_values(x)[:, 1]
axs[1, 0].plot(mach_sweep, CD, colors[ind])
axs[1, 1].plot(mach_sweep, CL, colors[ind])
legend_entries.append("alpha={}".format(alpha))
axs[1, 0].set(xlabel="Mach number", ylabel="CD")
axs[1, 0].legend(legend_entries)
axs[1, 1].set(xlabel="Mach number", ylabel="CL")
axs[1, 1].legend(legend_entries)
# -----------------------------------------------------------------------------
x = np.zeros((num_a, num_M, 2))
x[:, :, 0] = np.outer(np.linspace(0.0, 8.0, num_a), np.ones(num_M)) / rad2deg
x[:, :, 1] = np.outer(np.ones(num_a), np.linspace(0.45, 0.86, num_M))
CD = interp.predict_values(x.reshape((num_a * num_M, 2)))[:, 0].reshape(
(num_a, num_M)
)
CL = interp.predict_values(x.reshape((num_a * num_M, 2)))[:, 1].reshape(
(num_a, num_M)
)
axs[2, 0].plot(xt[:, 1], xt[:, 0] * rad2deg, "o")
axs[2, 0].contour(x[:, :, 1], x[:, :, 0] * rad2deg, CD, 20)
pcm1 = axs[2, 0].pcolormesh(
x[:, :, 1],
x[:, :, 0] * rad2deg,
CD,
cmap=plt.get_cmap("rainbow"),
shading="auto",
)
fig.colorbar(pcm1, ax=axs[2, 0])
axs[2, 0].set(xlabel="Mach number", ylabel="alpha (deg)")
axs[2, 0].set_title("CD")
axs[2, 1].plot(xt[:, 1], xt[:, 0] * rad2deg, "o")
axs[2, 1].contour(x[:, :, 1], x[:, :, 0] * rad2deg, CL, 20)
pcm2 = axs[2, 1].pcolormesh(
x[:, :, 1],
x[:, :, 0] * rad2deg,
CL,
cmap=plt.get_cmap("rainbow"),
shading="auto",
)
fig.colorbar(pcm2, ax=axs[2, 1])
axs[2, 1].set(xlabel="Mach number", ylabel="alpha (deg)")
axs[2, 1].set_title("CL")
plt.show()
| 13,903 | 29.491228 | 83 | py |
smt | smt-master/smt/examples/rans_crm_wing/__init__.py | 0 | 0 | 0 | py | |
smt | smt-master/smt/examples/rans_crm_wing/tests/test_rans_crm_wing.py | import unittest
import matplotlib
matplotlib.use("Agg")
try:
from smt.surrogate_models import RMTB, RMTC
compiled_available = True
except:
compiled_available = False
class Test(unittest.TestCase):
@unittest.skipIf(not compiled_available, "C compilation failed")
def test_rmtb(self):
from smt.examples.rans_crm_wing import run_rans_crm_wing_rmtb
@unittest.skipIf(not compiled_available, "C compilation failed")
def test_rmtc(self):
from smt.examples.rans_crm_wing import run_rans_crm_wing_rmtc
if __name__ == "__main__":
import matplotlib.pyplot as plt
Test().test_rmtb()
plt.savefig("test.pdf")
| 661 | 21.066667 | 69 | py |
smt | smt-master/smt/examples/rans_crm_wing/tests/__init__.py | 0 | 0 | 0 | py | |
smt | smt-master/smt/examples/airfoil_parameters/learning_airfoil_parameters.py | import os
import numpy as np
import csv
WORKDIR = os.path.dirname(os.path.abspath(__file__))
def load_NACA4412_modeshapes():
return np.loadtxt(open(os.path.join(WORKDIR, "modes_NACA4412_ct.txt")))
def load_cd_training_data():
with open(os.path.join(WORKDIR, "cd_x_y.csv")) as file:
reader = csv.reader(file, delimiter=";")
values = np.array(list(reader), dtype=np.float32)
dim_values = values.shape
x = values[:, : dim_values[1] - 1]
y = values[:, -1]
with open(os.path.join(WORKDIR, "cd_dy.csv")) as file:
reader = csv.reader(file, delimiter=";")
dy = np.array(list(reader), dtype=np.float32)
return x, y, dy
def plot_predictions(airfoil_modeshapes, Ma, cd_model):
    """Plot the surrogate's Cd-vs-alpha curve against the ADflow reference.

    Parameters
    ----------
    airfoil_modeshapes : array
        14 airfoil mode-shape coefficients (first 14 model inputs).
    Ma : float
        Mach number, held constant across the alpha sweep.
    cd_model : surrogate
        Trained model exposing ``predict_values``.
    """
    import matplotlib
    matplotlib.use("Agg")
    import matplotlib.pyplot as plt
    # alpha is sampled at 50 points from -1 deg in 0.16-deg steps
    # (up to ~6.84 deg) while Ma is kept constant
    inputs = np.zeros(shape=(1, 15))
    inputs[0, :14] = airfoil_modeshapes
    inputs[0, -1] = Ma
    inputs = np.tile(inputs, (50, 1))
    alpha = np.atleast_2d([-1 + 0.16 * i for i in range(50)]).T
    # Final input layout: 14 mode shapes, Ma, alpha (one row per alpha).
    inputs = np.concatenate((inputs, alpha), axis=1)
    # Predict Cd
    cd_pred = cd_model.predict_values(inputs)
    # Load ADflow Cd reference (first CSV row is a header and is skipped)
    with open(os.path.join(WORKDIR, "NACA4412-ADflow-alpha-cd.csv")) as file:
        reader = csv.reader(file, delimiter=" ")
        cd_adflow = np.array(list(reader)[1:], dtype=np.float32)
    plt.plot(alpha, cd_pred)
    plt.plot(cd_adflow[:, 0], cd_adflow[:, 1])
    plt.grid(True)
    plt.legend(["Surrogate", "ADflow"])
    plt.title("Drag coefficient")
    plt.xlabel("Alpha")
    plt.ylabel("Cd")
    plt.show()
| 1,709 | 28.482759 | 77 | py |
smt | smt-master/smt/examples/airfoil_parameters/run_genn.py | """
Predicting Airfoil Aerodynamics through data by Raul Carreira Rufato and Prof. Joseph Morlier
"""
import os
import numpy as np
import csv
from smt.examples.airfoil_parameters.learning_airfoil_parameters import (
load_cd_training_data,
load_NACA4412_modeshapes,
plot_predictions,
)
from sklearn.model_selection import train_test_split
from smt.surrogate_models.genn import GENN, load_smt_data
# Load the drag-coefficient dataset: inputs x, outputs y, gradients dy.
x, y, dy = load_cd_training_data()
# splitting the dataset (80% train / 20% test)
x_train, x_test, y_train, y_test, dy_train, dy_test = train_test_split(
    x, y, dy, train_size=0.8
)
# building and training the GENN (gradient-enhanced neural network)
genn = GENN(print_global=False)
# learning rate that controls optimizer step size
genn.options["alpha"] = 0.001
# lambd = 0. = no regularization, lambd > 0 = regularization
genn.options["lambd"] = 0.1
# gamma = 0. = no grad-enhancement, gamma > 0 = grad-enhancement
genn.options["gamma"] = 1.0
# number of hidden layers
genn.options["deep"] = 2
# number of nodes per hidden layer
genn.options["wide"] = 6
# used to divide data into training batches (use for large data sets)
genn.options["mini_batch_size"] = 256
# number of passes through data
genn.options["num_epochs"] = 5
# number of optimizer iterations per mini-batch
genn.options["num_iterations"] = 10
# print output (or not)
genn.options["is_print"] = False
# convenience function to read in data that is in SMT format
load_smt_data(genn, x_train, y_train, dy_train)
genn.train()
## non-API function to plot training history (to check convergence)
# genn.plot_training_history()
## non-API function to check accuracy of regression
# genn.goodness_of_fit(x_test, y_test, dy_test)
# API function to predict values at new (unseen) points
y_pred = genn.predict_values(x_test)
# Now we will use the trained model to make a prediction with a not-learned form.
# Example Prediction for NACA4412.
# Airfoil mode shapes should be determined according to Bouhlel, M.A., He, S., and Martins,
# J.R.R.A., mSANN Model Benchmarks, Mendeley Data, 2019. https://doi.org/10.17632/ngpd634smf.1
# Comparison of results with Adflow software for an alpha range from -1 to 7 degrees. Re = 3000000
airfoil_modeshapes = load_NACA4412_modeshapes()
Ma = 0.3
alpha = 0
# input in neural network is created out of airfoil mode shapes, Mach number and alpha
# airfoil_modeshapes: computed mode_shapes of random airfoil geometry with parameterise_airfoil
# Ma: desired Mach number for evaluation in range [0.3,0.6]
# alpha: scalar in range [-1, 6]
# Input vector layout: 14 mode shapes, then Mach number, then alpha.
# Named model_input to avoid shadowing the built-in input().
model_input = np.zeros(shape=(1, 16))
model_input[0, :14] = airfoil_modeshapes
model_input[0, 14] = Ma
model_input[0, -1] = alpha
# prediction
cd_pred = genn.predict_values(model_input)
print("Drag coefficient prediction (cd): ", cd_pred[0, 0])
plot_predictions(airfoil_modeshapes, Ma, genn)
| 2,752 | 33.848101 | 98 | py |
smt | smt-master/smt/sampling_methods/lhs.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
LHS sampling; uses the pyDOE2 package.
"""
from pyDOE2 import lhs
from scipy.spatial.distance import pdist, cdist
import numpy as np
from smt.sampling_methods.sampling_method import ScaledSamplingMethod
class LHS(ScaledSamplingMethod):
    """Latin Hypercube Sampling over the unit hypercube (scaled to xlimits).

    Delegates to pyDOE2 for the classical criteria and to an in-house
    maximin-ESE optimization when criterion == "ese".
    """
    def _initialize(self, **kwargs):
        # Declare LHS-specific options (the base class declares "xlimits").
        self.options.declare(
            "criterion",
            "c",
            values=[
                "center",
                "maximin",
                "centermaximin",
                "correlation",
                "c",
                "m",
                "cm",
                "corr",
                "ese",
            ],
            types=str,
            desc="criterion used to construct the LHS design "
            + "c, m, cm and corr are abbreviation of center, maximin, centermaximin and correlation, respectively",
        )
        self.options.declare(
            "random_state",
            types=(type(None), int, np.random.RandomState),
            desc="Numpy RandomState object or seed number which controls random draws",
        )
        # Update options values passed by the user here to get 'random_state' option
        self.options.update(kwargs)
        # RandomState is and has to be initialized once at constructor time,
        # not in _compute to avoid yielding the same dataset again and again
        if isinstance(self.options["random_state"], np.random.RandomState):
            self.random_state = self.options["random_state"]
        elif isinstance(self.options["random_state"], int):
            self.random_state = np.random.RandomState(self.options["random_state"])
        else:
            # No seed given: fall back to an unseeded (non-reproducible) state.
            self.random_state = np.random.RandomState()
def _compute(self, nt):
"""
Implemented by sampling methods to compute the requested number of sampling points.
The number of dimensions (nx) is determined based on `xlimits.shape[0]`.
Arguments
---------
nt : int
Number of points requested.
Returns
-------
ndarray[nt, nx]
The sampling locations in the unit hypercube.
"""
xlimits = self.options["xlimits"]
nx = xlimits.shape[0]
if self.options["criterion"] != "ese":
return lhs(
nx,
samples=nt,
criterion=self.options["criterion"],
random_state=self.random_state,
)
elif self.options["criterion"] == "ese":
return self._ese(nx, nt)
    def _maximinESE(
        self,
        X,
        T0=None,
        outer_loop=None,
        inner_loop=None,
        J=20,
        tol=1e-3,
        p=10,
        return_hist=False,
        fixed_index=[],
    ):
        """
        Returns an optimized design starting from design X. For more information,
        see R. Jin, W. Chen and A. Sudjianto (2005):
        An efficient algorithm for constructing optimal design of computer
        experiments. Journal of Statistical Planning and Inference, 134:268-287.
        Parameters
        ----------
        X : array
            The design to be optimized
        T0 : double, optional
            Initial temperature of the algorithm.
            If set to None, a standard temperature is used.
        outer_loop : integer, optional
            The number of iterations of the outer loop. If None, set to
            min(1.5*dimension of LHS, 30)
        inner_loop : integer, optional
            The number of iterations of the inner loop. If None, set to
            min(20*dimension of LHS, 100)
        J : integer, optional
            Number of replications of the plan in the inner loop. Default to 20
        tol : double, optional
            Tolerance for modification of Temperature T. Default to 0.001
        p : integer, optional
            Power used in the calculation of the PhiP criterion. Default to 10
        return_hist : boolean, optional
            If set to True, the function returns information about the behaviour of
            temperature, PhiP criterion and probability of acceptance during the
            process of optimization. Default to False
        fixed_index : list, optional
            Row indices that must never be moved by the exchanges.
        Returns
        ------
        X_best : array
            The optimized design
        hist : dictionnary
            If return_hist is set to True, returns a dictionnary containing the phiP
            ('PhiP') criterion, the temperature ('T') and the probability of
            acceptance ('proba') during the optimization.
        """
        # Initialize parameters if not defined
        if T0 is None:
            T0 = 0.005 * self._PhiP(X, p=p)
        if inner_loop is None:
            inner_loop = min(20 * X.shape[1], 100)
        if outer_loop is None:
            outer_loop = min(int(1.5 * X.shape[1]), 30)
        T = T0
        # NOTE(review): for a numpy array X[:] is a *view*, not a copy; this
        # is harmless here because X_ is only ever rebound to fresh copies
        # (l_X[j] = X_.copy()) and never mutated in place — confirm if the
        # exchange procedure ever changes to in-place updates.
        X_ = X[:]  # copy of initial plan
        X_best = X_[:]
        d = X.shape[1]
        PhiP_ = self._PhiP(X_best, p=p)
        PhiP_best = PhiP_
        # Histories of temperature, acceptance probability and criterion.
        hist_T = list()
        hist_proba = list()
        hist_PhiP = list()
        hist_PhiP.append(PhiP_best)
        # Outer loop
        for z in range(outer_loop):
            PhiP_oldbest = PhiP_best
            n_acpt = 0
            n_imp = 0
            # Inner loop
            for i in range(inner_loop):
                # Column on which the exchange is performed (cycled).
                modulo = (i + 1) % d
                l_X = list()
                l_PhiP = list()
                # Build J different plans with a single exchange procedure
                # See description of PhiP_exchange procedure
                for j in range(J):
                    l_X.append(X_.copy())
                    l_PhiP.append(
                        self._PhiP_exchange(
                            l_X[j], k=modulo, PhiP_=PhiP_, p=p, fixed_index=fixed_index
                        )
                    )
                l_PhiP = np.asarray(l_PhiP)
                k = np.argmin(l_PhiP)
                PhiP_try = l_PhiP[k]
                # Threshold of acceptance (simulated-annealing style: worse
                # plans may be accepted with probability controlled by T)
                if PhiP_try - PhiP_ <= T * self.random_state.rand(1)[0]:
                    PhiP_ = PhiP_try
                    n_acpt = n_acpt + 1
                    X_ = l_X[k]
                    # Best plan retained
                    if PhiP_ < PhiP_best:
                        X_best = X_
                        PhiP_best = PhiP_
                        n_imp = n_imp + 1
            hist_PhiP.append(PhiP_best)
            p_accpt = float(n_acpt) / inner_loop  # probability of acceptance
            p_imp = float(n_imp) / inner_loop  # probability of improvement
            hist_T.extend(inner_loop * [T])
            hist_proba.extend(inner_loop * [p_accpt])
            # Temperature update schedule from Jin/Chen/Sudjianto (2005).
            if PhiP_best - PhiP_oldbest < tol:
                # flag_imp = 1
                if p_accpt >= 0.1 and p_imp < p_accpt:
                    T = 0.8 * T
                elif p_accpt >= 0.1 and p_imp == p_accpt:
                    pass
                else:
                    T = T / 0.8
            else:
                # flag_imp = 0
                if p_accpt <= 0.1:
                    T = T / 0.7
                else:
                    T = 0.9 * T
        hist = {"PhiP": hist_PhiP, "T": hist_T, "proba": hist_proba}
        if return_hist:
            return X_best, hist
        else:
            return X_best
def _PhiP(self, X, p=10):
"""
Calculates the PhiP criterion of the design X with power p.
X : array_like
The design where to calculate PhiP
p : integer
The power used for the calculation of PhiP (default to 10)
"""
return ((pdist(X) ** (-p)).sum()) ** (1.0 / p)
    def _PhiP_exchange(self, X, k, PhiP_, p, fixed_index):
        """
        Modifies X with a single exchange algorithm and calculates the corresponding
        PhiP criterion. Internal use.
        Optimized calculation of the PhiP criterion. For more information, see:
        R. Jin, W. Chen and A. Sudjianto (2005):
        An efficient algorithm for constructing optimal design of computer
        experiments. Journal of Statistical Planning and Inference, 134:268-287.
        Parameters
        ----------
        X : array_like
            The initial design (will be modified during procedure)
        k : integer
            The column where the exchange is proceeded
        PhiP_ : double
            The PhiP criterion of the initial design X
        p : integer
            The power used for the calculation of PhiP
        fixed_index : list
            Row indices that must never be chosen for the exchange
        Returns
        ------
        res : double
            The PhiP criterion of the modified design X
        """
        # Choose two (different) random rows to perform the exchange
        i1 = self.random_state.randint(X.shape[0])
        while i1 in fixed_index:
            i1 = self.random_state.randint(X.shape[0])
        i2 = self.random_state.randint(X.shape[0])
        while i2 == i1 or i2 in fixed_index:
            i2 = self.random_state.randint(X.shape[0])
        # Distances from rows i1/i2 to all other rows, before the exchange
        X_ = np.delete(X, [i1, i2], axis=0)
        dist1 = cdist([X[i1, :]], X_)
        dist2 = cdist([X[i2, :]], X_)
        # Distances after swapping column k between rows i1 and i2, updated
        # incrementally (only the column-k terms change) instead of
        # recomputing the full pairwise-distance matrix
        d1 = np.sqrt(
            dist1**2 + (X[i2, k] - X_[:, k]) ** 2 - (X[i1, k] - X_[:, k]) ** 2
        )
        d2 = np.sqrt(
            dist2**2 - (X[i2, k] - X_[:, k]) ** 2 + (X[i1, k] - X_[:, k]) ** 2
        )
        # Fold the changed distance terms into the previous PhiP value
        res = (
            PhiP_**p + (d1 ** (-p) - dist1 ** (-p) + d2 ** (-p) - dist2 ** (-p)).sum()
        ) ** (1.0 / p)
        # Apply the exchange in place on the caller's copy of the design
        X[i1, k], X[i2, k] = X[i2, k], X[i1, k]
        return res
def _ese(self, dim, nt, fixed_index=[], P0=[]):
"""
Parameters
----------
fixed_index : list
When running an "ese" optimization, we can fix the indexes of
the points that we do not want to modify
"""
# Parameters of maximinESE procedure
if len(fixed_index) == 0:
P0 = lhs(dim, nt, criterion=None, random_state=self.random_state)
else:
P0 = P0
self.random_state = np.random.RandomState()
J = 20
outer_loop = min(int(1.5 * dim), 30)
inner_loop = min(20 * dim, 100)
P, _ = self._maximinESE(
P0,
outer_loop=outer_loop,
inner_loop=inner_loop,
J=J,
tol=1e-3,
p=10,
return_hist=True,
fixed_index=fixed_index,
)
return P
    def expand_lhs(self, x, n_points, method="basic"):
        """
        Given a Latin Hypercube Sample (LHS) "x", returns an expanded LHS
        by adding "n_points" new points.
        Parameters
        ----------
        x : array
            Initial LHS.
        n_points : integer
            Number of points that are to be added to the expanded LHS.
        method : str, optional
            Methodology for the construction of the expanded LHS.
            The default is "basic". The other option is "ese" to use the
            ese optimization
        Returns
        -------
        x_new : array
            Expanded LHS.
        """
        xlimits = self.options["xlimits"]
        new_num = len(x) + n_points
        # The LHS property can only be preserved when the final size is a
        # multiple of the initial one; otherwise warn but proceed anyway.
        if new_num % len(x) != 0:
            print(
                "WARNING: The added number of points is not a "
                "multiple of the initial number of points."
                "Thus, it cannot be ensured that the output is an LHS."
            )
        # Evenly spaced intervals with the final dimension of the LHS
        intervals = []
        for i in range(len(xlimits)):
            intervals.append(np.linspace(xlimits[i][0], xlimits[i][1], new_num + 1))
        # Creates a subspace with the rows and columns that have no points
        # in the new space
        subspace_limits = [[]] * len(xlimits)
        subspace_bool = []
        for i in range(len(xlimits)):
            subspace_limits[i] = []
            # subspace_bool[i][j][kk] is True when existing point kk falls
            # inside interval j of dimension i.
            subspace_bool.append(
                [
                    [
                        intervals[i][j] < x[kk][i] < intervals[i][j + 1]
                        for kk in range(len(x))
                    ]
                    for j in range(len(intervals[i]) - 1)
                ]
            )
            # Keep only the intervals that contain no existing point.
            [
                subspace_limits[i].append([intervals[i][ii], intervals[i][ii + 1]])
                for ii in range(len(subspace_bool[i]))
                if not (True in subspace_bool[i][ii])
            ]
        # Sampling of the new subspace
        sampling_new = LHS(xlimits=np.array([[0.0, 1.0]] * len(xlimits)))
        x_subspace = sampling_new(n_points)
        column_index = 0
        sorted_arr = x_subspace[x_subspace[:, column_index].argsort()]
        # Scale each unit-cube sample into one of the empty intervals.
        for j in range(len(xlimits)):
            for i in range(len(sorted_arr)):
                sorted_arr[i, j] = subspace_limits[j][i][0] + sorted_arr[i, j] * (
                    subspace_limits[j][i][1] - subspace_limits[j][i][0]
                )
        # Shuffle each column independently to break the sort-induced
        # correlation between dimensions.
        H = np.zeros_like(sorted_arr)
        for j in range(len(xlimits)):
            order = np.random.permutation(len(sorted_arr))
            H[:, j] = sorted_arr[order, j]
        x_new = np.concatenate((x, H), axis=0)
        if method == "ese":
            # Sampling of the new subspace: re-optimize the combined design
            # while keeping the original points fixed.
            sampling_new = LHS(xlimits=xlimits, criterion="ese")
            x_new = sampling_new._ese(
                len(x_new), len(x_new), fixed_index=np.arange(0, len(x), 1), P0=x_new
            )
        return x_new
| 13,432 | 30.385514 | 115 | py |
smt | smt-master/smt/sampling_methods/sampling_method.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
Base class for sampling algorithms.
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from smt.utils.options_dictionary import OptionsDictionary
class SamplingMethod(metaclass=ABCMeta):
    """Abstract base class for all sampling algorithms.

    Subclasses declare options in ``_initialize`` and produce points in
    ``_compute``; instances are called with the requested number of points.
    """

    def __init__(self, **kwargs):
        """
        Constructor where values of options can be passed in.

        For the list of options, see the documentation for the problem being used.

        Parameters
        ----------
        **kwargs : named arguments
            Set of options that can be optionally set; each option must have been declared.

        Examples
        --------
        >>> import numpy as np
        >>> from smt.sampling_methods import Random
        >>> sampling = Random(xlimits=np.arange(2).reshape((1, 2)))
        """
        self.options = OptionsDictionary()
        self.options.declare(
            "xlimits",
            types=np.ndarray,
            desc="The interval of the domain in each dimension with shape nx x 2 (required)",
        )
        # Let the subclass declare its own options before applying the
        # user-supplied values.
        self._initialize(**kwargs)
        self.options.update(kwargs)

    def _initialize(self, **kwargs) -> None:
        """
        Implemented by sampling methods to declare options
        and/or use these optional values for initialization (optional)

        Parameters
        ----------
        **kwargs : named arguments passed by the user
            Set of options that can be optionally set

        Examples
        --------
        self.options.declare('option_name', default_value, types=(bool, int), desc='description')
        """
        pass

    def __call__(self, nt: int) -> np.ndarray:
        """
        Compute the requested number of sampling points.

        The number of dimensions (nx) is determined based on `xlimits.shape[0]`.

        Arguments
        ---------
        nt : int
            Number of points requested.

        Returns
        -------
        ndarray[nt, nx]
            The sampling locations in the input space.
        """
        return self._compute(nt)

    @abstractmethod
    def _compute(self, nt: int) -> np.ndarray:
        """
        Implemented by sampling methods to compute the requested number of sampling points.

        The number of dimensions (nx) is determined based on `xlimits.shape[0]`.

        Arguments
        ---------
        nt : int
            Number of points requested.

        Returns
        -------
        ndarray[nt, nx]
            The sampling locations in the input space.
        """
        # NotImplementedError is the idiomatic signal for an un-overridden
        # abstract method (the previous bare Exception was nonstandard).
        raise NotImplementedError(
            "This sampling method has not been implemented correctly"
        )
class ScaledSamplingMethod(SamplingMethod):
    """This class describes an sample method which generates samples in the unit hypercube.

    The __call__ method does scale the generated samples accordingly to the defined xlimits.
    """

    def __call__(self, nt: int) -> np.ndarray:
        """
        Compute the requested number of sampling points.

        The number of dimensions (nx) is determined based on `xlimits.shape[0]`.

        Arguments
        ---------
        nt : int
            Number of points requested.

        Returns
        -------
        ndarray[nt, nx]
            The sampling locations in the input space.
        """
        # Subclasses produce unit-hypercube samples; scale them to xlimits.
        return _scale_to_xlimits(self._compute(nt), self.options["xlimits"])

    @abstractmethod
    def _compute(self, nt: int) -> np.ndarray:
        """
        Implemented by sampling methods to compute the requested number of sampling points.

        The number of dimensions (nx) is determined based on `xlimits.shape[0]`.

        Arguments
        ---------
        nt : int
            Number of points requested.

        Returns
        -------
        ndarray[nt, nx]
            The sampling locations in the unit hypercube.
        """
        # NotImplementedError is the idiomatic signal for an un-overridden
        # abstract method (the previous bare Exception was nonstandard).
        raise NotImplementedError(
            "This sampling method has not been implemented correctly"
        )
def _scale_to_xlimits(samples: np.ndarray, xlimits: np.ndarray) -> np.ndarray:
"""Scales the samples from the unit hypercube to the specified limits.
Parameters
----------
samples : np.ndarray
The samples with coordinates in [0,1]
xlimits : np.ndarray
The xlimits
Returns
-------
np.ndarray
The scaled samples.
"""
nx = xlimits.shape[0]
for kx in range(nx):
samples[:, kx] = xlimits[kx, 0] + samples[:, kx] * (
xlimits[kx, 1] - xlimits[kx, 0]
)
return samples
| 4,516 | 27.23125 | 97 | py |
smt | smt-master/smt/sampling_methods/full_factorial.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
Full-factorial sampling.
"""
import numpy as np
from smt.sampling_methods.sampling_method import ScaledSamplingMethod
class FullFactorial(ScaledSamplingMethod):
    """Full-factorial sampling: a regular grid over the unit hypercube."""

    def _initialize(self, **kwargs):
        # Declare FullFactorial-specific options (base declares "xlimits").
        self.options.declare(
            "weights",
            values=None,
            types=(list, np.ndarray),
            desc="relative sampling weights for each nx dimensions",
        )
        self.options.declare(
            "clip",
            default=False,
            types=bool,
            desc="round number of samples to the sampling number product of each nx dimensions (> asked nt)",
        )

    def _compute(self, nt):
        """
        Implemented by sampling methods to compute the requested number of sampling points.

        The number of dimensions (nx) is determined based on `xlimits.shape[0]`.

        Arguments
        ---------
        nt : int
            Number of points requested.

        Returns
        -------
        ndarray[nt, nx]
            The sampling locations in the unit hypercube.
        """
        xlimits = self.options["xlimits"]
        nx = xlimits.shape[0]
        if self.options["weights"] is None:
            weights = np.ones(nx) / nx
        else:
            # Normalize on an owned float copy: the previous in-place
            # `weights /= sum` mutated a user-supplied ndarray and raised a
            # casting error when the weights had an integer dtype.
            weights = np.atleast_1d(np.array(self.options["weights"], dtype=float))
            weights /= np.sum(weights)
        # Greedily grow the per-dimension sample counts toward the weights
        # until the grid has at least nt points.
        num_list = np.ones(nx, int)
        while np.prod(num_list) < nt:
            ind = np.argmax(weights - num_list / np.sum(num_list))
            num_list[ind] += 1
        lins_list = [np.linspace(0.0, 1.0, num_list[kx]) for kx in range(nx)]
        x_list = np.meshgrid(*lins_list, indexing="ij")
        if self.options["clip"]:
            # Return the whole grid (may exceed the requested nt).
            nt = np.prod(num_list)
        x = np.zeros((nt, nx))
        for kx in range(nx):
            # Flatten each grid axis; truncate to nt when not clipping.
            x[:, kx] = x_list[kx].reshape(np.prod(num_list))[:nt]
        return x
| 1,948 | 27.246377 | 109 | py |
smt | smt-master/smt/sampling_methods/random.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
Random sampling.
"""
import numpy as np
from smt.sampling_methods.sampling_method import ScaledSamplingMethod
class Random(ScaledSamplingMethod):
    """Uniform random sampling over the unit hypercube."""

    def _compute(self, nt):
        """Draw nt uniformly random points in the unit hypercube.

        The number of dimensions (nx) is determined based on
        `xlimits.shape[0]`.

        Arguments
        ---------
        nt : int
            Number of points requested.

        Returns
        -------
        ndarray[nt, nx]
            The sampling locations in the unit hypercube.
        """
        limits = self.options["xlimits"]
        ndim = limits.shape[0]
        return np.random.rand(nt, ndim)
| 797 | 23.181818 | 91 | py |
smt | smt-master/smt/sampling_methods/__init__.py | from .random import Random
from .lhs import LHS
from .full_factorial import FullFactorial
| 90 | 21.75 | 41 | py |
smt | smt-master/smt/sampling_methods/tests/test_fullfactorial.py | import unittest
import numpy as np
from smt.sampling_methods import FullFactorial
class Test(unittest.TestCase):
    """Unit tests for the FullFactorial sampling method."""
    def test_ff_weights(self):
        # Weighted dimensions: the requested 10 points must be returned as-is.
        xlimits = np.array([[0.0, 1.0], [0.0, 1.0]])
        sampling = FullFactorial(xlimits=xlimits, weights=[0.25, 0.75])
        num = 10
        x = sampling(num)
        self.assertEqual((10, 2), x.shape)
    def test_ff_rectify(self):
        # clip=True rounds up to the full grid size (8 x 7 = 56 > 50).
        xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
        sampling = FullFactorial(xlimits=xlimits, clip=True)
        num = 50
        x = sampling(num)
        self.assertEqual((56, 2), x.shape)
if __name__ == "__main__":
unittest.main()
| 640 | 23.653846 | 71 | py |
smt | smt-master/smt/sampling_methods/tests/test_sampling_method_examples.py | import unittest
import matplotlib
matplotlib.use("Agg")
class Test(unittest.TestCase):
    """Runnable documentation examples for each sampling method.

    Each test doubles as a doc snippet: sample 50 points in a 2-D box and
    scatter-plot them (Agg backend, so nothing is displayed).
    """
    def test_random(self):
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.sampling_methods import Random
        xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
        sampling = Random(xlimits=xlimits)
        num = 50
        x = sampling(num)
        print(x.shape)
        plt.plot(x[:, 0], x[:, 1], "o")
        plt.xlabel("x")
        plt.ylabel("y")
        plt.show()
    def test_lhs(self):
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.sampling_methods import LHS
        xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
        sampling = LHS(xlimits=xlimits)
        num = 50
        x = sampling(num)
        print(x.shape)
        plt.plot(x[:, 0], x[:, 1], "o")
        plt.xlabel("x")
        plt.ylabel("y")
        plt.show()
    def test_full_factorial(self):
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.sampling_methods import FullFactorial
        xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
        sampling = FullFactorial(xlimits=xlimits)
        num = 50
        x = sampling(num)
        print(x.shape)
        plt.plot(x[:, 0], x[:, 1], "o")
        plt.xlabel("x")
        plt.ylabel("y")
        plt.show()
if __name__ == "__main__":
unittest.main()
| 1,403 | 19.347826 | 54 | py |
smt | smt-master/smt/sampling_methods/tests/test_lhs.py | import os
import unittest
import numpy as np
from smt.sampling_methods import LHS
class Test(unittest.TestCase):
    """Unit tests for the LHS sampling method."""
    def test_lhs_ese(self):
        # The "ese" criterion must return exactly the requested shape.
        xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
        sampling = LHS(xlimits=xlimits, criterion="ese")
        num = 50
        x = sampling(num)
        self.assertEqual((50, 2), x.shape)
    def test_random_state(self):
        # Reproducibility contract: one LHS instance yields a *sequence* of
        # distinct DOEs, but two instances seeded identically yield the same
        # sequence.
        xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
        num = 10
        sampling = LHS(xlimits=xlimits, criterion="ese", random_state=42)
        doe1 = sampling(num)
        doe2 = sampling(num)
        # Should not generate the same doe
        self.assertFalse(np.allclose(doe1, doe2))
        # Another LHS with same initialization should generate the same sequence of does
        sampling = LHS(
            xlimits=xlimits, criterion="ese", random_state=np.random.RandomState(42)
        )
        doe3 = sampling(num)
        doe4 = sampling(num)
        self.assertTrue(np.allclose(doe1, doe3))
        self.assertTrue(np.allclose(doe2, doe4))
    @unittest.skipIf(int(os.getenv("RUN_SLOW", 0)) < 1, "too slow")
    def test_expand_lhs(self):
        # Expanding by multiples of the initial size must keep the LHS
        # property: exactly one point per interval in every dimension.
        import numpy as np
        num = 100
        new_list = np.linspace(1, 5, 5) * num
        for i in range(len(new_list)):
            xlimits = np.array([[0.0, 4.0], [0.0, 3.0], [0.0, 3.0], [1.0, 5.0]])
            sampling = LHS(xlimits=xlimits, criterion="ese")
            x = sampling(num)
            new = int(new_list[i])
            new_num = num + new
            # We check the functionality with the "ese" optimization
            x_new = sampling.expand_lhs(x, new, method="ese")
            intervals = []
            subspace_bool = []
            for i in range(len(xlimits)):
                intervals.append(np.linspace(xlimits[i][0], xlimits[i][1], new_num + 1))
                subspace_bool.append(
                    [
                        [
                            intervals[i][j] < x_new[kk][i] < intervals[i][j + 1]
                            for kk in range(len(x_new))
                        ]
                        for j in range(len(intervals[i]) - 1)
                    ]
                )
                self.assertEqual(
                    True,
                    all(
                        [
                            subspace_bool[i][k].count(True) == 1
                            for k in range(len(subspace_bool[i]))
                        ]
                    ),
                )
if __name__ == "__main__":
unittest.main()
| 2,541 | 30 | 88 | py |
smt | smt-master/smt/sampling_methods/tests/__init__.py | 0 | 0 | 0 | py | |
smt | smt-master/smt/tests/test_training_derivs.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import numpy as np
import unittest
import inspect
from collections import OrderedDict
from smt.problems import Sphere, TensorProduct
from smt.sampling_methods import LHS, FullFactorial
from smt.utils.design_space import DesignSpace
from smt.utils.sm_test_case import SMTestCase
from smt.utils.silence import Silence
from smt.utils import compute_rms_error
try:
    # These surrogate models rely on compiled extensions; importing them
    # fails when the extensions were not built.
    from smt.surrogate_models import IDW, RBF, RMTC, RMTB
    compiled_available = True
except ImportError:
    # Only catch import failures; a bare `except:` would also swallow
    # SystemExit/KeyboardInterrupt and hide unrelated bugs.
    compiled_available = False
print_output = False
class Test(SMTestCase):
    """RMS-error regression tests for training-derivative-enhanced models."""
    def setUp(self):
        # Shared fixtures: 4 analytic problems, 2 surrogate models, and
        # per-model error tolerances (plain and gradient-enhanced).
        ndim = 3
        nt = 5000
        ne = 500
        problems = OrderedDict()
        problems["sphere"] = Sphere(ndim=ndim)
        problems["exp"] = TensorProduct(ndim=ndim, func="exp")
        problems["tanh"] = TensorProduct(ndim=ndim, func="tanh")
        problems["cos"] = TensorProduct(ndim=ndim, func="cos")
        sms = OrderedDict()
        if compiled_available:
            sms["RMTC"] = RMTC()
            sms["RMTB"] = RMTB()
        # Training / evaluation tolerances without derivative information.
        t_errors = {}
        t_errors["RMTC"] = 1e-1
        t_errors["RMTB"] = 1e-1
        e_errors = {}
        e_errors["RMTC"] = 1e-1
        e_errors["RMTB"] = 1e-1
        # Tighter tolerances expected once gradients are supplied.
        ge_t_errors = {}
        ge_t_errors["RMTC"] = 1e-2
        ge_t_errors["RMTB"] = 1e-2
        ge_e_errors = {}
        ge_e_errors["RMTC"] = 1e-2
        ge_e_errors["RMTB"] = 1e-2
        self.nt = nt
        self.ne = ne
        self.problems = problems
        self.sms = sms
        self.t_errors = t_errors
        self.e_errors = e_errors
        self.ge_t_errors = ge_t_errors
        self.ge_e_errors = ge_e_errors
    def run_test(self):
        """Train a surrogate with and without training derivatives and check
        that both RMS errors stay within the per-model tolerances.

        The problem and surrogate names are parsed from the *caller's* test
        method name, which must follow the ``test_<problem>_<sm>`` pattern.
        """
        method_name = inspect.stack()[1][3]
        pname = method_name.split("_")[1]
        sname = method_name.split("_")[2]
        prob = self.problems[pname]
        sampling = FullFactorial(xlimits=prob.xlimits, clip=True)
        np.random.seed(0)
        xt = sampling(self.nt)
        yt = prob(xt)
        # Exact training derivatives, one array per input dimension.
        dyt = {}
        for kx in range(prob.xlimits.shape[0]):
            dyt[kx] = prob(xt, kx=kx)
        np.random.seed(1)
        xe = sampling(self.ne)
        ye = prob(xe)
        dye = {}
        for kx in range(prob.xlimits.shape[0]):
            dye[kx] = prob(xe, kx=kx)
        # Fresh surrogate built from the fixture's option template.
        sm0 = self.sms[sname]
        sm = sm0.__class__()
        sm.options = sm0.options.clone()
        if sm.options.is_declared("design_space"):
            sm.options["design_space"] = DesignSpace(prob.xlimits)
        if sm.options.is_declared("xlimits"):
            sm.options["xlimits"] = prob.xlimits
        sm.options["print_global"] = False
        sm.set_training_values(xt, yt)
        with Silence():
            sm.train()
        t_error = compute_rms_error(sm)
        e_error = compute_rms_error(sm, xe, ye)
        # Second surrogate: same setup plus training derivatives.
        sm = sm0.__class__()
        sm.options = sm0.options.clone()
        if sm.options.is_declared("design_space"):
            sm.options["design_space"] = DesignSpace(prob.xlimits)
        if sm.options.is_declared("xlimits"):
            sm.options["xlimits"] = prob.xlimits
        sm.options["print_global"] = False
        sm.set_training_values(xt, yt)
        for kx in range(prob.xlimits.shape[0]):
            sm.set_training_derivatives(xt, dyt[kx], kx)
        with Silence():
            sm.train()
        ge_t_error = compute_rms_error(sm)
        ge_e_error = compute_rms_error(sm, xe, ye)
        if print_output:
            print(
                "%8s %6s %18.9e %18.9e %18.9e %18.9e"
                % (pname[:6], sname, t_error, e_error, ge_t_error, ge_e_error)
            )
        self.assert_error(t_error, 0.0, self.t_errors[sname])
        self.assert_error(e_error, 0.0, self.e_errors[sname])
        self.assert_error(ge_t_error, 0.0, self.ge_t_errors[sname])
        self.assert_error(ge_e_error, 0.0, self.ge_e_errors[sname])
    # --------------------------------------------------------------------
    # Each method below only names the (problem, surrogate) pair; run_test
    # parses that pair from the method name via inspect.
    # Function: sphere
    @unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
    def test_sphere_RMTC(self):
        self.run_test()
    @unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
    def test_sphere_RMTB(self):
        self.run_test()
    # --------------------------------------------------------------------
    # Function: exp
    @unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
    def test_exp_RMTC(self):
        self.run_test()
    @unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
    def test_exp_RMTB(self):
        self.run_test()
    # --------------------------------------------------------------------
    # Function: tanh
    @unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
    def test_tanh_RMTC(self):
        self.run_test()
    @unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
    def test_tanh_RMTB(self):
        self.run_test()
    # --------------------------------------------------------------------
    # Function: cos
    @unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
    def test_cos_RMTC(self):
        self.run_test()
    @unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
    def test_cos_RMTB(self):
        self.run_test()
if __name__ == "__main__":
    # Manual entry point: enable the per-test error table before running.
    print_output = True
    print(
        "%6s %8s %18s %18s %18s %18s"
        % (
            "SM",
            "Problem",
            "Train. pt. error",
            "Test pt. error",
            "GE tr. pt. error",
            "GE test pt. error",
        )
    )
    unittest.main()
| 5,812 | 27.777228 | 88 | py |
smt | smt-master/smt/tests/test_extrap.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import numpy as np
import unittest
import inspect
from collections import OrderedDict
from smt.problems import Sphere, TensorProduct
from smt.sampling_methods import LHS
from smt.utils.design_space import DesignSpace
from smt.utils.sm_test_case import SMTestCase
from smt.utils.silence import Silence
try:
    # These surrogate models rely on compiled extensions; importing them
    # fails when the extensions were not built.
    from smt.surrogate_models import IDW, RBF, RMTC, RMTB
    compiled_available = True
except ImportError:
    # Only catch import failures; a bare `except:` would also swallow
    # SystemExit/KeyboardInterrupt and hide unrelated bugs.
    compiled_available = False
print_output = False
class Test(SMTestCase):
    """Extrapolation behavior of RMTC/RMTB: training outside xlimits must
    raise, predicting outside must succeed when extrapolate=True."""
    def setUp(self):
        ndim = 3
        nt = 500
        ne = 100
        problems = OrderedDict()
        problems["sphere"] = Sphere(ndim=ndim)
        sms = OrderedDict()
        if compiled_available:
            sms["RMTC"] = RMTC(num_elements=6, extrapolate=True)
            sms["RMTB"] = RMTB(order=4, num_ctrl_pts=10, extrapolate=True)
        self.nt = nt
        self.ne = ne
        self.problems = problems
        self.sms = sms
    def run_test(self, sname, extrap_train=False, extrap_predict=False):
        """Train the named surrogate; optionally feed or predict a point
        one unit beyond the upper bound of every dimension."""
        prob = self.problems["sphere"]
        sampling = LHS(xlimits=prob.xlimits)
        np.random.seed(0)
        xt = sampling(self.nt)
        yt = prob(xt)
        sm0 = self.sms[sname]
        sm = sm0.__class__()
        sm.options = sm0.options.clone()
        if sm.options.is_declared("design_space"):
            sm.options["design_space"] = DesignSpace(prob.xlimits)
        if sm.options.is_declared("xlimits"):
            sm.options["xlimits"] = prob.xlimits
        sm.options["print_global"] = False
        # One point strictly outside the domain in every dimension.
        x = np.zeros((1, xt.shape[1]))
        x[0, :] = prob.xlimits[:, 1] + 1.0
        y = prob(x)
        sm.set_training_values(xt, yt)
        if extrap_train:
            sm.set_training_values(x, y)
        with Silence():
            sm.train()
        if extrap_predict:
            sm.predict_values(x)
    @unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
    def test_RMTC(self):
        self.run_test("RMTC", False, False)
    @unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
    def test_RMTC_train(self):
        # Training data outside xlimits must be rejected.
        with self.assertRaises(Exception) as context:
            self.run_test("RMTC", True, False)
        self.assertEqual(str(context.exception), "Training points above max for 0")
    @unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
    def test_RMTC_predict(self):
        # Prediction outside xlimits is allowed with extrapolate=True.
        self.run_test("RMTC", False, True)
    @unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
    def test_RMTB(self):
        self.run_test("RMTB", False, False)
    @unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
    def test_RMTB_train(self):
        # Training data outside xlimits must be rejected.
        with self.assertRaises(Exception) as context:
            self.run_test("RMTB", True, False)
        self.assertEqual(str(context.exception), "Training points above max for 0")
    @unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
    def test_RMTB_predict(self):
        # Prediction outside xlimits is allowed with extrapolate=True.
        self.run_test("RMTB", False, True)
if __name__ == "__main__":
unittest.main()
| 3,271 | 27.955752 | 88 | py |
smt | smt-master/smt/tests/test_problems.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import numpy as np
import unittest
from smt.problems import (
CantileverBeam,
Sphere,
ReducedProblem,
RobotArm,
Rosenbrock,
Branin,
LpNorm,
)
from smt.problems import (
TensorProduct,
TorsionVibration,
WaterFlow,
WeldedBeam,
WingWeight,
)
from smt.problems import (
NdimCantileverBeam,
NdimRobotArm,
NdimRosenbrock,
NdimStepFunction,
)
from smt.utils.sm_test_case import SMTestCase
class Test(SMTestCase):
    def run_test(self, problem):
        """Validate a benchmark problem's bounds, vectorized evaluation and derivatives.

        The analytic derivatives returned by ``problem(x, kx)`` are cross-checked
        against both a forward finite difference (step ``h``) and a complex-step
        derivative (step ``ch``) at four interior points of the domain.
        """
        # Evaluations must accept complex inputs for the complex-step check below.
        problem.options["return_complex"] = True
        # Test xlimits
        ndim = problem.options["ndim"]
        xlimits = problem.xlimits
        self.assertEqual(xlimits.shape, (ndim, 2))
        # Test evaluation of multiple points at once
        x = np.zeros((10, ndim))
        for ind in range(10):
            x[ind, :] = 0.5 * (xlimits[:, 0] + xlimits[:, 1])
        y = problem(x)
        ny = y.shape[1]
        self.assertEqual(x.shape[0], y.shape[0])
        # Test derivatives
        # Four probe points spread along the diagonal of the domain.
        x = np.zeros((4, ndim), complex)
        x[0, :] = 0.2 * xlimits[:, 0] + 0.8 * xlimits[:, 1]
        x[1, :] = 0.4 * xlimits[:, 0] + 0.6 * xlimits[:, 1]
        x[2, :] = 0.6 * xlimits[:, 0] + 0.4 * xlimits[:, 1]
        x[3, :] = 0.8 * xlimits[:, 0] + 0.2 * xlimits[:, 1]
        y0 = problem(x)
        dydx_FD = np.zeros(4)
        dydx_CS = np.zeros(4)
        dydx_AN = np.zeros(4)
        print()
        # FD step; CS step can be near machine epsilon because the complex-step
        # method has no subtractive cancellation.
        h = 1e-5
        ch = 1e-16
        for iy in range(ny):
            for idim in range(ndim):
                # Forward finite difference along dimension idim.
                x[:, idim] += h
                y_FD = problem(x)
                x[:, idim] -= h
                # Complex-step: perturb the imaginary part, derivative = Im(y)/ch.
                x[:, idim] += complex(0, ch)
                y_CS = problem(x)
                x[:, idim] -= complex(0, ch)
                dydx_FD[:] = (y_FD[:, iy] - y0[:, iy]) / h
                dydx_CS[:] = np.imag(y_CS[:, iy]) / ch
                dydx_AN[:] = problem(x, idim)[:, iy]
                abs_rms_error_FD = np.linalg.norm(dydx_FD - dydx_AN)
                rel_rms_error_FD = np.linalg.norm(dydx_FD - dydx_AN) / np.linalg.norm(
                    dydx_FD
                )
                abs_rms_error_CS = np.linalg.norm(dydx_CS - dydx_AN)
                rel_rms_error_CS = np.linalg.norm(dydx_CS - dydx_AN) / np.linalg.norm(
                    dydx_CS
                )
                msg = (
                    "{:16s} iy {:2} dim {:2} of {:2} "
                    + "abs_FD {:16.9e} rel_FD {:16.9e} abs_CS {:16.9e} rel_CS {:16.9e}"
                )
                print(
                    msg.format(
                        problem.options["name"],
                        iy,
                        idim,
                        ndim,
                        abs_rms_error_FD,
                        rel_rms_error_FD,
                        abs_rms_error_CS,
                        rel_rms_error_CS,
                    )
                )
                # Only the FD comparison is asserted; the CS errors are printed
                # for inspection but not enforced.
                self.assertTrue(rel_rms_error_FD < 1e-3 or abs_rms_error_FD < 1e-5)
def test_sphere(self):
self.run_test(Sphere(ndim=1))
self.run_test(Sphere(ndim=3))
def test_exp(self):
self.run_test(TensorProduct(name="TP-exp", ndim=1, func="exp"))
self.run_test(TensorProduct(name="TP-exp", ndim=3, func="exp"))
def test_tanh(self):
self.run_test(TensorProduct(name="TP-tanh", ndim=1, func="tanh"))
self.run_test(TensorProduct(name="TP-tanh", ndim=3, func="tanh"))
def test_cos(self):
self.run_test(TensorProduct(name="TP-cos", ndim=1, func="cos"))
self.run_test(TensorProduct(name="TP-cos", ndim=3, func="cos"))
def test_gaussian(self):
self.run_test(TensorProduct(name="TP-gaussian", ndim=1, func="gaussian"))
self.run_test(TensorProduct(name="TP-gaussian", ndim=3, func="gaussian"))
def test_branin(self):
self.run_test(Branin(ndim=2))
def test_lp_norm(self):
self.run_test(LpNorm(ndim=2))
def test_rosenbrock(self):
self.run_test(Rosenbrock(ndim=2))
self.run_test(Rosenbrock(ndim=3))
def test_cantilever_beam(self):
self.run_test(CantileverBeam(ndim=3))
self.run_test(CantileverBeam(ndim=6))
self.run_test(CantileverBeam(ndim=9))
self.run_test(CantileverBeam(ndim=12))
def test_robot_arm(self):
self.run_test(RobotArm(ndim=2))
self.run_test(RobotArm(ndim=4))
self.run_test(RobotArm(ndim=6))
def test_torsion_vibration(self):
self.run_test(TorsionVibration(ndim=15))
self.run_test(ReducedProblem(TorsionVibration(ndim=15), dims=[5, 10, 12, 13]))
def test_water_flow(self):
self.run_test(WaterFlow(ndim=8))
self.run_test(ReducedProblem(WaterFlow(ndim=8), dims=[0, 1, 6]))
def test_welded_beam(self):
self.run_test(WeldedBeam(ndim=3))
def test_wing_weight(self):
self.run_test(WingWeight(ndim=10))
self.run_test(ReducedProblem(WingWeight(ndim=10), dims=[0, 2, 3, 5]))
def test_ndim_cantilever_beam(self):
self.run_test(NdimCantileverBeam(ndim=1))
self.run_test(NdimCantileverBeam(ndim=2))
def test_ndim_robot_arm(self):
self.run_test(NdimRobotArm(ndim=1))
self.run_test(NdimRobotArm(ndim=2))
def test_ndim_rosenbrock(self):
self.run_test(NdimRosenbrock(ndim=1))
self.run_test(NdimRosenbrock(ndim=2))
def test_ndim_step_function(self):
self.run_test(NdimStepFunction(ndim=1))
self.run_test(NdimStepFunction(ndim=2))
if __name__ == "__main__":
unittest.main()
| 5,656 | 29.912568 | 87 | py |
smt | smt-master/smt/tests/test_array_outputs.py | import numpy as np
import unittest
from smt.utils.sm_test_case import SMTestCase
from smt.utils.silence import Silence
from smt.surrogate_models import QP, KRG
from smt.examples.rans_crm_wing.rans_crm_wing import (
get_rans_crm_wing,
plot_rans_crm_wing,
)
def setup_sm(sm_name, settings=None):
    """Build and train a surrogate model on the RANS CRM wing data set.

    Parameters
    ----------
    sm_name : str
        Name of a class exported by ``smt.surrogate_models`` (e.g. "QP", "KRG").
    settings : dict, optional
        Keyword options forwarded to the surrogate constructor.

    Returns
    -------
    (xt, yt, interp)
        Training inputs, training outputs, and the trained surrogate.
    """
    # Bug fix: a mutable default argument ({}) is shared across calls and can
    # leak state between tests; use the None-sentinel idiom instead.
    if settings is None:
        settings = {}
    xt, yt, xlimits = get_rans_crm_wing()
    _tmp = __import__("smt", globals(), locals(), ["surrogate_models"], 0)
    interp = getattr(_tmp.surrogate_models, sm_name)(**settings)
    interp.set_training_values(xt, yt)
    with Silence():
        interp.train()
    return xt, yt, interp
class ArrayOutputTest(SMTestCase):
    """Check predict_derivatives output arrays of several surrogates against
    reference values on the RANS CRM wing data.

    NOTE(review): the RBF and IDW cases exercise compiled surrogates but, unlike
    the other test files, carry no skipIf guard on compiled availability —
    confirm they are expected to run on pure-Python installs.
    """
    def test_QP(self):
        xt, yt, interp = setup_sm(sm_name="QP")
        with Silence():
            d0 = interp.predict_derivatives(np.atleast_2d(xt[10, :]), 0)
        # Tight tolerance: QP derivatives are deterministic.
        self.assert_error(
            d0, np.array([[0.02588578, 5.86555448]]), atol=1e-6, rtol=1e-6
        )
    def test_KRG(self):
        xt, yt, interp = setup_sm(sm_name="KRG")
        with Silence():
            d0 = interp.predict_derivatives(np.atleast_2d(xt[10, :]), 0)
        # Loose tolerance: KRG hyperparameter optimization varies between runs.
        self.assert_error(
            d0, np.array([[0.06874097, 4.366292277996716]]), atol=0.55, rtol=0.15
        )
    def test_RBF(self):
        xt, yt, interp = setup_sm(sm_name="RBF")
        with Silence():
            d0 = interp.predict_derivatives(np.atleast_2d(xt[10, :]), 0)
        self.assert_error(d0, np.array([[0.15741522, 4.80265154]]), atol=0.2, rtol=0.03)
    def test_LS(self):
        xt, yt, interp = setup_sm(sm_name="LS")
        with Silence():
            d0 = interp.predict_derivatives(np.atleast_2d(xt[10, :]), 0)
        self.assert_error(d0, np.array([[0.2912748, 5.39911101]]), atol=0.2, rtol=0.03)
    def test_IDW(self):
        xt, yt, interp = setup_sm(sm_name="IDW")
        with Silence():
            d0 = interp.predict_derivatives(np.atleast_2d(xt[10, :]), 0)
        # IDW derivatives at a training point are identically zero.
        self.assert_error(d0, np.array([[0.0, 0.0]]), atol=0.2, rtol=0.03)
if __name__ == "__main__":
xt, yt, sm = setup_sm("QP")
unittest.main()
| 2,051 | 28.73913 | 88 | py |
smt | smt-master/smt/tests/test_high_dim.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import os
import numpy as np
import unittest
import inspect
from collections import OrderedDict
from smt.problems import Sphere, TensorProduct
from smt.sampling_methods import LHS
from smt.utils.sm_test_case import SMTestCase
from smt.utils.silence import Silence
from smt.utils import compute_rms_error
from smt.surrogate_models import LS, QP, KPLS, KRG
# The compiled (Fortran/C) surrogates are optional; tests guarded by
# `compiled_available` are skipped when they cannot be imported.
try:
    from smt.surrogate_models import IDW, RBF, RMTC, RMTB
    compiled_available = True
except ImportError:
    # Catch only ImportError: a bare `except:` would also swallow
    # SystemExit/KeyboardInterrupt and hide unrelated bugs raised on import.
    compiled_available = False
print_output = False
class Test(SMTestCase):
def setUp(self):
ndim = 10
nt = 500
ne = 100
problems = OrderedDict()
problems["sphere"] = Sphere(ndim=ndim)
problems["exp"] = TensorProduct(ndim=ndim, func="exp")
problems["tanh"] = TensorProduct(ndim=ndim, func="tanh")
problems["cos"] = TensorProduct(ndim=ndim, func="cos")
sms = OrderedDict()
sms["LS"] = LS()
sms["QP"] = QP()
sms["KRG"] = KRG(theta0=[4e-1] * ndim)
sms["KPLS"] = KPLS()
if compiled_available:
sms["IDW"] = IDW()
sms["RBF"] = RBF()
t_errors = {}
t_errors["LS"] = 1.0
t_errors["QP"] = 1.0
t_errors["KRG"] = 1e-4
t_errors["IDW"] = 1e-15
t_errors["RBF"] = 1e-2
t_errors["KPLS"] = 1e-3
e_errors = {}
e_errors["LS"] = 2.5
e_errors["QP"] = 2.0
e_errors["KRG"] = 2.0
e_errors["IDW"] = 4
e_errors["RBF"] = 2
e_errors["KPLS"] = 2.5
self.nt = nt
self.ne = ne
self.problems = problems
self.sms = sms
self.t_errors = t_errors
self.e_errors = e_errors
    def run_test(self):
        """Train one surrogate on one high-dimensional problem and check RMS errors.

        The problem and surrogate names are parsed from the calling test
        method's name, which must follow the pattern ``test_<problem>_<surrogate>``.
        """
        # Inspect the caller's frame to recover the test method name.
        method_name = inspect.stack()[1][3]
        pname = method_name.split("_")[1]
        sname = method_name.split("_")[2]
        prob = self.problems[pname]
        sampling = LHS(xlimits=prob.xlimits, random_state=42)
        np.random.seed(0)
        xt = sampling(self.nt)
        yt = prob(xt)
        np.random.seed(1)
        xe = sampling(self.ne)
        ye = prob(xe)
        # Work on a fresh clone so the shared instance in self.sms is not mutated.
        sm0 = self.sms[sname]
        sm = sm0.__class__()
        sm.options = sm0.options.clone()
        if sm.options.is_declared("xlimits"):
            sm.options["xlimits"] = prob.xlimits
        sm.options["print_global"] = False
        sm.set_training_values(xt, yt)
        with Silence():
            sm.train()
        # Training-point error vs. independent test-point error.
        t_error = compute_rms_error(sm)
        e_error = compute_rms_error(sm, xe, ye)
        if print_output:
            print("%8s %6s %18.9e %18.9e" % (pname[:6], sname, t_error, e_error))
        self.assert_error(t_error, 0.0, self.t_errors[sname], 1e-5)
        self.assert_error(e_error, 0.0, self.e_errors[sname], 1e-5)
# --------------------------------------------------------------------
# Function: sphere
def test_sphere_LS(self):
self.run_test()
def test_sphere_QP(self):
self.run_test()
@unittest.skipIf(int(os.getenv("RUN_SLOW", 0)) < 1, "too slow")
def test_sphere_KRG(self):
self.run_test()
def test_sphere_KPLS(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_sphere_IDW(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_sphere_RBF(self):
self.run_test()
# --------------------------------------------------------------------
# Function: exp
def test_exp_LS(self):
self.run_test()
def test_exp_QP(self):
self.run_test()
@unittest.skipIf(int(os.getenv("RUN_SLOW", 0)) < 1, "too slow")
def test_exp_KRG(self):
self.run_test()
def test_exp_KPLS(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_exp_IDW(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_exp_RBF(self):
self.run_test()
# --------------------------------------------------------------------
# Function: tanh
def test_tanh_LS(self):
self.run_test()
def test_tanh_QP(self):
self.run_test()
@unittest.skipIf(int(os.getenv("RUN_SLOW", 0)) < 1, "too slow")
def test_tanh_KRG(self):
self.run_test()
def test_tanh_KPLS(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_tanh_IDW(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_tanh_RBF(self):
self.run_test()
# --------------------------------------------------------------------
# Function: cos
def test_cos_LS(self):
self.run_test()
def test_cos_QP(self):
self.run_test()
@unittest.skipIf(int(os.getenv("RUN_SLOW", 0)) < 1, "too slow")
def test_cos_KRG(self):
self.run_test()
def test_cos_KPLS(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_cos_IDW(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_cos_RBF(self):
self.run_test()
if __name__ == "__main__":
print_output = True
print("%6s %8s %18s %18s" % ("SM", "Problem", "Train. pt. error", "Test pt. error"))
unittest.main()
| 5,712 | 25.327189 | 88 | py |
smt | smt-master/smt/tests/test_kpls_auto.py | """
Author: Paul Saves
This package is distributed under New BSD license.
"""
import numpy as np
import unittest
import inspect
from collections import OrderedDict
from smt.problems import Sphere, TensorProduct, Rosenbrock, Branin
from smt.sampling_methods import LHS
from smt.utils.sm_test_case import SMTestCase
from smt.utils.silence import Silence
from smt.utils import compute_rms_error
from smt.surrogate_models import KPLS
print_output = False
class Test(SMTestCase):
def setUp(self):
ndim = 10
nt = 50
ne = 100
problems = OrderedDict()
problems["Branin"] = Branin(ndim=2)
problems["Rosenbrock"] = Rosenbrock(ndim=3)
problems["sphere"] = Sphere(ndim=ndim)
problems["exp"] = TensorProduct(ndim=ndim, func="exp")
problems["tanh"] = TensorProduct(ndim=ndim, func="tanh")
problems["cos"] = TensorProduct(ndim=ndim, func="cos")
sms = OrderedDict()
sms["KPLS"] = KPLS(eval_n_comp=True)
t_errors = {}
e_errors = {}
t_errors["KPLS"] = 1e-3
e_errors["KPLS"] = 2.5
n_comp_opt = {}
n_comp_opt["Branin"] = 2
n_comp_opt["Rosenbrock"] = 1
n_comp_opt["sphere"] = 1
n_comp_opt["exp"] = 3
n_comp_opt["tanh"] = 1
n_comp_opt["cos"] = 1
self.nt = nt
self.ne = ne
self.problems = problems
self.sms = sms
self.t_errors = t_errors
self.e_errors = e_errors
self.n_comp_opt = n_comp_opt
def run_test(self):
method_name = inspect.stack()[1][3]
pname = method_name.split("_")[1]
sname = method_name.split("_")[2]
prob = self.problems[pname]
sampling = LHS(xlimits=prob.xlimits, random_state=42)
np.random.seed(0)
xt = sampling(self.nt)
yt = prob(xt)
np.random.seed(1)
xe = sampling(self.ne)
ye = prob(xe)
sm0 = self.sms[sname]
sm = sm0.__class__()
sm.options = sm0.options.clone()
if sm.options.is_declared("xlimits"):
sm.options["xlimits"] = prob.xlimits
sm.options["print_global"] = False
sm.set_training_values(xt, yt)
with Silence():
sm.train()
l = sm.options["n_comp"]
t_error = compute_rms_error(sm)
e_error = compute_rms_error(sm, xe, ye)
if print_output:
print("%8s %6s %18.9e %18.9e" % (pname[:6], sname, t_error, e_error))
self.assert_error(t_error, 0.0, self.t_errors[sname], 1e-5)
self.assert_error(e_error, 0.0, self.e_errors[sname], 1e-5)
self.assertEqual(l, self.n_comp_opt[pname])
# --------------------------------------------------------------------
# Function: sphere
def test_Branin_KPLS(self):
self.run_test()
def test_Rosenbrock_KPLS(self):
self.run_test()
def test_sphere_KPLS(self):
self.run_test()
def test_exp_KPLS(self):
self.run_test()
def test_tanh_KPLS(self):
self.run_test()
def test_cos_KPLS(self):
self.run_test()
if __name__ == "__main__":
print_output = True
print("%6s %8s %18s %18s" % ("SM", "Problem", "Train. pt. error", "Test pt. error"))
unittest.main()
| 3,287 | 24.292308 | 88 | py |
smt | smt-master/smt/tests/test_low_dim.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import numpy as np
import unittest
import inspect
from collections import OrderedDict
from smt.problems import Sphere, TensorProduct
from smt.sampling_methods import LHS
from smt.utils.design_space import DesignSpace
from smt.utils.sm_test_case import SMTestCase
from smt.utils.silence import Silence
from smt.utils import compute_rms_error
from smt.surrogate_models import LS, QP, KPLS, KRG
# The compiled (Fortran/C) surrogates are optional; tests guarded by
# `compiled_available` are skipped when they cannot be imported.
try:
    from smt.surrogate_models import IDW, RBF, RMTC, RMTB
    compiled_available = True
except ImportError:
    # Catch only ImportError: a bare `except:` would also swallow
    # SystemExit/KeyboardInterrupt and hide unrelated bugs raised on import.
    compiled_available = False
print_output = False
class Test(SMTestCase):
def setUp(self):
ndim = 2
nt = 10000
ne = 1000
problems = OrderedDict()
problems["sphere"] = Sphere(ndim=ndim)
problems["exp"] = TensorProduct(ndim=ndim, func="exp", width=5)
problems["tanh"] = TensorProduct(ndim=ndim, func="tanh", width=5)
problems["cos"] = TensorProduct(ndim=ndim, func="cos", width=5)
sms = OrderedDict()
sms["LS"] = LS()
sms["QP"] = QP()
if compiled_available:
sms["RMTC"] = RMTC(num_elements=20, energy_weight=1e-10)
sms["RMTB"] = RMTB(num_ctrl_pts=40, energy_weight=1e-10)
t_errors = {}
t_errors["LS"] = 1.0
t_errors["QP"] = 1.0
t_errors["RMTC"] = 1.0
t_errors["RMTB"] = 1.0
e_errors = {}
e_errors["LS"] = 1.5
e_errors["QP"] = 1.5
e_errors["RMTC"] = 1.0
e_errors["RMTB"] = 1.0
self.nt = nt
self.ne = ne
self.problems = problems
self.sms = sms
self.t_errors = t_errors
self.e_errors = e_errors
def run_test(self):
method_name = inspect.stack()[1][3]
pname = method_name.split("_")[1]
sname = method_name.split("_")[2]
prob = self.problems[pname]
sampling = LHS(xlimits=prob.xlimits)
np.random.seed(0)
xt = sampling(self.nt)
yt = prob(xt)
np.random.seed(1)
xe = sampling(self.ne)
ye = prob(xe)
sm0 = self.sms[sname]
sm = sm0.__class__()
sm.options = sm0.options.clone()
if sm.options.is_declared("design_space"):
sm.options["design_space"] = DesignSpace(prob.xlimits)
if sm.options.is_declared("xlimits"):
sm.options["xlimits"] = prob.xlimits
sm.options["print_global"] = False
sm.set_training_values(xt, yt)
with Silence():
sm.train()
t_error = compute_rms_error(sm)
e_error = compute_rms_error(sm, xe, ye)
if print_output:
print("%8s %6s %18.9e %18.9e" % (pname[:6], sname, t_error, e_error))
self.assert_error(t_error, 0.0, self.t_errors[sname])
self.assert_error(e_error, 0.0, self.e_errors[sname])
# --------------------------------------------------------------------
# Function: sphere
def test_sphere_LS(self):
self.run_test()
def test_sphere_QP(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_sphere_RMTC(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_sphere_RMTB(self):
self.run_test()
# --------------------------------------------------------------------
# Function: exp
def test_exp_LS(self):
self.run_test()
def test_exp_QP(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_exp_RMTC(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_exp_RMTB(self):
self.run_test()
# --------------------------------------------------------------------
# Function: tanh
def test_tanh_LS(self):
self.run_test()
def test_tanh_QP(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_tanh_RMTC(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_tanh_RMTB(self):
self.run_test()
# --------------------------------------------------------------------
# Function: cos
def test_cos_LS(self):
self.run_test()
def test_cos_QP(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_cos_RMTC(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_cos_RMTB(self):
self.run_test()
if __name__ == "__main__":
print_output = True
print("%6s %8s %18s %18s" % ("SM", "Problem", "Train. pt. error", "Test pt. error"))
unittest.main()
| 5,047 | 26.434783 | 88 | py |
smt | smt-master/smt/tests/test_output_derivs.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import numpy as np
import unittest
import inspect
from collections import OrderedDict
from smt.utils.design_space import DesignSpace
from smt.problems import Sphere
from smt.sampling_methods import FullFactorial
from smt.utils.sm_test_case import SMTestCase
from smt.utils.silence import Silence
from smt.utils import compute_rms_error
# The compiled (Fortran/C) surrogates are optional; tests guarded by
# `compiled_available` are skipped when they cannot be imported.
try:
    from smt.surrogate_models import IDW, RBF, RMTC, RMTB
    compiled_available = True
except ImportError:
    # Catch only ImportError: a bare `except:` would also swallow
    # SystemExit/KeyboardInterrupt and hide unrelated bugs raised on import.
    compiled_available = False
print_output = False
class Test(SMTestCase):
def setUp(self):
ndim = 2
self.nt = 50
self.ne = 10
self.problem = Sphere(ndim=ndim)
self.sms = sms = OrderedDict()
if compiled_available:
sms["IDW"] = IDW()
sms["RBF"] = RBF()
sms["RMTB"] = RMTB(
regularization_weight=1e-8,
nonlinear_maxiter=100,
solver_tolerance=1e-16,
)
sms["RMTC"] = RMTC(
regularization_weight=1e-8,
nonlinear_maxiter=100,
solver_tolerance=1e-16,
)
def run_test(self):
method_name = inspect.stack()[1][3]
sname = method_name.split("_")[1]
prob = self.problem
sampling = FullFactorial(xlimits=prob.xlimits, clip=False)
np.random.seed(0)
xt = sampling(self.nt)
yt = prob(xt)
# dyt = {}
# for kx in range(prob.xlimits.shape[0]):
# dyt[kx] = prob(xt, kx=kx)
np.random.seed(1)
xe = sampling(self.ne)
ye = prob(xe)
sm0 = self.sms[sname]
sm = sm0.__class__()
sm.options = sm0.options.clone()
if sm.options.is_declared("design_space"):
sm.options["design_space"] = DesignSpace(prob.xlimits)
if sm.options.is_declared("xlimits"):
sm.options["xlimits"] = prob.xlimits
sm.options["print_global"] = False
sm.set_training_values(xt, yt)
sm.update_training_values(yt)
with Silence():
sm.train()
ye0 = sm.predict_values(xe)
h = 1e-3
jac_fd = np.zeros((self.ne, self.nt))
for ind in range(self.nt):
sm.update_training_values(yt + h * np.eye(self.nt, M=1, k=-ind))
with Silence():
sm.train()
ye = sm.predict_values(xe)
jac_fd[:, ind] = (ye - ye0)[:, 0] / h
jac_fd = jac_fd.reshape((self.ne, self.nt, 1))
jac_an = sm.predict_output_derivatives(xe)[None]
if print_output:
print(np.linalg.norm(jac_fd - jac_an))
self.assert_error(jac_fd, jac_an, rtol=5e-2)
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_IDW(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_RBF(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_RMTB(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_RMTC(self):
self.run_test()
if __name__ == "__main__":
print_output = True
unittest.main()
| 3,409 | 25.850394 | 88 | py |
smt | smt-master/smt/tests/__init__.py | 0 | 0 | 0 | py | |
smt | smt-master/smt/tests/test_all.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
Dr. Mohamed A. Bouhlel <mbouhlel@umich>
This package is distributed under New BSD license.
"""
import os
import numpy as np
import unittest
import inspect
from collections import OrderedDict
from smt.problems import TensorProduct
from smt.sampling_methods import LHS, FullFactorial
from smt.utils.sm_test_case import SMTestCase
from smt.utils.silence import Silence
from smt.utils import compute_rms_error
from smt.surrogate_models import (
LS,
QP,
KPLS,
KRG,
KPLSK,
GEKPLS,
GENN,
MGP,
DesignSpace,
)
# The compiled (Fortran/C) surrogates are optional; tests guarded by
# `compiled_available` are skipped when they cannot be imported.
try:
    from smt.surrogate_models import IDW, RBF, RMTC, RMTB
    compiled_available = True
except ImportError:
    # Catch only ImportError: a bare `except:` would also swallow
    # SystemExit/KeyboardInterrupt and hide unrelated bugs raised on import.
    compiled_available = False
print_output = False
def genn():
    """Build a GENN surrogate preconfigured for the gradient-enhanced tests.

    All optimizer, regularization, architecture and logging options are set
    explicitly and returned as a ready-to-train instance.
    """
    option_values = {
        "alpha": 0.1,  # learning rate that controls optimizer step size
        "beta1": 0.9,  # tuning parameter to control ADAM optimization
        "beta2": 0.99,  # tuning parameter to control ADAM optimization
        "lambd": 0.1,  # lambd = 0. = no regularization, lambd > 0 = regularization
        "gamma": 1.0,  # gamma = 0. = no grad-enhancement, gamma > 0 = grad-enhancement
        "deep": 2,  # number of hidden layers
        "wide": 12,  # number of nodes per hidden layer
        "mini_batch_size": 10000,  # used to divide data into training batches (use for large data sets)
        "num_epochs": 25,  # number of passes through data
        "num_iterations": 100,  # number of optimizer iterations per mini-batch
        "is_print": True,
    }
    neural_net = GENN()
    for option_name, option_value in option_values.items():
        neural_net.options[option_name] = option_value
    return neural_net
class Test(SMTestCase):
def setUp(self):
ndim = 3
nt = 100
ne = 100
ncomp = 1
problems = OrderedDict()
problems["exp"] = TensorProduct(ndim=ndim, func="exp")
problems["tanh"] = TensorProduct(ndim=ndim, func="tanh")
problems["cos"] = TensorProduct(ndim=ndim, func="cos")
sms = OrderedDict()
sms["LS"] = LS()
sms["QP"] = QP()
sms["KRG"] = KRG(theta0=[1e-2] * ndim)
sms["KPLS"] = KPLS(theta0=[1e-2] * ncomp, n_comp=ncomp)
sms["KPLSK"] = KPLSK(theta0=[1] * ncomp, n_comp=ncomp)
sms["MGP"] = KPLSK(theta0=[1e-2] * ncomp, n_comp=ncomp)
sms["GEKPLS"] = GEKPLS(theta0=[1e-2] * 2, n_comp=2, delta_x=1e-1)
sms["GENN"] = genn()
if compiled_available:
sms["IDW"] = IDW()
sms["RBF"] = RBF()
sms["RMTC"] = RMTC()
sms["RMTB"] = RMTB()
t_errors = {}
t_errors["LS"] = 1.0
t_errors["QP"] = 1.0
t_errors["KRG"] = 1.2
t_errors["MFK"] = 1e0
t_errors["KPLS"] = 1.2
t_errors["KPLSK"] = 1e0
t_errors["MGP"] = 1e0
t_errors["GEKPLS"] = 1.4
t_errors["GENN"] = 1.2
if compiled_available:
t_errors["IDW"] = 1e0
t_errors["RBF"] = 1e-2
t_errors["RMTC"] = 1e-1
t_errors["RMTB"] = 1e-1
e_errors = {}
e_errors["LS"] = 1.5
e_errors["QP"] = 1.5
e_errors["KRG"] = 2e-2
e_errors["MFK"] = 2e-2
e_errors["KPLS"] = 2e-2
e_errors["KPLSK"] = 2e-2
e_errors["MGP"] = 2e-2
e_errors["GEKPLS"] = 2e-2
e_errors["GENN"] = 2e-2
if compiled_available:
e_errors["IDW"] = 1e0
e_errors["RBF"] = 1e0
e_errors["RMTC"] = 2e-1
e_errors["RMTB"] = 3e-1
self.nt = nt
self.ne = ne
self.ndim = ndim
self.problems = problems
self.sms = sms
self.t_errors = t_errors
self.e_errors = e_errors
def run_test(self):
method_name = inspect.stack()[1][3]
pname = method_name.split("_")[1]
sname = method_name.split("_")[2]
prob = self.problems[pname]
sampling = LHS(xlimits=prob.xlimits, random_state=42)
xt = sampling(self.nt)
yt = prob(xt)
print(prob(xt, kx=0).shape)
for i in range(self.ndim):
yt = np.concatenate((yt, prob(xt, kx=i)), axis=1)
xe = sampling(self.ne)
ye = prob(xe)
sm0 = self.sms[sname]
sm = sm0.__class__()
sm.options = sm0.options.clone()
if sm.options.is_declared("design_space"):
sm.options["design_space"] = DesignSpace(prob.xlimits)
if sm.options.is_declared("xlimits"):
sm.options["xlimits"] = prob.xlimits
sm.options["print_global"] = False
if sname in ["KPLS", "KRG", "KPLSK", "GEKPLS"]:
optname = method_name.split("_")[3]
sm.options["hyper_opt"] = optname
sm.set_training_values(xt, yt[:, 0])
if sm.supports["training_derivatives"]:
for i in range(self.ndim):
sm.set_training_derivatives(xt, yt[:, i + 1], i)
with Silence():
sm.train()
t_error = compute_rms_error(sm)
e_error = compute_rms_error(sm, xe, ye)
if sm.supports["variances"]:
sm.predict_variances(xe)
# Some test case tolerance relaxations wrt to global tolerance values
if pname == "cos":
self.assertLessEqual(e_error, self.e_errors[sname] + 1.6)
elif pname == "tanh" and sname in ["KPLS", "GENN", "RMTB"]:
self.assertLessEqual(e_error, self.e_errors[sname] + 0.4)
elif pname == "exp" and sname in ["GENN"]:
self.assertLessEqual(e_error, self.e_errors[sname] + 0.2)
elif pname == "exp" and sname in ["RMTB"]:
self.assertLessEqual(e_error, self.e_errors[sname] + 0.5)
else:
self.assertLessEqual(e_error, self.e_errors[sname])
self.assertLessEqual(t_error, self.t_errors[sname])
def test_exp_LS(self):
self.run_test()
def test_exp_QP(self):
self.run_test()
def test_exp_KRG_Cobyla(self):
self.run_test()
def test_exp_KRG_TNC(self):
self.run_test()
def test_exp_KPLS_Cobyla(self):
self.run_test()
def test_exp_KPLS_TNC(self):
self.run_test()
def test_exp_KPLSK_Cobyla(self):
self.run_test()
def test_exp_KPLSK_TNC(self):
self.run_test()
def test_exp_MGP(self):
self.run_test()
def test_exp_GEKPLS_Cobyla(self):
self.run_test()
def test_exp_GEKPLS_TNC(self):
self.run_test()
@unittest.skipIf(int(os.getenv("RUN_SLOW", 0)) < 1, "too slow")
def test_exp_GENN(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_exp_IDW(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_exp_RBF(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_exp_RMTC(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_exp_RMTB(self):
self.run_test()
# --------------------------------------------------------------------
# Function: tanh
def test_tanh_LS(self):
self.run_test()
def test_tanh_QP(self):
self.run_test()
def test_tanh_KRG_Cobyla(self):
self.run_test()
def test_tanh_KRG_TNC(self):
self.run_test()
def test_tanh_KPLS_Cobyla(self):
self.run_test()
def test_tanh_KPLS_TNC(self):
self.run_test()
def test_tanh_KPLSK_Cobyla(self):
self.run_test()
def test_tanh_KPLSK_TNC(self):
self.run_test()
def test_tanh_MGP(self):
self.run_test()
def test_tanh_GEKPLS_Cobyla(self):
self.run_test()
def test_tanh_GEKPLS_TNC(self):
self.run_test()
@unittest.skipIf(int(os.getenv("RUN_SLOW", 0)) < 1, "too slow")
def test_tanh_GENN(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_tanh_IDW(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_tanh_RBF(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_tanh_RMTC(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_tanh_RMTB(self):
self.run_test()
# --------------------------------------------------------------------
# Function: cos
def test_cos_LS(self):
self.run_test()
def test_cos_QP(self):
self.run_test()
def test_cos_KRG_Cobyla(self):
self.run_test()
def test_cos_KRG_TNC(self):
self.run_test()
def test_cos_KPLS_Cobyla(self):
self.run_test()
def test_cos_KPLS_TNC(self):
self.run_test()
def test_cos_KPLSK_Cobyla(self):
self.run_test()
def test_cos_KPLSK_TNC(self):
self.run_test()
def test_cos_MGP(self):
self.run_test()
def test_cos_GEKPLS_Cobyla(self):
self.run_test()
def test_cos_GEKPLS_TNC(self):
self.run_test()
@unittest.skipIf(int(os.getenv("RUN_SLOW", 0)) < 1, "too slow")
def test_cos_GENN(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_cos_IDW(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_cos_RBF(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_cos_RMTC(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_cos_RMTB(self):
self.run_test()
    # NOTE(review): exact duplicate of the test_exp_RMTB defined earlier in this
    # class; this later definition silently shadows the identical earlier one.
    # It sits in the "cos" section, so it is likely copy-paste cruft — consider
    # removing it or renaming it to the intended case.
    @unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
    def test_exp_RMTB(self):
        self.run_test()
if __name__ == "__main__":
print_output = True
print("%6s %8s %18s %18s" % ("SM", "Problem", "Train. pt. error", "Test pt. error"))
unittest.main()
| 10,473 | 27.461957 | 88 | py |
smt | smt-master/smt/tests/test_derivs.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import numpy as np
import unittest
import inspect
from collections import OrderedDict
from smt.problems import Sphere
from smt.sampling_methods import LHS
from smt.utils.sm_test_case import SMTestCase
from smt.utils.silence import Silence
from smt.utils import compute_rms_error
from smt.utils.design_space import DesignSpace
from smt.applications import MFK
# The compiled (Fortran/C) surrogates are optional; tests guarded by
# `compiled_available` are skipped when they cannot be imported.
try:
    from smt.surrogate_models import IDW, RBF, RMTC, RMTB
    compiled_available = True
except ImportError:
    # Catch only ImportError: a bare `except:` would also swallow
    # SystemExit/KeyboardInterrupt and hide unrelated bugs raised on import.
    compiled_available = False
print_output = False
class Test(SMTestCase):
def setUp(self):
ndim = 2
nt = 5000
ne = 100
problems = OrderedDict()
problems["sphere"] = Sphere(ndim=ndim)
sms = OrderedDict()
if compiled_available:
sms["RBF"] = RBF()
sms["RMTC"] = RMTC()
sms["RMTB"] = RMTB()
sms["MFK"] = MFK(theta0=[1e-2] * ndim)
self.nt = nt
self.ne = ne
self.problems = problems
self.sms = sms
def run_test(self):
method_name = inspect.stack()[1][3]
pname = method_name.split("_")[1]
sname = method_name.split("_")[2]
prob = self.problems[pname]
sampling = LHS(xlimits=prob.xlimits)
np.random.seed(0)
xt = sampling(self.nt)
yt = prob(xt)
dyt = {}
for kx in range(prob.xlimits.shape[0]):
dyt[kx] = prob(xt, kx=kx)
np.random.seed(1)
xe = sampling(self.ne)
ye = prob(xe)
dye = {}
for kx in range(prob.xlimits.shape[0]):
dye[kx] = prob(xe, kx=kx)
sm0 = self.sms[sname]
sm = sm0.__class__()
sm.options = sm0.options.clone()
if sm.options.is_declared("design_space"):
sm.options["design_space"] = DesignSpace(prob.xlimits)
if sm.options.is_declared("xlimits"):
sm.options["xlimits"] = prob.xlimits
sm.options["print_global"] = False
sm.set_training_values(xt, yt)
with Silence():
sm.train()
t_error = compute_rms_error(sm)
e_error = compute_rms_error(sm, xe, ye)
e_error0 = compute_rms_error(sm, xe, dye[0], 0)
e_error1 = compute_rms_error(sm, xe, dye[1], 1)
if print_output:
print(
"%8s %6s %18.9e %18.9e %18.9e %18.9e"
% (pname[:6], sname, t_error, e_error, e_error0, e_error1)
)
self.assert_error(e_error0, 0.0, 25e-1)
self.assert_error(e_error1, 0.0, 25e-1)
# --------------------------------------------------------------------
# Function: sphere
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_sphere_RBF(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_sphere_RMTC(self):
self.run_test()
@unittest.skipIf(not compiled_available, "Compiled Fortran libraries not available")
def test_sphere_RMTB(self):
self.run_test()
if __name__ == "__main__":
print_output = True
print(
"%6s %8s %18s %18s %18s %18s"
% (
"SM",
"Problem",
"Train. pt. error",
"Test pt. error",
"Deriv 0 error",
"Deriv 1 error",
)
)
unittest.main()
| 3,494 | 24.698529 | 88 | py |
smt | smt-master/smt/problems/problem.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
Base class for benchmarking/test problems.
"""
from typing import Optional
import numpy as np
from smt.utils.options_dictionary import OptionsDictionary
from smt.utils.checks import ensure_2d_array
from smt.utils.design_space import BaseDesignSpace, DesignSpace
class Problem:
    """Base class for benchmarking/test problems.

    Subclasses implement ``_initialize`` (declare options), ``_setup``
    (fill ``xlimits`` or install a design space) and ``_evaluate``
    (compute function values and derivatives).
    """

    def __init__(self, **kwargs):
        """
        Constructor where values of options can be passed in.

        For the list of options, see the documentation for the problem being used.

        Parameters
        ----------
        **kwargs : named arguments
            Set of options that can be optionally set; each option must have been declared.

        Examples
        --------
        >>> from smt.problems import Sphere
        >>> prob = Sphere(ndim=3)
        """
        self.options = OptionsDictionary()
        self.options.declare("ndim", 1, types=int)
        self.options.declare("return_complex", False, types=bool)
        self._initialize()
        self.options.update(kwargs)

        # Numerical bounds; subclasses fill these in _setup() unless they
        # install a full design space via _set_design_space().
        self.xlimits = np.zeros((self.options["ndim"], 2))
        self._design_space = None

        # Populated by __call__: corrected inputs and activeness mask
        # (relevant for hierarchical design spaces).
        self.eval_x = None
        self.eval_is_acting = None

        self._setup()

    def _initialize(self) -> None:
        """
        Implemented by problem to declare options (optional).

        Examples
        --------
        self.options.declare('option_name', default_value, types=(bool, int), desc='description')
        """
        pass

    def _setup(self) -> None:
        """Implemented by problem to set bounds or a design space (optional)."""
        pass

    def _set_design_space(self, design_space: "BaseDesignSpace"):
        """
        Set the design space definition (best is to use the smt.utils.design_space.DesignSpace class directly) of
        this problem from the _setup function. If used, there is no need to set xlimits.
        """
        self._design_space = design_space
        self.options["ndim"] = len(design_space.design_variables)
        self.xlimits = design_space.get_num_bounds()

    @property
    def design_space(self) -> "BaseDesignSpace":
        """Gets the design space definitions as an instance of BaseDesignSpace"""
        if self._design_space is None:
            # Lazily wrap the plain numerical bounds in a continuous DesignSpace.
            self._design_space = DesignSpace(self.xlimits)
        return self._design_space

    def sample(self, n):
        """Draw n valid points from the design space; returns ndarray[n, nx]."""
        x, _ = self.design_space.sample_valid_x(n)
        return x

    def __call__(self, x: np.ndarray, kx: Optional[int] = None) -> np.ndarray:
        """
        Evaluate the function.

        The input vectors might be corrected if it is a hierarchical design space. You can get the corrected x and
        information about which variables are acting from: problem.eval_x and problem.eval_is_acting

        Parameters
        ----------
        x : ndarray[n, nx] or ndarray[n]
            Evaluation points where n is the number of evaluation points.
        kx : int or None
            Index of derivative (0-based) to return values with respect to.
            None means return function value rather than derivative.

        Returns
        -------
        ndarray[n, 1]
            Functions values if kx=None or derivative values if kx is an int.
        """
        x = ensure_2d_array(x, "x")
        if x.shape[1] != self.options["ndim"]:
            raise ValueError(
                "The second dimension of x should be %i" % self.options["ndim"]
            )
        if kx is not None:
            if not isinstance(kx, int) or kx < 0:
                raise TypeError("kx should be None or a non-negative int.")

        # Correct the design vector and get information about which design variables are active
        x_corr, self.eval_is_acting = self.design_space.correct_get_acting(x)
        self.eval_x = x_corr

        y = self._evaluate(x_corr, kx)
        # Problems typically compute in complex arithmetic (complex-step
        # derivatives); strip the imaginary part unless explicitly requested.
        if self.options["return_complex"]:
            return y
        else:
            return np.real(y)

    def _evaluate(self, x: np.ndarray, kx: Optional[int] = None) -> np.ndarray:
        """
        Implemented by subclasses to evaluate the function.

        Parameters
        ----------
        x : ndarray[n, nx]
            Evaluation points where n is the number of evaluation points.
        kx : int or None
            Index of derivative (0-based) to return values with respect to.
            None means return function value rather than derivative.

        Returns
        -------
        ndarray[n, 1]
            Functions values if kx=None or derivative values if kx is an int.

        Raises
        ------
        NotImplementedError
            Always, unless overridden by the subclass.
        """
        # NotImplementedError (instead of a bare Exception) makes the abstract
        # contract explicit; existing `except Exception` handlers still work.
        raise NotImplementedError(
            "_evaluate must be implemented by the Problem subclass"
        )
| 4,616 | 32.215827 | 114 | py |
smt | smt-master/smt/problems/water_flow.py | """
Author: Dr. Mohamed Amine Bouhlel <mbouhlel@umich.edu>
Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
Water flow problem from:
Liu, H., Xu, S., & Wang, X. Sampling strategies and metamodeling techniques for engineering design: comparison and application. In ASME Turbo Expo 2016: Turbomachinery Technical Conference and Exposition. American Society of Mechanical Engineers. June, 2016.
Morris, M. D., Mitchell, T. J., and Ylvisaker, D. Bayesian Design and Analysis of Computer Experiments: Use of Derivatives in Surface Prediction. Technometrics, 35(3), pp. 243-255. 1993.
"""
import numpy as np
from scipy.misc import derivative
from smt.problems.problem import Problem
class WaterFlow(Problem):
    """Borehole water-flow problem (8 inputs), evaluated analytically with
    complex-step (default) or finite-difference partial derivatives."""

    def _initialize(self):
        self.options.declare("name", "WaterFlow", types=str)
        # use_FD: real-valued finite differences instead of the complex step.
        self.options.declare("use_FD", False, types=bool)
        self.options["ndim"] = 8

    def _setup(self):
        assert self.options["ndim"] == 8, "ndim must be 8"
        # Lower / upper bounds of the 8 physical inputs.
        self.xlimits[:, 0] = [0.05, 100, 63070, 990, 63.1, 700, 1120, 9855]
        self.xlimits[:, 1] = [0.15, 50000, 115600, 1110, 116, 820, 1680, 12045]

    def _evaluate(self, x, kx):
        """
        Arguments
        ---------
        x : ndarray[ne, nx]
            Evaluation points.
        kx : int or None
            Index of derivative (0-based) to return values with respect to.
            None means return function value rather than derivative.

        Returns
        -------
        ndarray[ne, 1]
            Functions values if kx=None or derivative values if kx is an int.
        """
        ne, nx = x.shape
        y = np.zeros((ne, 1), complex)

        def partial_derivative(function, var=0, point=None):
            # Finite-difference derivative of `function` w.r.t. its `var`-th
            # positional argument, evaluated at `point`.
            # Fixed: use the `function` argument (it was previously ignored in
            # favor of a closed-over name) and drop the mutable default list.
            args = list(point)

            def wraps(z):
                args[var] = z
                return function(*args)

            return derivative(wraps, point[var], dx=1e-6)

        def func(x0, x1, x2, x3, x4, x5, x6, x7):
            return (
                2
                * np.pi
                * x2
                * (x3 - x5)
                / (
                    np.log(x1 / x0)
                    * (1 + 2 * x6 * x2 / (np.log(x1 / x0) * x0**2 * x7) + x2 / x4)
                )
            )

        for i in range(ne):
            point = [x[i, j] for j in range(nx)]
            if kx is None:
                y[i, 0] = func(*point)
            elif self.options["use_FD"]:
                y[i, 0] = partial_derivative(
                    func, var=kx, point=np.real(np.array(point))
                )
            else:
                # Complex-step derivative: exact to machine precision.
                ch = 1e-20
                point[kx] += complex(0, ch)
                y[i, 0] = np.imag(func(*point)) / ch
        return y
| 3,066 | 32.336957 | 258 | py |
smt | smt-master/smt/problems/wing_weight.py | """
Author: Dr. Mohamed Amine Bouhlel <mbouhlel@umich.edu>
Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
Aircraft wing weight problem from:
Liu, H., Xu, S., & Wang, X. Sampling strategies and metamodeling techniques for engineering design: comparison and application. In ASME Turbo Expo 2016: Turbomachinery Technical Conference and Exposition. American Society of Mechanical Engineers. June, 2016.
Forrester, A., Sobester, A., and Keane, A., 2008,
Engineering Design Via Surrogate Modelling: A Practical Guide, John Wiley & Sons, United Kingdom.
"""
import numpy as np
from scipy.misc import derivative
from smt.problems.problem import Problem
class WingWeight(Problem):
    """Aircraft wing-weight problem (10 inputs), evaluated analytically with
    complex-step (default) or finite-difference partial derivatives."""

    def _initialize(self):
        self.options.declare("name", "WingWeight", types=str)
        # use_FD: real-valued finite differences instead of the complex step.
        self.options.declare("use_FD", False, types=bool)
        self.options["ndim"] = 10

    def _setup(self):
        assert self.options["ndim"] == 10, "ndim must be 10"
        # Lower / upper bounds of the 10 physical inputs.
        self.xlimits[:, 0] = [150, 220, 6, -10, 16, 0.5, 0.08, 2.5, 1700, 0.025]
        self.xlimits[:, 1] = [200, 300, 10, 10, 45, 1, 0.18, 6, 2500, 0.08]

    def _evaluate(self, x, kx):
        """
        Arguments
        ---------
        x : ndarray[ne, nx]
            Evaluation points.
        kx : int or None
            Index of derivative (0-based) to return values with respect to.
            None means return function value rather than derivative.

        Returns
        -------
        ndarray[ne, 1]
            Functions values if kx=None or derivative values if kx is an int.
        """
        ne, nx = x.shape
        y = np.zeros((ne, 1), complex)

        def deg2rad(deg):
            # Local helper kept (rather than np.deg2rad) to preserve the exact
            # floating-point evaluation order of the original formula.
            rad = deg / 180.0 * np.pi
            return rad

        def partial_derivative(function, var=0, point=None):
            # Finite-difference derivative of `function` w.r.t. its `var`-th
            # positional argument, evaluated at `point`.
            # Fixed: use the `function` argument (it was previously ignored in
            # favor of a closed-over name) and drop the mutable default list.
            args = list(point)

            def wraps(z):
                args[var] = z
                return function(*args)

            return derivative(wraps, point[var], dx=1e-6)

        def func(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9):
            return (
                0.036
                * x0**0.758
                * x1**0.0035
                * (x2 / np.cos(deg2rad(x3)) ** 2) ** (0.6)
                * x4**0.006
                * x5**0.04
                * (100 * x6 / np.cos(deg2rad(x3))) ** (-0.3)
                * (x7 * x8) ** 0.49
                + x0 * x9
            )

        for i in range(ne):
            point = [x[i, j] for j in range(nx)]
            if kx is None:
                y[i, 0] = func(*point)
            elif self.options["use_FD"]:
                y[i, 0] = partial_derivative(
                    func, var=kx, point=np.real(np.array(point))
                )
            else:
                # Complex-step derivative: exact to machine precision.
                ch = 1e-20
                point[kx] += complex(0, ch)
                y[i, 0] = np.imag(func(*point)) / ch
        return y
| 3,273 | 31.74 | 258 | py |
smt | smt-master/smt/problems/tensor_product.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
Tensor-product of cos, exp, or tanh.
"""
import numpy as np
from smt.problems.problem import Problem
class TensorProduct(Problem):
    """Tensor product of a 1-D kernel (cos / exp / tanh / gaussian) over all
    inputs, defined on the hypercube [-1, 1]^ndim."""

    def _initialize(self):
        self.options.declare("name", "TP", types=str)
        self.options.declare("func", values=["cos", "exp", "tanh", "gaussian"])
        self.options.declare("width", 1.0, types=(float, int))

    def _setup(self):
        self.xlimits[:, 0] = -1.0
        self.xlimits[:, 1] = 1.0

        a = self.options["width"]
        # (value, derivative) pairs for each supported 1-D kernel.
        kernels = {
            "cos": (
                lambda v: np.cos(a * np.pi * v),
                lambda v: -a * np.pi * np.sin(a * np.pi * v),
            ),
            "exp": (
                lambda v: np.exp(a * v),
                lambda v: a * np.exp(a * v),
            ),
            "tanh": (
                lambda v: np.tanh(a * v),
                lambda v: a / np.cosh(a * v) ** 2,
            ),
            "gaussian": (
                lambda v: np.exp(-2.0 * a * v**2),
                lambda v: -4.0 * a * v * np.exp(-2.0 * a * v**2),
            ),
        }
        pair = kernels.get(self.options["func"])
        if pair is not None:
            self.func, self.dfunc = pair

    def _evaluate(self, x, kx):
        """
        Arguments
        ---------
        x : ndarray[ne, nx]
            Evaluation points.
        kx : int or None
            Index of derivative (0-based) to return values with respect to.
            None means return function value rather than derivative.

        Returns
        -------
        ndarray[ne, 1]
            Functions values if kx=None or derivative values if kx is an int.
        """
        n_pts, n_dims = x.shape
        out = np.ones((n_pts, 1), complex)
        if kx is None:
            # Value: product of the 1-D kernel over every dimension.
            out[:, 0] = np.prod(self.func(x), 1).T
        else:
            # Derivative w.r.t. dimension kx: that factor uses the kernel's
            # derivative, all other factors use the kernel value.
            for dim in range(n_dims):
                factor = self.dfunc if dim == kx else self.func
                out[:, 0] *= factor(x[:, dim])
        return out
| 2,044 | 30.461538 | 79 | py |
smt | smt-master/smt/problems/ndim_rosenbrock.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
N-dimensional Rosenbrock problem.
"""
import numpy as np
from smt.utils.options_dictionary import OptionsDictionary
from smt.problems.problem import Problem
from smt.problems.reduced_problem import ReducedProblem
from smt.problems.rosenbrock import Rosenbrock
class NdimRosenbrock(Problem):
    # N-dimensional Rosenbrock built by reducing an (ndim+1)-dimensional
    # Rosenbrock: dimensions 1..ndim are kept free and dimension 0 is held
    # fixed by ReducedProblem at (1 - w) * lower + w * upper.
    def __init__(self, ndim=1, w=0.2):
        super().__init__()
        self.problem = ReducedProblem(
            Rosenbrock(ndim=ndim + 1), np.arange(1, ndim + 1), w=w
        )
        # Rebuild the options and limits so they describe the reduced problem
        # (this intentionally replaces what super().__init__() created).
        self.options = OptionsDictionary()
        self.options.declare("ndim", ndim, types=int)
        self.options.declare("return_complex", False, types=bool)
        self.options.declare("name", "NdimRosenbrock", types=str)
        self.xlimits = self.problem.xlimits
    def _evaluate(self, x, kx):
        # Delegate evaluation (and derivatives) to the wrapped ReducedProblem.
        return self.problem._evaluate(x, kx)
| 932 | 28.15625 | 66 | py |
smt | smt-master/smt/problems/rosenbrock.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
Multi-dimensional Rosenbrock function.
"""
import numpy as np
from smt.problems.problem import Problem
class Rosenbrock(Problem):
    """Multi-dimensional Rosenbrock function on [-2, 2]^ndim, with analytic
    partial derivatives."""

    def _initialize(self):
        self.options.declare("name", "Rosenbrock", types=str)

    def _setup(self):
        self.xlimits[:, 0] = -2.0
        self.xlimits[:, 1] = 2.0

    def _evaluate(self, x, kx):
        """
        Arguments
        ---------
        x : ndarray[ne, nx]
            Evaluation points.
        kx : int or None
            Index of derivative (0-based) to return values with respect to.
            None means return function value rather than derivative.

        Returns
        -------
        ndarray[ne, 1]
            Functions values if kx=None or derivative values if kx is an int.
        """
        n_pts, n_dims = x.shape
        out = np.zeros((n_pts, 1), complex)

        if kx is None:
            # f(x) = sum_j 100 (x_{j+1} - x_j^2)^2 + (1 - x_j)^2
            for j in range(n_dims - 1):
                xj = x[:, j]
                out[:, 0] += 100.0 * (x[:, j + 1] - xj**2) ** 2 + (1 - xj) ** 2
        else:
            # df/dx_k collects contributions from the terms j=k and j=k-1.
            xk = x[:, kx]
            if kx < n_dims - 1:
                out[:, 0] += -400.0 * (x[:, kx + 1] - xk**2) * xk - 2 * (1 - xk)
            if kx > 0:
                out[:, 0] += 200.0 * (xk - x[:, kx - 1] ** 2)
        return out
| 1,415 | 25.716981 | 85 | py |
smt | smt-master/smt/problems/hierarchical_goldstein.py | """
Author: P.Saves and J.H. Bussemaker
This package is distributed under New BSD license.
Cantilever beam problem from:
P. Saves, Y. Diouane, N. Bartoli, T. Lefebvre, and J. Morlier. A mixed-categorical correlation kernel for gaussian process, 2022
"""
import numpy as np
from smt.problems.problem import Problem
from smt.utils.design_space import (
DesignSpace,
OrdinalVariable,
FloatVariable,
CategoricalVariable,
IntegerVariable,
)
class HierarchicalGoldstein(Problem):
    # Hierarchical Goldstein test problem: a categorical meta variable (x0)
    # selects which of four sub-models is evaluated, and decides which of
    # the continuous/integer variables are acting.
    def _setup(self):
        ds = DesignSpace(
            [
                CategoricalVariable(values=[0, 1, 2, 3]),  # meta
                OrdinalVariable(values=[0, 1]),  # x1 (cosine-term switch)
                FloatVariable(0, 100),  # x2
                FloatVariable(0, 100),  # x3
                FloatVariable(0, 100),  # x4
                FloatVariable(0, 100),  # x5
                FloatVariable(0, 100),  # x6
                IntegerVariable(0, 2),  # x7
                IntegerVariable(0, 2),  # x8
                IntegerVariable(0, 2),  # x9
                IntegerVariable(0, 2),  # x10
            ]
        )
        # x4 is acting if meta == 1, 3
        ds.declare_decreed_var(decreed_var=4, meta_var=0, meta_value=[1, 3])
        # x5 is acting if meta == 2, 3
        ds.declare_decreed_var(decreed_var=5, meta_var=0, meta_value=[2, 3])
        # x7 is acting if meta == 0, 2
        ds.declare_decreed_var(decreed_var=7, meta_var=0, meta_value=[0, 2])
        # x8 is acting if meta == 0, 1
        ds.declare_decreed_var(decreed_var=8, meta_var=0, meta_value=[0, 1])
        self._set_design_space(ds)
    def _evaluate(self, x: np.ndarray, kx=0) -> np.ndarray:
        # NOTE(review): kx is accepted for interface compatibility but
        # ignored — derivatives are not implemented for this problem.
        def H(x1, x2, x3, x4, z3, z4, x5, cos_term):
            # Full Goldstein-like polynomial; z3/z4 appear as exponents.
            h = (
                53.3108
                + 0.184901 * x1
                - 5.02914 * x1**3 * 10 ** (-6)
                + 7.72522 * x1**z3 * 10 ** (-8)
                - 0.0870775 * x2
                - 0.106959 * x3
                + 7.98772 * x3**z4 * 10 ** (-6)
                + 0.00242482 * x4
                + 1.32851 * x4**3 * 10 ** (-6)
                - 0.00146393 * x1 * x2
                - 0.00301588 * x1 * x3
                - 0.00272291 * x1 * x4
                + 0.0017004 * x2 * x3
                + 0.0038428 * x2 * x4
                - 0.000198969 * x3 * x4
                + 1.86025 * x1 * x2 * x3 * 10 ** (-5)
                - 1.88719 * x1 * x2 * x4 * 10 ** (-6)
                + 2.50923 * x1 * x3 * x4 * 10 ** (-5)
                - 5.62199 * x2 * x3 * x4 * 10 ** (-5)
            )
            if cos_term:
                # Optional oscillatory term switched on by x1 (ordinal 0/1).
                h += 5.0 * np.cos(2.0 * np.pi * (x5 / 100.0)) - 2.0
            return h
        def f1(x1, x2, z1, z2, z3, z4, x5, cos_term):
            # Both x3 and x4 are inactive: picked from {20, 50, 80} by z1/z2.
            c1 = z1 == 0
            c2 = z1 == 1
            c3 = z1 == 2
            c4 = z2 == 0
            c5 = z2 == 1
            c6 = z2 == 2
            y = (
                c4
                * (
                    c1 * H(x1, x2, 20, 20, z3, z4, x5, cos_term)
                    + c2 * H(x1, x2, 50, 20, z3, z4, x5, cos_term)
                    + c3 * H(x1, x2, 80, 20, z3, z4, x5, cos_term)
                )
                + c5
                * (
                    c1 * H(x1, x2, 20, 50, z3, z4, x5, cos_term)
                    + c2 * H(x1, x2, 50, 50, z3, z4, x5, cos_term)
                    + c3 * H(x1, x2, 80, 50, z3, z4, x5, cos_term)
                )
                + c6
                * (
                    c1 * H(x1, x2, 20, 80, z3, z4, x5, cos_term)
                    + c2 * H(x1, x2, 50, 80, z3, z4, x5, cos_term)
                    + c3 * H(x1, x2, 80, 80, z3, z4, x5, cos_term)
                )
            )
            return y
        def f2(x1, x2, x3, z2, z3, z4, x5, cos_term):
            # x3 is active; x4 is picked from {20, 50, 80} by z2.
            c4 = z2 == 0
            c5 = z2 == 1
            c6 = z2 == 2
            y = (
                c4 * H(x1, x2, x3, 20, z3, z4, x5, cos_term)
                + c5 * H(x1, x2, x3, 50, z3, z4, x5, cos_term)
                + c6 * H(x1, x2, x3, 80, z3, z4, x5, cos_term)
            )
            return y
        def f3(x1, x2, x4, z1, z3, z4, x5, cos_term):
            # x4 is active; x3 is picked from {20, 50, 80} by z1.
            c1 = z1 == 0
            c2 = z1 == 1
            c3 = z1 == 2
            y = (
                c1 * H(x1, x2, 20, x4, z3, z4, x5, cos_term)
                + c2 * H(x1, x2, 50, x4, z3, z4, x5, cos_term)
                + c3 * H(x1, x2, 80, x4, z3, z4, x5, cos_term)
            )
            return y
        # Dispatch each row on its meta variable xi[0] (0..3).
        y = []
        for xi in x:
            if xi[0] == 0:
                y.append(
                    f1(xi[2], xi[3], xi[7], xi[8], xi[9], xi[10], xi[6], cos_term=xi[1])
                )
            elif xi[0] == 1:
                y.append(
                    f2(xi[2], xi[3], xi[4], xi[8], xi[9], xi[10], xi[6], cos_term=xi[1])
                )
            elif xi[0] == 2:
                y.append(
                    f3(xi[2], xi[3], xi[5], xi[7], xi[9], xi[10], xi[6], cos_term=xi[1])
                )
            elif xi[0] == 3:
                # Both x4 and x5 are active: evaluate H directly.
                y.append(
                    H(xi[2], xi[3], xi[4], xi[5], xi[9], xi[10], xi[6], cos_term=xi[1])
                )
            else:
                raise ValueError(f"Unexpected x0: {xi[0]}")
        return np.array(y)
| 5,183 | 33.105263 | 128 | py |
smt | smt-master/smt/problems/robot_arm.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
Robot arm problem from:
Liu, H., Xu, S., & Wang, X. Sampling strategies and metamodeling techniques for engineering design: comparison and application. In ASME Turbo Expo 2016: Turbomachinery Technical Conference and Exposition. American Society of Mechanical Engineers. June, 2016.
An, J., and Owen, A. Quasi-Regression. Journal of complexity, 17(4), pp. 588-607, 2001.
"""
import numpy as np
from smt.problems.problem import Problem
class RobotArm(Problem):
    # Planar robot arm: inputs alternate (length, angle) per segment and the
    # output is the distance of the end effector from the origin.
    def _initialize(self):
        self.options.declare("name", "RobotArm", types=str)
        self.options.declare("ndim", 2, types=int)
    def _setup(self):
        assert self.options["ndim"] % 2 == 0, "ndim must be divisible by 2"
        # Length L
        self.xlimits[0::2, 0] = 0.0
        self.xlimits[0::2, 1] = 1.0
        # Angle theta
        self.xlimits[1::2, 0] = 0.0
        self.xlimits[1::2, 1] = 2 * np.pi
    def _evaluate(self, x, kx):
        """
        Arguments
        ---------
        x : ndarray[ne, nx]
            Evaluation points.
        kx : int or None
            Index of derivative (0-based) to return values with respect to.
            None means return function value rather than derivative.

        Returns
        -------
        ndarray[ne, 1]
            Functions values if kx=None or derivative values if kx is an int.
        """
        ne, nx = x.shape
        nseg = int(self.options["ndim"] / 2)
        # End-effector position: segment iseg points along the cumulative
        # angle theta_0 + ... + theta_iseg (the strided slice [1 : 2*iseg+2 : 2]
        # selects those angle columns).
        pos_x = np.zeros(ne, complex)
        pos_y = np.zeros(ne, complex)
        for iseg in range(nseg):
            L = x[:, 2 * iseg + 0]
            pos_x += L * np.cos(np.sum(x[:, 1 : 2 * iseg + 2 : 2], axis=1))
            pos_y += L * np.sin(np.sum(x[:, 1 : 2 * iseg + 2 : 2], axis=1))
        y = np.zeros((ne, 1), complex)
        d_pos_x = np.zeros(ne, complex)
        d_pos_y = np.zeros(ne, complex)
        if kx is None:
            # Distance from origin.
            y[:, 0] = (pos_x**2 + pos_y**2) ** 0.5
        else:
            # Chain rule: d|p|/dv = (px dpx/dv + py dpy/dv) / |p|.
            kseg = int(np.floor(kx / 2))
            if kx % 2 == 0:
                # Derivative w.r.t. a segment length.
                d_pos_x[:] += np.cos(np.sum(x[:, 1 : 2 * kseg + 2 : 2], axis=1))
                d_pos_y[:] += np.sin(np.sum(x[:, 1 : 2 * kseg + 2 : 2], axis=1))
                y[:, 0] += pos_x / (pos_x**2 + pos_y**2) ** 0.5 * d_pos_x
                y[:, 0] += pos_y / (pos_x**2 + pos_y**2) ** 0.5 * d_pos_y
            elif kx % 2 == 1:
                # Derivative w.r.t. an angle: affects segment kseg and all
                # segments after it (their cumulative angle includes theta_kseg).
                for iseg in range(nseg):
                    L = x[:, 2 * iseg + 0]
                    if kseg <= iseg:
                        d_pos_x[:] -= L * np.sin(
                            np.sum(x[:, 1 : 2 * iseg + 2 : 2], axis=1)
                        )
                        d_pos_y[:] += L * np.cos(
                            np.sum(x[:, 1 : 2 * iseg + 2 : 2], axis=1)
                        )
                y[:, 0] += pos_x / (pos_x**2 + pos_y**2) ** 0.5 * d_pos_x
                y[:, 0] += pos_y / (pos_x**2 + pos_y**2) ** 0.5 * d_pos_y
        return y
| 3,018 | 35.373494 | 258 | py |
smt | smt-master/smt/problems/neural_network.py | """
Author: P.Saves
This package is distributed under New BSD license.
Multi-Layer Perceptron problem from:
C. Audet, E. Hall e-Hannan, and S. Le Digabel. A general mathematical framework for constrained mixed-variable blackbox optimization problems with meta and categorical variables. Operations Research Forum,499
4:137, 2023.
"""
import numpy as np
from smt.problems.problem import Problem
from smt.utils.design_space import (
DesignSpace,
OrdinalVariable,
FloatVariable,
CategoricalVariable,
IntegerVariable,
)
class HierarchicalNeuralNetwork(Problem):
    # Hierarchical MLP test problem: x0 (number of layers, 1..3) decides
    # whether the layer variables x6 and x7 are acting.
    def _initialize(self):
        self.options.declare("name", "HierarchicalNeuralNetwork", types=str)
    def _setup(self):
        design_space = DesignSpace(
            [
                OrdinalVariable(values=[1, 2, 3]),  # x0
                FloatVariable(-5, 2),  # x1
                FloatVariable(-5, 2),  # x2
                OrdinalVariable(values=[8, 16, 32, 64, 128, 256]),  # x3
                CategoricalVariable(values=["ReLU", "SELU", "ISRLU"]),  # x4
                IntegerVariable(0, 5),  # x5
                IntegerVariable(0, 5),  # x6
                IntegerVariable(0, 5),  # x7
            ]
        )
        # x6 is active when x0 >= 2
        design_space.declare_decreed_var(decreed_var=6, meta_var=0, meta_value=[2, 3])
        # x7 is active when x0 >= 3
        design_space.declare_decreed_var(decreed_var=7, meta_var=0, meta_value=3)
        self._set_design_space(design_space)
    def _evaluate(self, x, kx=0):
        """
        Arguments
        ---------
        x : ndarray[ne, nx]
            Evaluation points.
        Returns
        -------
        ndarray[ne, 1]
            Functions values.
        """
        # NOTE(review): kx is accepted for interface compatibility but
        # ignored — derivatives are not implemented for this problem.
        ds = self.design_space
        def f_neu(x1, x2, x3, x4):
            # Shared term; the branch depends on the activation category x4.
            if x4 == "ReLU":
                return 2 * x1 + x2 - 0.5 * x3
            elif x4 == "SELU":
                return -x1 + 2 * x2 - 0.5 * x3
            elif x4 == "ISRLU":
                return -x1 + x2 + 0.5 * x3
            else:
                raise ValueError(f"Unexpected x4: {x4}")
        def f1(x1, x2, x3, x4, x5):
            # One-layer variant.
            return f_neu(x1, x2, x3, x4) + x5**2
        def f2(x1, x2, x3, x4, x5, x6):
            # Two-layer variant (adds the x6 contribution).
            return f_neu(x1, x2, x3, x4) + (x5**2) + 0.3 * x6
        def f3(x1, x2, x3, x4, x5, x6, x7):
            # Three-layer variant (adds the x6 and x7 contributions).
            return f_neu(x1, x2, x3, x4) + (x5**2) + 0.3 * x6 - 0.1 * x7**3
        def f(X):
            y = []
            # Map encoded ordinal/categorical columns back to their values.
            x0_decoded = ds.decode_values(X, i_dv=0)
            x3_decoded = ds.decode_values(X, i_dv=3)
            x4_decoded = ds.decode_values(X, i_dv=4)
            for i, x in enumerate(X):
                x0 = x0_decoded[i]
                x3 = x3_decoded[i]
                x4 = x4_decoded[i]
                if x0 == 1:
                    y.append(f1(x[1], x[2], x3, x4, x[5]))
                elif x0 == 2:
                    y.append(f2(x[1], x[2], x3, x4, x[5], x[6]))
                elif x0 == 3:
                    y.append(f3(x[1], x[2], x3, x4, x[5], x[6], x[7]))
                else:
                    raise ValueError(f"Unexpected x0 value: {x0}")
            return np.array(y)
        return f(x)
| 3,178 | 31.111111 | 208 | py |
smt | smt-master/smt/problems/branin.py | """
Author: Remi Lafage <remi.lafage@onera.fr>
This package is distributed under New BSD license.
Branin function.
"""
import numpy as np
from smt.problems.problem import Problem
class Branin(Problem):
    """Two-dimensional Branin function on [-5, 10] x [0, 15], with analytic
    partial derivatives."""

    def _initialize(self):
        self.options.declare("ndim", 2, values=[2], types=int)
        self.options.declare("name", "Branin", types=str)

    def _setup(self):
        assert self.options["ndim"] == 2, "ndim must be 2"
        self.xlimits[0, :] = [-5.0, 10]
        self.xlimits[1, :] = [0.0, 15]

    def _evaluate(self, x, kx):
        """
        Arguments
        ---------
        x : ndarray[ne, 2]
            Evaluation points.
        kx : int or None
            Index of derivative (0-based) to return values with respect to.
            None means return function value rather than derivative.

        Returns
        -------
        ndarray[ne, 1]
            Functions values if kx=None or derivative values if kx is an int.
        """
        ne, nx = x.shape
        assert nx == 2, "x.shape[1] must be 2"

        y = np.zeros((ne, 1), complex)
        x1 = x[:, 0]
        x2 = x[:, 1]

        # f(x) = (x2 - b x1^2 + c x1 - 6)^2 + 10 (1 - t) cos(x1) + 10
        b = 5.1 / (4.0 * (np.pi) ** 2)
        c = 5.0 / np.pi
        t = 1.0 / (8.0 * np.pi)
        u = x2 - b * x1**2 + c * x1 - 6

        if kx is None:
            y[:, 0] = u**2 + (10.0 * (1.0 - t) * np.cos(x1) + 10)
        else:
            assert kx in [0, 1], "kx must be None, 0 or 1"
            if kx == 0:
                # Chain rule on the quadratic term plus the cosine term.
                y[:, 0] = 2 * (-2 * b * x1 + c) * u - 10.0 * (1.0 - t) * np.sin(x1)
            else:  # kx == 1
                y[:, 0] = 2 * u
        return y
| 1,679 | 27 | 77 | py |
smt | smt-master/smt/problems/mixed_cantilever_beam.py | """
Author: P.Saves
This package is distributed under New BSD license.
Cantilever beam problem from:
P. Saves, Y. Diouane, N. Bartoli, T. Lefebvre, and J. Morlier. A mixed-categorical correlation kernel for gaussian process, 2022
"""
import numpy as np
from smt.problems.problem import Problem
from smt.utils.design_space import DesignSpace, FloatVariable, CategoricalVariable
class MixedCantileverBeam(Problem):
    """Cantilever-beam tip-deflection problem with a categorical choice among
    12 cross-section profiles plus continuous length and section area."""

    def _initialize(self):
        self.options.declare("name", "MixedCantileverBeam", types=str)
        self.options.declare("P", 50e3, types=(int, float), desc="Tip load (50 kN)")
        self.options.declare(
            "E", 200e9, types=(int, float), desc="Modulus of elast. (200 GPa)"
        )

    def _setup(self):
        self.options["ndim"] = 3
        # Inertia coefficient for each of the 12 profiles (indexed by the
        # categorical variable, 1-based).
        self.listI = [
            0.0833,
            0.139,
            0.380,
            0.0796,
            0.133,
            0.363,
            0.0859,
            0.136,
            0.360,
            0.0922,
            0.138,
            0.369,
        ]
        profile_names = [str(i) for i in range(1, 13)]
        self._set_design_space(
            DesignSpace(
                [
                    CategoricalVariable(values=profile_names),
                    FloatVariable(10.0, 20.0),
                    FloatVariable(1.0, 2.0),
                ]
            )
        )

    def _evaluate(self, x, kx=0):
        """
        Arguments
        ---------
        x : ndarray[ne, nx]
            Evaluation points.

        Returns
        -------
        ndarray[ne, 1]
            Functions values.
        """
        load = self.options["P"]
        modulus = self.options["E"]
        profile = np.int64(x[:, 0]) - 1  # categorical value -> 0-based index
        length = x[:, 1]
        area = x[:, 2]
        inertia = np.asarray(self.listI)[profile]
        # Tip deflection: P L^3 / (3 E S^2 I).
        return (load * length**3) / (3 * modulus * area**2 * inertia)
| 1,822 | 25.42029 | 128 | py |
smt | smt-master/smt/problems/reduced_problem.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
Reduced problem class - selects a subset of input variables.
"""
import numpy as np
from smt.utils.options_dictionary import OptionsDictionary
from smt.problems.problem import Problem
class ReducedProblem(Problem):
    # Wraps another Problem and exposes only a subset of its input dimensions;
    # the remaining inputs are held fixed at a point interpolated between the
    # wrapped problem's bounds.
    def __init__(self, problem, dims, w=0.2):
        """
        Arguments
        ---------
        problem : Problem
            Pointer to the Problem object being wrapped.
        dims : int or list/tuple of ints
            Either the number of dimensions or a list of the dimension indices that this
            problem uses.
        w : float
            The value to use for all unaccounted for inputs where 0/1 is lower/upper bound.
        """
        super().__init__()
        self.problem = problem
        self.w = w
        if isinstance(dims, int):
            # Keep the first `dims` dimensions of the wrapped problem.
            self.dims = np.arange(dims)
            assert dims <= problem.options["ndim"]
        elif isinstance(dims, (list, tuple, np.ndarray)):
            # Keep an explicit list of dimension indices.
            self.dims = np.array(dims, int)
            assert np.max(dims) < problem.options["ndim"]
        else:
            raise ValueError("dims is invalid")
        # Rebuild options/limits for the reduced view (this intentionally
        # replaces what super().__init__() created).
        self.options = OptionsDictionary()
        self.options.declare("ndim", len(self.dims), types=int)
        self.options.declare("return_complex", False, types=bool)
        self.options.declare("name", "R_" + self.problem.options["name"], types=str)
        self.xlimits = np.zeros((self.options["ndim"], 2))
        for idim, idim_reduced in enumerate(self.dims):
            self.xlimits[idim, :] = problem.xlimits[idim_reduced, :]
    def _evaluate(self, x, kx):
        """
        Arguments
        ---------
        x : ndarray[ne, nx]
            Evaluation points.
        kx : int or None
            Index of derivative (0-based) to return values with respect to.
            None means return function value rather than derivative.

        Returns
        -------
        ndarray[ne, 1]
            Functions values if kx=None or derivative values if kx is an int.
        """
        ne, nx = x.shape
        nx_prob = self.problem.options["ndim"]
        # Start with every wrapped-problem input fixed at
        # (1 - w) * lower + w * upper, then overwrite the kept dimensions.
        x_prob = np.zeros((ne, nx_prob), complex)
        for ix in range(nx_prob):
            x_prob[:, ix] = (1 - self.w) * self.problem.xlimits[
                ix, 0
            ] + self.w * self.problem.xlimits[ix, 1]
        for ix in range(nx):
            x_prob[:, self.dims[ix]] = x[:, ix]
        # Map the derivative index to the wrapped problem's dimension.
        if kx is None:
            y = self.problem._evaluate(x_prob, None)
        else:
            y = self.problem._evaluate(x_prob, self.dims[kx])
        return y
| 2,647 | 31.292683 | 91 | py |
smt | smt-master/smt/problems/water_flow_lfidelity.py | """
Author: Dr. Mohamed Amine Bouhlel <mbouhlel@umich.edu>
Dr. John T. Hwang <hwangjt@umich.edu>
Water flow problem from:
Xiong, S., Qian, P. Z., & Wu, C. J. (2013). Sequential design and analysis of high-accuracy and low-accuracy computer codes. Technometrics, 55(1), 37-46.
"""
import numpy as np
from scipy.misc import derivative
from smt.problems.problem import Problem
class WaterFlowLFidelity(Problem):
    """Low-fidelity variant of the borehole water-flow problem (8 inputs),
    with complex-step (default) or finite-difference partial derivatives."""

    def _initialize(self):
        self.options.declare("name", "WaterFlowLFidelity", types=str)
        # use_FD: real-valued finite differences instead of the complex step.
        self.options.declare("use_FD", False, types=bool)
        self.options["ndim"] = 8

    def _setup(self):
        assert self.options["ndim"] == 8, "ndim must be 8"
        # Same bounds as the high-fidelity WaterFlow problem.
        self.xlimits[:, 0] = [0.05, 100, 63070, 990, 63.1, 700, 1120, 9855]
        self.xlimits[:, 1] = [0.15, 50000, 115600, 1110, 116, 820, 1680, 12045]

    def _evaluate(self, x, kx):
        """
        Arguments
        ---------
        x : ndarray[ne, nx]
            Evaluation points.
        kx : int or None
            Index of derivative (0-based) to return values with respect to.
            None means return function value rather than derivative.

        Returns
        -------
        ndarray[ne, 1]
            Functions values if kx=None or derivative values if kx is an int.
        """
        ne, nx = x.shape
        y = np.zeros((ne, 1), complex)

        def partial_derivative(function, var=0, point=None):
            # Finite-difference derivative of `function` w.r.t. its `var`-th
            # positional argument, evaluated at `point`.
            # Fixed: use the `function` argument (it was previously ignored in
            # favor of a closed-over name) and drop the mutable default list.
            args = list(point)

            def wraps(z):
                args[var] = z
                return function(*args)

            return derivative(wraps, point[var], dx=1e-6)

        def func(x0, x1, x2, x3, x4, x5, x6, x7):
            # Low-fidelity model: altered leading factor and offset compared
            # to the high-fidelity borehole formula.
            return (
                5
                * x2
                * (x3 - x5)
                / (
                    np.log(x1 / x0)
                    * (1.5 + 2 * x6 * x2 / (np.log(x1 / x0) * x0**2 * x7) + x2 / x4)
                )
            )

        for i in range(ne):
            point = [x[i, j] for j in range(nx)]
            if kx is None:
                y[i, 0] = func(*point)
            elif self.options["use_FD"]:
                y[i, 0] = partial_derivative(
                    func, var=kx, point=np.real(np.array(point))
                )
            else:
                # Complex-step derivative: exact to machine precision.
                ch = 1e-20
                point[kx] += complex(0, ch)
                y[i, 0] = np.imag(func(*point)) / ch
        return y
| 2,718 | 29.897727 | 153 | py |
smt | smt-master/smt/problems/torsion_vibration.py | """
Author: Dr. Mohamed Amine Bouhlel <mbouhlel@umich.edu>
Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
Torsion vibration problem from:
Liu, H., Xu, S., & Wang, X. Sampling strategies and metamodeling techniques for engineering design: comparison and application. In ASME Turbo Expo 2016: Turbomachinery Technical Conference and Exposition. American Society of Mechanical Engineers. June, 2016.
Wang, L., Beeson, D., Wiggs, G., and Rayasam, M. A Comparison of Metamodeling Methods Using Practical Industry Requirements. In Proceedings of the 47th AIAA/ASME/ASCE/AHS/ASC structures, structural dynamics, and materials conference, Newport, RI, pp. AIAA 2006-1811.
"""
import numpy as np
from scipy.misc import derivative
from smt.problems.problem import Problem
class TorsionVibration(Problem):
    """Torsion-vibration problem (15 inputs), evaluated analytically with
    complex-step (default) or finite-difference partial derivatives."""

    def _initialize(self):
        self.options.declare("name", "TorsionVibration", types=str)
        # use_FD: real-valued finite differences instead of the complex step.
        self.options.declare("use_FD", False, types=bool)
        self.options["ndim"] = 15

    def _setup(self):
        assert self.options["ndim"] == 15, "ndim must be 15"
        # Lower / upper bounds of the 15 physical inputs.
        self.xlimits[:, 0] = [
            1.8,
            9,
            10530000,
            7.2,
            3510000,
            10.8,
            1.6425,
            10.8,
            5580000,
            2.025,
            2.7,
            0.252,
            12.6,
            3.6,
            0.09,
        ]
        self.xlimits[:, 1] = [
            2.2,
            11,
            12870000,
            8.8,
            4290000,
            13.2,
            2.0075,
            13.2,
            6820000,
            2.475,
            3.3,
            0.308,
            15.4,
            4.4,
            0.11,
        ]

    def _evaluate(self, x, kx):
        """
        Arguments
        ---------
        x : ndarray[ne, nx]
            Evaluation points.
        kx : int or None
            Index of derivative (0-based) to return values with respect to.
            None means return function value rather than derivative.

        Returns
        -------
        ndarray[ne, 1]
            Functions values if kx=None or derivative values if kx is an int.
        """
        ne, nx = x.shape
        y = np.zeros((ne, 1), complex)

        def partial_derivative(function, var=0, point=None):
            # Finite-difference derivative of `function` w.r.t. its `var`-th
            # positional argument, evaluated at `point`.
            # Fixed: use the `function` argument (it was previously ignored in
            # favor of a closed-over name) and drop the mutable default list.
            args = list(point)

            def wraps(z):
                args[var] = z
                return function(*args)

            return derivative(wraps, point[var], dx=1e-6)

        def func(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14):
            # Intermediate stiffness/inertia-like terms of the torsional
            # system (9.80665 is presumably standard gravity — confirm).
            K1 = np.pi * x2 * x0 / (32 * x1)
            K2 = np.pi * x8 * x6 / (32 * x7)
            K3 = np.pi * x4 * x9 / (32 * x3)
            M1 = x11 * np.pi * x10 * x5 / (4 * 9.80665)
            M2 = x14 * np.pi * x13 * x12 / (4 * 9.80665)
            J1 = 0.5 * M1 * (x5 / 2) ** 2
            J2 = 0.5 * M2 * (x12 / 2) ** 2
            # Smallest root of the characteristic quadratic, converted to Hz.
            a = 1
            b = -((K1 + K2) / J1 + (K2 + K3) / J2)
            c = (K1 * K2 + K2 * K3 + K3 * K1) / (J1 * J2)
            return np.sqrt((-b - np.sqrt(b**2 - 4 * a * c)) / (2 * a)) / (2 * np.pi)

        for i in range(ne):
            point = [x[i, j] for j in range(nx)]
            if kx is None:
                y[i, 0] = func(*point)
            elif self.options["use_FD"]:
                y[i, 0] = partial_derivative(
                    func, var=kx, point=np.real(np.array(point))
                )
            else:
                # Complex-step derivative: exact to machine precision.
                ch = 1e-20
                point[kx] += complex(0, ch)
                y[i, 0] = np.imag(func(*point)) / ch
        return y
| 4,554 | 29.366667 | 266 | py |
smt | smt-master/smt/problems/__init__.py | from .branin import Branin
from .cantilever_beam import CantileverBeam
from .sphere import Sphere
from .reduced_problem import ReducedProblem
from .robot_arm import RobotArm
from .rosenbrock import Rosenbrock
from .tensor_product import TensorProduct
from .torsion_vibration import TorsionVibration
from .water_flow import WaterFlow
from .water_flow_lfidelity import WaterFlowLFidelity
from .welded_beam import WeldedBeam
from .wing_weight import WingWeight
from .ndim_cantilever_beam import NdimCantileverBeam
from .mixed_cantilever_beam import MixedCantileverBeam
from .neural_network import HierarchicalNeuralNetwork
from .hierarchical_goldstein import HierarchicalGoldstein
from .ndim_robot_arm import NdimRobotArm
from .ndim_rosenbrock import NdimRosenbrock
from .ndim_step_function import NdimStepFunction
from .lp_norm import LpNorm
| 840 | 39.047619 | 57 | py |
smt | smt-master/smt/problems/cantilever_beam.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
Cantilever beam problem from:
Liu, H., Xu, S., & Wang, X. Sampling strategies and metamodeling techniques for engineering design: comparison and application. In ASME Turbo Expo 2016: Turbomachinery Technical Conference and Exposition. American Society of Mechanical Engineers. June, 2016.
Cheng, G. H., Younis, A., Hajikolaei, K. H., and Wang, G. G. Trust Region Based Mode Pursuing Sampling Method for Global Optimization of High Dimensional Design Problems. Journal of Mechanical Design, 137(2). 2015.
"""
import numpy as np
from smt.problems.problem import Problem
class CantileverBeam(Problem):
    """Tip deflection of a stepped cantilever beam (Cheng et al., 2015).

    The design vector packs ``nelem = ndim/3`` elements as consecutive
    triplets (b, h, l): width, height and length of each element.  The
    returned value is the tip deflection under the tip load P; derivatives
    w.r.t. every design variable are available analytically.
    """

    def _initialize(self):
        self.options.declare("name", "CantileverBeam", types=str)
        self.options.declare("ndim", 3, types=int)
        self.options.declare("P", 50e3, types=(int, float), desc="Tip load (50 kN)")
        self.options.declare(
            "E", 200e9, types=(int, float), desc="Modulus of elast. (200 GPa)"
        )

    def _setup(self):
        # Three design variables (b, h, l) per beam element.
        assert self.options["ndim"] % 3 == 0, "ndim must be divisible by 3"
        # Width b
        self.xlimits[0::3, 0] = 0.01
        self.xlimits[0::3, 1] = 0.05
        # Height h
        self.xlimits[1::3, 0] = 0.30
        self.xlimits[1::3, 1] = 0.65
        # Length l
        self.xlimits[2::3, 0] = 0.5
        self.xlimits[2::3, 1] = 1.0

    def _evaluate(self, x, kx):
        """
        Arguments
        ---------
        x : ndarray[ne, nx]
            Evaluation points.
        kx : int or None
            Index of derivative (0-based) to return values with respect to.
            None means return function value rather than derivative.
        Returns
        -------
        ndarray[ne, 1]
            Functions values if kx=None or derivative values if kx is an int.
        """
        ne, nx = x.shape
        nelem = int(self.options["ndim"] / 3)
        P = self.options["P"]
        E = self.options["E"]
        # Complex dtype so complex-step differentiation through this problem works.
        y = np.zeros((ne, 1), complex)
        if kx is None:
            # Deflection: per element, (12 / b h^3) * (L_i^3 - L_{i+1}^3), where
            # L_i = sum of lengths from element i to the tip (the strided slices
            # x[:, 2+3*ielem::3] pick every following length variable).
            for ielem in range(nelem):
                b = x[:, 3 * ielem + 0]
                h = x[:, 3 * ielem + 1]
                y[:, 0] += (
                    12.0 / b / h**3 * np.sum(x[:, 2 + 3 * ielem :: 3], axis=1) ** 3
                )
                y[:, 0] -= (
                    12.0 / b / h**3 * np.sum(x[:, 5 + 3 * ielem :: 3], axis=1) ** 3
                )
        else:
            # Analytic partial derivative; kelem is the element owning variable kx,
            # and kx % 3 selects which of (b, h, l) is differentiated.
            kelem = int(np.floor(kx / 3))
            if kx % 3 == 0:
                # d/db of 12/(b h^3) * (...) = -12/(b^2 h^3) * (...)
                b = x[:, 3 * kelem + 0]
                h = x[:, 3 * kelem + 1]
                y[:, 0] += (
                    -12.0
                    / b**2
                    / h**3
                    * np.sum(x[:, 2 + 3 * kelem :: 3], axis=1) ** 3
                )
                y[:, 0] -= (
                    -12.0
                    / b**2
                    / h**3
                    * np.sum(x[:, 5 + 3 * kelem :: 3], axis=1) ** 3
                )
            elif kx % 3 == 1:
                # d/dh of 12/(b h^3) * (...) = -36/(b h^4) * (...)
                b = x[:, 3 * kelem + 0]
                h = x[:, 3 * kelem + 1]
                y[:, 0] += (
                    -36.0 / b / h**4 * np.sum(x[:, 2 + 3 * kelem :: 3], axis=1) ** 3
                )
                y[:, 0] -= (
                    -36.0 / b / h**4 * np.sum(x[:, 5 + 3 * kelem :: 3], axis=1) ** 3
                )
            elif kx % 3 == 2:
                # A length variable appears in the cumulative sums of its own
                # element and of every upstream element; chain rule gives 3x the
                # squared sums.
                for ielem in range(kelem + 1):
                    b = x[:, 3 * ielem + 0]
                    h = x[:, 3 * ielem + 1]
                    y[:, 0] += (
                        36.0
                        / b
                        / h**3
                        * np.sum(x[:, 2 + 3 * ielem :: 3], axis=1) ** 2
                    )
                    if kelem > ielem:
                        y[:, 0] -= (
                            36.0
                            / b
                            / h**3
                            * np.sum(x[:, 5 + 3 * ielem :: 3], axis=1) ** 2
                        )
        return (P / 3 / E) * y
| 4,082 | 33.601695 | 258 | py |
smt | smt-master/smt/problems/sphere.py | """
Author: Dr. Mohamed Amine Bouhlel <mbouhlel@umich.edu>
Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
Sphere function.
"""
import numpy as np
from smt.problems.problem import Problem
class Sphere(Problem):
    """n-dimensional sphere function: f(x) = sum_i x_i**2, minimum at the origin."""

    def _initialize(self):
        self.options.declare("name", "Sphere", types=str)

    def _setup(self):
        # Identical bounds [-10, 10] on every input dimension.
        self.xlimits[:, 0] = -10.0
        self.xlimits[:, 1] = 10.0

    def _evaluate(self, x, kx):
        """
        Arguments
        ---------
        x : ndarray[ne, nx]
            Evaluation points.
        kx : int or None
            Index of derivative (0-based) to return values with respect to.
            None means return function value rather than derivative.

        Returns
        -------
        ndarray[ne, 1]
            Functions values if kx=None or derivative values if kx is an int.
        """
        ne = x.shape[0]
        # Complex dtype so complex-step differentiation through this problem works.
        y = np.zeros((ne, 1), complex)
        if kx is None:
            # np.sum(..., axis=1) already yields a 1-D length-ne vector; the old
            # trailing `.T` was a no-op on a 1-D array and has been removed.
            y[:, 0] = np.sum(x**2, 1)
        else:
            # d/dx_kx sum_i x_i^2 = 2 * x_kx
            y[:, 0] = 2 * x[:, kx]
        return y
| 1,101 | 22.956522 | 77 | py |
smt | smt-master/smt/problems/welded_beam.py | """
Author: Dr. Mohamed Amine Bouhlel <mbouhlel@umich.edu>
Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
Welded beam problem from:
Liu, H., Xu, S., & Wang, X. Sampling strategies and metamodeling techniques for engineering design: comparison and application. In ASME Turbo Expo 2016: Turbomachinery Technical Conference and Exposition. American Society of Mechanical Engineers. June, 2016.
Deb, K. An Efficient Constraint Handling Method for Genetic Algorithms. Computer methods in applied mechanics and engineering, 186(2), pp. 311-338. 2000.
"""
import numpy as np
from scipy.misc import derivative
from smt.problems.problem import Problem
class WeldedBeam(Problem):
    """Welded beam shear-stress problem (Deb, 2000), 3 design variables (t, h, l)."""

    def _initialize(self):
        self.options.declare("name", "WeldedBeam", types=str)
        # True -> finite differences via scipy; False -> complex-step (exact).
        self.options.declare("use_FD", False, types=bool)
        self.options["ndim"] = 3

    def _setup(self):
        assert self.options["ndim"] == 3, "ndim must be 3"  # t, h, l
        self.xlimits[:, 0] = [5, 0.125, 5]
        self.xlimits[:, 1] = [10, 1, 10]

    def _evaluate(self, x, kx):
        """
        Arguments
        ---------
        x : ndarray[ne, nx]
            Evaluation points.
        kx : int or None
            Index of derivative (0-based) to return values with respect to.
            None means return function value rather than derivative.

        Returns
        -------
        ndarray[ne, 1]
            Functions values if kx=None or derivative values if kx is an int.
        """
        ne = x.shape[0]
        # Complex dtype: required by the complex-step derivative below.
        y = np.zeros((ne, 1), complex)

        def func(x0, x1, x2):
            # Equivalent weld shear stress combining the two stress components.
            tau1 = 6000 / (np.sqrt(2) * x1 * x2)
            tau2 = (
                6000
                * (14 + 0.5 * x2)
                * np.sqrt(0.25 * (x2**2 + (x1 + x0) ** 2))
                / (2 * (0.707 * x1 * x2 * (x2 / 12.0 + 0.25 * (x1 + x0) ** 2)))
            )
            return np.sqrt(
                tau1**2
                + tau2**2
                + x2 * tau1 * tau2 / np.sqrt(0.25 * (x2**2 + (x1 + x0) ** 2))
            )

        def partial_derivative(function, var=0, point=()):
            # Finite-difference partial derivative of `function` w.r.t. argument
            # index `var`, evaluated at `point`.
            # BUGFIX: the old closure called the *outer* `func` instead of the
            # `function` parameter, and used a mutable default `point=[]`.
            args = list(point)

            def wraps(z):
                args[var] = z
                return function(*args)

            # NOTE(review): scipy.misc.derivative was removed in SciPy >= 1.12;
            # this call should eventually be replaced by a local FD helper.
            return derivative(wraps, point[var], dx=1e-6)

        for i in range(ne):
            x0 = x[i, 0]
            x1 = x[i, 1]
            x2 = x[i, 2]
            if kx is None:
                y[i, 0] = func(x0, x1, x2)
            else:
                point = [x0, x1, x2]
                if self.options["use_FD"]:
                    # Finite differencing requires real-valued inputs.
                    point = np.real(np.array(point))
                    y[i, 0] = partial_derivative(func, var=kx, point=point)
                else:
                    # Complex-step derivative: perturb along the imaginary axis.
                    ch = 1e-20
                    point[kx] += complex(0, ch)
                    y[i, 0] = np.imag(func(*point)) / ch
                    point[kx] -= complex(0, ch)
        return y
| 2,951 | 33.325581 | 258 | py |
smt | smt-master/smt/problems/lp_norm.py | """
Author: Remi Lafage <remi.lafage@onera.fr>
This package is distributed under New BSD license.
Norm function.
"""
import numpy as np
from smt.problems.problem import Problem
class LpNorm(Problem):
    """p-norm of the input vector: f(x) = (sum_i |x_i|^p)^(1/p)."""

    def _initialize(self, ndim=1):
        self.options.declare("order", default=2, types=int)
        self.options.declare("name", "LpNorm", types=str)

    def _setup(self):
        self.xlimits[:, 0] = -1.0
        self.xlimits[:, 1] = 1.0

    def _evaluate(self, x, kx):
        """
        Arguments
        ---------
        x : ndarray[ne, ndim]
            Evaluation points.
        kx : int or None
            Index of derivative (0-based) to return values with respect to.
            None means return function value rather than derivative.

        Returns
        -------
        ndarray[ne, 1]
            Functions values if kx=None or derivative values if kx is an int.
        """
        ne = x.shape[0]
        p = self.options["order"]
        assert p > 0
        y = np.zeros((ne, 1), complex)
        lp_norm = np.sum(np.abs(x) ** p, axis=-1) ** (1.0 / p)
        if kx is None:
            y[:, 0] = lp_norm
        else:
            # d/dx_k ||x||_p = sign(x_k) * (|x_k| / ||x||_p)^(p-1)
            # (removed a stray `norm_p = np.linalg.norm(x, ord=p)` that was
            # computed per call but never used)
            y[:, 0] = np.sign(x[:, kx]) * (np.absolute(x[:, kx]) / lp_norm) ** (p - 1)
        return y
| 1,308 | 24.666667 | 86 | py |
smt | smt-master/smt/problems/ndim_step_function.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
N-dimensional step function problem.
"""
import numpy as np
from smt.utils.options_dictionary import OptionsDictionary
from smt.problems.problem import Problem
from smt.problems.tensor_product import TensorProduct
class NdimStepFunction(Problem):
    """Smoothed n-dimensional step function: a tanh tensor-product problem."""

    def __init__(self, ndim=1, width=10.0):
        super().__init__()
        # The actual computation is delegated to a tanh TensorProduct.
        inner = TensorProduct(ndim=ndim, func="tanh", width=width)
        self.problem = inner
        opts = OptionsDictionary()
        opts.declare("ndim", ndim, types=int)
        opts.declare("return_complex", False, types=bool)
        opts.declare("name", "NdimStepFunction", types=str)
        self.options = opts
        self.xlimits = inner.xlimits

    def _evaluate(self, x, kx):
        # Forward values and derivatives to the wrapped problem.
        return self.problem._evaluate(x, kx)
| 853 | 28.448276 | 73 | py |
smt | smt-master/smt/problems/ndim_cantilever_beam.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
N-dimensional cantilever beam problem.
"""
import numpy as np
from smt.utils.options_dictionary import OptionsDictionary
from smt.problems.problem import Problem
from smt.problems.reduced_problem import ReducedProblem
from smt.problems.cantilever_beam import CantileverBeam
class NdimCantileverBeam(Problem):
    """Cantilever beam reduced to one active variable (the height) per element."""

    def __init__(self, ndim=1, w=0.2):
        super().__init__()
        # Keep only the height variables (indices 1, 4, 7, ...) of the full
        # 3*ndim-dimensional cantilever beam problem.
        full = CantileverBeam(ndim=3 * ndim)
        kept = np.arange(1, 3 * ndim, 3)
        self.problem = ReducedProblem(full, kept, w=w)
        opts = OptionsDictionary()
        opts.declare("ndim", ndim, types=int)
        opts.declare("return_complex", False, types=bool)
        opts.declare("name", "NdimCantileverBeam", types=str)
        self.options = opts
        self.xlimits = self.problem.xlimits

    def _evaluate(self, x, kx):
        # Forward values and derivatives to the wrapped reduced problem.
        return self.problem._evaluate(x, kx)
| 961 | 29.0625 | 73 | py |
smt | smt-master/smt/problems/ndim_robot_arm.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
N-dimensional robot arm problem.
"""
import numpy as np
from smt.utils.options_dictionary import OptionsDictionary
from smt.problems.problem import Problem
from smt.problems.reduced_problem import ReducedProblem
from smt.problems.robot_arm import RobotArm
class NdimRobotArm(Problem):
    """Robot arm problem reduced to n active variables."""

    def __init__(self, ndim=1, w=0.2):
        super().__init__()
        # Keep every other variable starting at index 3 of the full
        # 2*(ndim+1)-dimensional robot arm problem.
        full = RobotArm(ndim=2 * (ndim + 1))
        kept = np.arange(3, 2 * (ndim + 1), 2)
        self.problem = ReducedProblem(full, kept, w=w)
        opts = OptionsDictionary()
        opts.declare("ndim", ndim, types=int)
        opts.declare("return_complex", False, types=bool)
        opts.declare("name", "NdimRobotArm", types=str)
        self.options = opts
        self.xlimits = self.problem.xlimits

    def _evaluate(self, x, kx):
        # Forward values and derivatives to the wrapped reduced problem.
        return self.problem._evaluate(x, kx)
| 937 | 28.3125 | 79 | py |
smt | smt-master/smt/problems/tests/__init__.py | 0 | 0 | 0 | py | |
smt | smt-master/smt/problems/tests/test_problem_examples.py | import unittest
import matplotlib
import matplotlib.pyplot
matplotlib.use("Agg")
matplotlib.pyplot.switch_backend("Agg")
class Test(unittest.TestCase):
    """Smoke tests / plotting examples for the benchmark problems.

    Each test evaluates one problem along a 1-D sweep of its first input
    (other inputs held fixed), queries all derivatives, and plots the slice.
    The Agg backend is forced above, so `plt.show()` renders nothing; the
    tests only verify that evaluation and plotting run without errors.
    """

    def test_cantilever_beam(self):
        """Sweep the first width variable of the 3-D cantilever beam."""
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.problems import CantileverBeam
        ndim = 3
        problem = CantileverBeam(ndim=ndim)
        num = 100
        x = np.ones((num, ndim))
        x[:, 0] = np.linspace(0.01, 0.05, num)
        x[:, 1] = 0.5
        x[:, 2] = 0.5
        y = problem(x)
        yd = np.empty((num, ndim))
        for i in range(ndim):
            yd[:, i] = problem(x, kx=i).flatten()
        print(y.shape)
        print(yd.shape)
        plt.plot(x[:, 0], y[:, 0])
        plt.xlabel("x")
        plt.ylabel("y")
        plt.show()

    def test_mixed_cantilever_beam(self):
        """Sample and plot the mixed-variable cantilever beam problem."""
        import matplotlib.pyplot as plt
        from smt.problems import MixedCantileverBeam
        problem = MixedCantileverBeam()
        n_doe = 100
        xdoe = problem.sample(n_doe)
        y = problem(xdoe)
        plt.scatter(xdoe[:, 0], y)
        plt.xlabel("x")
        plt.ylabel("y")
        plt.show()

    def test_hier_neural_network(self):
        """Sample the hierarchical design space and plot the responses."""
        import matplotlib.pyplot as plt
        from smt.problems import HierarchicalNeuralNetwork
        problem = HierarchicalNeuralNetwork()
        n_doe = 100
        xdoe, x_is_acting = problem.design_space.sample_valid_x(
            n_doe
        )  # If acting information is needed
        # xdoe = problem.sample(n_doe)  # Also possible
        y = problem(xdoe)
        plt.scatter(xdoe[:, 0], y)
        plt.xlabel("x")
        plt.ylabel("y")
        plt.show()

    def test_robot_arm(self):
        """Sweep the first variable of the 2-D robot arm problem."""
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.problems import RobotArm
        ndim = 2
        problem = RobotArm(ndim=ndim)
        num = 100
        x = np.ones((num, ndim))
        x[:, 0] = np.linspace(0.0, 1.0, num)
        x[:, 1] = np.pi
        y = problem(x)
        yd = np.empty((num, ndim))
        for i in range(ndim):
            yd[:, i] = problem(x, kx=i).flatten()
        print(y.shape)
        print(yd.shape)
        plt.plot(x[:, 0], y[:, 0])
        plt.xlabel("x")
        plt.ylabel("y")
        plt.show()

    def test_rosenbrock(self):
        """Sweep the first variable of the 2-D Rosenbrock function."""
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.problems import Rosenbrock
        ndim = 2
        problem = Rosenbrock(ndim=ndim)
        num = 100
        x = np.ones((num, ndim))
        x[:, 0] = np.linspace(-2, 2.0, num)
        x[:, 1] = 0.0
        y = problem(x)
        yd = np.empty((num, ndim))
        for i in range(ndim):
            yd[:, i] = problem(x, kx=i).flatten()
        print(y.shape)
        print(yd.shape)
        plt.plot(x[:, 0], y[:, 0])
        plt.xlabel("x")
        plt.ylabel("y")
        plt.show()

    def test_sphere(self):
        """Sweep the first variable of the 2-D sphere function."""
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.problems import Sphere
        ndim = 2
        problem = Sphere(ndim=ndim)
        num = 100
        x = np.ones((num, ndim))
        x[:, 0] = np.linspace(-10, 10.0, num)
        x[:, 1] = 0.0
        y = problem(x)
        yd = np.empty((num, ndim))
        for i in range(ndim):
            yd[:, i] = problem(x, kx=i).flatten()
        print(y.shape)
        print(yd.shape)
        plt.plot(x[:, 0], y[:, 0])
        plt.xlabel("x")
        plt.ylabel("y")
        plt.show()

    def test_branin(self):
        """Sweep both variables of the Branin function simultaneously."""
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.problems import Branin
        ndim = 2
        problem = Branin(ndim=ndim)
        num = 100
        x = np.ones((num, ndim))
        x[:, 0] = np.linspace(-5.0, 10.0, num)
        x[:, 1] = np.linspace(0.0, 15.0, num)
        y = problem(x)
        yd = np.empty((num, ndim))
        for i in range(ndim):
            yd[:, i] = problem(x, kx=i).flatten()
        print(y.shape)
        print(yd.shape)
        plt.plot(x[:, 0], y[:, 0])
        plt.xlabel("x")
        plt.ylabel("y")
        plt.show()

    def test_lp_norm(self):
        """Sweep both variables of the 2-D L2 norm problem."""
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.problems import LpNorm
        ndim = 2
        problem = LpNorm(ndim=ndim, order=2)
        num = 100
        x = np.ones((num, ndim))
        x[:, 0] = np.linspace(-1.0, 1.0, num)
        x[:, 1] = np.linspace(-1.0, 1.0, num)
        y = problem(x)
        yd = np.empty((num, ndim))
        for i in range(ndim):
            yd[:, i] = problem(x, kx=i).flatten()
        print(y.shape)
        print(yd.shape)
        plt.plot(x[:, 0], y[:, 0])
        plt.xlabel("x")
        plt.ylabel("y")
        plt.show()

    def test_tensor_product(self):
        """Sweep the first variable of a cosine tensor-product problem."""
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.problems import TensorProduct
        ndim = 2
        problem = TensorProduct(ndim=ndim, func="cos")
        num = 100
        x = np.ones((num, ndim))
        x[:, 0] = np.linspace(-1, 1.0, num)
        x[:, 1] = 0.0
        y = problem(x)
        yd = np.empty((num, ndim))
        for i in range(ndim):
            yd[:, i] = problem(x, kx=i).flatten()
        print(y.shape)
        print(yd.shape)
        plt.plot(x[:, 0], y[:, 0])
        plt.xlabel("x")
        plt.ylabel("y")
        plt.show()

    def test_torsion_vibration(self):
        """Sweep the first variable with the others at the mid-point of their bounds."""
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.problems import TorsionVibration
        ndim = 15
        problem = TorsionVibration(ndim=ndim)
        num = 100
        x = np.ones((num, ndim))
        for i in range(ndim):
            x[:, i] = 0.5 * (problem.xlimits[i, 0] + problem.xlimits[i, 1])
        x[:, 0] = np.linspace(1.8, 2.2, num)
        y = problem(x)
        yd = np.empty((num, ndim))
        for i in range(ndim):
            yd[:, i] = problem(x, kx=i).flatten()
        print(y.shape)
        print(yd.shape)
        plt.plot(x[:, 0], y[:, 0])
        plt.xlabel("x")
        plt.ylabel("y")
        plt.show()

    def test_water_flow(self):
        """Sweep the first variable with the others at the mid-point of their bounds."""
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.problems import WaterFlow
        ndim = 8
        problem = WaterFlow(ndim=ndim)
        num = 100
        x = np.ones((num, ndim))
        for i in range(ndim):
            x[:, i] = 0.5 * (problem.xlimits[i, 0] + problem.xlimits[i, 1])
        x[:, 0] = np.linspace(0.05, 0.15, num)
        y = problem(x)
        yd = np.empty((num, ndim))
        for i in range(ndim):
            yd[:, i] = problem(x, kx=i).flatten()
        print(y.shape)
        print(yd.shape)
        plt.plot(x[:, 0], y[:, 0])
        plt.xlabel("x")
        plt.ylabel("y")
        plt.show()

    def test_welded_beam(self):
        """Sweep the first variable with the others at the mid-point of their bounds."""
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.problems import WeldedBeam
        ndim = 3
        problem = WeldedBeam(ndim=ndim)
        num = 100
        x = np.ones((num, ndim))
        for i in range(ndim):
            x[:, i] = 0.5 * (problem.xlimits[i, 0] + problem.xlimits[i, 1])
        x[:, 0] = np.linspace(5.0, 10.0, num)
        y = problem(x)
        yd = np.empty((num, ndim))
        for i in range(ndim):
            yd[:, i] = problem(x, kx=i).flatten()
        print(y.shape)
        print(yd.shape)
        plt.plot(x[:, 0], y[:, 0])
        plt.xlabel("x")
        plt.ylabel("y")
        plt.show()

    def test_wing_weight(self):
        """Sweep the first variable with the others at the mid-point of their bounds."""
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.problems import WingWeight
        ndim = 10
        problem = WingWeight(ndim=ndim)
        num = 100
        x = np.ones((num, ndim))
        for i in range(ndim):
            x[:, i] = 0.5 * (problem.xlimits[i, 0] + problem.xlimits[i, 1])
        x[:, 0] = np.linspace(150.0, 200.0, num)
        y = problem(x)
        yd = np.empty((num, ndim))
        for i in range(ndim):
            yd[:, i] = problem(x, kx=i).flatten()
        print(y.shape)
        print(yd.shape)
        plt.plot(x[:, 0], y[:, 0])
        plt.xlabel("x")
        plt.ylabel("y")
        plt.show()
# Allow running this test module directly, outside a pytest/nose runner.
if __name__ == "__main__":
    unittest.main()
| 8,254 | 22.653295 | 75 | py |
smt | smt-master/smt/utils/sm_test_case.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import numpy as np
import unittest
class SMTestCase(unittest.TestCase):
    """Base test case adding a combined absolute/relative error assertion."""

    def assert_error(self, computed, desired, atol=1e-15, rtol=1e-15):
        """
        Check relative error of a scalar or array.

        The check passes when EITHER the absolute error is within `atol`
        OR the relative error is within `rtol`.

        Parameters
        ----------
        computed : float or ndarray
            Computed value; should be the same type and shape as desired.
        desired : float or ndarray
            Desired value; should be the same type and shape as computed.
        atol : float
            Acceptable absolute error. Default is 1e-15.
        rtol : float
            Acceptable relative error. Default is 1e-15.
        """
        diff = np.linalg.norm(computed - desired)
        ref = np.linalg.norm(desired)
        # Fall back to the absolute error when the reference is exactly zero.
        rel = diff / ref if ref > 0 else diff

        if not (diff <= atol or rel <= rtol):
            self.fail(
                "computed %s, desired %s, abs error %s, rel error %s, atol %s, rtol %s"
                % (
                    np.linalg.norm(computed),
                    np.linalg.norm(desired),
                    diff,
                    rel,
                    atol,
                    rtol,
                )
            )
| 1,370 | 30.159091 | 87 | py |
smt | smt-master/smt/utils/silence.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import os
import sys
class Silence(object):
    """Context manager that temporarily discards writes to ``sys.stdout``."""

    def __enter__(self):
        # Replace stdout with a sink on the OS null device.
        devnull = open(os.devnull, "w")
        sys.stdout = devnull

    def __exit__(self, *args):
        # Close the sink and restore the interpreter's original stdout.
        sink = sys.stdout
        sink.close()
        sys.stdout = sys.__stdout__
        return False
class Silence2:
    """Context manager that suppresses output at the file-descriptor level,
    optionally redirecting stdout/stderr to the named file(s).

    Unlike :class:`Silence`, which only rebinds ``sys.stdout``, this class
    redirects the underlying OS descriptors 1 and 2, so output written
    directly by C/Fortran extensions is silenced as well.

    Example::

        with Silence2(stdout="output.txt", mode="wb"):
            noisy_extension_call()

    Based on http://code.activestate.com/recipes/577564/ (updated for
    Python 3: the original relied on eagerly-evaluated Python 2 ``map()``).
    """

    def __init__(self, stdout=os.devnull, stderr=os.devnull, mode="wb"):
        self.outfiles = stdout, stderr
        # When both targets are the same file, open it once and share it.
        self.combine = stdout == stderr
        self.mode = mode

    def __enter__(self):
        self.sys = sys
        # save previous stdout/stderr streams
        self.saved_streams = saved_streams = sys.__stdout__, sys.__stderr__
        self.fds = fds = [s.fileno() for s in saved_streams]
        # BUGFIX: map() is lazy on Python 3, so the old ``map(os.dup, fds)``
        # never duplicated anything; materialize the duplicates eagerly.
        self.saved_fds = [os.dup(fd) for fd in fds]
        # flush any pending output before redirecting
        for s in saved_streams:
            s.flush()

        # open surrogate files
        if self.combine:
            null_streams = [open(self.outfiles[0], self.mode, 0)] * 2
            if self.outfiles[0] != os.devnull:
                # Rebind the high-level streams so Python-level writes are
                # merged with the redirected fd output.  Line-buffered, since
                # Python 3 forbids fully unbuffered text streams; closefd=False
                # keeps the real descriptors alive when these wrappers die.
                sys.stdout, sys.stderr = [
                    open(fd, "w", buffering=1, closefd=False) for fd in fds
                ]
        else:
            null_streams = [open(f, self.mode, 0) for f in self.outfiles]
        self.null_fds = null_fds = [s.fileno() for s in null_streams]
        self.null_streams = null_streams

        # overwrite the low-level file descriptors
        # BUGFIX: was a lazy ``map(os.dup2, ...)`` that never executed.
        for null_fd, fd in zip(null_fds, fds):
            os.dup2(null_fd, fd)

    def __exit__(self, *args):
        sys = self.sys
        # flush any pending output
        for s in self.saved_streams:
            s.flush()
        # restore original file descriptors (BUGFIX: was a lazy map())
        for saved_fd, fd in zip(self.saved_fds, self.fds):
            os.dup2(saved_fd, fd)
        sys.stdout, sys.stderr = self.saved_streams
        # clean up: close the duplicates (previously leaked) and surrogates
        for saved_fd in self.saved_fds:
            os.close(saved_fd)
        for s in self.null_streams:
            s.close()
        return False
## end of http://code.activestate.com/recipes/577564/ }}}
| 3,955 | 28.744361 | 80 | py |
smt | smt-master/smt/utils/checks.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import numpy as np
def ensure_2d_array(array, name):
    """Validate that `array` is a NumPy array and coerce it to 2-D.

    A 1-D array of length n becomes an (n, 1) column vector; a 2-D array is
    returned unchanged.  Raises ValueError for non-arrays or rank > 2.
    """
    if not isinstance(array, np.ndarray):
        raise ValueError("{} must be a NumPy array".format(name))

    two_d = np.atleast_2d(array.T).T
    if two_d.ndim != 2:
        raise ValueError("{} must have a rank of 1 or 2".format(name))
    return two_d
def check_support(sm, name, fail=False):
    """Raise NotImplementedError unless `sm` supports the named feature.

    Passing fail=True forces the error even when the feature is supported.
    """
    if not sm.supports[name] or fail:
        raise NotImplementedError(
            "{} does not support {}".format(sm.__class__.__name__, name)
        )
def check_nx(nx, x):
    """Raise ValueError when the 2-D input `x` does not have `nx` columns."""
    if x.shape[1] == nx:
        return
    if nx == 1:
        raise ValueError("x should have shape [:, 1] or [:]")
    raise ValueError(
        "x should have shape [:, {}] and not {}".format(nx, x.shape)
    )
| 914 | 24.416667 | 84 | py |
smt | smt-master/smt/utils/printer.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import time
import contextlib
class Printer(object):
    """
    Tool for formatting printing and measuring wall times.

    Attributes
    ----------
    active : bool
        If False, the printer is in a state in which printing is suppressed.
    depth : int
        Current level of nesting of the code, affecting the degree of indentation of prints.
    max_print_depth : int
        Maximum depth to print.
    times : dict
        Recorded wall times for operations, keyed by name; each value is a
        list with one measured time per run.
    """

    def __init__(self):
        self.active = False
        self.depth = 1
        self.max_print_depth = 100
        self.times = {}

    def _time(self, key):
        """
        Get the recorded wall times for the operation given by key.

        Arguments
        ---------
        key : str
            Unique name of the operation that was previously timed.

        Returns
        -------
        list of float
            Measured wall times, one entry per timed run.
        """
        return self.times[key]

    def __call__(self, string="", noindent=False):
        """
        Print the given string.

        Arguments
        ---------
        string : str
            String to print.
        noindent : bool
            If True, suppress any indentation; otherwise, indent based on depth.
        """
        if self.active and self.depth <= self.max_print_depth:
            if noindent:
                print(string)
            else:
                print("   " * self.depth + string)

    def _center(self, string):
        """
        Print string centered based on a line width of 75 characters.

        Arguments
        ---------
        string : str
            String to print.
        """
        pre = " " * int((75 - len(string)) / 2.0)
        self(pre + "%s" % string, noindent=True)

    def _line_break(self):
        """
        Print a line with a width of 75 characters.
        """
        self("_" * 75, noindent=True)
        self()

    def _title(self, title):
        """
        Print a title preceded by a line break.

        Arguments
        ---------
        title : str
            String to print.
        """
        self._line_break()
        self(" " + title, noindent=True)
        self()

    @contextlib.contextmanager
    def _timed_context(self, string=None, key=None):
        """
        Context manager for an operation.

        This context manager does 3 things:
        1. Measures the wall time for the operation.
        2. Increases the depth during the operation so that prints are indented.
        3. Optionally prints a pre-operation and post-operation messages including the time.

        Arguments
        ---------
        string : str or None
            String to print before/after operation if not None.
        key : str
            Name for this operation allowing the measured time to be read later if given.
        """
        if string is not None:
            self(string + " ...")

        start_time = time.time()
        self.depth += 1
        try:
            yield
        finally:
            # BUGFIX: restore the indentation depth even if the timed block
            # raises (previously the depth leaked on exceptions).
            self.depth -= 1
        stop_time = time.time()

        # Post message and time recording only happen on the success path,
        # as before; a raised exception propagates without being timed.
        if string is not None:
            self(string + " - done. Time (sec): %10.7f" % (stop_time - start_time))

        if key is not None:
            if key not in self.times:
                self.times[key] = [stop_time - start_time]
            else:
                self.times[key].append(stop_time - start_time)
| 3,477 | 25.348485 | 92 | py |
smt | smt-master/smt/utils/misc.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import sys
import numpy as np
from bisect import bisect_left
def standardization(X, y):
    """Center and scale the inputs and outputs to zero mean and unit variance.

    Each column of X and y has its mean subtracted and is divided by its
    sample standard deviation (ddof=1).  (Near-)constant columns get a unit
    scale so no division by ~0 occurs.

    Parameters
    ----------
    X: np.ndarray [n_obs, dim]
        - The input variables.
    y: np.ndarray [n_obs, 1]
        - The output variable.

    Returns
    -------
    X: np.ndarray [n_obs, dim]
        The standardized input matrix.
    y: np.ndarray [n_obs, 1]
        The standardized output vector.
    X_offset: list(dim)
        The mean of each input variable.
    y_mean: list(1)
        The mean of the output variable.
    X_scale: list(dim)
        The standard deviation of each input variable.
    y_std: list(1)
        The standard deviation of the output variable.
    """
    mean_x = np.mean(X, axis=0)
    sigma_x = np.std(X, axis=0, ddof=1)
    # guard against (near-)constant input columns
    sigma_x[np.abs(sigma_x) < (100.0 * sys.float_info.epsilon)] = 1.0

    mean_y = np.mean(y, axis=0)
    sigma_y = np.std(y, axis=0, ddof=1)
    sigma_y[sigma_y == 0.0] = 1.0

    # scale X and y (new arrays; the inputs are not modified in place)
    X_scaled = (X - mean_x) / sigma_x
    y_scaled = (y - mean_y) / sigma_y

    return X_scaled, y_scaled, mean_x, mean_y, sigma_x, sigma_y
def compute_rms_error(sm, xe=None, ye=None, kx=None):
    """
    Returns a normalized RMS error of the training points or the given points.

    Arguments
    ---------
    sm : Surrogate
        Surrogate model instance.
    xe : np.ndarray[ne, dim] or None
        Input values. If None, the input values at the training points are used instead.
    ye : np.ndarray[ne, 1] or None
        Output / deriv. values. If None, the training pt. outputs / derivs. are used.
    kx : int or None
        If None, we are checking the output values.
        If int, we are checking the derivs. w.r.t. the kx^{th} input variable (0-based).

    Returns
    -------
    float
        || prediction - reference || / || reference ||.
        (None when exactly one of xe/ye is given, as before.)

    Raises
    ------
    ValueError
        If no training data is stored for the requested kx.
    """
    if xe is not None and ye is not None:
        # Evaluate the model against user-supplied reference points.
        ye = ye.reshape((xe.shape[0], 1))
        if kx is None:
            ye2 = sm.predict_values(xe)
        else:
            ye2 = sm.predict_derivatives(xe, kx)
        return np.linalg.norm(ye2 - ye) / np.linalg.norm(ye)
    elif xe is None and ye is None:
        # Evaluate against the stored training data.  Key 0 of
        # training_points[None] holds outputs; key kx+1 holds the
        # derivatives w.r.t. the kx-th variable.
        if kx is None:
            kx2 = 0
        else:
            # BUGFIX: this was `kx2 += 1` on an unbound local, which raised
            # UnboundLocalError whenever kx was given without xe/ye.
            kx2 = kx + 1
        if kx2 not in sm.training_points[None]:
            raise ValueError(
                "There is no training point data available for kx %s" % kx2
            )
        xt, yt = sm.training_points[None][kx2]
        if kx is None:
            yt2 = sm.predict_values(xt)
        else:
            yt2 = sm.predict_derivatives(xt, kx)
        return np.linalg.norm(yt2 - yt) / np.linalg.norm(yt)
def take_closest_number(myList, myNumber):
    """Return the element of the sorted `myList` closest to `myNumber`.

    When two elements are equally close, the smaller one wins.
    """
    idx = bisect_left(myList, myNumber)
    # Clamp to the ends when the number falls outside the list's range.
    if idx == 0:
        return myList[0]
    if idx == len(myList):
        return myList[-1]
    lo = myList[idx - 1]
    hi = myList[idx]
    # Strict inequality keeps the tie on the smaller value.
    return hi if hi - myNumber < myNumber - lo else lo
def take_closest_in_list(myList, x):
    """Vectorized `take_closest_number`: map every entry of `x` onto the
    closest value in the sorted `myList`."""
    closest = np.vectorize(take_closest_number, excluded=["myList"])
    return closest(myList=myList, myNumber=x)
| 3,620 | 26.641221 | 88 | py |
smt | smt-master/smt/utils/linear_solvers.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import numpy as np
import scipy.sparse.linalg
import scipy.linalg
import contextlib
from smt.utils.options_dictionary import OptionsDictionary
# Solver specs understood by get_solver() below; "null"/None are handled
# specially there, and an existing LinearSolver instance is passed through.
VALID_SOLVERS = (
    "krylov-dense",
    "dense-lu",
    "dense-chol",
    "lu",
    "ilu",
    "krylov",
    "krylov-lu",
    "krylov-mg",
    "gs",
    "jacobi",
    "mg",
    "null",
)
def get_solver(solver):
    """Map a solver spec to a LinearSolver instance.

    Accepts a name string (see VALID_SOLVERS), an existing LinearSolver
    instance (returned as-is), or None (returns None).  Unrecognized specs
    fall through and return None implicitly, as before.
    """
    if solver == "dense-lu":
        return DenseLUSolver()
    elif solver == "dense-chol":
        return DenseCholeskySolver()
    elif solver == "krylov-dense":
        return KrylovSolver(pc="dense")
    elif solver in ("lu", "ilu"):
        return DirectSolver(alg=solver)
    elif solver == "krylov":
        return KrylovSolver()
    elif solver == "krylov-lu":
        return KrylovSolver(pc="lu")
    elif solver == "krylov-mg":
        return KrylovSolver(pc="mg")
    elif solver in ("gs", "jacobi"):
        return StationarySolver(solver=solver)
    elif solver == "mg":
        return MultigridSolver()
    elif isinstance(solver, LinearSolver):
        return solver
    elif solver == "null":
        return NullSolver()
    elif solver is None:  # identity comparison instead of `== None`
        return None
class Callback(object):
    """Iteration callback for the Krylov solvers: prints the residual history.

    Remembers the iteration count and the first residual norm so that both
    absolute and relative norms can be reported every `interval` iterations.
    The owning solver resets `counter`, `ind_y`, `mtx` and `rhs` per solve.
    """

    def __init__(self, size, string, interval, printer):
        self.size = size
        self.string = string
        self.interval = interval
        self.printer = printer

        # Mutable per-solve state.
        self.counter = 0
        self.ind_y = 0
        self.mtx = None
        self.rhs = None
        self.norm0 = 1.0

    def _print_norm(self, norm):
        # The first call fixes the reference norm for the relative column.
        if self.counter == 0:
            self.norm0 = norm
        if self.counter % self.interval == 0:
            line = "%s (%i x %i mtx), output %-3i : %3i %15.9e %15.9e" % (
                self.string,
                self.size,
                self.size,
                self.ind_y,
                self.counter,
                norm,
                norm / self.norm0,
            )
            self.printer(line)
        self.counter += 1

    def _print_res(self, res):
        # GMRES-style callbacks pass the residual norm directly.
        self._print_norm(res)

    def _print_sol(self, sol):
        # CG/BiCGStab-style callbacks pass the iterate: recompute the residual.
        residual = self.mtx.dot(sol) - self.rhs
        self._print_norm(np.linalg.norm(residual))
class LinearSolver(object):
    """Abstract base class for the linear solvers in this module.

    Subclasses implement `_setup` (prepare/factorize for a given matrix) and
    `_solve` (solve for one right-hand side), and may declare extra options
    in `_initialize`.
    """

    def __init__(self, **kwargs):
        self.mtx = None
        self.rhs = None

        self.options = OptionsDictionary()
        self.options.declare("print_init", True, types=bool)
        self.options.declare("print_solve", True, types=bool)
        self._initialize()
        self.options.update(kwargs)

    def _initialize(self):
        # Hook for subclasses to declare additional options.
        pass

    def _setup(self, mtx, printer, mg_matrices=[]):
        pass

    def _solve(self, rhs, sol=None, ind_y=0):
        pass

    def _clone(self):
        """Return a fresh solver of the same class carrying the same options."""
        clone = self.__class__()
        # BUGFIX: copy *this* solver's options onto the clone; the previous
        # code updated the clone with its own freshly-declared defaults,
        # silently discarding any non-default option values.
        clone.options.update(self.options._dict)
        return clone

    @contextlib.contextmanager
    def _active(self, active):
        """Temporarily AND the printer's `active` flag with `active`."""
        orig_active = self.printer.active
        self.printer.active = self.printer.active and active
        try:
            yield self.printer
        finally:
            # Restore the flag even if the body raises.
            self.printer.active = orig_active
class NullSolver(LinearSolver):
    """Placeholder solver that deliberately does nothing."""

    def solve(self, rhs, sol=None, ind_y=0):
        # Intentional no-op; returns None like the base implementation.
        return None
class DenseCholeskySolver(LinearSolver):
    """Direct solver: dense Cholesky factorization (for SPD matrices)."""

    def _setup(self, mtx, printer, mg_matrices=[]):
        # Factorize once; `mtx.A` densifies the matrix — assumes callers pass
        # a scipy sparse matrix here (TODO confirm against call sites).
        self.printer = printer
        with self._active(self.options["print_init"]) as printer:
            self.mtx = mtx.A
            assert isinstance(self.mtx, np.ndarray), "mtx is of type %s" % type(mtx)
            with printer._timed_context(
                "Performing Chol. fact. (%i x %i mtx)" % mtx.shape
            ):
                # Upper-triangular factor U with A = U^T U.
                self.upper = scipy.linalg.cholesky(self.mtx)

    def _solve(self, rhs, sol=None, ind_y=0):
        # Solve A x = rhs via two triangular solves: U^T (U x) = rhs, in place.
        with self._active(self.options["print_solve"]) as printer:
            self.rhs = rhs
            if sol is None:
                sol = np.array(rhs)
            with printer._timed_context("Back solving (%i x %i mtx)" % self.mtx.shape):
                sol[:] = rhs
                scipy.linalg.solve_triangular(
                    self.upper, sol, overwrite_b=True, trans="T"
                )
                scipy.linalg.solve_triangular(self.upper, sol, overwrite_b=True)
        return sol
class DenseLUSolver(LinearSolver):
    """Direct solver: dense LU factorization (general square matrices)."""

    def _setup(self, mtx, printer, mg_matrices=[]):
        # Factorize once up front; _solve then only does back-substitutions.
        self.printer = printer
        with self._active(self.options["print_init"]) as printer:
            self.mtx = mtx
            assert isinstance(mtx, np.ndarray), "mtx is of type %s" % type(mtx)
            with printer._timed_context(
                "Performing LU fact. (%i x %i mtx)" % mtx.shape
            ):
                self.fact = scipy.linalg.lu_factor(mtx)

    def _solve(self, rhs, sol=None, ind_y=0):
        with self._active(self.options["print_solve"]) as printer:
            self.rhs = rhs
            if sol is None:
                sol = np.array(rhs)
            with printer._timed_context("Back solving (%i x %i mtx)" % self.mtx.shape):
                sol[:] = scipy.linalg.lu_solve(self.fact, rhs)
        return sol
class DirectSolver(LinearSolver):
    """Direct sparse solver: exact ("lu") or incomplete ("ilu") factorization."""

    def _initialize(self):
        # "lu" -> scipy.sparse.linalg.splu; "ilu" -> spilu with fixed drop params.
        self.options.declare("alg", "lu", values=["lu", "ilu"])

    def _setup(self, mtx, printer, mg_matrices=[]):
        self.printer = printer
        with self._active(self.options["print_init"]) as printer:
            self.mtx = mtx
            assert isinstance(mtx, scipy.sparse.spmatrix), "mtx is of type %s" % type(
                mtx
            )
            with printer._timed_context(
                "Performing %s fact. (%i x %i mtx)"
                % ((self.options["alg"],) + mtx.shape)
            ):
                if self.options["alg"] == "lu":
                    self.fact = scipy.sparse.linalg.splu(mtx)
                elif self.options["alg"] == "ilu":
                    # Incomplete LU with hard-coded drop tolerance / fill factor.
                    self.fact = scipy.sparse.linalg.spilu(
                        mtx, drop_rule="interp", drop_tol=1e-3, fill_factor=2
                    )

    def _solve(self, rhs, sol=None, ind_y=0):
        with self._active(self.options["print_solve"]) as printer:
            self.rhs = rhs
            if sol is None:
                sol = np.array(rhs)
            with printer._timed_context("Back solving (%i x %i mtx)" % self.mtx.shape):
                sol[:] = self.fact.solve(rhs)
        return sol
class KrylovSolver(LinearSolver):
    """Iterative Krylov solver (CG / BiCGStab / GMRES) with an optional
    preconditioner built from any other solver in this module."""

    def _initialize(self):
        # Declared on top of the base class's print_init / print_solve options.
        self.options.declare("interval", 10, types=int)
        self.options.declare("solver", "cg", values=["cg", "bicgstab", "gmres"])
        self.options.declare(
            "pc",
            None,
            values=[None, "ilu", "lu", "gs", "jacobi", "mg", "dense"],
            types=LinearSolver,
        )
        self.options.declare("ilimit", 100, types=int)
        self.options.declare("atol", 1e-15, types=(int, float))
        self.options.declare("rtol", 1e-15, types=(int, float))

    def _setup(self, mtx, printer, mg_matrices=[]):
        self.printer = printer
        with self._active(self.options["print_init"]) as printer:
            self.mtx = mtx

            # Build the preconditioner (itself a LinearSolver) and expose it
            # to scipy as a LinearOperator whose matvec is the PC's solve.
            pc_solver = get_solver(self.options["pc"])
            if pc_solver is not None:
                pc_solver._setup(mtx, printer, mg_matrices=mg_matrices)
                self.pc_solver = pc_solver
                self.pc_op = scipy.sparse.linalg.LinearOperator(
                    mtx.shape, matvec=pc_solver._solve
                )
            else:
                self.pc_solver = None
                self.pc_op = None

            self.callback = Callback(
                mtx.shape[0], "Krylov solver", self.options["interval"], printer
            )
            # CG/BiCGStab callbacks receive the iterate (-> _print_sol, which
            # recomputes the residual); GMRES passes the residual norm itself.
            if self.options["solver"] == "cg":
                self.solver = scipy.sparse.linalg.cg
                self.callback_func = self.callback._print_sol
                self.solver_kwargs = {
                    "atol": "legacy",
                    "tol": self.options["atol"],
                    "maxiter": self.options["ilimit"],
                }
            elif self.options["solver"] == "bicgstab":
                self.solver = scipy.sparse.linalg.bicgstab
                self.callback_func = self.callback._print_sol
                self.solver_kwargs = {
                    "tol": self.options["atol"],
                    "maxiter": self.options["ilimit"],
                }
            elif self.options["solver"] == "gmres":
                self.solver = scipy.sparse.linalg.gmres
                self.callback_func = self.callback._print_res
                self.solver_kwargs = {
                    "tol": self.options["atol"],
                    "maxiter": self.options["ilimit"],
                    "restart": min(self.options["ilimit"], mtx.shape[0]),
                }

    def _solve(self, rhs, sol=None, ind_y=0):
        with self._active(self.options["print_solve"]) as printer:
            self.rhs = rhs

            if sol is None:
                sol = np.array(rhs)

            with printer._timed_context(
                "Running %s Krylov solver (%i x %i mtx)"
                % ((self.options["solver"],) + self.mtx.shape)
            ):
                # Reset the callback's per-solve state, then print the initial
                # residual before iterating.
                self.callback.counter = 0
                self.callback.ind_y = ind_y
                self.callback.mtx = self.mtx
                self.callback.rhs = rhs
                self.callback._print_sol(sol)
                tmp, info = self.solver(
                    self.mtx,
                    rhs,
                    x0=sol,
                    M=self.pc_op,
                    callback=self.callback_func,
                    **self.solver_kwargs,
                )
                # NOTE(review): `info` (convergence flag) is ignored here.
                sol[:] = tmp

        return sol
class StationarySolver(LinearSolver):
    # Damped stationary iterative solver (Jacobi or Gauss-Seidel) for sparse
    # systems; runs a fixed number of sweeps rather than testing convergence.
    def _initialize(self):
        # interval: print frequency for the convergence callback.
        self.options.declare("interval", 10, types=int)
        # solver: "gs" (Gauss-Seidel) or "jacobi".
        self.options.declare("solver", "gs", values=["gs", "jacobi"])
        # damping: relaxation factor w in the update formulas below.
        self.options.declare("damping", 1.0, types=(int, float))
        # ilimit: fixed number of iterations performed per _solve call.
        self.options.declare("ilimit", 10, types=int)
    def _setup(self, mtx, printer, mg_matrices=[]):
        # Precompute the pieces of mtx the chosen iteration needs:
        # inverse diagonal for Jacobi, LU of (D/w + L) for Gauss-Seidel.
        self.printer = printer
        with self._active(self.options["print_init"]) as printer:
            self.mtx = mtx
            self.callback = Callback(
                mtx.shape[0], "Stationary solver", self.options["interval"], printer
            )
            with printer._timed_context(
                "Initializing %s solver (%i x %i mtx)"
                % ((self.options["solver"],) + self.mtx.shape)
            ):
                if self.options["solver"] == "jacobi":
                    # A x = b
                    # x_{k+1} = x_k + w D^{-1} (b - A x_k)
                    self.d_inv = self.options["damping"] / self._split_mtx_diag()
                    self.iterate = self._jacobi
                elif self.options["solver"] == "gs":
                    # A x = b
                    # x_{k+1} = x_k + (1/w D + L)^{-1} (b - A x_k)
                    mtx_d = self._split_mtx("diag")
                    mtx_l = self._split_mtx("lower")
                    mtx_ldw = mtx_l + mtx_d / self.options["damping"]
                    self.inv = scipy.sparse.linalg.splu(mtx_ldw)
                    self.iterate = self._gs
    def _split_mtx_diag(self):
        # Return the diagonal of the sparse matrix as a dense 1-D vector,
        # accumulating duplicate entries via np.add.at.
        shape = self.mtx.shape
        rows, cols, data = scipy.sparse.find(self.mtx)
        mask_d = rows == cols
        diag = np.zeros(shape[0])
        np.add.at(diag, rows[mask_d], data[mask_d])
        return diag
    def _split_mtx(self, part):
        # Extract the diagonal, strictly-lower, or strictly-upper part of
        # mtx as a new CSC matrix of the same shape.
        shape = self.mtx.shape
        rows, cols, data = scipy.sparse.find(self.mtx)
        if part == "diag":
            mask = rows == cols
        elif part == "lower":
            mask = rows > cols
        elif part == "upper":
            mask = rows < cols
        return scipy.sparse.csc_matrix(
            (data[mask], (rows[mask], cols[mask])), shape=shape
        )
    def _jacobi(self, rhs, sol):
        # A x = b
        # x_{k+1} = x_k + w D^{-1} (b - A x_k)
        sol += self.d_inv * (rhs - self.mtx.dot(sol))
    def _gs(self, rhs, sol):
        # A x = b
        # x_{k+1} = x_k + (1/w D + L)^{-1} (b - A x_k)
        sol += self.inv.solve(rhs - self.mtx.dot(sol))
    def _solve(self, rhs, sol=None, ind_y=0):
        # Apply `ilimit` sweeps of the configured iteration to sol in place
        # (starting from rhs if no initial guess is given) and return it.
        with self._active(self.options["print_solve"]) as printer:
            self.rhs = rhs
            if sol is None:
                sol = np.array(rhs)
            self.callback.counter = 0
            self.callback.ind_y = ind_y
            self.callback.mtx = self.mtx
            self.callback.rhs = rhs
            with printer._timed_context(
                "Running %s stationary solver (%i x %i mtx)"
                % ((self.options["solver"],) + self.mtx.shape)
            ):
                for ind in range(self.options["ilimit"]):
                    self.iterate(rhs, sol)
                    self.callback._print_sol(sol)
            return sol
class MultigridSolver(LinearSolver):
    # Multigrid solver: a hierarchy of coarsened systems is built from the
    # interpolation operators in mg_matrices, each level is smoothed by a
    # clone of the configured sub-solver, and the coarsest level is solved
    # exactly with a DirectSolver.
    def _initialize(self):
        # interval: print frequency for the convergence callback.
        self.options.declare("interval", 1, types=int)
        # mg_cycles: number of additional smoothed V-cycles after the
        # initial restrict / coarse-solve / interpolate pass.
        self.options.declare("mg_cycles", 0, types=int)
        # solver: smoother applied on each level ("null" skips smoothing).
        self.options.declare(
            "solver",
            "null",
            values=["null", "gs", "jacobi", "krylov"],
            types=LinearSolver,
        )
    def _setup(self, mtx, printer, mg_matrices=[]):
        # Build the level hierarchy: level 0 is the full system; each entry
        # of mg_matrices is an interpolation operator mapping a coarse space
        # up to the next finer one.
        self.printer = printer
        with self._active(self.options["print_init"]) as printer:
            self.mtx = mtx
            solver = get_solver(self.options["solver"])
            mg_solver = solver._clone()
            mg_solver._setup(mtx, printer)
            self.mg_mtx = [mtx]
            self.mg_sol = [np.zeros(self.mtx.shape[0])]
            self.mg_rhs = [np.zeros(self.mtx.shape[0])]
            self.mg_ops = []
            self.mg_solvers = [mg_solver]
            for ind, mg_op in enumerate(mg_matrices):
                # Galerkin coarse-level operator: P^T A P.
                mg_mtx = mg_op.T.dot(self.mg_mtx[-1]).dot(mg_op).tocsc()
                mg_sol = mg_op.T.dot(self.mg_sol[-1])
                mg_rhs = mg_op.T.dot(self.mg_rhs[-1])
                mg_solver = solver._clone()
                mg_solver._setup(mg_mtx, printer)
                self.mg_mtx.append(mg_mtx)
                self.mg_sol.append(mg_sol)
                self.mg_rhs.append(mg_rhs)
                self.mg_ops.append(mg_op)
                self.mg_solvers.append(mg_solver)
            # Replace the coarsest-level smoother with an exact direct solver.
            mg_mtx = self.mg_mtx[-1]
            mg_solver = DirectSolver()
            mg_solver._setup(mg_mtx, printer)
            self.mg_solvers[-1] = mg_solver
            self.callback = Callback(
                mtx.shape[0], "Multigrid solver", self.options["interval"], printer
            )
    def _restrict(self, ind_level):
        # Restrict the residual on ind_level to the next coarser level's
        # right-hand side (no smoothing).
        mg_op = self.mg_ops[ind_level]
        mtx = self.mg_mtx[ind_level]
        sol = self.mg_sol[ind_level]
        rhs = self.mg_rhs[ind_level]
        res = rhs - mtx.dot(sol)
        res_coarse = mg_op.T.dot(res)
        self.mg_rhs[ind_level + 1][:] = res_coarse
    def _smooth_and_restrict(self, ind_level, ind_cycle, ind_y):
        # Apply the level smoother, then restrict the smoothed residual.
        mg_op = self.mg_ops[ind_level]
        mtx = self.mg_mtx[ind_level]
        sol = self.mg_sol[ind_level]
        rhs = self.mg_rhs[ind_level]
        solver = self.mg_solvers[ind_level]
        solver.print_info = "MG iter %i level %i" % (ind_cycle, ind_level)
        solver._solve(rhs, sol, ind_y)
        res = rhs - mtx.dot(sol)
        res_coarse = mg_op.T.dot(res)
        self.mg_rhs[ind_level + 1][:] = res_coarse
    def _coarse_solve(self, ind_cycle, ind_y):
        # Exact solve on the coarsest level (DirectSolver installed in _setup).
        sol = self.mg_sol[-1]
        rhs = self.mg_rhs[-1]
        solver = self.mg_solvers[-1]
        solver.print_info = "MG iter %i level %i" % (ind_cycle, len(self.mg_ops))
        solver._solve(rhs, sol, ind_y)
    def _smooth_and_interpolate(self, ind_level, ind_cycle, ind_y):
        # Prolongate the coarse correction up to this level, then smooth.
        mg_op = self.mg_ops[ind_level]
        mtx = self.mg_mtx[ind_level]
        sol = self.mg_sol[ind_level]
        rhs = self.mg_rhs[ind_level]
        solver = self.mg_solvers[ind_level]
        solver.print_info = "MG iter %i level %i" % (ind_cycle, ind_level)
        sol_coarse = self.mg_sol[ind_level + 1]
        sol += mg_op.dot(sol_coarse)
        solver._solve(rhs, sol, ind_y)
    def _solve(self, rhs, sol=None, ind_y=0):
        # Full multigrid solve: one initial restrict / coarse-solve /
        # interpolate pass, then options["mg_cycles"] smoothed V-cycles.
        # NOTE(review): the initial guess `sol` is never copied into
        # mg_sol[0]; the cycle starts from whatever mg_sol holds from setup
        # or previous solves -- confirm this is intended.
        with self._active(self.options["print_solve"]) as printer:
            self.rhs = rhs
            if sol is None:
                sol = np.array(rhs)
            orig_sol = sol
            self.callback.counter = 0
            self.callback.ind_y = ind_y
            self.callback.mtx = self.mtx
            self.callback.rhs = rhs
            self.mg_rhs[0][:] = rhs
            for ind_level in range(len(self.mg_ops)):
                self._restrict(ind_level)
            self._coarse_solve(-1, ind_y)
            for ind_level in range(len(self.mg_ops) - 1, -1, -1):
                self._smooth_and_interpolate(ind_level, -1, ind_y)
            for ind_cycle in range(self.options["mg_cycles"]):
                for ind_level in range(len(self.mg_ops)):
                    self._smooth_and_restrict(ind_level, ind_cycle, ind_y)
                self._coarse_solve(ind_cycle, ind_y)
                for ind_level in range(len(self.mg_ops) - 1, -1, -1):
                    self._smooth_and_interpolate(ind_level, ind_cycle, ind_y)
            orig_sol[:] = self.mg_sol[0]
            return orig_sol
| 17,450 | 31.497207 | 87 | py |
smt | smt-master/smt/utils/line_search.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import numpy as np
import scipy.sparse
VALID_LINE_SEARCHES = ("backtracking", "bracketed", "quadratic", "cubic", "null")
def get_line_search_class(line_search):
    """Map a line search name to its class.

    Parameters
    ----------
    line_search : str
        One of the names in VALID_LINE_SEARCHES:
        "backtracking", "bracketed", "quadratic", "cubic", "null".

    Returns
    -------
    type
        The corresponding line search class.

    Raises
    ------
    ValueError
        If ``line_search`` is not a recognized name. (Previously an unknown
        name silently fell through and returned None, deferring the failure
        to the caller.)
    """
    if line_search == "backtracking":
        return BacktrackingLineSearch
    elif line_search == "bracketed":
        return BracketedLineSearch
    elif line_search == "quadratic":
        return QuadraticLineSearch
    elif line_search == "cubic":
        return CubicLineSearch
    elif line_search == "null":
        return NullLineSearch
    raise ValueError("Unknown line search algorithm: %r" % (line_search,))
class LineSearch(object):
    """
    Abstract line search over the 1-D function phi(a) = func(x + a * dx).

    Subclasses implement __call__; this base class stores the problem data
    and provides the sufficient-decrease and curvature checks that make up
    the strong Wolfe conditions.
    """

    def __init__(self, x, dx, func, grad, u1=1.0e-4, u2=0.9):
        """
        Store the problem data and evaluate phi and phi' at a = 0.

        Arguments
        ---------
        x : ndarray[:]
            Current point in the n-D space.
        dx : ndarray[:]
            Search direction.
        func : function
            Scalar objective function of x.
        grad : function
            Vector function returning the gradient of func.
        u1 : float
            Sufficient-decrease (Armijo) parameter, ensuring non-zero decrease.
        u2 : float
            Curvature parameter, ensuring the directional derivative shrinks.
        """
        self.x = x
        self.dx = dx
        self.func = func
        self.grad = grad
        self.u1 = u1
        self.u2 = u2
        self.phi_0 = self._phi(0.0)
        self.dphi_0 = self._dphi(0.0)

    def _phi(self, a):
        """Evaluate phi(a) = func(x + a * dx)."""
        return self.func(self.x + a * self.dx)

    def _dphi(self, a):
        """Evaluate phi'(a), the directional derivative of func along dx."""
        step = self.x + a * self.dx
        return np.dot(self.grad(step), self.dx)

    def _func_decreased(self, a):
        """Return True if the sufficient-decrease (Armijo) condition holds at a."""
        return self._phi(a) <= self.phi_0 + self.u1 * a * self.dphi_0

    def _grad_decreased(self, a):
        """Return True if the curvature condition holds at a."""
        return np.abs(self._dphi(a)) <= np.abs(self.u2 * self.dphi_0)
class NullLineSearch(object):
    """
    Degenerate line search that always takes the full step.

    It matches the LineSearch constructor signature for interchangeability,
    but ignores the function and gradient entirely; calling it simply
    returns x + initial_a * dx.
    """

    def __init__(self, x, dx, func, grad, u1=1.0e-4, u2=0.9):
        """
        Store the point and direction; the other arguments are accepted only
        for interface compatibility with LineSearch and are not used.

        Arguments
        ---------
        x : ndarray[:]
            Current point in the n-D space.
        dx : ndarray[:]
            Search direction.
        func : function
            Ignored.
        grad : function
            Ignored.
        u1 : float
            Ignored.
        u2 : float
            Ignored.
        """
        self.x = x
        self.dx = dx

    def __call__(self, initial_a=1):
        """Return the point reached by a step of length initial_a along dx."""
        return self.x + initial_a * self.dx
class BacktrackingLineSearch(LineSearch):
    """
    Backtracking (Armijo) line search: the step is shrunk geometrically
    until the sufficient-decrease condition holds.
    """

    def __call__(self, initial_a=1.0, rho=0.5):
        """Return x + a * dx for the first a = initial_a * rho**k satisfying
        the sufficient-decrease condition."""
        a = initial_a
        while True:
            if self._func_decreased(a):
                return self.x + a * self.dx
            a *= rho
class BracketedLineSearch(LineSearch):
    """
    Base class for line search algorithms enforcing the Strong Wolfe conditions.
    """
    def __call__(self, initial_a=1):
        # Bracketing phase: grow the step until a minimum is bracketed (then
        # refine with _zoom), or return directly once both Wolfe conditions
        # hold. NOTE(review): if 20 growth iterations pass without either
        # outcome, execution falls off the loop and returns None -- confirm
        # callers tolerate that.
        a1 = 0
        a2 = initial_a
        p1 = self._phi(a1)
        p2 = self._phi(a2)
        dp1 = self._dphi(a1)
        dp2 = self._dphi(a2)
        for ind in range(20):
            if not self._func_decreased(a2) or p2 > p1:
                # We've successfully bracketed if
                # 1. The function value is greater than at a=0
                # 2. The function value has increased from the previous iteration
                return self._zoom(a1, p1, dp1, a2, p2, dp2)
            if self._grad_decreased(a2):
                # At this point, the func decrease condition is satisfied,
                # so if the grad decrease also is satisfied, we're done.
                return self.x + a2 * self.dx
            elif dp2 >= 0:
                # If only the func decrease is satisfied, but the phi' is positive
                # we've successfully bracketed.
                return self._zoom(a2, p2, dp2, a1, p1, dp1)
            else:
                # Otherwise, we're lower than initial f and previous f,
                # and the slope is still negative and steeper than initial.
                # We can get more aggressive and increase the step.
                a1 = a2
                p1 = p2
                dp1 = dp2
                a2 = a2 * 1.5
                p2 = self._phi(a2)
                dp2 = self._dphi(a2)
    def _zoom(self, a1, p1, dp1, a2, p2, dp2):
        """
        Find a solution in the interval, [a1, a2], assuming that phi(a1) < phi(a2).
        """
        # Repeatedly shrink the bracket around the minimum estimated by
        # _compute_minimum until both Wolfe conditions hold there.
        while True:
            a, p, dp = self._compute_minimum(a1, p1, dp1, a2, p2, dp2)
            if not self._func_decreased(a) or p > p1:
                # If still lower than initial f or still higher than low
                # then make this the new high.
                a2 = a
                p2 = p
                dp2 = dp
            else:
                if self._grad_decreased(a):
                    # Both conditions satisfied, so we're done.
                    return self.x + a * self.dx
                elif dp * (a2 - a1) >= 0:
                    # We have a new low and the slope has the right sign.
                    a2 = a1
                    p2 = p1
                    dp2 = dp1
                a1 = a
                p1 = p
                dp1 = dp
    def _compute_minimum(self, a1, p1, dp1, a2, p2, dp2):
        """
        Estimate the minimum as the midpoint.

        Subclasses override this with interpolation-based estimates; the
        return value is the triple (a, phi(a), phi'(a)).
        """
        a = 0.5 * a1 + 0.5 * a2
        p = self._phi(a)
        dp = self._dphi(a)
        return a, p, dp
class QuadraticLineSearch(BracketedLineSearch):
    """
    Bracketed line search whose zoom step fits a quadratic through the two
    endpoint values and the slope at a1, then jumps to its vertex.
    """

    def _compute_minimum(self, a1, p1, dp1, a2, p2, dp2):
        """Return (a, phi(a), phi'(a)) at the minimizer of the interpolant."""
        # Fit c0 + c1*a + c2*a^2 to phi(a1), phi(a2), and phi'(a1).
        vander = np.array(
            [
                [1.0, a1, a1**2],
                [1.0, a2, a2**2],
                [0.0, 1.0, 2 * a1],
            ]
        )
        c0, c1, c2 = np.linalg.solve(vander, [p1, p2, dp1])
        # The derivative c1 + 2*c2*a vanishes at the vertex.
        a = -c1 / (2 * c2)
        return a, self._phi(a), self._dphi(a)
class CubicLineSearch(BracketedLineSearch):
    """
    Use cubic interpolation in the zoom method.
    """
    def _compute_minimum(self, a1, p1, dp1, a2, p2, dp2):
        # Fit c0 + c1*a + c2*a^2 + c3*a^3 through the endpoint values and
        # slopes, then pick whichever real root of the interpolant's
        # derivative lies inside the bracket and improves on both endpoints.
        cubic_mtx = np.zeros((4, 4))
        cubic_mtx[0, :] = [1.0, a1, a1**2, a1**3]
        cubic_mtx[1, :] = [1.0, a2, a2**2, a2**3]
        cubic_mtx[2, :] = [0.0, 1.0, 2 * a1, 3 * a1**2]
        cubic_mtx[3, :] = [0.0, 1.0, 2 * a2, 3 * a2**2]
        c0, c1, c2, c3 = np.linalg.solve(cubic_mtx, [p1, p2, dp1, dp2])
        # Coefficients of the derivative d0 + d1*a + d2*a^2.
        d0 = c1
        d1 = 2 * c2
        d2 = 3 * c3
        r1, r2 = np.roots([d2, d1, d0])
        # NOTE(review): if neither root is accepted below, `a` stays None and
        # `dp` is never bound, so the return would raise; also the chained
        # comparisons run before the np.isreal check and may fail for complex
        # roots -- confirm intended behavior upstream.
        a = None
        p = max(p1, p2)
        if (a1 <= r1 <= a2 or a2 <= r1 <= a1) and np.isreal(r1):
            px = self._phi(r1)
            if px < p:
                a = r1
                p = px
                dp = self._dphi(r1)
        if (a1 <= r2 <= a2 or a2 <= r2 <= a1) and np.isreal(r2):
            px = self._phi(r2)
            if px < p:
                a = r2
                p = px
                dp = self._dphi(r2)
        return a, p, dp
| 7,762 | 28.743295 | 87 | py |
smt | smt-master/smt/utils/kriging.py | """
Author: Dr. Mohamed A. Bouhlel <mbouhlel@umich.edu>
This package is distributed under New BSD license.
"""
import warnings
import numpy as np
from enum import Enum
from copy import deepcopy
import os
from sklearn.cross_decomposition import PLSRegression as pls
from pyDOE2 import bbdesign
from sklearn.metrics.pairwise import check_pairwise_arrays
from smt.utils.design_space import BaseDesignSpace, CategoricalVariable
USE_NUMBA_JIT = int(os.getenv("USE_NUMBA_JIT", 0))
prange = range
if USE_NUMBA_JIT:
from numba import njit, prange
"""
Quick benchmarking with the mixed-integer hierarchical Goldstein function indicates the following:
| Scenario | No numba | Numba | Numba with caching | Speedup | Overhead |
|--------------------------|----------|---------|--------------------|---------|----------|
| HGoldstein 15 pt DoE | 1.3 sec | ~25 sec | 1.1 sec | 15% | 24 sec |
| HGoldstein 150 pt DoE | 38 sec | ~29 sec | 7.4 sec | 80% | 23 sec |
Important to note: caching is only needed once after installation of smt, so users will only
experience this overhead ONCE --> the rest of the time they use smt it will be faster than without numba!
"""
def njit_use(parallel=False):
    """Return numba's njit decorator (with on-disk caching) when USE_NUMBA_JIT
    is enabled, otherwise an identity decorator that leaves functions as-is."""
    if not USE_NUMBA_JIT:
        return lambda func: func
    # njit: https://numba.readthedocs.io/en/stable/user/jit.html#nopython
    # cache=True: https://numba.readthedocs.io/en/stable/user/jit.html#cache
    # parallel=True: https://numba.readthedocs.io/en/stable/user/parallel.html
    return njit(parallel=parallel, cache=True)
class MixHrcKernelType(Enum):
    # Hierarchical-kernel variants applied to decreed (conditionally-acting)
    # continuous variables; consumed by gower_componentwise_distances.
    ARC_KERNEL = "ARC_KERNEL"
    ALG_KERNEL = "ALG_KERNEL"
def cross_distances(X, y=None):
    """
    Computes the nonzero componentwise cross-distances between the vectors
    in X or between the vectors in X and the vectors in y.

    Parameters
    ----------
    X: np.ndarray [n_obs, dim]
            - The input variables.
    y: np.ndarray [n_y, dim]
            - The training data.

    Returns
    -------
    D: np.ndarray [n_obs * (n_obs - 1) / 2, dim]
            - The cross-distances between the vectors in X.
    ij: np.ndarray [n_obs * (n_obs - 1) / 2, 2]
            - The indices i and j of the vectors in X associated to the cross-
            distances in D.
    """
    n_samples, n_features = X.shape
    if y is None:
        # X vs X: only the strict upper triangle of the pair matrix.
        n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
        ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int32)
        D = np.zeros((n_nonzero_cross_dist, n_features))
        # Filled in place by the (optionally numba-compiled) helper.
        _cross_dist_mat(n_samples, ij, X, D)
    else:
        # X vs y: all n_samples * n_y cross pairs.
        n_y, n_features = y.shape
        X, y = check_pairwise_arrays(X, y)
        n_nonzero_cross_dist = n_samples * n_y
        ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int32)
        D = np.zeros((n_nonzero_cross_dist, n_features))
        _cross_dist_mat_y(n_nonzero_cross_dist, n_y, X, y, D, ij)
    return D, ij.astype(np.int32)
@njit_use()
def _cross_dist_mat(n_samples, ij, X, D):
    # Fill ij with the (i, j) pairs of the strict upper triangle, row-block
    # by row-block, and D with the componentwise differences X[i] - X[j].
    # ij and D are written in place (numba-friendly: no allocations).
    ll_1 = 0
    for k in range(n_samples - 1):
        ll_0 = ll_1
        ll_1 = ll_0 + n_samples - k - 1
        ij[ll_0:ll_1, 0] = k
        ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
        D[ll_0:ll_1] = X[k] - X[(k + 1) : n_samples]
@njit_use()
def _cross_dist_mat_y(n_nonzero_cross_dist, n_y, X, y, D, ij):
    # Fill D with X[i] - y[j] for every cross pair, and ij with the pair
    # indices; row k corresponds to (i, j) = (k // n_y, k % n_y).
    # D and ij are written in place; the loop parallelizes under numba.
    for k in prange(n_nonzero_cross_dist):
        xk = k // n_y
        yk = k % n_y
        D[k] = X[xk] - y[yk]
        ij[k, 0] = xk
        ij[k, 1] = yk
def cross_levels(X, ij, design_space, y=None):
    """
    Returns the levels corresponding to the indices i and j of the vectors in X and the number of levels.

    Parameters
    ----------
    X: np.ndarray [n_obs, dim]
            - The input variables.
    y: np.ndarray [n_y, dim]
            - The training data.
    ij: np.ndarray [n_obs * (n_obs - 1) / 2, 2]
            - The indices i and j of the vectors in X associated to the cross-
            distances in D.
    design_space: BaseDesignSpace
            - The design space definition

    Returns
    -------
    Lij: np.ndarray [n_obs * (n_obs - 1) / 2, 2]
            - The levels corresponding to the indices i and j of the vectors in X.
    n_levels: np.ndarray
            - The number of levels for every categorical variable.
    """
    # One entry per categorical design variable: its number of levels.
    n_levels = np.array(
        [
            dv.n_values
            for dv in design_space.design_variables
            if isinstance(dv, CategoricalVariable)
        ]
    )
    n_var = n_levels.shape[0]
    n_pairs = ij.shape[0]
    # Keep only the categorical columns of X (boolean mask from compute_X_cont).
    _, cat_features = compute_X_cont(X, design_space)
    X_cat = X[:, cat_features]
    if y is not None:
        Lij = _cross_levels_mat_y(n_var, n_pairs, X_cat, ij, y, cat_features)
    else:
        Lij = _cross_levels_mat(n_var, n_pairs, X_cat, ij)
    return Lij, n_levels
@njit_use(parallel=True)
def _cross_levels_mat(n_var, n, X_cat, ij):
    # For every categorical variable k and index pair (i, j), record the
    # level of observation i and of observation j (X-vs-X case).
    Lij = np.zeros((n_var, n, 2))
    for k in prange(n_var):
        for l in prange(n):
            i, j = ij[l]
            Lij[k][l][0] = X_cat[i, k]
            Lij[k][l][1] = X_cat[j, k]
    return Lij
@njit_use(parallel=True)
def _cross_levels_mat_y(n_var, n, X_cat, ij, y, cat_features):
    # Same as _cross_levels_mat, but the second index of every pair refers
    # to the training data y (restricted to its categorical columns).
    Lij = np.zeros((n_var, n, 2))
    y_cat = y[:, cat_features]
    for k in prange(n_var):
        for l in prange(n):
            i, j = ij[l]
            Lij[k][l][0] = X_cat[i, k]
            Lij[k][l][1] = y_cat[j, k]
    return Lij
def cross_levels_homo_space(X, ij, y=None):
    """
    Computes the nonzero componentwise (or Hadamard) product between the vectors
    in X (or between the vectors in X and the vectors in y).

    Parameters
    ----------
    X: np.ndarray [n_obs, dim]
            - The input variables.
    y: np.ndarray [n_y, dim]
            - The training data.
    ij: np.ndarray [n_obs * (n_obs - 1) / 2, 2]
            - The indices i and j of the vectors in X associated to the cross-
            distances in D.

    Returns
    -------
    dx: np.ndarray [n_obs * (n_obs - 1) / 2,dim]
            - The Hadamard product between the vectors in X.
    """
    dim = np.shape(X)[1]
    n = ij.shape[0]
    rows = ij[:, 0]
    cols = ij[:, 1]
    # Vectorized gather-and-multiply: one fancy-indexed numpy operation
    # replaces the former Python-level loop over all index pairs. The
    # float64 buffer preserves the original output dtype.
    dx = np.zeros((n, dim))
    if y is None:
        dx[:] = X[rows] * X[cols]
    else:
        dx[:] = X[rows] * y[cols]
    return dx
def compute_X_cont(x, design_space):
    """
    Gets the X_cont part of a vector x for mixed integer

    Parameters
    ----------
    x: np.ndarray [n_obs, dim]
            - The input variables.
    design_space : BaseDesignSpace
            - The design space definition

    Returns
    -------
    X_cont: np.ndarray [n_obs, dim_cont]
            - The non categorical values of the input variables.
    cat_features: np.ndarray [dim]
            - Boolean mask over the input dimensions: True where the
              dimension is categorical (it is used with ~ below, and by
              callers, as a mask rather than as an index list).
    """
    is_cat_mask = design_space.is_cat_mask
    return x[:, ~is_cat_mask], is_cat_mask
def gower_componentwise_distances(
    X, x_is_acting, design_space, hierarchical_kernel, y=None, y_is_acting=None
):
    """
    Computes the nonzero Gower-distances componentwise between the vectors
    in X.

    Parameters
    ----------
    X: np.ndarray [n_obs, dim]
            - The input variables.
    x_is_acting: np.ndarray [n_obs, dim]
            - is_acting matrix for the inputs
    design_space : BaseDesignSpace
            - The design space definition
    y: np.ndarray [n_y, dim]
            - The training data
    y_is_acting: np.ndarray [n_y, dim]
            - is_acting matrix for the training points
    hierarchical_kernel: MixHrcKernelType
            - Which hierarchical-kernel distance to apply to decreed
              (conditionally-acting) continuous dimensions.

    Returns
    -------
    D: np.ndarray [n_obs * (n_obs - 1) / 2, dim]
            - The gower distances between the vectors in X.
    ij: np.ndarray [n_obs * (n_obs - 1) / 2, 2]
            - The indices i and j of the vectors in X associated to the cross-
            distances in D.
    X_cont: np.ndarray [n_obs, dim_cont]
            - The non categorical values of the input variables.
            (ij and X_cont are returned only when y is None.)
    """
    X = X.astype(np.float64)
    Xt = X
    X_cont, cat_features = compute_X_cont(Xt, design_space)
    is_decreed = design_space.is_conditionally_acting
    # function checks
    if y is None:
        # X-vs-X mode: compare X against itself with its own acting matrix.
        Y = X
        y_is_acting = x_is_acting
    else:
        Y = y
        if y_is_acting is None:
            raise ValueError(f"Expected y_is_acting because y is given")
    if not isinstance(X, np.ndarray):
        # DataFrame-like inputs must share their column layout.
        if not np.array_equal(X.columns, Y.columns):
            raise TypeError("X and Y must have same columns!")
    else:
        if not X.shape[1] == Y.shape[1]:
            raise TypeError("X and Y must have same y-dim!")
    if x_is_acting.shape != X.shape or y_is_acting.shape != Y.shape:
        raise ValueError(f"is_acting matrices must have same shape as X!")
    x_n_rows, x_n_cols = X.shape
    y_n_rows, y_n_cols = Y.shape
    if not isinstance(X, np.ndarray):
        X = np.asarray(X)
    if not isinstance(Y, np.ndarray):
        Y = np.asarray(Y)
    # Stack X on top of Y so the categorical/continuous column splits and
    # the normalization below are applied to both consistently.
    Z = np.concatenate((X, Y))
    z_is_acting = np.concatenate((x_is_acting, y_is_acting))
    Z_cat = Z[:, cat_features]
    z_cat_is_acting = z_is_acting[:, cat_features]
    cat_is_decreed = is_decreed[cat_features]
    x_index = range(0, x_n_rows)
    y_index = range(x_n_rows, x_n_rows + y_n_rows)
    X_cat = Z_cat[x_index,]
    Y_cat = Z_cat[y_index,]
    x_cat_is_acting = z_cat_is_acting[x_index,]
    y_cat_is_acting = z_cat_is_acting[y_index,]
    # To support categorical decreed variables, some extra math wizardry is needed
    if np.any(cat_is_decreed) or np.any(~x_cat_is_acting) or np.any(~y_cat_is_acting):
        raise ValueError(
            "Decreed (conditionally-active) categorical variables are not supported yet!"
        )
    # This is to normalize the numeric values between 0 and 1.
    Z_num = Z[:, ~cat_features]
    z_num_is_acting = z_is_acting[:, ~cat_features]
    num_is_decreed = is_decreed[~cat_features]
    num_bounds = design_space.get_num_bounds()[~cat_features, :]
    if num_bounds.shape[0] > 0:
        Z_offset = num_bounds[:, 0]
        Z_max = num_bounds[:, 1]
        Z_scale = Z_max - Z_offset
        Z_num = (Z_num - Z_offset) / Z_scale
    X_num = Z_num[x_index,]
    Y_num = Z_num[y_index,]
    x_num_is_acting = z_num_is_acting[x_index,]
    y_num_is_acting = z_num_is_acting[y_index,]
    # Categorical part: 0/1 mismatch indicators; numeric part: |dx| (with the
    # hierarchical-kernel replacement for decreed dimensions).
    D_cat = compute_D_cat(X_cat, Y_cat, y)
    D_num, ij = compute_D_num(
        X_num,
        Y_num,
        x_num_is_acting,
        y_num_is_acting,
        num_is_decreed,
        y,
        hierarchical_kernel,
    )
    # The `* 0` just builds a correctly-shaped zero buffer; the columns are
    # then scattered back into their original (pre-split) positions.
    D = np.concatenate((D_cat, D_num), axis=1) * 0
    D[:, np.logical_not(cat_features)] = D_num
    D[:, cat_features] = D_cat
    if y is not None:
        return D
    else:
        return D, ij.astype(np.int32), X_cont
@njit_use(parallel=True)
def compute_D_cat(X_cat, Y_cat, y):
    # Componentwise 0/1 mismatch indicators for the categorical dimensions.
    # When y is None, only the strict upper triangle of the X-vs-X pair
    # matrix is produced (same ordering as cross_distances); otherwise all
    # nx * ny cross pairs are produced.
    nx_samples, n_features = X_cat.shape
    ny_samples, n_features = Y_cat.shape
    n_nonzero_cross_dist = nx_samples * ny_samples
    if y is None:
        n_nonzero_cross_dist = nx_samples * (nx_samples - 1) // 2
    D_cat = np.zeros((n_nonzero_cross_dist, n_features))
    indD = 0
    k1max = nx_samples
    if y is None:
        k1max = nx_samples - 1
    for k1 in range(k1max):
        k2max = ny_samples
        if y is None:
            k2max = ny_samples - k1 - 1
        # Inner loop parallelizes under numba; rows are written at disjoint
        # offsets indD + k2, so there is no write contention.
        for k2 in prange(k2max):
            l2 = k2
            if y is None:
                # Triangular layout: pair (k1, k1 + 1 + k2).
                l2 = k2 + k1 + 1
            D_cat[indD + k2] = X_cat[k1] != Y_cat[l2]
        indD += k2max
    return D_cat
@njit_use()  # setting parallel=True results in a stack overflow
def compute_D_num(
    X_num,
    Y_num,
    x_num_is_acting,
    y_num_is_acting,
    num_is_decreed,
    y,
    hierarchical_kernel,
):
    # Componentwise |x - y| distances for the continuous dimensions, with
    # the same pair ordering as compute_D_cat (triangular when y is None,
    # full cross product otherwise). Also builds the (i, j) pair index list.
    nx_samples, n_features = X_num.shape
    ny_samples, n_features = Y_num.shape
    n_nonzero_cross_dist = nx_samples * ny_samples
    if y is None:
        n_nonzero_cross_dist = nx_samples * (nx_samples - 1) // 2
    D_num = np.zeros((n_nonzero_cross_dist, n_features))
    ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int32)
    ll_1 = 0
    indD = 0
    k1max = nx_samples
    if y is None:
        k1max = nx_samples - 1
    for k1 in range(k1max):
        k2max = ny_samples
        if y is None:
            k2max = ny_samples - k1 - 1
        ll_0 = ll_1
        # NOTE(review): these ij row blocks are sized from nx_samples, which
        # matches the triangular (y is None) layout only; the caller appears
        # to consume ij only in that case -- verify before reusing ij when y
        # is given.
        ll_1 = ll_0 + nx_samples - k1 - 1
        ij[ll_0:ll_1, 0] = k1
        ij[ll_0:ll_1, 1] = np.arange(k1 + 1, nx_samples)
        for k2 in range(k2max):
            l2 = k2
            if y is None:
                l2 = k2 + k1 + 1
            D_num[indD] = np.abs(X_num[k1] - Y_num[l2])
            indD += 1
    if np.any(num_is_decreed):
        # Replace plain |dx| with the hierarchical-kernel distance for the
        # conditionally-acting (decreed) dimensions.
        D_num = apply_the_algebraic_distance_to_the_decreed_variable(
            X_num,
            Y_num,
            x_num_is_acting,
            y_num_is_acting,
            num_is_decreed,
            y,
            D_num,
            hierarchical_kernel,
        )
    return D_num, ij
@njit_use()  # setting parallel=True results in a stack overflow
def apply_the_algebraic_distance_to_the_decreed_variable(
    X_num,
    Y_num,
    x_num_is_acting,
    y_num_is_acting,
    num_is_decreed,
    y,
    D_num,
    hierarchical_kernel,
):
    # Overwrite the rows of D_num so that decreed (conditionally-acting)
    # continuous dimensions use the hierarchical-kernel distance instead of
    # plain |dx|; the pair enumeration mirrors compute_D_num exactly
    # (triangular when y is None, full cross product otherwise).
    nx_samples, n_features = X_num.shape
    ny_samples, n_features = Y_num.shape
    indD = 0
    k1max = nx_samples
    if y is None:
        k1max = nx_samples - 1
    for k1 in range(k1max):
        k2max = ny_samples
        if y is None:
            k2max = ny_samples - k1 - 1
        x_k1_acting = x_num_is_acting[k1]
        for k2 in range(k2max):
            l2 = k2
            if y is None:
                l2 = k2 + k1 + 1
            # Non-decreed dimensions keep the plain componentwise distance.
            abs_delta = np.abs(X_num[k1] - Y_num[l2])
            y_l2_acting = y_num_is_acting[l2]
            # Calculate the distances between the decreed (aka conditionally acting) variables
            if hierarchical_kernel == MixHrcKernelType.ALG_KERNEL:
                # Algebraic kernel: 2|dx| / (sqrt(1+x^2) sqrt(1+y^2)).
                abs_delta[num_is_decreed] = (
                    2
                    * np.abs(X_num[k1][num_is_decreed] - Y_num[l2][num_is_decreed])
                    / (
                        np.sqrt(1 + X_num[k1][num_is_decreed] ** 2)
                        * np.sqrt(1 + Y_num[l2][num_is_decreed] ** 2)
                    )
                )
            elif hierarchical_kernel == MixHrcKernelType.ARC_KERNEL:
                # Arc kernel: sqrt(2) * sqrt(1 - cos(pi |dx|)).
                abs_delta[num_is_decreed] = np.sqrt(2) * np.sqrt(
                    1
                    - np.cos(
                        np.pi
                        * np.abs(X_num[k1][num_is_decreed] - Y_num[l2][num_is_decreed])
                    )
                )
            # Set distances for non-acting variables: 0 if both are non-acting, 1 if only one is non-acting
            both_non_acting = num_is_decreed & ~(x_k1_acting | y_l2_acting)
            abs_delta[both_non_acting] = 0.0
            either_acting = num_is_decreed & (x_k1_acting != y_l2_acting)
            abs_delta[either_acting] = 1.0
            D_num[indD] = abs_delta
            indD += 1
    return D_num
def differences(X, Y):
    """Compute the componentwise differences X[i] - Y[j] for every pair
    (i, j), flattened to shape (n_x * n_y, dim)."""
    X, Y = check_pairwise_arrays(X, Y)
    # Broadcasting builds the (n_x, n_y, dim) difference cube in one shot.
    diff = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
    return diff.reshape(-1, X.shape[1])
def compute_X_cross(X, n_levels):
    """
    Computes the full space cross-relaxation of the input X for
    the homoscedastic hypersphere kernel.

    Each column of the result corresponds to one unordered pair of levels
    (i, j) with i < j; the entry for an observation is 1 when its level is
    either i or j, and 0 otherwise.

    Parameters
    ----------
    X: np.ndarray [n_obs, 1]
            - The input variables (level index of one categorical variable).
    n_levels: np.ndarray
            - The number of levels for the categorical variable.

    Returns
    -------
    Zeta: np.ndarray [n_obs, n_levels * (n_levels - 1) / 2]
            - The pairwise level-membership indicators described above.
    """
    dim = int(n_levels * (n_levels - 1) / 2)
    nt = len(X)
    Zeta = np.zeros((nt, dim))
    col = 0
    for i in range(n_levels):
        # One column per unordered pair, iterating j > i only.
        for j in range(i + 1, n_levels):
            # `row` is the observation index, advanced for EVERY observation,
            # so the indicator lands on the matching observation's own row.
            for row, x in enumerate(X):
                if int(x) == i or int(x) == j:
                    Zeta[row, col] = 1
            col += 1
    return Zeta
def abs_exp(theta, d, grad_ind=None, hess_ind=None, derivative_params=None):
    """
    Absolute exponential autocorrelation model
    (Ornstein-Uhlenbeck stochastic process).

    Evaluation is delegated to pow_exp; presumably the componentwise terms
    in d already carry the appropriate power for this kernel (set by the
    caller) -- verify against the distance-construction code.

    Parameters
    ----------
    theta : list[small_d * n_comp]
        Hyperparameters of the correlation model.
    d : np.ndarray[n_obs * (n_obs - 1) / 2, n_comp]
        Componentwise distances.
    grad_ind : int, optional
        Component for which the gradient dr/dtheta is computed. Default None.
    hess_ind : int, optional
        Component for which the hessian d2r/dtheta2 is computed. Default None.
    derivative_params : dict, optional
        Arguments needed to compute the gradient dr/dx. Default None.

    Returns
    -------
    r : np.ndarray[n_obs * (n_obs - 1) / 2, 1]
        Values of the autocorrelation model.
    """
    return pow_exp(
        theta,
        d,
        grad_ind=grad_ind,
        hess_ind=hess_ind,
        derivative_params=derivative_params,
    )
def squar_exp(theta, d, grad_ind=None, hess_ind=None, derivative_params=None):
    """
    Squared exponential autocorrelation model.

    Evaluation is delegated to pow_exp; presumably the componentwise terms
    in d are already squared by the caller before reaching this kernel --
    verify against the distance-construction code.

    Parameters
    ----------
    theta : list[small_d * n_comp]
        Hyperparameters of the correlation model.
    d : np.ndarray[n_obs * (n_obs - 1) / 2, n_comp]
        Componentwise distances.
    grad_ind : int, optional
        Component for which the gradient dr/dtheta is computed. Default None.
    hess_ind : int, optional
        Component for which the hessian d2r/dtheta2 is computed. Default None.
    derivative_params : dict, optional
        Arguments needed to compute the gradient dr/dx. Default None.

    Returns
    -------
    r : np.ndarray[n_obs * (n_obs - 1) / 2, 1]
        Values of the autocorrelation model.
    """
    return pow_exp(
        theta,
        d,
        grad_ind=grad_ind,
        hess_ind=hess_ind,
        derivative_params=derivative_params,
    )
def pow_exp(theta, d, grad_ind=None, hess_ind=None, derivative_params=None):
    """
    Generative exponential autocorrelation model.

    Computes r = exp(-sum_k theta_k * d_k) row by row; the loops below
    process the rows in chunks of nb_limit to bound temporary memory.

    Parameters
    ----------
    theta : list[small_d * n_comp]
        Hyperparameters of the correlation model
    d: np.ndarray[n_obs * (n_obs - 1) / 2, n_comp]
        d_i otherwise
    grad_ind : int, optional
        Indice for which component the gradient dr/dtheta must be computed. The default is None.
    hess_ind : int, optional
        Indice for which component the hessian d²r/d²(theta) must be computed. The default is None.
    derivative_paramas : dict, optional
        List of arguments mandatory to compute the gradient dr/dx. The default is None.

    Raises
    ------
    Exception
        Assure that theta is of the good length

    Returns
    -------
    r: np.ndarray[n_obs * (n_obs - 1) / 2,1]
        An array containing the values of the autocorrelation model.
    """
    r = np.zeros((d.shape[0], 1))
    n_components = d.shape[1]
    # Construct/split the correlation matrix
    i, nb_limit = 0, int(1e4)
    while i * nb_limit <= d.shape[0]:
        r[i * nb_limit : (i + 1) * nb_limit, 0] = np.exp(
            -np.sum(
                theta.reshape(1, n_components)
                * d[i * nb_limit : (i + 1) * nb_limit, :],
                axis=1,
            )
        )
        i += 1
    i = 0
    # Each derivative pass multiplies r by -d[:, ind], so requesting both
    # grad_ind and hess_ind yields the (mixed) second derivative in theta.
    if grad_ind is not None:
        while i * nb_limit <= d.shape[0]:
            r[i * nb_limit : (i + 1) * nb_limit, 0] = (
                -d[i * nb_limit : (i + 1) * nb_limit, grad_ind]
                * r[i * nb_limit : (i + 1) * nb_limit, 0]
            )
            i += 1
    i = 0
    if hess_ind is not None:
        while i * nb_limit <= d.shape[0]:
            r[i * nb_limit : (i + 1) * nb_limit, 0] = (
                -d[i * nb_limit : (i + 1) * nb_limit, hess_ind]
                * r[i * nb_limit : (i + 1) * nb_limit, 0]
            )
            i += 1
    if derivative_params is not None:
        # Gradient with respect to x: dr/dx = -r * dd, with dd supplied by
        # the caller in derivative_params["dd"].
        dd = derivative_params["dd"]
        r = r.T
        dr = -np.einsum("i,ij->ij", r[0], dd)
        return r.T, dr
    return r
def matern52(theta, d, grad_ind=None, hess_ind=None, derivative_params=None):
    """
    Matern 5/2 correlation model.

    r = prod_k (1 + sqrt(5) l_k + 5/3 l_k^2) * exp(-sqrt(5) sum_k l_k)
    with l_k = theta_k * d_k; rows are processed in chunks of nb_limit to
    bound temporary memory.

    Parameters
    ----------
    theta : list[small_d * n_comp]
        Hyperparameters of the correlation model
    d: np.ndarray[n_obs * (n_obs - 1) / 2, n_comp]
        d_i otherwise
    grad_ind : int, optional
        Indice for which component the gradient dr/dtheta must be computed. The default is None.
    hess_ind : int, optional
        Indice for which component the hessian d²r/d²(theta) must be computed. The default is None.
    derivative_params : dict, optional
        List of arguments mandatory to compute the gradient dr/dx. The default is None.

    Raises
    ------
    Exception
        Assure that theta is of the good length

    Returns
    -------
    r: np.ndarray[n_obs * (n_obs - 1) / 2,1]
        An array containing the values of the autocorrelation model.
    """
    r = np.zeros((d.shape[0], 1))
    n_components = d.shape[1]
    # Construct/split the correlation matrix
    i, nb_limit = 0, int(1e4)
    while i * nb_limit <= d.shape[0]:
        ll = theta.reshape(1, n_components) * d[i * nb_limit : (i + 1) * nb_limit, :]
        r[i * nb_limit : (i + 1) * nb_limit, 0] = (
            1.0 + np.sqrt(5.0) * ll + 5.0 / 3.0 * ll**2.0
        ).prod(axis=1) * np.exp(-np.sqrt(5.0) * (ll.sum(axis=1)))
        i += 1
    i = 0
    # Keep a copy of the plain kernel values: needed for the diagonal
    # second-derivative correction below.
    M52 = r.copy()
    if grad_ind is not None:
        # First derivative with respect to theta[grad_ind], overwriting r.
        theta_r = theta.reshape(1, n_components)
        while i * nb_limit <= d.shape[0]:
            fact_1 = (
                np.sqrt(5) * d[i * nb_limit : (i + 1) * nb_limit, grad_ind]
                + 10.0
                / 3.0
                * theta_r[0, grad_ind]
                * d[i * nb_limit : (i + 1) * nb_limit, grad_ind] ** 2.0
            )
            fact_2 = (
                1.0
                + np.sqrt(5)
                * theta_r[0, grad_ind]
                * d[i * nb_limit : (i + 1) * nb_limit, grad_ind]
                + 5.0
                / 3.0
                * (theta_r[0, grad_ind] ** 2)
                * (d[i * nb_limit : (i + 1) * nb_limit, grad_ind] ** 2)
            )
            fact_3 = np.sqrt(5) * d[i * nb_limit : (i + 1) * nb_limit, grad_ind]
            r[i * nb_limit : (i + 1) * nb_limit, 0] = (fact_1 / fact_2 - fact_3) * r[
                i * nb_limit : (i + 1) * nb_limit, 0
            ]
            i += 1
    i = 0
    if hess_ind is not None:
        # Second derivative pass. NOTE(review): theta_r is bound only inside
        # the grad_ind branch above, so calling with hess_ind set but
        # grad_ind None would raise NameError -- confirm callers always pass
        # both together.
        while i * nb_limit <= d.shape[0]:
            fact_1 = (
                np.sqrt(5) * d[i * nb_limit : (i + 1) * nb_limit, hess_ind]
                + 10.0
                / 3.0
                * theta_r[0, hess_ind]
                * d[i * nb_limit : (i + 1) * nb_limit, hess_ind] ** 2.0
            )
            fact_2 = (
                1.0
                + np.sqrt(5)
                * theta_r[0, hess_ind]
                * d[i * nb_limit : (i + 1) * nb_limit, hess_ind]
                + 5.0
                / 3.0
                * (theta_r[0, hess_ind] ** 2)
                * (d[i * nb_limit : (i + 1) * nb_limit, hess_ind] ** 2)
            )
            fact_3 = np.sqrt(5) * d[i * nb_limit : (i + 1) * nb_limit, hess_ind]
            r[i * nb_limit : (i + 1) * nb_limit, 0] = (fact_1 / fact_2 - fact_3) * r[
                i * nb_limit : (i + 1) * nb_limit, 0
            ]
            if hess_ind == grad_ind:
                # Diagonal Hessian term gets an extra correction built from
                # the undifferentiated kernel values M52.
                fact_4 = (
                    10.0
                    / 3.0
                    * d[i * nb_limit : (i + 1) * nb_limit, hess_ind] ** 2.0
                    * fact_2
                )
                r[i * nb_limit : (i + 1) * nb_limit, 0] = (
                    (fact_4 - fact_1**2) / (fact_2) ** 2
                ) * M52[i * nb_limit : (i + 1) * nb_limit, 0] + r[
                    i * nb_limit : (i + 1) * nb_limit, 0
                ]
            i += 1
    if derivative_params is not None:
        # Gradient with respect to x, from the componentwise steps dx
        # supplied by the caller; dr = d(r)/d(x) built term by term below.
        dx = derivative_params["dx"]
        abs_ = abs(dx)
        sqr = np.square(dx)
        abs_0 = np.dot(abs_, theta)
        dr = np.zeros(dx.shape)
        A = np.zeros((dx.shape[0], 1))
        # A holds the exponential factor exp(-sqrt(5) * sum_k theta_k |dx_k|).
        for i in range(len(abs_0)):
            A[i][0] = np.exp(-np.sqrt(5) * abs_0[i])
        # der is the sign of each dx component (derivative of |dx|).
        der = np.ones(dx.shape)
        for i in range(len(der)):
            for j in range(n_components):
                if dx[i][j] < 0:
                    der[i][j] = -1
        # dB[j][k]: derivative of the polynomial product with respect to
        # x_k, using the product of the other components' factors as coef.
        dB = np.zeros((dx.shape[0], n_components))
        for j in range(dx.shape[0]):
            for k in range(n_components):
                coef = 1
                for l in range(n_components):
                    if l != k:
                        coef = coef * (
                            1
                            + np.sqrt(5) * abs_[j][l] * theta[l]
                            + (5.0 / 3) * sqr[j][l] * theta[l] ** 2
                        )
                dB[j][k] = (
                    np.sqrt(5) * theta[k] * der[j][k]
                    + 2 * (5.0 / 3) * der[j][k] * abs_[j][k] * theta[k] ** 2
                ) * coef
        # Product rule: dr = (dA) * B + A * (dB), with dA = -sqrt(5) theta der r.
        for j in range(dx.shape[0]):
            for k in range(n_components):
                dr[j][k] = (
                    -np.sqrt(5) * theta[k] * der[j][k] * r[j] + A[j][0] * dB[j][k]
                )
        return r, dr
    return r
def matern32(theta, d, grad_ind=None, hess_ind=None, derivative_params=None):
    """
    Matern 3/2 correlation model.

    r(theta, d) = prod_j (1 + sqrt(3) * theta_j * d_j)
                  * exp(-sqrt(3) * sum_j theta_j * d_j)

    Parameters
    ----------
    theta : list[small_d * n_comp]
        Hyperparameters of the correlation model.
    d : np.ndarray[n_obs * (n_obs - 1) / 2, n_comp]
        Componentwise cross-distances between the observations
        (assumed non-negative except in derivative_params["dx"]).
    grad_ind : int, optional
        Index of the component for which the gradient dr/dtheta must be
        computed. The default is None.
    hess_ind : int, optional
        Index of the component for which the hessian d²r/d²(theta) must be
        computed. The default is None.
    derivative_params : dict, optional
        Arguments mandatory to compute the spatial gradient dr/dx; must
        contain the key "dx". The default is None.

    Returns
    -------
    r : np.ndarray[n_obs * (n_obs - 1) / 2, 1]
        Values of the autocorrelation model (multiplied by the requested
        theta-derivative factors when grad_ind/hess_ind are given).
        When derivative_params is not None, the tuple (r, dr) is returned
        instead, where dr has the shape of derivative_params["dx"].
    """
    r = np.zeros((d.shape[0], 1))
    n_components = d.shape[1]

    # Construct/split the correlation matrix
    # (processed in chunks of nb_limit rows to bound peak memory usage).
    i, nb_limit = 0, int(1e4)
    theta_r = theta.reshape(1, n_components)
    while i * nb_limit <= d.shape[0]:
        ll = theta_r * d[i * nb_limit : (i + 1) * nb_limit, :]
        r[i * nb_limit : (i + 1) * nb_limit, 0] = (1.0 + np.sqrt(3.0) * ll).prod(
            axis=1
        ) * np.exp(-np.sqrt(3.0) * (ll.sum(axis=1)))
        i += 1
    i = 0

    # Keep a copy of the plain kernel values: needed by the diagonal hessian
    # term below, which mixes the (possibly grad-scaled) r with the raw kernel.
    M32 = r.copy()

    if grad_ind is not None:
        # Multiply r chunkwise by the dr/dtheta_{grad_ind} factor.
        while i * nb_limit <= d.shape[0]:
            fact_1 = (
                1.0
                / (
                    1.0
                    + np.sqrt(3.0)
                    * theta_r[0, grad_ind]
                    * d[i * nb_limit : (i + 1) * nb_limit, grad_ind]
                )
                - 1.0
            )
            r[i * nb_limit : (i + 1) * nb_limit, 0] = (
                fact_1
                * r[i * nb_limit : (i + 1) * nb_limit, 0]
                * np.sqrt(3.0)
                * d[i * nb_limit : (i + 1) * nb_limit, grad_ind]
            )
            i += 1
        i = 0

    if hess_ind is not None:
        # Apply the second theta-derivative factor; when hess_ind equals
        # grad_ind an extra diagonal correction built from M32 is subtracted.
        while i * nb_limit <= d.shape[0]:
            fact_2 = (
                1.0
                / (
                    1.0
                    + np.sqrt(3.0)
                    * theta_r[0, hess_ind]
                    * d[i * nb_limit : (i + 1) * nb_limit, hess_ind]
                )
                - 1.0
            )
            r[i * nb_limit : (i + 1) * nb_limit, 0] = (
                r[i * nb_limit : (i + 1) * nb_limit, 0]
                * fact_2
                * np.sqrt(3.0)
                * d[i * nb_limit : (i + 1) * nb_limit, hess_ind]
            )
            if grad_ind == hess_ind:
                fact_3 = (
                    3.0 * d[i * nb_limit : (i + 1) * nb_limit, hess_ind] ** 2.0
                ) / (
                    1.0
                    + np.sqrt(3.0)
                    * theta_r[0, hess_ind]
                    * d[i * nb_limit : (i + 1) * nb_limit, hess_ind]
                ) ** 2.0
                r[i * nb_limit : (i + 1) * nb_limit, 0] = (
                    r[i * nb_limit : (i + 1) * nb_limit, 0]
                    - fact_3 * M32[i * nb_limit : (i + 1) * nb_limit, 0]
                )
            i += 1

    if derivative_params is not None:
        # Spatial gradient dr/dx computed from the signed differences dx.
        dx = derivative_params["dx"]

        abs_ = abs(dx)
        abs_0 = np.dot(abs_, theta)
        dr = np.zeros(dx.shape)

        # A[j] = exp(-sqrt(3) * sum_l theta_l * |dx_jl|)
        A = np.zeros((dx.shape[0], 1))
        for i in range(len(abs_0)):
            A[i][0] = np.exp(-np.sqrt(3) * abs_0[i])

        # der holds sign(dx) elementwise (d|x|/dx).
        der = np.ones(dx.shape)
        for i in range(len(der)):
            for j in range(n_components):
                if dx[i][j] < 0:
                    der[i][j] = -1

        # dB[j][k]: derivative of the polynomial factor w.r.t. x_k, where
        # coef is the product of the factors for all other components l != k.
        dB = np.zeros((dx.shape[0], n_components))
        for j in range(dx.shape[0]):
            for k in range(n_components):
                coef = 1
                for l in range(n_components):
                    if l != k:
                        coef = coef * (1 + np.sqrt(3) * abs_[j][l] * theta[l])
                dB[j][k] = np.sqrt(3) * theta[k] * der[j][k] * coef

        # Product rule: dr = (dA) * B + A * (dB), with A*B == r.
        for j in range(dx.shape[0]):
            for k in range(n_components):
                dr[j][k] = (
                    -np.sqrt(3) * theta[k] * der[j][k] * r[j] + A[j][0] * dB[j][k]
                )
        return r, dr

    return r
def act_exp(theta, d, grad_ind=None, hess_ind=None, d_x=None, derivative_params=None):
    """
    Active learning exponential correlation model.

    The flat hyperparameter vector ``theta`` is reshaped into an embedding
    matrix A of shape (n_components, n_latent); the kernel value is
    exp(-0.5 * ||d A||^2) per row of d.

    Parameters
    ----------
    theta : list[small_d * n_comp]
        Hyperparameters of the correlation model; its length must be a
        multiple of the number of columns of d.
    d : np.ndarray[n_obs * (n_obs - 1) / 2, n_comp]
        Componentwise cross-distances between the observations.
    grad_ind : int, optional
        Flat index (latent * n_comp + component) for which the gradient
        dr/dtheta is computed. The default is None.
    hess_ind : int, optional
        Flat index for which the hessian d²r/d²(theta) is computed.
        The default is None.
    d_x : np.ndarray, optional
        Distances in the original (non-embedded) space; when given, index
        decomposition and the derivative factors use d_x instead of d.
    derivative_params : dict, optional
        Not supported for this kernel; passing it raises ValueError.

    Raises
    ------
    Exception
        If the length of theta is not a multiple of the number of components.
    ValueError
        If derivative_params is given (spatial Jacobian unavailable).

    Returns
    -------
    r : np.ndarray[n_obs * (n_obs - 1) / 2, 1]
        Values of the autocorrelation model (times the requested
        theta-derivative factors).
    """
    r = np.zeros((d.shape[0], 1))
    n_dim = d.shape[1]

    if len(theta) % n_dim != 0:
        raise Exception("Length of theta must be a multiple of n_components")
    n_latent = len(theta) // n_dim

    # Embedding matrix A (n_dim x n_latent) and embedded distances d_A.
    embedding = np.reshape(theta, (n_latent, n_dim)).T
    d_A = d.dot(embedding)

    # Necessary when working in embeddings space: switch back to the
    # original-space distances for the derivative factors below.
    if d_x is not None:
        d = d_x
        n_dim = d.shape[1]

    r[:, 0] = np.exp(-(1 / 2) * np.sum(d_A**2.0, axis=1))

    if grad_ind is not None:
        # Decompose the flat index into (component, latent) coordinates.
        comp_g = grad_ind % n_dim
        lat_g = grad_ind // n_dim
        if hess_ind is None:
            r[:, 0] = -d[:, comp_g] * d_A[:, lat_g] * r[:, 0]
        else:
            comp_h = hess_ind % n_dim
            lat_h = hess_ind // n_dim
            fact = -d_A[:, lat_g] * d_A[:, lat_h]
            if lat_h == lat_g:
                fact = 1 + fact
            r[:, 0] = -d[:, comp_g] * d[:, comp_h] * fact * r[:, 0]

    if derivative_params is not None:
        raise ValueError("Jacobians are not available for this correlation kernel")

    return r
def ge_compute_pls(X, y, n_comp, pts, delta_x, xlimits, extra_points):
    """
    Gradient-enhanced PLS-coefficients.

    For every training point, neighbouring samples are generated by a
    first-order Taylor approximation (FOTA) using the provided gradients,
    a PLS is fitted on that local cloud, and the absolute rotation
    coefficients are averaged over all training points.

    Parameters
    ----------
    X: np.ndarray [n_obs,dim]
        - The input variables.
    y: np.ndarray [n_obs,ny]
        - The output variable
    n_comp: int
        - Number of principal components used.
    pts: dict()
        - The gradient values (pts[None][j][1] holds d y / d x_j at the
          training points).
    delta_x: real
        - The step used in the FOTA.
    xlimits: np.ndarray[dim, 2]
        - The upper and lower var bounds.
    extra_points: int
        - The number of extra points per each training point.

    Returns
    -------
    Coeff_pls: np.ndarray[dim, n_comp]
        - The PLS-coefficients.
    XX: np.ndarray[extra_points*nt, dim]
        - Extra points added (when extra_points > 0)
    yy: np.ndarray[extra_points*nt, 1]
        - Extra points added (when extra_points > 0)
    """
    nt, dim = X.shape
    XX = np.empty(shape=(0, dim))
    yy = np.empty(shape=(0, y.shape[1]))
    _pls = pls(n_comp)

    coeff_pls = np.zeros((nt, dim, n_comp))
    for i in range(nt):
        if dim >= 3:
            # Box-Behnken design of perturbation directions around point i,
            # scaled by delta_x times the variable ranges.
            sign = np.roll(bbdesign(int(dim), center=1), 1, axis=0)
            _X = np.zeros((sign.shape[0], dim))
            _y = np.zeros((sign.shape[0], 1))
            sign = sign * delta_x * (xlimits[:, 1] - xlimits[:, 0])
            _X = X[i, :] + sign
            # FOTA: y is extrapolated along each direction using the gradient.
            for j in range(1, dim + 1):
                sign[:, j - 1] = sign[:, j - 1] * pts[None][j][1][i, 0]
            _y = y[i, :] + np.sum(sign, axis=1).reshape((sign.shape[0], 1))
        else:
            # dim <= 2: explicit 3x3 stencil (center + 8 neighbours).
            _X = np.zeros((9, dim))
            _y = np.zeros((9, 1))
            # center
            _X[:, :] = X[i, :].copy()
            _y[0, 0] = y[i, 0].copy()
            # right
            _X[1, 0] += delta_x * (xlimits[0, 1] - xlimits[0, 0])
            _y[1, 0] = _y[0, 0].copy() + pts[None][1][1][i, 0] * delta_x * (
                xlimits[0, 1] - xlimits[0, 0]
            )
            # up
            _X[2, 1] += delta_x * (xlimits[1, 1] - xlimits[1, 0])
            _y[2, 0] = _y[0, 0].copy() + pts[None][2][1][i, 0] * delta_x * (
                xlimits[1, 1] - xlimits[1, 0]
            )
            # left
            _X[3, 0] -= delta_x * (xlimits[0, 1] - xlimits[0, 0])
            _y[3, 0] = _y[0, 0].copy() - pts[None][1][1][i, 0] * delta_x * (
                xlimits[0, 1] - xlimits[0, 0]
            )
            # down
            _X[4, 1] -= delta_x * (xlimits[1, 1] - xlimits[1, 0])
            _y[4, 0] = _y[0, 0].copy() - pts[None][2][1][i, 0] * delta_x * (
                xlimits[1, 1] - xlimits[1, 0]
            )
            # right up
            _X[5, 0] += delta_x * (xlimits[0, 1] - xlimits[0, 0])
            _X[5, 1] += delta_x * (xlimits[1, 1] - xlimits[1, 0])
            _y[5, 0] = (
                _y[0, 0].copy()
                + pts[None][1][1][i, 0] * delta_x * (xlimits[0, 1] - xlimits[0, 0])
                + pts[None][2][1][i, 0] * delta_x * (xlimits[1, 1] - xlimits[1, 0])
            )
            # left up
            _X[6, 0] -= delta_x * (xlimits[0, 1] - xlimits[0, 0])
            _X[6, 1] += delta_x * (xlimits[1, 1] - xlimits[1, 0])
            _y[6, 0] = (
                _y[0, 0].copy()
                - pts[None][1][1][i, 0] * delta_x * (xlimits[0, 1] - xlimits[0, 0])
                + pts[None][2][1][i, 0] * delta_x * (xlimits[1, 1] - xlimits[1, 0])
            )
            # left down
            _X[7, 0] -= delta_x * (xlimits[0, 1] - xlimits[0, 0])
            _X[7, 1] -= delta_x * (xlimits[1, 1] - xlimits[1, 0])
            _y[7, 0] = (
                _y[0, 0].copy()
                - pts[None][1][1][i, 0] * delta_x * (xlimits[0, 1] - xlimits[0, 0])
                - pts[None][2][1][i, 0] * delta_x * (xlimits[1, 1] - xlimits[1, 0])
            )
            # right down
            _X[8, 0] += delta_x * (xlimits[0, 1] - xlimits[0, 0])
            _X[8, 1] -= delta_x * (xlimits[1, 1] - xlimits[1, 0])
            _y[8, 0] = (
                _y[0, 0].copy()
                + pts[None][1][1][i, 0] * delta_x * (xlimits[0, 1] - xlimits[0, 0])
                - pts[None][2][1][i, 0] * delta_x * (xlimits[1, 1] - xlimits[1, 0])
            )

        # As of sklearn 0.24.1 a zeroed _y raises an exception while sklearn 0.23 returns zeroed x_rotations
        # For now the try/except below is a workaround to restore the 0.23 behaviour
        try:
            _pls.fit(_X.copy(), _y.copy())
            coeff_pls[i, :, :] = _pls.x_rotations_
        except StopIteration:
            coeff_pls[i, :, :] = 0

        # Add additional points along the directions with the largest PLS
        # coefficients, again extrapolated with the FOTA.
        if extra_points != 0:
            max_coeff = np.argsort(np.abs(coeff_pls[i, :, 0]))[-extra_points:]
            for ii in max_coeff:
                XX = np.vstack((XX, X[i, :]))
                XX[-1, ii] += delta_x * (xlimits[ii, 1] - xlimits[ii, 0])
                yy = np.vstack((yy, y[i]))
                yy[-1] += (
                    pts[None][1 + ii][1][i]
                    * delta_x
                    * (xlimits[ii, 1] - xlimits[ii, 0])
                )
    return np.abs(coeff_pls).mean(axis=0), XX, yy
def componentwise_distance(
    D, corr, dim, power=None, theta=None, return_derivative=False
):
    """
    Computes the nonzero componentwise cross-spatial-correlation-distance
    between the vectors in X.

    Parameters
    ----------
    D: np.ndarray [n_obs * (n_obs - 1) / 2, dim]
        - The cross-distances between the vectors in X depending of the
          correlation function.
    corr: str
        - Name of the correlation function used (e.g. squar_exp, abs_exp,
          act_exp).
    dim: int
        - Number of dimension.
    power: float, optional
        - Power exponent of the correlation model; mandatory unless corr is
          "act_exp".
    theta: np.ndarray [n_comp], optional
        - Hyperparameters; required when return_derivative is True.
    return_derivative: boolean
        - Return d/dx derivative of theta*cross-spatial-correlation-distance

    Returns
    -------
    D_corr: np.ndarray [n_obs * (n_obs - 1) / 2, dim]
        - The componentwise cross-spatial-correlation-distance between the
        vectors in X (or its spatial derivative).
    """
    # power is only optional for the active-learning kernel.
    if power is None and corr != "act_exp":
        raise ValueError(
            "Missing power initialization to compute cross-spatial correlation distance"
        )

    if return_derivative:
        if theta is None:
            raise ValueError(
                "Missing theta to compute spatial derivative of theta cross-spatial correlation distance"
            )
        if corr == "act_exp":
            raise ValueError("this option is not implemented for active learning")
        # d/dx of theta_j * |D_ij|^power, with the sign carried by the helper.
        signed_pow = _comp_dist_derivative(D, power)
        return power * np.einsum("j,ij->ij", theta.T, signed_pow)

    if corr == "act_exp":
        return _comp_dist_act_exp(D, dim)
    return _comp_dist(D, dim, power)
@njit_use(parallel=True)
def _comp_dist_act_exp(D, dim):
    """Return the raw distances for the active-learning kernel.

    The act_exp kernel applies its own embedding to the distances, so the
    componentwise distance is simply a chunked copy of D (parallelized over
    chunks of nb_limit rows).
    """
    D_corr = np.zeros((D.shape[0], dim))
    nb_limit = 1000  # chunk size; the dead `i = 0` initializer was removed
    for i in prange((D_corr.shape[0] // nb_limit) + 1):
        D_corr[i * nb_limit : (i + 1) * nb_limit, :] = D[
            i * nb_limit : (i + 1) * nb_limit, :
        ]
    return D_corr
@njit_use()
def _comp_dist(D, dim, power):
    """Return |D| ** power elementwise, computed in chunks of rows."""
    D_corr = np.zeros((D.shape[0], dim))
    nb_limit = 1000  # chunk size; the dead `i = 0` initializer was removed
    for i in range((D_corr.shape[0] // nb_limit) + 1):
        D_corr[i * nb_limit : (i + 1) * nb_limit, :] = (
            np.abs(D[i * nb_limit : (i + 1) * nb_limit, :]) ** power
        )
    return D_corr
@njit_use()
def _comp_dist_derivative(D, power):
    """Return sign(D) * |D| ** (power - 1), elementwise."""
    der = np.ones(D.shape)
    for i, j in np.ndindex(D.shape):
        der[i][j] = np.abs(D[i][j]) ** (power - 1)
        if D[i][j] < 0:
            # carry the sign of D so that d(|D|^power)/dD is signed correctly
            der[i][j] = -der[i][j]
    return der
def componentwise_distance_PLS(
    D, corr, n_comp, coeff_pls, power=2.0, theta=None, return_derivative=False
):
    """
    Computes the nonzero componentwise cross-spatial-correlation-distance
    between the vectors in X, projected through the PLS coefficients.

    Parameters
    ----------
    D: np.ndarray [n_obs * (n_obs - 1) / 2, dim]
        - The L1 cross-distances between the vectors in X.
    corr: str
        - Name of the correlation function used.
          squar_exp or abs_exp.
    n_comp: int
        - Number of principal components used.
    coeff_pls: np.ndarray [dim, n_comp]
        - The PLS-coefficients.
    power: float
        - Power exponent; overridden to 2.0 for squar_exp and to 1.0 for
          abs_exp/matern32/matern52 (only honoured as-is for pow_exp).
    theta: np.ndarray [n_comp]
        - The theta values associated to the coeff_pls.
    return_derivative: boolean
        - Return d/dx derivative of theta*cross-spatial-correlation-distance

    Returns
    -------
    D_corr: np.ndarray [n_obs * (n_obs - 1) / 2, n_comp]
        - The componentwise cross-spatial-correlation-distance between the
        vectors in X.
    """
    # Fit the matrix iteratively: avoid some memory troubles .
    limit = int(1e4)

    # The power is fixed by the kernel family, regardless of the argument.
    if corr == "squar_exp":
        power = 2.0
        # assert power == 2.0, "The power coefficient for the squar exp should be 2.0"
    elif corr in ["abs_exp", "matern32", "matern52"]:
        power = 1.0

    D_corr = np.zeros((D.shape[0], n_comp))
    i, nb_limit = 0, int(limit)

    if return_derivative == False:
        # Project the (powered) distances onto the PLS components, chunk by
        # chunk of nb_limit rows.
        while True:
            if i * nb_limit > D_corr.shape[0]:
                return D_corr
            else:
                if corr == "squar_exp":
                    D_corr[i * nb_limit : (i + 1) * nb_limit, :] = np.dot(
                        D[i * nb_limit : (i + 1) * nb_limit, :] ** 2, coeff_pls**2
                    )
                elif corr == "pow_exp":
                    D_corr[i * nb_limit : (i + 1) * nb_limit, :] = np.dot(
                        np.abs(D[i * nb_limit : (i + 1) * nb_limit, :]) ** power,
                        np.abs(coeff_pls) ** power,
                    )
                else:
                    # abs_exp
                    D_corr[i * nb_limit : (i + 1) * nb_limit, :] = np.dot(
                        np.abs(D[i * nb_limit : (i + 1) * nb_limit, :]),
                        np.abs(coeff_pls),
                    )
                i += 1
    else:
        if theta is None:
            raise ValueError(
                "Missing theta to compute spatial derivative of theta cross-spatial correlation distance"
            )
        if corr == "squar_exp":
            # d/dx_j of sum_l theta_l * (D_ij * pls_jl)^2 = 2 * D_ij * sum_l theta_l * pls_jl^2
            D_corr = np.zeros(np.shape(D))
            for i, j in np.ndindex(D.shape):
                coef = 0
                for l in range(n_comp):
                    coef = coef + theta[l] * coeff_pls[j][l] ** 2
                coef = 2 * coef
                D_corr[i][j] = coef * D[i][j]
            return D_corr

        elif corr == "pow_exp":
            D_corr = np.zeros(np.shape(D))
            # NOTE(review): `der` stays all ones here, whereas the abs_exp
            # branch below flips it to -1 for negative D -- confirm whether
            # the sign of D should also be applied in this branch.
            der = np.ones(np.shape(D))
            for i, j in np.ndindex(D.shape):
                coef = 0
                for l in range(n_comp):
                    coef = coef + theta[l] * np.abs(coeff_pls[j][l]) ** power
                coef = power * coef
                D_corr[i][j] = coef * np.abs(D[i][j]) ** (power - 1) * der[i][j]
            return D_corr

        else:
            # abs_exp: derivative of |D| carries the sign of D.
            D_corr = np.zeros(np.shape(D))
            der = np.ones(np.shape(D))
            for i, j in np.ndindex(D.shape):
                if D[i][j] < 0:
                    der[i][j] = -1
                coef = 0
                for l in range(n_comp):
                    coef = coef + theta[l] * np.abs(coeff_pls[j][l])
                D_corr[i][j] = coef * der[i][j]
            return D_corr
# sklearn.gaussian_process.regression_models
# Copied from sklearn as it is deprecated since 0.19.1 and will be removed in sklearn 0.22
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# License: BSD 3 clause
"""
The built-in regression models submodule for the gaussian_process module.
"""
def constant(x):
    """
    Zero order polynomial (constant, p = 1) regression model.

    x --> f(x) = 1

    Parameters
    ----------
    x : array_like
        An array with shape (n_eval, n_features) giving the locations x at
        which the regression model should be evaluated.

    Returns
    -------
    f : array_like
        An array with shape (n_eval, 1) filled with ones.
    """
    n_eval = np.asarray(x, dtype=np.float64).shape[0]
    return np.ones((n_eval, 1))
def linear(x):
    """
    First order polynomial (linear, p = n+1) regression model.

    x --> f(x) = [ 1, x_1, ..., x_n ].T

    Parameters
    ----------
    x : array_like
        An array with shape (n_eval, n_features) giving the locations x at
        which the regression model should be evaluated.

    Returns
    -------
    f : array_like
        An array with shape (n_eval, n_features + 1): a leading column of
        ones followed by the inputs themselves.
    """
    x = np.asarray(x, dtype=np.float64)
    ones_col = np.ones((x.shape[0], 1))
    return np.concatenate((ones_col, x), axis=1)
def quadratic(x):
    """
    Second order polynomial (quadratic, p = n*(n-1)/2+n+1) regression model.

    x --> f(x) = [ 1, { x_i, i = 1,...,n }, { x_i * x_j,  (i,j) = 1,...,n } ].T
                                                          i > j

    Parameters
    ----------
    x : array_like
        An array with shape (n_eval, n_features) giving the locations x at
        which the regression model should be evaluated.

    Returns
    -------
    f : array_like
        An array with shape (n_eval, p): ones, the linear terms, then the
        upper-triangular quadratic cross terms.
    """
    x = np.asarray(x, dtype=np.float64)
    n_eval, n_features = x.shape
    # Collect all column groups first, then assemble with a single hstack.
    columns = [np.ones((n_eval, 1)), x]
    for k in range(n_features):
        columns.append(x[:, k, np.newaxis] * x[:, k:])
    return np.hstack(columns)
@njit_use(parallel=True)
def matrix_data_corr_levels_cat_matrix(
    i, n_levels, theta_cat, theta_bounds, is_ehh: bool
):
    """Build the level-to-level correlation matrix of categorical variable i.

    The flat hyperparameter vector theta_cat is unpacked into a symmetric
    angle matrix Theta_mat, a lower-triangular factor L is assembled from
    products of cos/sin of those angles (hypersphere-style parameterization
    -- presumably the homoscedastic hypersphere kernel; confirm against the
    reference), and T = L L^T is returned. When is_ehh is True, T is further
    mapped through an exponential transform bounded by theta_bounds.
    """
    Theta_mat = np.zeros((n_levels[i], n_levels[i]))
    L = np.zeros((n_levels[i], n_levels[i]))
    v = 0
    # Unpack theta_cat into the symmetric angle matrix (unit diagonal).
    for j in range(n_levels[i]):
        for k in range(n_levels[i] - j):
            if j == k + j:
                Theta_mat[j, k + j] = 1
            else:
                Theta_mat[j, k + j] = theta_cat[v]
                Theta_mat[k + j, j] = theta_cat[v]
                v = v + 1

    # Build the lower-triangular factor from cos/sin products of the angles.
    for j in range(n_levels[i]):
        for k in range(n_levels[i] - j):
            if j == k + j:
                if j == 0:
                    L[j, k + j] = 1

                else:
                    L[j, k + j] = 1
                    for l in range(j):
                        L[j, k + j] = L[j, k + j] * np.sin(Theta_mat[j, l])

            else:
                if j == 0:
                    L[k + j, j] = np.cos(Theta_mat[k, 0])
                else:
                    L[k + j, j] = np.cos(Theta_mat[k + j, j])
                    for l in range(j):
                        L[k + j, j] = L[k + j, j] * np.sin(Theta_mat[k + j, l])

    T = np.dot(L, L.T)

    if is_ehh:
        # Exponential homoscedastic hypersphere variant: rescale T into an
        # exponentiated form bounded by theta_bounds.
        T = (T - 1) * theta_bounds[1] / 2
        T = np.exp(2 * T)
        k = (1 + np.exp(-theta_bounds[1])) / np.exp(-theta_bounds[0])
        T = (T + np.exp(-theta_bounds[1])) / (k)
    return T
@njit_use()
def matrix_data_corr_levels_cat_mod(i, Lij, r_cat, T, has_cat_kernel):
    """Fill r_cat in place from the categorical correlation matrix T.

    Lij[i][k] holds the pair of level indices (indi, indj) of categorical
    variable i for cross-distance row k. Rows with equal levels get 1.0;
    otherwise T[indi, indj] is used when has_cat_kernel is True (existing
    r_cat entries are left untouched when it is False).
    """
    for k in range(np.shape(Lij[i])[0]):
        indi = int(Lij[i][k][0])
        indj = int(Lij[i][k][1])

        if indi == indj:
            r_cat[k] = 1.0
        else:
            if has_cat_kernel:
                r_cat[k] = T[indi, indj]
@njit_use()
def matrix_data_corr_levels_cat_mod_comps(
    i, Lij, r_cat, n_levels, T, d_cat_i, has_cat_kernel
):
    """Fill r_cat in place using the strict upper triangle of T.

    For each cross-distance row k with distinct levels (indi != indj), the
    upper-triangular entries of T are flattened into Theta_i_red and
    contracted against the corresponding row of d_cat_i; equal-level rows
    get 1.0. r_cat is mutated in place.
    """
    for k in range(np.shape(Lij[i])[0]):
        indi = int(Lij[i][k][0])
        indj = int(Lij[i][k][1])

        if indi == indj:
            r_cat[k] = 1.0
        else:
            if has_cat_kernel:
                # Flatten the strict upper triangle of T (row-major order).
                Theta_i_red = np.zeros(int((n_levels[i] - 1) * n_levels[i] / 2))
                indmatvec = 0
                for j in range(n_levels[i]):
                    for l in range(n_levels[i]):
                        if l > j:
                            Theta_i_red[indmatvec] = T[j, l]
                            indmatvec += 1
                # Dot product of the reduced matrix with the distance row.
                kval_cat = 0
                for indijk in range(len(Theta_i_red)):
                    kval_cat += np.multiply(
                        Theta_i_red[indijk], d_cat_i[k : k + 1][0][indijk]
                    )
                r_cat[k] = kval_cat
| 48,566 | 30.578023 | 108 | py |
smt | smt-master/smt/utils/options_dictionary.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
class OptionsDictionary(object):
    """
    Generalization of the dictionary that allows for declaring keys.

    Options must be declared (with optional default, allowed values, and
    allowed types) before they can be set; every assignment is validated
    against the declaration.

    Attributes
    ----------
    _dict : dict
        Dictionary of option values keyed by option names.
    _declared_entries : dict
        Dictionary of declared entries (values/types/default/desc per name).
    """

    def __init__(self):
        self._dict = {}
        self._declared_entries = {}

    def clone(self):
        """
        Return a clone of this object.

        Both internal dictionaries are copied (shallow copies), so declared
        entries and current values are independent of the original.

        Returns
        -------
        OptionsDictionary
            The cloned instance.
        """
        duplicate = self.__class__()
        duplicate._dict = dict(self._dict)
        duplicate._declared_entries = dict(self._declared_entries)
        return duplicate

    def __getitem__(self, name):
        """
        Get an option that was previously declared and optionally set.

        Arguments
        ---------
        name : str
            The name of the option.

        Returns
        -------
        object
            Value of the option.
        """
        return self._dict[name]

    def __setitem__(self, name, value):
        """
        Set an option that was previously declared.

        The value argument must be valid, which means it must satisfy the
        following:

        1. If values and not types was given when declaring, value must be in values.
        2. If types and not values was given when declaring, type(value) must be in types.
        3. If values and types were given when declaring, either of the above must be true.

        Arguments
        ---------
        name : str
            The name of the option.
        value : object
            The value to set.
        """
        assert name in self._declared_entries, "Option %s has not been declared" % name
        self._assert_valid(name, value)
        self._dict[name] = value

    def __contains__(self, key):
        """Return True if the option currently has a (possibly None) value."""
        return key in self._dict

    def is_declared(self, key):
        """Return True if the option has been declared."""
        return key in self._declared_entries

    def _assert_valid(self, name, value):
        """Assert that value satisfies the declaration of option name."""
        entry = self._declared_entries[name]
        values = entry["values"]
        types = entry["types"]

        if values is not None and types is not None:
            assert value in values or isinstance(
                value, types
            ), "Option %s: value and type of %s are both invalid - " % (
                name,
                value,
            ) + "value must be %s or type must be %s" % (
                values,
                types,
            )
        elif values is not None:
            assert value in values, "Option %s: value %s is invalid - must be %s" % (
                name,
                value,
                values,
            )
        elif types is not None:
            assert isinstance(
                value, types
            ), "Option %s: type of %s is invalid - must be %s" % (name, value, types)

    def update(self, dict_):
        """
        Loop over and set all the entries in the given dictionary into self.

        Arguments
        ---------
        dict_ : dict
            The given dictionary. All keys must have been declared.
        """
        for name, value in dict_.items():
            self[name] = value

    def declare(self, name, default=None, values=None, types=None, desc=""):
        """
        Declare an option.

        The value of the option must satisfy the following:

        1. If values and not types was given when declaring, value must be in values.
        2. If types and not values was given when declaring, type(value) must be in types.
        3. If values and types were given when declaring, either of the above must be true.

        Arguments
        ---------
        name : str
            Name of the option.
        default : object
            Optional default value that must be valid under the above 3 conditions.
        values : list
            Optional list of acceptable option values.
        types : type or list of types
            Optional list of acceptable option types.
        desc : str
            Optional description of the option.
        """
        self._declared_entries[name] = {
            "values": values,
            "types": types,
            "default": default,
            "desc": desc,
        }

        # A non-None default is validated and becomes the initial value;
        # note the value is stored (possibly as None) either way.
        if default is not None:
            self._assert_valid(name, default)
        self._dict[name] = default
| 4,481 | 28.88 | 91 | py |
smt | smt-master/smt/utils/__init__.py | from .misc import compute_rms_error
| 36 | 17.5 | 35 | py |
smt | smt-master/smt/utils/krg_sampling.py | """
Authors : Morgane Menz / Alexandre Thouvenot
Some parts are copied from KrgBased SMT class
"""
import numpy as np
from smt.utils.kriging import differences
from scipy import linalg
def covariance_matrix(krg, X, conditioned=True):
    """
    This function computes the covariance matrix (with conditioned kernel or not) at point(s) X.

    Parameters
    ----------
    krg : SMT KRG
        SMT kriging model already trained
    X : array_like
        Array with shape (n_eval, n_feature) composed of the point(s) on which the
        covariance matrix will be computed
    conditioned : Boolean
        Option to whether computes covariance matrix with conditioned kernel or not.

    Returns
    -------
    cov_matrix : array_like
        if conditioned: Array with shape(n_eval, n_eval) composed of the value(s) of the
        conditioned kernel
    cov_matrix : array_like
        if not conditioned: Array with shape(n_eval, n_eval) composed of the value(s) of the
        non conditioned kernel
    """
    # Normalize X with the model's training offset/scale, then build the
    # distances to the training data (d) and among the X points (cross_d).
    X_cont = (X - krg.X_offset) / krg.X_scale
    d = differences(X_cont, Y=krg.X_norma.copy())
    cross_d = differences(X_cont, Y=X_cont)
    C = krg.optimal_par["C"]  # Cholesky factor of the training correlation matrix
    theta = krg.optimal_theta
    n_eval = X.shape[0]

    # Prior correlation among the evaluation points.
    k = krg._correlation_types[krg.options["corr"]](
        theta, krg._componentwise_distance(cross_d)
    ).reshape(n_eval, n_eval)

    if not conditioned:
        cov_matrix = krg.optimal_par["sigma2"] * k
        return cov_matrix

    # Cross-correlation between evaluation and training points.
    r = krg._correlation_types[krg.options["corr"]](
        theta, krg._componentwise_distance(d)
    ).reshape(n_eval, -1)

    rt = linalg.solve_triangular(C, r.T, lower=True)

    # Universal-kriging trend correction term.
    u = linalg.solve_triangular(
        krg.optimal_par["G"].T,
        np.dot(krg.optimal_par["Ft"].T, rt)
        - krg._regression_types[krg.options["poly"]](X_cont).T,
    )

    # Posterior covariance: sigma2 * (k - rt' rt + u' u).
    cov_matrix = krg.optimal_par["sigma2"] * (k - rt.T.dot(rt) + u.T.dot(u))
    return cov_matrix
def sample_trajectory(krg, X, n_traj, method="eigen", eps=10 ** (-10)):
    """
    This function samples gaussian process trajectories with eigen decomposition or Cholesky decomposition.

    Parameters
    ----------
    krg : SMT KRG
        SMT kriging model already trained
    X : array_like
        Array with shape (n_eval, n_feature) composed of the point(s) on which the
        trajectories will be sampled
    n_traj : int
        Number of sampled trajectories
    method : string
        Option to whether samples trajectories with eigen or Cholesky method. Cholesky decomposition
        might not be possible because of bad conditioning
    eps : float
        Threshold used to floor near-zero (possibly negative) eigen values

    Raises
    ------
    ValueError
        If `method` is neither "eigen" nor "cholesky".

    Returns
    -------
    traj : array_like
        Array with shape(n_eval, n_traj) composed of the sampled trajectories with the chosen
        decomposition method
    """
    # Previously an unknown method fell through to a NameError on C;
    # fail fast with an explicit error instead.
    if method not in ("eigen", "cholesky"):
        raise ValueError('method must be "eigen" or "cholesky", got %r' % (method,))

    n_eval = X.shape[0]
    cov = covariance_matrix(krg, X)
    if method == "eigen":
        v, w = np.linalg.eigh(cov)
        # Floor eigenvalues whose magnitude is below eps (they are numerical
        # noise and may be slightly negative) before taking the square root.
        v[np.abs(v) < eps] = np.zeros_like(v[np.abs(v) < eps])
        C = w.dot(np.diagflat(np.sqrt(v)))
    else:
        C = np.linalg.cholesky(cov)

    # traj = mean + C z, with z ~ N(0, I).
    mean_ = krg._predict_values(X).reshape(-1, 1)
    traj = (mean_ + C.dot(np.random.randn(n_eval, n_traj))).reshape(n_eval, n_traj)
    return traj
def gauss_legendre_grid(bounds, n_point):
    """
    This function creates a grid to computes integrals with Gauss-Legendre quadrature method.

    Parameters
    ----------
    bounds : array_like
        Array with shape (2, int_dims) where dims is the number of integration dimension.
        It iscontaining the integration domain where bounds[0] is the inferior integration
        bound and bounds[1] is the superior integration bound.
    n_point : int
        Number of point in each dimension

    Returns
    -------
    x_grid : array_like
        Array with shape(n_point**int_dims, int_dims) composed of point(s) on which the
        integral will be computed
    weights_grid : array_like
        Array with shape(n_point**int_dims, 1) composed of weigths used to computed the
        integral
    """
    dims = bounds.shape[1]
    nodes, w_1d = np.polynomial.legendre.leggauss(n_point)

    # Affine map of the [-1, 1] nodes onto each integration interval.
    axes = []
    for k in range(dims):
        half_span = (bounds[1][k] - bounds[0][k]) / 2
        mid = (bounds[1][k] + bounds[0][k]) / 2
        axes.append(half_span * nodes + mid)

    mesh = np.meshgrid(*axes)
    x_grid = np.concatenate([m.reshape(-1, 1) for m in mesh], axis=1)

    # Tensor-product weights: product of the 1-D weights along each axis.
    w_mesh = np.meshgrid(*([w_1d] * dims))
    weights_grid = np.prod(w_mesh, axis=0).reshape(-1, 1)
    return x_grid, weights_grid
def rectangular_grid(bounds, n_point):
    """
    This function creates a grid to computes integrals with rectangular grid method.

    Parameters
    ----------
    bounds : array_like
        Array with shape (2, int_dims) where dims is the number of integration dimension.
        It iscontaining the integration domain where bounds[0] is the inferior integration
        bound and bounds[1] is the superior integration bound.
    n_point : int
        Number of point in each dimension

    Returns
    -------
    x_grid : array_like
        Array with shape(n_point**int_dims, int_dims) composed of point(s) on which the
        integral will be computed
    weights_grid : array_like
        Array with shape(n_point**int_dims, 1) composed of weigths used to computed the
        integral
    """
    dims = bounds.shape[1]
    base = np.linspace(-1, 1, n_point)
    w_1d = np.ones(n_point) / n_point

    # Affine map of the [-1, 1] base points onto each integration interval.
    axes = []
    for k in range(dims):
        half_span = (bounds[1][k] - bounds[0][k]) / 2
        mid = (bounds[1][k] + bounds[0][k]) / 2
        axes.append(half_span * base + mid)

    mesh = np.meshgrid(*axes)
    x_grid = np.concatenate([m.reshape(-1, 1) for m in mesh], axis=1)

    # Tensor-product of the uniform 1-D weights.
    w_mesh = np.meshgrid(*([w_1d] * dims))
    weights_grid = np.prod(w_mesh, axis=0).reshape(-1, 1)
    return x_grid, weights_grid
def simpson_weigths(n_points, h):
    """
    This function computes Simpson quadrature weigths in one dimension.

    Interior points alternate between 2h/3 (even indices) and 4h/3 (odd
    indices); both endpoints get h/3.

    Parameters
    ----------
    n_point : int
        Number of weigths
    h : float
        Scaling coefficient

    Returns
    -------
    weights : array_like
        Array with shape(n_point,) composed of weigths used to computed the
        integral
    """
    weights = np.empty(n_points)
    weights[0::2] = 2.0 * h / 3.0
    weights[1::2] = 4.0 * h / 3.0
    # Endpoints are always h/3, overriding the alternating pattern.
    weights[0] = h / 3.0
    weights[-1] = h / 3.0
    return weights
def simpson_grid(bounds, n_point):
    """
    This function creates a grid to computes integrals with Simpson quadrature.

    Parameters
    ----------
    bounds : array_like
        Array with shape (2, int_dims) where dims is the number of integration dimension.
        It iscontaining the integration domain where bounds[0] is the inferior integration
        bound and bounds[1] is the superior integration bound.
    n_point : int
        Number of point in each dimension

    Returns
    -------
    x_grid : array_like
        Array with shape (n_point**int_dims, int_dims) composed of point(s) on which the
        integral will be computed
    weights_grid : array_like
        Array with shape (n_point**int_dims, 1) composed of weigths used to computed the
        integral
    """
    dims = bounds.shape[1]
    base = np.linspace(-1, 1, n_point)

    # Affine map of the [-1, 1] base points onto each integration interval.
    axes = []
    for k in range(dims):
        half_span = (bounds[1][k] - bounds[0][k]) / 2
        mid = (bounds[1][k] + bounds[0][k]) / 2
        axes.append(half_span * base + mid)

    mesh = np.meshgrid(*axes)
    x_grid = np.concatenate([m.reshape(-1, 1) for m in mesh], axis=1)

    # Per-axis Simpson weights (step scaled by the interval length), combined
    # as a tensor product.
    per_axis_w = [
        simpson_weigths(n_point, (bounds[1][k] - bounds[0][k]) / n_point)
        for k in range(dims)
    ]
    w_mesh = np.meshgrid(*per_axis_w)
    weights_grid = np.prod(w_mesh, axis=0).reshape(-1, 1)
    return x_grid, weights_grid
def eig_grid(krg, x_grid, weights_grid):
    """
    This function computes eigen values and eigen vectors of Karhunen-Loève decomposition
    with Nyström method on a given interpolation.

    Parameters
    ----------
    krg : SMT KRG
        SMT kriging model already trained
    x_grid : array_like
        Array with shape (n_point, n_features) composed of point(s) on which the
        integral will be computed
    weights_grid : array_like
        Array with shape (n_point, 1) composed of weigths used to computed the
        integral

    Returns
    -------
    eig_val : array_like
        Eigen values (sorted in decreasing order)
    eig_vec : array_like
        Eigen Vectors (columns, in the same order as eig_val)
    M : int
        Number of retained eigen values
    """
    # Nyström discretization: B = W^(1/2) C W^(1/2) with the unconditioned
    # prior covariance C and the quadrature weight matrix W.
    C = covariance_matrix(krg, x_grid, conditioned=False)
    W_sqrt = np.sqrt(np.diagflat(weights_grid))
    B = W_sqrt.dot(C).dot(W_sqrt)
    eig_val, eig_vec = np.linalg.eigh(B)
    # eigh returns ascending eigenvalues; reorder to descending.
    ind = (-eig_val).argsort()
    eig_val = eig_val[ind]
    eig_vec = eig_vec[:, ind]
    # Truncation index M: smallest count whose residual spectral energy
    # (1 - cumulative/total) drops to 1e-8.
    crit = 1.0 - np.cumsum(eig_val) / eig_val.sum()
    M = int(np.argwhere(crit > 10 ** (-8))[-1]) + 1
    return eig_val, eig_vec, M
def evaluate_eigen_function(krg, X, eig_val, eig_vec, x_grid, weights_grid, M):
    """
    This function evaluates eigen functions of Karhunen-Loève decomposition
    with Nyström method on X.

    The eigen functions are evaluated on the concatenation of the model's
    training points and X (in that order).

    Parameters
    ----------
    krg : SMT KRG
        SMT kriging model already trained
    X : array_like
        Array with shape (n_eval, n_feature) containing point(s) on which the eigen
        function will be evaluated
    eig_val : array_like
        Eigen values
    eig_vec : array_like
        Eigen Vectors
    x_grid : array_like
        Array with shape (n_point, n_features) composed of point(s) on which the
        integral will be computed
    weights_grid : array_like
        Array with shape (n_point, 1) composed of weigths used to computed the
        integral
    M : int
        Number of retained eigen values

    Returns
    -------
    phi : array_like
        Value of retained eigen functions on [X_train; X]
    """
    W_sqrt = np.sqrt(np.diagflat(weights_grid))
    # Evaluate on training points first, then the requested points.
    X_ = np.concatenate((krg.X_train, X), axis=0)
    n_X = X_.shape[0]
    n_grid = x_grid.shape[0]
    X_cont = (X_ - krg.X_offset) / krg.X_scale
    X_grid_cont = (x_grid - krg.X_offset) / krg.X_scale
    # NOTE(review): componentwise *squared* differences are fed straight to
    # the correlation function here (no _componentwise_distance call), which
    # looks specific to a squar_exp-type kernel -- confirm for other corr
    # options.
    cross_d = (
        np.tile(X_cont, (n_grid, 1)) - X_grid_cont.repeat(repeats=n_X, axis=0)
    ) ** 2
    C = krg.optimal_par["sigma2"] * krg._correlation_types[krg.options["corr"]](
        krg.optimal_theta, cross_d
    ).reshape(n_grid, n_X)
    # Nyström extension: phi = Lambda^(-1/2) V^T W^(1/2) C.
    U = np.diagflat(np.sqrt(1 / eig_val[:M]))
    phi = U.dot(eig_vec[:, :M].T.dot(W_sqrt).dot(C))
    return phi
def sample_eigen(krg, X, eig_val, eig_vec, x_grid, weights_grid, M, n_traj):
    """
    This function samples trajectories of gaussian process with Karhunen-Loève decomposition
    with Nyström method.

    Parameters
    ----------
    krg : SMT KRG
        SMT kriging model already trained
    X : array_like
        Array with shape (n_eval, n_feature) containing point(s) on which the eigen
        functions will be evaluated and on which the trajectories will be sampled
    eig_val : array_like
        Eigen values
    eig_vec : array_like
        Eigen Vectors
    x_grid : array_like
        Array with shape (n_point, n_features) composed of point(s) on which the
        integral will be computed
    weights_grid : array_like
        Array with shape (n_point, 1) composed of weigths used to computed the
        integral
    M : int
        Number of retained eigen values
    n_traj : int
        Number trajectories

    Returns
    -------
    traj : array_like
        Trajectories sampled on X
    """
    # Unconditioned samples: random combinations of the eigen functions
    # evaluated on [X_train; X].
    phi = evaluate_eigen_function(krg, X, eig_val, eig_vec, x_grid, weights_grid, M)
    sample = np.random.randn(n_traj, M).dot(phi)

    # Condition each sample on the training locations by kriging its values
    # there (one GP fit per trajectory, sharing the model's correlation).
    Y = sample[:, : krg.X_train.shape[0]]
    Y_mean, Y_std = Y.mean(axis=1).reshape(-1, 1), Y.std(axis=1).reshape(-1, 1)
    Y_norma = ((Y - Y_mean) / Y_std).T
    C, Q, G, Ft = (
        krg.optimal_par["C"],
        krg.optimal_par["Q"],
        krg.optimal_par["G"],
        krg.optimal_par["Ft"],
    )
    # Generalized least squares for the trend (beta) and kriging weights (gamma).
    Yt = linalg.solve_triangular(C, Y_norma, lower=True)
    beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
    rho = Yt - np.dot(Ft, beta)
    gamma = linalg.solve_triangular(C.T, rho)
    X_cont = (X - krg.X_offset) / krg.X_scale
    dx = differences(X_cont, Y=krg.X_norma.copy())
    d = krg._componentwise_distance(dx)
    r = krg._correlation_types[krg.options["corr"]](krg.optimal_theta, d).reshape(
        X.shape[0], krg.nt
    )
    y = np.zeros(X.shape[0])
    f = krg._regression_types[krg.options["poly"]](X_cont)
    y_ = np.dot(f, beta) + np.dot(r, gamma)
    y = Y_mean.flatten() + y_ * Y_std.flatten()
    # Conditioned trajectory = model mean - sample prediction + sample values.
    mean = krg._predict_values(X).reshape(-1, 1) - y
    traj = mean + sample[:, krg.X_train.shape[0] :].T
    return traj
| 12,780 | 31.113065 | 107 | py |
smt | smt-master/smt/utils/caching.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
try:
import cPickle as pickle
except:
import pickle
import hashlib
import contextlib
@contextlib.contextmanager
def cached_operation(inputs_dict, data_dir, desc=""):
"""
Context manager for an operation that may be cached.
Arguments
---------
inputs_dict : dict
Dictionary containing the inputs of the operation.
data_dir : None or str
Directory containing the cached data files; if None, do not load or save.
desc : str
Optional descriptive prefix for the filename.
Yields
------
outputs_dict : dict
Dictionary containing the outputs of the operation.
"""
checksum = _caching_checksum(inputs_dict)
filename = "%s/%s_%s.dat" % (data_dir, desc, checksum)
try:
with open(filename, "rb") as f:
outputs_dict = pickle.load(f)
load_successful = True
except:
outputs_dict = {}
load_successful = False
yield outputs_dict
if not load_successful and data_dir:
with open(filename, "wb") as f:
pickle.dump(outputs_dict, f)
def _caching_checksum(obj):
"""
Compute the hex string checksum of the given object.
Arguments
---------
obj : object
Object to compute the checksum for; normally a dictionary.
Returns
-------
str
Hexadecimal string checksum that was computed.
"""
try:
tmp = obj["self"].printer
obj["self"].printer = None
except:
pass
self_pkl = pickle.dumps(obj)
checksum = hashlib.md5(self_pkl).hexdigest()
try:
obj["self"].printer = tmp
except:
pass
return checksum
| 1,784 | 21.3125 | 81 | py |
smt | smt-master/smt/utils/design_space.py | """
Author: Jasper Bussemaker <jasper.bussemaker@dlr.de>
This package is distributed under New BSD license.
"""
import numpy as np
from typing import List, Union, Tuple, Sequence, Optional
from smt.sampling_methods import LHS
def ensure_design_space(xt=None, xlimits=None, design_space=None) -> "BaseDesignSpace":
"""Interface to turn legacy input formats into a DesignSpace"""
if design_space is not None and isinstance(design_space, BaseDesignSpace):
return design_space
if xlimits is not None:
return DesignSpace(xlimits)
if xt is not None:
return DesignSpace([[0, 1]] * xt.shape[1])
raise ValueError("Nothing defined that could be interpreted as a design space!")
class DesignVariable:
"""Base class for defining a design variable"""
upper: Union[float, int]
lower: Union[float, int]
def get_typename(self):
return self.__class__.__name__
def get_limits(self) -> Union[list, tuple]:
raise NotImplementedError
def __str__(self):
raise NotImplementedError
def __repr__(self):
raise NotImplementedError
class FloatVariable(DesignVariable):
"""A continuous design variable, varying between its lower and upper bounds"""
def __init__(self, lower: float, upper: float):
if upper <= lower:
raise ValueError(
f"Upper bound should be higher than lower bound: {upper} <= {lower}"
)
self.lower = lower
self.upper = upper
def get_limits(self) -> Tuple[float, float]:
return self.lower, self.upper
def __str__(self):
return f"Float ({self.lower}, {self.upper})"
def __repr__(self):
return f"{self.get_typename()}({self.lower}, {self.upper})"
class IntegerVariable(DesignVariable):
"""An integer variable that can take any integer value between the bounds (inclusive)"""
def __init__(self, lower: int, upper: int):
if upper <= lower:
raise ValueError(
f"Upper bound should be higher than lower bound: {upper} <= {lower}"
)
self.lower = lower
self.upper = upper
def get_limits(self) -> Tuple[int, int]:
return self.lower, self.upper
def __str__(self):
return f"Int ({self.lower}, {self.upper})"
def __repr__(self):
return f"{self.get_typename()}({self.lower}, {self.upper})"
class OrdinalVariable(DesignVariable):
"""An ordinal variable that can take any of the given value, and where order between the values matters"""
def __init__(self, values: List[Union[str, int, float]]):
if len(values) < 2:
raise ValueError(f"There should at least be 2 values: {values}")
self.values = values
@property
def lower(self) -> int:
return 0
@property
def upper(self) -> int:
return len(self.values) - 1
def get_limits(self) -> List[str]:
# We convert to integer strings for compatibility reasons
return [str(i) for i in range(len(self.values))]
def __str__(self):
return f"Ord {self.values}"
def __repr__(self):
return f"{self.get_typename()}({self.values})"
class CategoricalVariable(DesignVariable):
"""A categorical variable that can take any of the given values, and where order does not matter"""
def __init__(self, values: List[Union[str, int, float]]):
if len(values) < 2:
raise ValueError(f"There should at least be 2 values: {values}")
self.values = values
@property
def lower(self) -> int:
return 0
@property
def upper(self) -> int:
return len(self.values) - 1
@property
def n_values(self):
return len(self.values)
def get_limits(self) -> List[Union[str, int, float]]:
# We convert to strings for compatibility reasons
return [str(value) for value in self.values]
def __str__(self):
return f"Cat {self.values}"
def __repr__(self):
return f"{self.get_typename()}({self.values})"
class BaseDesignSpace:
"""
Interface for specifying (hierarchical) design spaces.
This class itself only specifies the functionality that any design space definition should implement:
- a way to specify the design variables, their types, and their bounds or options
- a way to correct a set of design vectors such that they satisfy all design space hierarchy constraints
- a way to query which design variables are acting for a set of design vectors
- a way to impute a set of design vectors such that non-acting design variables are assigned some default value
- a way to sample n valid design vectors from the design space
If you want to actually define a design space, use the `DesignSpace` class!
Note that the correction, querying, and imputation mechanisms should all be implemented in one function
(`correct_get_acting`), as usually these operations are tightly related.
"""
def __init__(self, design_variables: List[DesignVariable] = None):
self._design_variables = design_variables
self._is_cat_mask = None
self._is_conditionally_acting_mask = None
@property
def design_variables(self) -> List[DesignVariable]:
if self._design_variables is None:
self._design_variables = dvs = self._get_design_variables()
if dvs is None:
raise RuntimeError(
"Design space should either specify the design variables upon initialization "
"or as output from _get_design_variables!"
)
return self._design_variables
@property
def is_cat_mask(self) -> np.ndarray:
"""Boolean mask specifying for each design variable whether it is a categorical variable"""
if self._is_cat_mask is None:
self._is_cat_mask = np.array(
[isinstance(dv, CategoricalVariable) for dv in self.design_variables]
)
return self._is_cat_mask
@property
def is_all_cont(self) -> bool:
"""Whether or not the space is continuous"""
is_continuous = all(
isinstance(dv, FloatVariable) for dv in self.design_variables
)
return is_continuous
@property
def is_conditionally_acting(self) -> np.ndarray:
"""Boolean mask specifying for each design variable whether it is conditionally acting (can be non-acting)"""
if self._is_conditionally_acting_mask is None:
self._is_conditionally_acting_mask = self._is_conditionally_acting()
return self._is_conditionally_acting_mask
@property
def n_dv(self) -> int:
"""Get the number of design variables"""
return len(self.design_variables)
def correct_get_acting(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Correct the given matrix of design vectors and return the corrected vectors and the is_acting matrix.
It is automatically detected whether input is provided in unfolded space or not.
Parameters
----------
x: np.ndarray [n_obs, dim]
- Input variables
Returns
-------
x_corrected: np.ndarray [n_obs, dim]
- Corrected and imputed input variables
is_acting: np.ndarray [n_obs, dim]
- Boolean matrix specifying for each variable whether it is acting or non-acting
"""
# Detect whether input is provided in unfolded space
x = np.atleast_2d(x)
if x.shape[1] == self.n_dv:
x_is_unfolded = False
elif x.shape[1] == self._get_n_dim_unfolded():
x_is_unfolded = True
else:
raise ValueError(f"Incorrect shape, expecting {self.n_dv} columns!")
# If needed, fold before correcting
if x_is_unfolded:
x, _ = self.fold_x(x)
# Correct and get the is_acting matrix
x_corrected, is_acting = self._correct_get_acting(x)
# Check conditionally-acting status
if np.any(~is_acting[:, ~self.is_conditionally_acting]):
raise RuntimeError("Unconditionally acting variables cannot be non-acting!")
# Unfold if needed
if x_is_unfolded:
x_corrected, is_acting = self.unfold_x(x_corrected, is_acting)
return x_corrected, is_acting
def decode_values(
self, x: np.ndarray, i_dv: int = None
) -> List[Union[str, int, float, list]]:
"""
Return decoded values: converts ordinal and categorical back to their original values.
If i_dv is given, decoding is done for one specific design variable only.
If i_dv=None, decoding will be done for all design variables: 1d input is interpreted as a design vector,
2d input is interpreted as a set of design vectors.
"""
def _decode_dv(x_encoded: np.ndarray, i_dv_decode):
dv = self.design_variables[i_dv_decode]
if isinstance(dv, (OrdinalVariable, CategoricalVariable)):
values = dv.values
decoded_values = [values[int(x_ij)] for x_ij in x_encoded]
return decoded_values
# No need to decode integer or float variables
return list(x_encoded)
# Decode one design variable
if i_dv is not None:
if len(x.shape) == 2:
x_i = x[:, i_dv]
elif len(x.shape) == 1:
x_i = x
else:
raise ValueError("Expected either 1 or 2-dimensional matrix!")
# No need to decode for integer or float variable
return _decode_dv(x_i, i_dv_decode=i_dv)
# Decode design vectors
n_dv = self.n_dv
is_1d = len(x.shape) == 1
x_mat = np.atleast_2d(x)
if x_mat.shape[1] != n_dv:
raise ValueError(
f"Incorrect number of inputs, expected {n_dv} design variables, received {x_mat.shape[1]}"
)
decoded_des_vars = [_decode_dv(x_mat[:, i], i_dv_decode=i) for i in range(n_dv)]
decoded_des_vectors = [
[decoded_des_vars[i][ix] for i in range(n_dv)]
for ix in range(x_mat.shape[0])
]
return decoded_des_vectors[0] if is_1d else decoded_des_vectors
def sample_valid_x(self, n: int, unfolded=False) -> Tuple[np.ndarray, np.ndarray]:
"""
Sample n design vectors and additionally return the is_acting matrix.
Parameters
----------
n: int
- Number of samples to generate
unfolded: bool
- Whether to return the samples in unfolded space (each categorical level gets its own dimension)
Returns
-------
x: np.ndarray [n, dim]
- Valid design vectors
is_acting: np.ndarray [n, dim]
- Boolean matrix specifying for each variable whether it is acting or non-acting
"""
# Sample from the design space
x, is_acting = self._sample_valid_x(n)
# Check conditionally-acting status
if np.any(~is_acting[:, ~self.is_conditionally_acting]):
raise RuntimeError("Unconditionally acting variables cannot be non-acting!")
# Unfold if needed
if unfolded:
x, is_acting = self.unfold_x(x, is_acting)
return x, is_acting
def get_x_limits(self) -> list:
"""Returns the variable limit definitions in SMT < 2.0 style"""
return [dv.get_limits() for dv in self.design_variables]
def get_num_bounds(self):
"""
Get bounds for the design space.
Returns
-------
np.ndarray [nx, 2]
- Bounds of each dimension
"""
return np.array([(dv.lower, dv.upper) for dv in self.design_variables])
def get_unfolded_num_bounds(self):
"""
Get bounds for the unfolded continuous space.
Returns
-------
np.ndarray [nx cont, 2]
- Bounds of each dimension where limits for categorical variables are expanded to [0, 1]
"""
unfolded_x_limits = []
for dv in self.design_variables:
if isinstance(dv, CategoricalVariable):
unfolded_x_limits += [[0, 1]] * dv.n_values
elif isinstance(dv, OrdinalVariable):
# Note that this interpretation is slightly different from the original mixed_integer implementation in
# smt: we simply map ordinal values to integers, instead of converting them to integer literals
# This ensures that each ordinal value gets sampled evenly, also if the values themselves represent
# unevenly spaced (e.g. log-spaced) values
unfolded_x_limits.append([dv.lower, dv.upper])
else:
unfolded_x_limits.append(dv.get_limits())
return np.array(unfolded_x_limits).astype(float)
def fold_x(
self, x: np.ndarray, is_acting: np.ndarray = None
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""
Fold x and optionally is_acting. Folding reverses the one-hot encoding of categorical variables applied by
unfolding.
Parameters
----------
x: np.ndarray [n, dim_unfolded]
- Unfolded samples
is_acting: np.ndarray [n, dim_unfolded]
- Boolean matrix specifying for each unfolded variable whether it is acting or non-acting
Returns
-------
x_folded: np.ndarray [n, dim]
- Folded samples
is_acting_folded: np.ndarray [n, dim]
- (Optional) boolean matrix specifying for each folded variable whether it is acting or non-acting
"""
# Get number of unfolded dimension
x = np.atleast_2d(x)
x_folded = np.zeros((x.shape[0], len(self.design_variables)))
is_acting_folded = (
np.ones(x_folded.shape, dtype=bool) if is_acting is not None else None
)
i_x_unfold = 0
for i, dv in enumerate(self.design_variables):
if isinstance(dv, CategoricalVariable):
n_dim_cat = dv.n_values
# Categorical values are folded by reversed one-hot encoding:
# [[1, 0, 0], [0, 1, 0], [0, 0, 1]] --> [0, 1, 2].T
x_cat_unfolded = x[:, i_x_unfold : i_x_unfold + n_dim_cat]
value_index = np.argmax(x_cat_unfolded, axis=1)
x_folded[:, i] = value_index
# The is_acting matrix is repeated column-wise, so we can just take the first column
if is_acting is not None:
is_acting_folded[:, i] = is_acting[:, i_x_unfold]
i_x_unfold += n_dim_cat
else:
x_folded[:, i] = x[:, i_x_unfold]
if is_acting is not None:
is_acting_folded[:, i] = is_acting[:, i_x_unfold]
i_x_unfold += 1
return x_folded, is_acting_folded
def unfold_x(
self, x: np.ndarray, is_acting: np.ndarray = None
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""
Unfold x and optionally is_acting. Unfolding creates one extra dimension for each categorical variable using
one-hot encoding.
Parameters
----------
x: np.ndarray [n, dim]
- Folded samples
is_acting: np.ndarray [n, dim]
- Boolean matrix specifying for each variable whether it is acting or non-acting
Returns
-------
x_unfolded: np.ndarray [n, dim_unfolded]
- Unfolded samples
is_acting_unfolded: np.ndarray [n, dim_unfolded]
- (Optional) boolean matrix specifying for each unfolded variable whether it is acting or non-acting
"""
# Get number of unfolded dimension
n_dim_unfolded = self._get_n_dim_unfolded()
x = np.atleast_2d(x)
x_unfolded = np.zeros((x.shape[0], n_dim_unfolded))
is_acting_unfolded = (
np.ones(x_unfolded.shape, dtype=bool) if is_acting is not None else None
)
i_x_unfold = 0
for i, dv in enumerate(self.design_variables):
if isinstance(dv, CategoricalVariable):
n_dim_cat = dv.n_values
x_cat = x_unfolded[:, i_x_unfold : i_x_unfold + n_dim_cat]
# Categorical values are unfolded by one-hot encoding:
# [0, 1, 2].T --> [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
x_i_int = x[:, i].astype(int)
for i_level in range(n_dim_cat):
has_value_mask = x_i_int == i_level
x_cat[has_value_mask, i_level] = 1
# The is_acting matrix is simply repeated column-wise
if is_acting is not None:
is_acting_unfolded[
:, i_x_unfold : i_x_unfold + n_dim_cat
] = np.tile(is_acting[:, [i]], (1, n_dim_cat))
i_x_unfold += n_dim_cat
else:
x_unfolded[:, i_x_unfold] = x[:, i]
if is_acting is not None:
is_acting_unfolded[:, i_x_unfold] = is_acting[:, i]
i_x_unfold += 1
return x_unfolded, is_acting_unfolded
def _get_n_dim_unfolded(self) -> int:
return sum(
[
dv.n_values if isinstance(dv, CategoricalVariable) else 1
for dv in self.design_variables
]
)
@staticmethod
def _round_equally_distributed(x_cont, lower: int, upper: int):
"""
To ensure equal distribution of continuous values to discrete values, we first stretch-out the continuous values
to extend to 0.5 beyond the integer limits and then round. This ensures that the values at the limits get a
large-enough share of the continuous values.
"""
x_cont[x_cont < lower] = lower
x_cont[x_cont > upper] = upper
diff = upper - lower
x_stretched = (x_cont - lower) * ((diff + 0.9999) / (diff + 1e-16)) - 0.5
return np.round(x_stretched) + lower
"""IMPLEMENT FUNCTIONS BELOW"""
def _get_design_variables(self) -> List[DesignVariable]:
"""Return the design variables defined in this design space if not provided upon initialization of the class"""
def _is_conditionally_acting(self) -> np.ndarray:
"""
Return for each design variable whether it is conditionally acting or not. A design variable is conditionally
acting if it MAY be non-acting.
Returns
-------
is_conditionally_acting: np.ndarray [dim]
- Boolean vector specifying for each design variable whether it is conditionally acting
"""
raise NotImplementedError
def _correct_get_acting(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Correct the given matrix of design vectors and return the corrected vectors and the is_acting matrix.
Parameters
----------
x: np.ndarray [n_obs, dim]
- Input variables
Returns
-------
x_corrected: np.ndarray [n_obs, dim]
- Corrected and imputed input variables
is_acting: np.ndarray [n_obs, dim]
- Boolean matrix specifying for each variable whether it is acting or non-acting
"""
raise NotImplementedError
def _sample_valid_x(self, n: int) -> Tuple[np.ndarray, np.ndarray]:
"""
Sample n design vectors and additionally return the is_acting matrix.
Returns
----------
x: np.ndarray [n, dim]
- Valid design vectors
is_acting: np.ndarray [n, dim]
- Boolean matrix specifying for each variable whether it is acting or non-acting
"""
raise NotImplementedError
def __str__(self):
raise NotImplementedError
def __repr__(self):
raise NotImplementedError
VarValueType = Union[int, str, List[Union[int, str]]]
def raise_config_space():
raise RuntimeError("Dependencies are not installed, run: pip install smt[cs]")
class DesignSpace(BaseDesignSpace):
"""
Class for defining a (hierarchical) design space by defining design variables, and defining decreed variables
(optional).
Numerical bounds can be requested using `get_num_bounds()`.
If needed, it is possible to get the legacy SMT < 2.0 `xlimits` format using `get_x_limits()`.
Parameters
----------
design_variables: list[DesignVariable]
- The list of design variables: FloatVariable, IntegerVariable, OrdinalVariable, or CategoricalVariable
Examples
--------
Instantiate the design space with all its design variables:
>>> print("toto")
>>> from smt.utils.design_space import DesignSpace, FloatVariable, IntegerVariable, OrdinalVariable, CategoricalVariable
>>> ds = DesignSpace([
>>> CategoricalVariable(['A', 'B']), # x0 categorical: A or B; order is not relevant
>>> OrdinalVariable(['C', 'D', 'E']), # x1 ordinal: C, D or E; order is relevant
>>> IntegerVariable(0, 2), # x2 integer between 0 and 2 (inclusive): 0, 1, 2
>>> FloatVariable(0, 1), # c3 continuous between 0 and 1
>>> ])
>>> assert len(ds.design_variables) == 4
You can define decreed variables (conditional activation):
>>> ds.declare_decreed_var(decreed_var=1, meta_var=0, meta_value='A') # Activate x1 if x0 == A
After defining everything correctly, you can then use the design space object to correct design vectors and get
information about which design variables are acting:
>>> x_corr, is_acting = ds.correct_get_acting(np.array([
>>> [0, 0, 2, .25],
>>> [1, 2, 1, .75],
>>> ]))
>>> assert np.all(x_corr == np.array([
>>> [0, 0, 2, .25],
>>> [1, 0, 1, .75],
>>> ]))
>>> assert np.all(is_acting == np.array([
>>> [True, True, True, True],
>>> [True, False, True, True], # x1 is not acting if x0 != A
>>> ]))
It is also possible to randomly sample design vectors conforming to the constraints:
>>> x_sampled, is_acting_sampled = ds.sample_valid_x(100)
You can also instantiate a purely-continuous design space from bounds directly:
>>> continuous_design_space = DesignSpace([(0, 1), (0, 2), (.5, 5.5)])
>>> assert continuous_design_space.n_dv == 3
If needed, it is possible to get the legacy design space definition format:
>>> xlimits = ds.get_x_limits()
>>> cont_bounds = ds.get_num_bounds()
>>> unfolded_cont_bounds = ds.get_unfolded_num_bounds()
"""
def __init__(
self, design_variables: Union[List[DesignVariable], list, np.ndarray], seed=None
):
# Assume float variable bounds as inputs
def _is_num(val):
try:
float(val)
return True
except ValueError:
return False
if len(design_variables) > 0 and not isinstance(
design_variables[0], DesignVariable
):
converted_dvs = []
for bounds in design_variables:
if len(bounds) != 2 or not _is_num(bounds[0]) or not _is_num(bounds[1]):
raise RuntimeError(
f"Expecting either a list of DesignVariable objects or float variable "
f"bounds! Unrecognized: {bounds!r}"
)
converted_dvs.append(FloatVariable(bounds[0], bounds[1]))
design_variables = converted_dvs
self.seed = seed # For testing
self._meta_vars = (
{}
) # dict[int, dict[any, list[int]]]: {meta_var_idx: {value: [decreed_var_idx, ...], ...}, ...}
self._is_decreed = np.zeros((len(design_variables),), dtype=bool)
super().__init__(design_variables)
def declare_decreed_var(
self, decreed_var: int, meta_var: int, meta_value: VarValueType
):
"""
Define a conditional (decreed) variable to be active when the meta variable has (one of) the provided values.
Parameters
----------
decreed_var: int
- Index of the conditional variable (the variable that is conditionally active)
meta_var: int
- Index of the meta variable (the variable that determines whether the conditional var is active)
meta_value: int | str | list[int|str]
- The value or list of values that the meta variable can have to activate the decreed var
"""
# Variables cannot be both meta and decreed at the same time
if self._is_decreed[meta_var]:
raise RuntimeError(
f"Variable cannot be both meta and decreed ({meta_var})!"
)
# Variables can only be decreed by one meta var
if self._is_decreed[decreed_var]:
raise RuntimeError(f"Variable is already decreed: {decreed_var}")
# Define meta-decreed relationship
if meta_var not in self._meta_vars:
self._meta_vars[meta_var] = {}
meta_var_obj = self.design_variables[meta_var]
for value in meta_value if isinstance(meta_value, Sequence) else [meta_value]:
encoded_value = value
if isinstance(meta_var_obj, (OrdinalVariable, CategoricalVariable)):
if value in meta_var_obj.values:
encoded_value = meta_var_obj.values.index(value)
if encoded_value not in self._meta_vars[meta_var]:
self._meta_vars[meta_var][encoded_value] = []
self._meta_vars[meta_var][encoded_value].append(decreed_var)
# Mark as decreed (conditionally acting)
self._is_decreed[decreed_var] = True
def _is_conditionally_acting(self) -> np.ndarray:
# Decreed variables are the conditionally acting variables
return self._is_decreed
def _correct_get_acting(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Correct and impute design vectors"""
# Simplified implementation
# Correct discrete variables
x_corr = x.copy()
self._normalize_x(x_corr)
# Determine which variables are acting
is_acting = np.ones(x_corr.shape, dtype=bool)
is_acting[:, self._is_decreed] = False
for i, xi in enumerate(x_corr):
for i_meta, decrees in self._meta_vars.items():
meta_var_value = xi[i_meta]
if meta_var_value in decrees:
i_decreed_vars = decrees[meta_var_value]
is_acting[i, i_decreed_vars] = True
# Impute non-acting variables
self._impute_non_acting(x_corr, is_acting)
return x_corr, is_acting
def _sample_valid_x(self, n: int) -> Tuple[np.ndarray, np.ndarray]:
"""Sample design vectors"""
# Simplified implementation: sample design vectors in unfolded space
x_limits_unfolded = self.get_unfolded_num_bounds()
sampler = LHS(xlimits=x_limits_unfolded, random_state=self.seed)
x = sampler(n)
# Cast to discrete and fold
self._normalize_x(x)
x, _ = self.fold_x(x)
# Get acting information and impute
return self.correct_get_acting(x)
def _impute_non_acting(self, x: np.ndarray, is_acting: np.ndarray):
for i, dv in enumerate(self.design_variables):
if isinstance(dv, FloatVariable):
# Impute continuous variables to the mid of their bounds
x[~is_acting[:, i], i] = 0.5 * (dv.upper - dv.lower)
else:
# Impute discrete variables to their lower bounds
lower = 0
if isinstance(dv, (IntegerVariable, OrdinalVariable)):
lower = dv.lower
x[~is_acting[:, i], i] = lower
def _normalize_x(self, x: np.ndarray):
for i, dv in enumerate(self.design_variables):
if isinstance(dv, IntegerVariable):
x[:, i] = self._round_equally_distributed(x[:, i], dv.lower, dv.upper)
elif isinstance(dv, (OrdinalVariable, CategoricalVariable)):
# To ensure equal distribution of continuous values to discrete values, we first stretch-out the
# continuous values to extend to 0.5 beyond the integer limits and then round. This ensures that the
# values at the limits get a large-enough share of the continuous values
x[:, i] = self._round_equally_distributed(x[:, i], dv.lower, dv.upper)
def __str__(self):
dvs = "\n".join([f"x{i}: {dv!s}" for i, dv in enumerate(self.design_variables)])
return f"Design space:\n{dvs}"
def __repr__(self):
return f"{self.__class__.__name__}({self.design_variables!r})"
| 28,950 | 35.786531 | 124 | py |
smt | smt-master/smt/utils/test/test_design_space.py | """
Author: Jasper Bussemaker <jasper.bussemaker@dlr.de>
"""
import unittest
import itertools
import numpy as np
from smt.sampling_methods import LHS
from smt.utils.design_space import (
FloatVariable,
IntegerVariable,
OrdinalVariable,
CategoricalVariable,
BaseDesignSpace,
DesignSpace,
)
class Test(unittest.TestCase):
def test_design_variables(self):
with self.assertRaises(ValueError):
FloatVariable(1, 0)
float_var = FloatVariable(0, 1)
self.assertEqual(float_var.lower, 0)
self.assertEqual(float_var.upper, 1)
self.assertEqual(float_var.get_limits(), (0, 1))
self.assertTrue(str(float_var))
self.assertTrue(repr(float_var))
self.assertEqual("FloatVariable", float_var.get_typename())
with self.assertRaises(ValueError):
IntegerVariable(1, 0)
int_var = IntegerVariable(0, 1)
self.assertEqual(int_var.lower, 0)
self.assertEqual(int_var.upper, 1)
self.assertEqual(int_var.get_limits(), (0, 1))
self.assertTrue(str(int_var))
self.assertTrue(repr(int_var))
self.assertEqual("IntegerVariable", int_var.get_typename())
with self.assertRaises(ValueError):
OrdinalVariable([])
with self.assertRaises(ValueError):
OrdinalVariable(["1"])
ord_var = OrdinalVariable(["A", "B", "C"])
self.assertEqual(ord_var.values, ["A", "B", "C"])
self.assertEqual(ord_var.get_limits(), ["0", "1", "2"])
self.assertEqual(ord_var.lower, 0)
self.assertEqual(ord_var.upper, 2)
self.assertTrue(str(ord_var))
self.assertTrue(repr(ord_var))
self.assertEqual("OrdinalVariable", ord_var.get_typename())
with self.assertRaises(ValueError):
CategoricalVariable([])
with self.assertRaises(ValueError):
CategoricalVariable(["A"])
cat_var = CategoricalVariable(["A", "B", "C"])
self.assertEqual(cat_var.values, ["A", "B", "C"])
self.assertEqual(cat_var.get_limits(), ["A", "B", "C"])
self.assertEqual(cat_var.lower, 0)
self.assertEqual(cat_var.upper, 2)
self.assertTrue(str(cat_var))
self.assertTrue(repr(cat_var))
self.assertEqual("CategoricalVariable", cat_var.get_typename())
def test_rounding(self):
ds = BaseDesignSpace(
[
IntegerVariable(0, 5),
IntegerVariable(-1, 1),
IntegerVariable(2, 4),
]
)
x = np.array(
list(
itertools.product(
np.linspace(0, 5, 20), np.linspace(-1, 1, 20), np.linspace(2, 4, 20)
)
)
)
for i, dv in enumerate(ds.design_variables):
self.assertIsInstance(dv, IntegerVariable)
x[:, i] = ds._round_equally_distributed(x[:, i], dv.lower, dv.upper)
x1, x1_counts = np.unique(x[:, 0], return_counts=True)
self.assertTrue(np.all(x1 == [0, 1, 2, 3, 4, 5]))
x1_counts = x1_counts / np.sum(x1_counts)
self.assertTrue(np.all(np.abs(x1_counts - np.mean(x1_counts)) <= 0.05))
x2, x2_counts = np.unique(x[:, 1], return_counts=True)
self.assertTrue(np.all(x2 == [-1, 0, 1]))
x2_counts = x2_counts / np.sum(x2_counts)
self.assertTrue(np.all(np.abs(x2_counts - np.mean(x2_counts)) <= 0.05))
x3, x3_counts = np.unique(x[:, 2], return_counts=True)
self.assertTrue(np.all(x3 == [2, 3, 4]))
x3_counts = x3_counts / np.sum(x3_counts)
self.assertTrue(np.all(np.abs(x3_counts - np.mean(x3_counts)) <= 0.05))
def test_base_design_space(self):
ds = BaseDesignSpace(
[
CategoricalVariable(["A", "B"]),
IntegerVariable(0, 3),
FloatVariable(-0.5, 0.5),
]
)
self.assertEqual(ds.get_x_limits(), [["A", "B"], (0, 3), (-0.5, 0.5)])
self.assertTrue(np.all(ds.get_num_bounds() == [[0, 1], [0, 3], [-0.5, 0.5]]))
self.assertTrue(
np.all(
ds.get_unfolded_num_bounds() == [[0, 1], [0, 1], [0, 3], [-0.5, 0.5]]
)
)
x = np.array(
[
[0, 0, 0],
[1, 2, 0.5],
[0, 3, 0.5],
]
)
is_acting = np.array(
[
[True, True, False],
[True, False, True],
[False, True, True],
]
)
x_unfolded, is_acting_unfolded = ds.unfold_x(x, is_acting)
self.assertTrue(
np.all(
x_unfolded
== [
[1, 0, 0, 0],
[0, 1, 2, 0.5],
[1, 0, 3, 0.5],
]
)
)
self.assertEqual(is_acting_unfolded.dtype, bool)
self.assertTrue(
np.all(
is_acting_unfolded
== [
[True, True, True, False],
[True, True, False, True],
[False, False, True, True],
]
)
)
x_folded, is_acting_folded = ds.fold_x(x_unfolded, is_acting_unfolded)
self.assertTrue(np.all(x_folded == x))
self.assertTrue(np.all(is_acting_folded == is_acting))
def test_create_design_space(self):
DesignSpace([FloatVariable(0, 1)])
def test_design_space(self):
ds = DesignSpace(
[
CategoricalVariable(["A", "B", "C"]),
OrdinalVariable(["E", "F"]),
IntegerVariable(-1, 2),
FloatVariable(0.5, 1.5),
],
seed=42,
)
self.assertEqual(len(ds.design_variables), 4)
self.assertTrue(np.all(~ds.is_conditionally_acting))
ds.sample_valid_x(3)
x = np.array(
[
[1, 0, 0, 0.834],
[2, 0, -1, 0.6434],
[2, 0, 0, 1.151],
]
)
x, is_acting = ds.correct_get_acting(x)
self.assertEqual(x.shape, (3, 4))
self.assertEqual(is_acting.shape, x.shape)
self.assertEqual(ds.decode_values(x, i_dv=0), ["B", "C", "C"])
self.assertEqual(ds.decode_values(x, i_dv=1), ["E", "E", "E"])
self.assertEqual(ds.decode_values(np.array([0, 1, 2]), i_dv=0), ["A", "B", "C"])
self.assertEqual(ds.decode_values(np.array([0, 1]), i_dv=1), ["E", "F"])
self.assertEqual(ds.decode_values(x[0, :]), ["B", "E", 0, 0.834])
self.assertEqual(ds.decode_values(x[[0], :]), [["B", "E", 0, 0.834]])
self.assertEqual(
ds.decode_values(x),
[
["B", "E", 0, 0.834],
["C", "E", -1, 0.6434],
["C", "E", 0, 1.151],
],
)
x_corr, is_act_corr = ds.correct_get_acting(x)
self.assertTrue(np.all(x_corr == x))
self.assertTrue(np.all(is_act_corr == is_acting))
x_sampled_externally = LHS(
xlimits=ds.get_unfolded_num_bounds(), criterion="ese", random_state=42
)(3)
x_corr, is_acting_corr = ds.correct_get_acting(x_sampled_externally)
x_corr, is_acting_corr = ds.fold_x(x_corr, is_acting_corr)
self.assertTrue(
np.all(
np.abs(
x_corr
- np.array(
[
[2, 0, -1, 1.342],
[0, 1, 0, 0.552],
[1, 1, 2, 1.157],
]
)
)
< 1e-3
)
)
self.assertTrue(np.all(is_acting_corr))
x_unfolded, is_acting_unfolded = ds.sample_valid_x(3, unfolded=True)
self.assertEqual(x_unfolded.shape, (3, 6))
self.assertTrue(str(ds))
self.assertTrue(repr(ds))
ds.correct_get_acting(np.array([[0, 0, 0, 1.6]]))
def test_float_design_space(self):
ds = DesignSpace([(0, 1), (0.5, 2.5), (-0.4, 10)])
assert ds.n_dv == 3
assert all(isinstance(dv, FloatVariable) for dv in ds.design_variables)
assert np.all(ds.get_num_bounds() == np.array([[0, 1], [0.5, 2.5], [-0.4, 10]]))
ds = DesignSpace([[0, 1], [0.5, 2.5], [-0.4, 10]])
assert ds.n_dv == 3
assert all(isinstance(dv, FloatVariable) for dv in ds.design_variables)
assert np.all(ds.get_num_bounds() == np.array([[0, 1], [0.5, 2.5], [-0.4, 10]]))
ds = DesignSpace(np.array([[0, 1], [0.5, 2.5], [-0.4, 10]]))
assert ds.n_dv == 3
assert all(isinstance(dv, FloatVariable) for dv in ds.design_variables)
assert np.all(ds.get_num_bounds() == np.array([[0, 1], [0.5, 2.5], [-0.4, 10]]))
    def test_design_space_hierarchical(self):
        """Hierarchical design space: x3 is only active when x0 == "A".

        Checks correction/deduplication of the full cartesian grid, the
        conditionally-acting mask, and that sampling only produces the 16
        canonical (corrected) design points.
        """
        ds = DesignSpace(
            [
                CategoricalVariable(["A", "B", "C"]), # x0
                CategoricalVariable(["E", "F"]), # x1
                IntegerVariable(0, 1), # x2
                FloatVariable(0, 1), # x3
            ],
            seed=42,
        )
        ds.declare_decreed_var(
            decreed_var=3, meta_var=0, meta_value="A"
        ) # Activate x3 if x0 == A
        # All 24 combinations of the discrete levels with x3 in {0.25, 0.75}
        x_cartesian = np.array(
            list(itertools.product([0, 1, 2], [0, 1], [0, 1], [0.25, 0.75]))
        )
        self.assertEqual(x_cartesian.shape, (24, 4))
        # Only the decreed variable x3 is conditionally acting
        self.assertTrue(
            np.all(ds.is_conditionally_acting == [False, False, False, True])
        )
        # Correction collapses inactive-x3 points (x0 != A) onto x3 = 0.5,
        # so only 16 of the 24 grid points remain distinct
        x, is_acting = ds.correct_get_acting(x_cartesian)
        _, is_unique = np.unique(x, axis=0, return_index=True)
        self.assertEqual(len(is_unique), 16)
        self.assertTrue(
            np.all(
                x[is_unique, :]
                == np.array(
                    [
                        [0, 0, 0, 0.25],
                        [0, 0, 0, 0.75],
                        [0, 0, 1, 0.25],
                        [0, 0, 1, 0.75],
                        [0, 1, 0, 0.25],
                        [0, 1, 0, 0.75],
                        [0, 1, 1, 0.25],
                        [0, 1, 1, 0.75],
                        [1, 0, 0, 0.5],
                        [1, 0, 1, 0.5],
                        [1, 1, 0, 0.5],
                        [1, 1, 1, 0.5],
                        [2, 0, 0, 0.5],
                        [2, 0, 1, 0.5],
                        [2, 1, 0, 0.5],
                        [2, 1, 1, 0.5],
                    ]
                )
            )
        )
        # x3 is acting only in the first 8 rows (where x0 == A, i.e. 0)
        self.assertTrue(
            np.all(
                is_acting[is_unique, :]
                == np.array(
                    [
                        [True, True, True, True],
                        [True, True, True, True],
                        [True, True, True, True],
                        [True, True, True, True],
                        [True, True, True, True],
                        [True, True, True, True],
                        [True, True, True, True],
                        [True, True, True, True],
                        [True, True, True, False],
                        [True, True, True, False],
                        [True, True, True, False],
                        [True, True, True, False],
                        [True, True, True, False],
                        [True, True, True, False],
                        [True, True, True, False],
                        [True, True, True, False],
                    ]
                )
            )
        )
        # Sampled points, once x3 is rounded where acting, must already be
        # "correct": correction should be the identity on them
        x_sampled, is_acting_sampled = ds.sample_valid_x(100)
        assert x_sampled.shape == (100, 4)
        x_sampled[is_acting_sampled[:, 3], 3] = np.round(
            x_sampled[is_acting_sampled[:, 3], 3]
        )
        x_corr, is_acting_corr = ds.correct_get_acting(x_sampled)
        self.assertTrue(np.all(x_corr == x_sampled))
        self.assertTrue(np.all(is_acting_corr == is_acting_sampled))
        # 100 samples must cover only the 16 valid points and the 2 distinct
        # activeness patterns seen above
        seen_x = set()
        seen_is_acting = set()
        for i, xi in enumerate(x_sampled):
            seen_x.add(tuple(xi))
            seen_is_acting.add(tuple(is_acting_sampled[i, :]))
        assert len(seen_x) == 16
        assert len(seen_is_acting) == 2
    def test_check_conditionally_acting(self):
        """A DesignSpace subclass that fails to report its decreed variables
        as conditionally acting must be rejected when sampling."""
        class WrongDesignSpace(DesignSpace):
            # Wrongly claims that no variable is conditionally acting, even
            # though x3 is decreed below
            def _is_conditionally_acting(self) -> np.ndarray:
                return np.zeros((self.n_dv,), dtype=bool)
        ds = WrongDesignSpace(
            [
                CategoricalVariable(["A", "B", "C"]), # x0
                CategoricalVariable(["E", "F"]), # x1
                IntegerVariable(0, 1), # x2
                FloatVariable(0, 1), # x3
            ],
            seed=42,
        )
        ds.declare_decreed_var(
            decreed_var=3, meta_var=0, meta_value="A"
        ) # Activate x3 if x0 == A
        # The inconsistent conditionally-acting mask must be detected
        self.assertRaises(RuntimeError, lambda: ds.sample_valid_x(10))
if __name__ == "__main__":
unittest.main()
| 13,086 | 33.898667 | 88 | py |
smt | smt-master/smt/utils/test/test_misc_utils.py | """
Author: P. Saves
This package is distributed under New BSD license.
"""
from smt.utils import misc
import unittest
import numpy as np
class TestMisc(unittest.TestCase):
    """Unit tests for smt.utils.misc helpers."""
    def test_standardization(self):
        """standardization() must center by the mean and scale by the std,
        returning the normalized arrays and the offsets/scales used."""
        X = np.array([[0], [1], [2]])
        y = np.array([[1], [3], [5]])
        # np.copy: standardization may work in place; keep X/y for reference
        X2, y2, X_offset, y_mean, X_scale, y_std = misc.standardization(
            np.copy(X), np.copy(y)
        )
        # mean(X) = 1, std-like scale = 1 -> centered values -1, 0, 1
        self.assertTrue(np.array_equal(X2.T, np.array([[-1, 0, 1]])))
        # mean(y) = 3, scale = 2 -> normalized values -1, 0, 1
        self.assertTrue(np.array_equal(y2.T, np.array([[-1, 0, 1]])))
        self.assertTrue(np.array_equal(X_offset, np.array([1])))
        self.assertTrue(np.array_equal(y_mean, np.array([3])))
        self.assertTrue(np.array_equal(X_scale, np.array([1])))
        self.assertTrue(np.array_equal(y_std, np.array([2])))
if __name__ == "__main__":
unittest.main()
| 852 | 26.516129 | 72 | py |
smt | smt-master/smt/utils/test/test_kriging_sampling_utils.py | """
Author: Paul Saves
"""
import unittest
import numpy as np
from smt.surrogate_models import KRG
from smt.utils.krg_sampling import (
covariance_matrix,
sample_trajectory,
gauss_legendre_grid,
rectangular_grid,
simpson_grid,
eig_grid,
sample_eigen,
)
class Test(unittest.TestCase):
def test_cov_matrix(self):
f = lambda x: x**2 * np.sin(x)
x_min, x_max = -10, 10
X_doe = np.array([-8.5, -4.0, -3.0, -1.0, 4.0, 7.5])
Y_doe = f(X_doe)
gp = KRG(theta0=[1e-2])
gp.set_training_values(X_doe, Y_doe)
gp._train()
cov_matrix = covariance_matrix(gp, np.array([[-2], [0], [2]]), conditioned=True)
self.assertAlmostEqual(cov_matrix.shape, (3, 3))
    def test_matrix_decomposition(self):
        """Trajectories sampled via Cholesky and eigen decomposition must both
        have shape (n_plot, n_traj)."""
        f = lambda x: x**2 * np.sin(x)
        x_min, x_max = -10, 10
        X_doe = np.array([-8.5, -4.0, -3.0, -1.0, 4.0, 7.5])
        Y_doe = f(X_doe)
        gp = KRG(theta0=[1e-2])
        gp.set_training_values(X_doe, Y_doe)
        gp._train()
        n_plot = 20  # number of evaluation points per trajectory
        n_traj = 10  # number of sampled trajectories
        X_data = np.linspace(x_min, x_max, n_plot).reshape(-1, 1)
        # Same posterior sampled with two different matrix decompositions
        traj_chk = sample_trajectory(gp, X_data, n_traj, method="cholesky")
        traj_eig = sample_trajectory(gp, X_data, n_traj, method="eigen")
        self.assertEqual(traj_chk.shape, (n_plot, n_traj))
        self.assertEqual(traj_eig.shape, (n_plot, n_traj))
    def test_nystrom(self):
        """Nystrom approximation: quadrature grids, eigendecomposition on each
        grid, and eigen-based trajectory sampling must all produce arrays of
        the expected shapes."""
        f = lambda x: x**2 * np.sin(x)
        x_min, x_max = -10, 10
        X_doe = np.array([-8.5, -4.0, -3.0, -1.0, 4.0, 7.5])
        Y_doe = f(X_doe)
        bounds = np.array([[x_min], [x_max]])
        gp = KRG(theta0=[1e-2])
        gp.set_training_values(X_doe, Y_doe)
        gp._train()
        n_points = 10  # quadrature points per grid
        n_plot = 500  # evaluation points per trajectory
        n_traj = 20  # number of sampled trajectories
        X_data = np.linspace(x_min, x_max, n_plot).reshape(-1, 1)
        # Three quadrature rules, each returning (points, weights)
        x_grid_gl, weights_grid_gl = gauss_legendre_grid(bounds, n_points)
        x_grid_rec, weights_grid_rec = rectangular_grid(bounds, n_points)
        x_grid_sim, weights_grid_sim = simpson_grid(bounds, n_points)
        self.assertEqual(x_grid_gl.shape, (n_points, 1))
        self.assertEqual(x_grid_rec.shape, (n_points, 1))
        self.assertEqual(x_grid_sim.shape, (n_points, 1))
        self.assertEqual(weights_grid_gl.shape, (n_points, 1))
        self.assertEqual(weights_grid_rec.shape, (n_points, 1))
        self.assertEqual(weights_grid_sim.shape, (n_points, 1))
        # Eigendecomposition of the covariance operator on each grid;
        # M is the number of retained eigenvalues
        eig_val_gl, eig_vec_gl, M_gl = eig_grid(gp, x_grid_gl, weights_grid_gl)
        eig_val_rec, eig_vec_rec, M_rec = eig_grid(gp, x_grid_rec, weights_grid_rec)
        eig_val_sim, eig_vec_sim, M_sim = eig_grid(gp, x_grid_sim, weights_grid_sim)
        self.assertEqual(eig_val_gl.shape, (n_points,))
        self.assertEqual(eig_val_rec.shape, (n_points,))
        self.assertEqual(eig_val_sim.shape, (n_points,))
        self.assertEqual(eig_vec_gl.shape, (n_points, n_points))
        self.assertEqual(eig_vec_rec.shape, (n_points, n_points))
        self.assertEqual(eig_vec_sim.shape, (n_points, n_points))
        self.assertEqual(M_gl, 9)
        self.assertEqual(M_rec, 9)
        self.assertEqual(M_sim, 9)
        # Sample trajectories from each eigendecomposition
        traj_gl = sample_eigen(
            gp, X_data, eig_val_gl, eig_vec_gl, x_grid_gl, weights_grid_gl, M_gl, n_traj
        )
        traj_rec = sample_eigen(
            gp,
            X_data,
            eig_val_rec,
            eig_vec_rec,
            x_grid_rec,
            weights_grid_rec,
            M_rec,
            n_traj,
        )
        traj_sim = sample_eigen(
            gp,
            X_data,
            eig_val_sim,
            eig_vec_sim,
            x_grid_sim,
            weights_grid_sim,
            M_sim,
            n_traj,
        )
        self.assertEqual(traj_gl.shape, (n_plot, n_traj))
        self.assertEqual(traj_rec.shape, (n_plot, n_traj))
        self.assertEqual(traj_sim.shape, (n_plot, n_traj))
if __name__ == "__main__":
unittest.main()
| 3,996 | 29.052632 | 88 | py |
smt | smt-master/smt/utils/neural_net/activation.py | """
G R A D I E N T - E N H A N C E D N E U R A L N E T W O R K S (G E N N)
Author: Steven H. Berguin <steven.berguin@gtri.gatech.edu>
This package is distributed under New BSD license.
"""
import numpy as np
class Activation(object):
    """Base class for neural-network activation functions.

    Subclasses implement evaluate(), first_derivative() and
    second_derivative(); the base implementations are no-ops (return None).
    """
    def __init__(self, **kwargs):
        # Store any keyword arguments directly as instance attributes
        for name, value in kwargs.items():
            setattr(self, name, value)
    def evaluate(self, z):
        """
        Evaluate activation function
        :param z: a scalar or numpy array of any size
        :return: activation value at z
        """
        pass
    def first_derivative(self, z):
        """
        Evaluate gradient of activation function
        :param z: a scalar or numpy array of any size
        :return: gradient at z
        """
        pass
    def second_derivative(self, z):
        """
        Evaluate second derivative of activation function
        :param z: a scalar or numpy array of any size
        :return: second derivative at z
        """
        pass
class Sigmoid(Activation):
    """Logistic sigmoid activation: s(z) = 1 / (1 + exp(-z))."""

    def evaluate(self, z):
        """Return s(z) = 1 / (1 + exp(-z))."""
        return 1.0 / (1.0 + np.exp(-z))

    def first_derivative(self, z):
        """Return s'(z) = s(z) * (1 - s(z))."""
        s = self.evaluate(z)
        return s * (1.0 - s)

    def second_derivative(self, z):
        """Return s''(z) = s'(z) * (1 - 2 * s(z))."""
        s = self.evaluate(z)
        return self.first_derivative(z) * (1 - 2 * s)
class Tanh(Activation):
    """Hyperbolic tangent activation: t(z) = tanh(z)."""

    def evaluate(self, z):
        """Return tanh(z).

        Uses np.tanh instead of the explicit (e^z - e^-z) / (e^z + e^-z)
        ratio: the explicit form overflows for |z| >~ 710, yielding
        inf/inf = nan, while np.tanh saturates cleanly to +/-1.
        """
        return np.tanh(z)

    def first_derivative(self, z):
        """Return t'(z) = 1 - tanh(z)**2."""
        a = self.evaluate(z)
        da = 1 - np.square(a)
        return da

    def second_derivative(self, z):
        """Return t''(z) = -2 * tanh(z) * t'(z)."""
        a = self.evaluate(z)
        da = self.first_derivative(z)
        dda = -2 * a * da
        return dda
class Linear(Activation):
    """Identity activation a(z) = z (typical for regression output layers)."""

    def evaluate(self, z):
        """Return z unchanged."""
        return z

    def first_derivative(self, z):
        """The derivative of the identity is 1 everywhere."""
        return np.ones(z.shape)

    def second_derivative(self, z):
        """The second derivative of the identity is 0 everywhere."""
        return np.zeros(z.shape)
def plot_activations(): # pragma: no cover
    """Plot each activation function over [-10, 10], one figure at a time."""
    import matplotlib.pyplot as plt
    x = np.linspace(-10, 10, 100)
    activations = {"tanh": Tanh(), "sigmoid": Sigmoid()}
    # plt.show() inside the loop displays each activation in its own
    # (blocking) figure before moving on to the next one
    for name, activation in activations.items():
        plt.plot(x, activation.evaluate(x))
        plt.title(name)
        plt.show()
if __name__ == "__main__": # pragma: no cover
plot_activations()
| 2,424 | 22.095238 | 76 | py |
smt | smt-master/smt/utils/neural_net/loss.py | """
G R A D I E N T - E N H A N C E D N E U R A L N E T W O R K S (G E N N)
Author: Steven H. Berguin <steven.berguin@gtri.gatech.edu>
This package is distributed under New BSD license.
"""
import numpy as np
tensor = np.ndarray
EPS = np.finfo(float).eps # small number to avoid division by zero
def compute_regularization(w, lambd=0.0):
    """Return the L2 weight-decay penalty 0.5 * lambd * sum(||theta||^2).

    :param w: list of weight arrays, one per layer of the neural net
    :param lambd: regularization coefficient (negative values clipped to 0)
    :return: scalar penalty
    """
    coeff = max(0.0, lambd)  # ensure 0 <= lambda
    return sum(
        (np.squeeze(0.5 * coeff * np.sum(np.square(theta))) for theta in w),
        0.0,
    )
def compute_gradient_enhancement(dy_true, dy_pred, gamma=0.0):
    """Return the gradient-enhancement loss term (LSE applied to partials).

    :param dy_true: true partials, np.ndarray of shape (n_y, n_x, m)
                    where n_y = # outputs, n_x = # inputs, m = # examples
    :param dy_pred: predicted partials, np.ndarray of shape (n_y, n_x, m)
    :param gamma: gradient-enhancement weight, clipped to [0, 1]
    :return: scalar loss contribution
    """
    n_y, n_x, m = dy_pred.shape
    weight = min(max(0.0, gamma), 1.0)  # clip gamma to [0, 1]
    loss = 0.0
    # Accumulate 0.5 * gamma * ||residual||^2 for each (output, input) pair
    for out_idx in range(n_y):
        for in_idx in range(n_x):
            residual = (dy_pred[out_idx, in_idx, :] - dy_true[out_idx, in_idx, :]).reshape(1, m)
            loss += np.squeeze(0.5 * weight * np.dot(residual, residual.T))
    return loss
def lse(y_true, y_pred, lambd=0.0, w=None, dy_true=None, dy_pred=None, gamma=0.0):
    """Least-squares loss for regression, optionally L2-regularized and
    gradient-enhanced, averaged over the training examples.

    :param y_true: true outputs, np.ndarray of shape (n_y, m)
    :param y_pred: predicted outputs, np.ndarray of shape (n_y, m)
    :param lambd: L2 regularization coefficient (used only when w is given)
    :param w: list of layer weight arrays to regularize (optional)
    :param dy_true: true partials of shape (n_y, n_x, m) (optional)
    :param dy_pred: predicted partials of shape (n_y, n_x, m) (optional)
    :param gamma: gradient-enhancement weight
    :return: scalar loss
    """
    n_y, m = y_true.shape
    # Squared-error term, accumulated output by output
    residuals = [y_pred[k, :] - y_true[k, :] for k in range(n_y)]
    cost = sum((np.squeeze(0.5 * np.dot(r, r.T)) for r in residuals), 0.0)
    # Optional L2 penalty on the weights
    if w is not None:
        cost += compute_regularization(w, lambd)
    # Optional gradient-enhancement term (needs both true and predicted partials)
    if dy_true is not None and dy_pred is not None:
        cost += compute_gradient_enhancement(dy_true, dy_pred, gamma)
    return 1.0 / m * cost
if __name__ == "__main__": # pragma: no cover
    # Check that LSE computes correctly
    # Expected total: data term 0.5*1 per example (error = 1 everywhere),
    # regularization 0.5*(1^2 + 2^2) = 2.5, gradient term 0.5*1 per example;
    # averaged over m=100: (50 + 2.5 + 50) / 100 = 1.025
    w = [np.array(1.0), np.array(2.0)]
    f = lambda x: w[0] * x + w[1] * x**2
    dfdx = lambda x: w[0] + 2 * w[1] * x
    m = 100
    lb = -5.0
    ub = 5.0
    x = np.linspace(lb, ub, m)
    y_true = f(x).reshape(1, m)
    y_pred = f(x).reshape(1, m) + 1.0  # constant prediction error of 1
    dy_true = dfdx(x).reshape(1, 1, m)
    dy_pred = dfdx(x).reshape(1, 1, m) + 1.0  # constant gradient error of 1
    loss = lse(
        y_true=y_true,
        y_pred=y_pred,
        dy_true=dy_true,
        dy_pred=dy_pred,
        w=w,
        lambd=1.0,
        gamma=1.0,
    )
    assert loss == 1.025
| 3,498 | 31.700935 | 116 | py |
smt | smt-master/smt/utils/neural_net/model.py | """
G R A D I E N T - E N H A N C E D N E U R A L N E T W O R K S (G E N N)
Author: Steven H. Berguin <steven.berguin@gtri.gatech.edu>
This package is distributed under New BSD license.
"""
import numpy as np
import matplotlib.gridspec as gridspec
from smt.utils.neural_net.data import random_mini_batches
from smt.utils.neural_net.optimizer import Adam
from smt.utils.neural_net.activation import Tanh, Linear
from smt.utils.neural_net.bwd_prop import L_model_backward
from smt.utils.neural_net.fwd_prop import L_model_forward, L_grads_forward
from smt.utils.neural_net.loss import lse
from smt.utils.neural_net.metrics import rsquare
from smt.utils.neural_net.data import normalize_data, load_csv
# TODO: implement batch-norm (deeper networks might suffer from exploding/vanishing gradients during training)
# ------------------------------------ S U P P O R T F U N C T I O N S -----------------------------------------------
def initialize_parameters(layer_dims=None):
    """
    Initialize neural-network parameters with scaled random ("He"-style)
    initialization: W ~ N(0, 1) * sqrt(1 / fan_in), b = 0.

    :param layer_dims: neural architecture [n_0, n_1, ..., n_L] where n_i is
                       the number of nodes in layer i (layer 0 = input layer)
    :return: parameters: dict containing, for each layer l = 1..L:
                 parameters["Wl"]: weight matrix of shape (n_l, n_{l-1})
                 parameters["bl"]: bias column vector of shape (n_l, 1)
    :raises Exception: if layer_dims is None or empty
    """
    if not layer_dims:
        raise Exception("Neural net does not have any layers")
    # Network topology
    number_layers = len(layer_dims) - 1  # input layer doesn't count
    # Parameters
    parameters = {}
    for layer in range(1, number_layers + 1):
        # Scale by sqrt(1 / fan_in) to keep activation variance stable
        parameters["W" + str(layer)] = np.random.randn(
            layer_dims[layer], layer_dims[layer - 1]
        ) * np.sqrt(1.0 / layer_dims[layer - 1])
        parameters["b" + str(layer)] = np.zeros((layer_dims[layer], 1))
    return parameters
# ------------------------------------ C L A S S -----------------------------------------------------------------------
class Model(object):
@property
def number_of_inputs(self):
return self._n_x
@property
def number_of_outputs(self):
return self._n_y
@property
def number_training_examples(self):
return self._m
@property
def layer_dims(self):
return self._layer_dims
@property
def activations(self):
return self._activations
@property
def parameters(self):
return self._parameters
@property
def training_history(self):
return self._training_history
@property
def scale_factors(self):
return self._scale_factors
@property
def training_data(self):
X = self._X_norm * self._scale_factors["x"][1] + self._scale_factors["x"][0]
Y = self._Y_norm * self._scale_factors["y"][1] + self._scale_factors["y"][0]
J = self._J_norm * self._scale_factors["y"][1] / self._scale_factors["x"][1]
return X, Y, J
def __init__(self, **kwargs):
self._parameters = dict()
self._layer_dims = list()
self._activations = list()
self._training_history = dict()
self._scale_factors = {"x": (1, 1), "y": (1, 1)}
self._X_norm = None
self._Y_norm = None
self._J_norm = None
self._n_x = None
self._n_y = None
self._m = None
self._caches = list()
self._J_caches = list()
for name, value in kwargs.items():
setattr(self, name, value)
@classmethod
def initialize(cls, n_x=None, n_y=None, deep=2, wide=12):
layer_dims = [n_x] + [wide] * deep + [n_y]
parameters = initialize_parameters(layer_dims)
activations = [Tanh()] * deep + [Linear()]
attributes = {
"_parameters": parameters,
"_activations": activations,
"_layer_dims": layer_dims,
"_n_x": n_x,
"_n_y": n_y,
}
return cls(**attributes)
    def load_parameters(self, parameters):
        """
        Restore trained parameters {"W1", "b1", ..., "WL", "bL"} and rebuild
        the network topology metadata from their shapes.
        :param parameters: dict of weight matrices "Wl" and bias vectors "bl"
        """
        # Two entries (Wl, bl) per layer
        L = len(parameters) // 2
        deep = L - 1  # number of hidden layers
        wide = parameters["W1"].shape[0]  # hidden width (assumes uniform width -- TODO confirm)
        self._n_x = parameters["W1"].shape[1]
        self._n_y = parameters["W" + str(L)].shape[0]
        self._layer_dims = [self._n_x] + [wide] * deep + [self._n_y]
        # Hidden layers use tanh, output layer is linear (regression)
        self._activations = [Tanh()] * deep + [Linear()]
        self._parameters = parameters
def train(
self,
X,
Y,
J=None,
num_iterations=100,
mini_batch_size=None,
num_epochs=1,
alpha=0.01,
beta1=0.9,
beta2=0.99,
lambd=0.0,
gamma=0.0,
seed=None,
silent=False,
):
"""
Train the neural network
:param X: matrix of shape (n_x, m) where n_x = no. of inputs, m = no. of training examples
:param Y: matrix of shape (n_y, m) where n_y = no. of outputs
:param J: tensor of size (n_y, n_x, m) representing the Jacobian: dY1/dX1 = J[0][0]
dY1/dX2 = J[0][1]
...
dY2/dX1 = J[1][0]
dY2/dX2 = J[1][1]
...
Note: to retrieve the i^th example for dY2/dX1: J[1][0][i] for all i = 1,...,m
:param mini_batch_size: training data batches [batch_1, batch_2, ...] where batch_i = (X, Y, J)_i
:param num_epochs: number of random passes through the entire data set (usually only used with mini-batch)
:param alpha: learning rate
:param beta1: parameter for ADAM optimizer
:param beta2: parameter for ADAM optimizer
:param lambd: regularization parameter
:param gamma: gradient-enhancement parameter
:param num_iterations: maximum number of optimizer iterations (per mini batch)
:param seed: random seed in case user wants to ensure repeatability
:param silent: don't print anything
"""
self._load_training_data(X, Y, J)
if not mini_batch_size:
mini_batch_size = self.number_training_examples
if silent:
is_print = False
elif mini_batch_size != 1:
is_print = False
else:
is_print = True
for e in range(num_epochs):
self._training_history["epoch_" + str(e)] = dict()
mini_batches = random_mini_batches(
self._X_norm, self._Y_norm, self._J_norm, mini_batch_size, seed
)
for b, mini_batch in enumerate(mini_batches):
# Get training data from this mini-batch
X, Y, J = mini_batch
# Optimization (learn parameters by minimizing prediction error)
optimizer = Adam.initialize(
initial_guess=self._parameters,
cost_function=lambda p: self.cost(
p, self.activations, X, Y, J, lambd, gamma
),
grad_function=lambda p: self.grad(
p, self.activations, X, Y, J, lambd, gamma
),
learning_rate=alpha,
beta1=beta1,
beta2=beta2,
)
self._parameters = optimizer.optimize(
max_iter=num_iterations, is_print=is_print
)
# Compute average cost and print output
avg_cost = np.mean(optimizer.cost_history).squeeze()
self._training_history["epoch_" + str(e)][
"batch_" + str(b)
] = optimizer.cost_history
if not silent:
print(
"epoch = {:d}, mini-batch = {:d}, avg cost = {:6.3f}".format(
e, b, avg_cost
)
)
def evaluate(self, X):
"""
Predict output(s) given inputs X.
:param X: inputs to neural network, np array of shape (n_x, m) where n_x = no. inputs, m = no. training examples
:return: Y: prediction, Y = np array of shape (n_y, m) where n_y = no. of outputs and m = no. of examples
"""
assert X.shape[0] == self.number_of_inputs
number_of_examples = X.shape[1]
mu_x, sigma_x = self._scale_factors["x"]
mu_y, sigma_y = self._scale_factors["y"]
X_norm = (X - mu_x) / sigma_x
Y_norm, _ = L_model_forward(X_norm, self.parameters, self.activations)
Y = (Y_norm * sigma_y + mu_y).reshape(
self.number_of_outputs, number_of_examples
)
return Y
def print_parameters(self):
"""
Print model parameters to screen for the user
"""
for key, value in self._parameters.items():
try:
print("{}: {}".format(key, str(value.tolist())))
except:
print("{}: {}".format(key, value))
def print_training_history(self):
"""
Print model parameters to screen for the user
"""
if self._training_history:
for epoch, batches in self._training_history.items():
for batch, history in batches.items():
for iteration, cost in enumerate(history):
print(
"{}, {}, iteration_{}, cost = {}".format(
epoch, batch, iteration, cost
)
)
def plot_training_history(self, title="Training History", is_show_plot=True):
"""
Plot the convergence history of the neural network learning algorithm
"""
import matplotlib.pyplot as plt
if self.training_history:
if len(self.training_history.keys()) > 1:
x_label = "epoch"
y_label = "avg cost"
y = []
for epoch, batches in self.training_history.items():
avg_costs = []
for batch, values in batches.items():
avg_cost = np.mean(np.array(values))
avg_costs.append(avg_cost)
y.append(np.mean(np.array(avg_costs)))
y = np.array(y)
x = np.arange(len(y))
elif len(self.training_history["epoch_0"]) > 1:
x_label = "mini-batch"
y_label = "avg cost"
y = []
for batch, values in self.training_history["epoch_0"].items():
avg_cost = np.mean(np.array(values))
y.append(avg_cost)
y = np.array(y)
x = np.arange(y.size)
else:
x_label = "optimizer iteration"
y_label = "cost"
y = np.array(self.training_history["epoch_0"]["batch_0"])
x = np.arange(y.size)
plt.plot(x, y)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
if is_show_plot:
plt.show()
def _load_training_data(self, X, Y, J=None):
"""
Load and normalize training data
:param X: matrix of shape (n_x, m) where n_x = no. of inputs, m = no. of training examples
:param Y: matrix of shape (n_y, m) where n_y = no. of outputs
:param J: tensor of size (n_y, n_x, m) representing the Jacobian: dY1/dX1 = J[0][0]
dY1/dX2 = J[0][1]
...
dY2/dX1 = J[1][0]
dY2/dX2 = J[1][1]
...
Note: to retrieve the i^th example for dY2/dX1: J[1][0][i] for all i = 1,...,m
"""
assert X.shape[1] == Y.shape[1]
assert Y.shape[0] == Y.shape[0]
assert X.shape[0] == self._n_x
assert Y.shape[0] == self._n_y
if J is not None:
assert X.shape[1] == J.shape[2]
assert X.shape[0] == J.shape[1]
X_norm, Y_norm, J_norm, mu_x, sigma_x, mu_y, sigma_y = normalize_data(X, Y, J)
self._X_norm = X_norm
self._Y_norm = Y_norm
self._J_norm = J_norm
self._scale_factors["x"] = (mu_x, sigma_x)
self._scale_factors["y"] = (mu_y, sigma_y)
self._n_x, self._m = X.shape
self._n_y = Y.shape[0]
def cost(
self,
parameters,
activations,
x,
y_true=None,
dy_true=None,
lambd=0.0,
gamma=0.0,
):
"""
Cost function for training
:param x:
:param parameters:
:param activations:
:param y_true:
:param dy_true:
:param lambd:
:param gamma:
:return:
"""
y_pred, caches = L_model_forward(x, parameters, activations)
dy_pred, dy_caches = L_grads_forward(x, parameters, activations)
w = [value for name, value in parameters.items() if "W" in name]
cost = lse(y_true, y_pred, lambd, w, dy_true, dy_pred, gamma)
return cost
def grad(
self,
parameters,
activations,
x,
y_true=None,
dy_true=None,
lambd=0.0,
gamma=0.0,
):
"""
Gradient of cost function for training
:param x:
:param parameters:
:param activations:
:param y_true:
:param dy_true:
:param lambd:
:param gamma:
:return:
"""
y_pred, caches = L_model_forward(x, parameters, activations)
dy_pred, dy_caches = L_grads_forward(x, parameters, activations)
grad = L_model_backward(
y_pred, y_true, dy_pred, dy_true, caches, dy_caches, lambd, gamma
)
return grad
def gradient(self, X):
"""
Predict output(s) given inputs X.
:param X: inputs to neural network, np array of shape (n_x, m) where n_x = no. inputs, m = no. training examples
:return: J: prediction, J = np array of shape (n_y, n_x, m) = Jacobian
"""
assert X.shape[0] == self.number_of_inputs
number_of_examples = X.shape[1]
mu_x, sigma_x = self._scale_factors["x"]
mu_y, sigma_y = self._scale_factors["y"]
X_norm = (X - mu_x) / sigma_x
Y_norm, _ = L_model_forward(X_norm, self.parameters, self.activations)
J_norm, _ = L_grads_forward(X_norm, self.parameters, self.activations)
J = (J_norm * sigma_y / sigma_x).reshape(
self.number_of_outputs, self.number_of_inputs, number_of_examples
)
return J
def goodness_of_fit(self, X_test, Y_test, J_test=None, response=0, partial=0):
import matplotlib.pyplot as plt
assert X_test.shape[1] == Y_test.shape[1]
assert Y_test.shape[0] == Y_test.shape[0]
assert X_test.shape[0] == self.number_of_inputs
assert Y_test.shape[0] == self.number_of_outputs
if type(J_test) == np.ndarray:
assert X_test.shape[1] == J_test.shape[2]
assert X_test.shape[0] == J_test.shape[1]
number_test_examples = Y_test.shape[1]
Y_pred_test = self.evaluate(X_test)
J_pred_test = self.gradient(X_test)
X_train, Y_train, J_train = self.training_data
Y_pred_train = self.evaluate(X_train)
J_pred_train = self.gradient(X_train)
if type(J_test) == np.ndarray:
test = J_test[response, partial, :].reshape((1, number_test_examples))
test_pred = J_pred_test[response, partial, :].reshape(
(1, number_test_examples)
)
train = J_train[response, partial, :].reshape(
(1, self.number_training_examples)
)
train_pred = J_pred_train[response, partial, :].reshape(
(1, self.number_training_examples)
)
title = "Goodness of fit for dY" + str(response) + "/dX" + str(partial)
else:
test = Y_test[response, :].reshape((1, number_test_examples))
test_pred = Y_pred_test[response, :].reshape((1, number_test_examples))
train = Y_train[response, :].reshape((1, self.number_training_examples))
train_pred = Y_pred_train[response, :].reshape(
(1, self.number_training_examples)
)
title = "Goodness of fit for Y" + str(response)
metrics = dict()
metrics["R_squared"] = np.round(rsquare(test_pred, test), 2).squeeze()
metrics["std_error"] = np.round(
np.std(test_pred - test).reshape(1, 1), 2
).squeeze()
metrics["avg_error"] = np.round(
np.mean(test_pred - test).reshape(1, 1), 2
).squeeze()
# Reference line
y = np.linspace(
min(np.min(test), np.min(train)), max(np.max(test), np.max(train)), 100
)
# Prepare to plot
fig = plt.figure(figsize=(12, 6))
fig.suptitle(title, fontsize=16)
spec = gridspec.GridSpec(ncols=2, nrows=1, wspace=0.25)
# Plot
ax1 = fig.add_subplot(spec[0, 0])
ax1.plot(y, y)
ax1.scatter(test, test_pred, s=20, c="r")
ax1.scatter(train, train_pred, s=100, c="k", marker="+")
plt.legend(["perfect", "test", "train"])
plt.xlabel("actual")
plt.ylabel("predicted")
plt.title("RSquare = " + str(metrics["R_squared"]))
ax2 = fig.add_subplot(spec[0, 1])
error = (test_pred - test).T
weights = np.ones(error.shape) / test_pred.shape[1]
ax2.hist(error, weights=weights, facecolor="g", alpha=0.75)
plt.xlabel("Absolute Prediction Error")
plt.ylabel("Probability")
plt.title(
"$\mu$="
+ str(metrics["avg_error"])
+ ", $\sigma=$"
+ str(metrics["std_error"])
)
plt.grid(True)
plt.show()
return metrics
def run_example(
train_csv, test_csv, inputs, outputs, partials=None
): # pragma: no cover
"""
Example using 2D Rastrigin function (egg-crate-looking function)
usage: test_model(train_csv='train_data.csv',
test_csv='train_data.csv',
inputs=["X[0]", "X[1]"],
outputs=["Y[0]"],
partials=[["J[0][0]", "J[0][1]"]])
:param train_csv: str, csv file name containing training data
:param test_csv: str, csv file name containing test data
:param inputs: list(str), csv column labels corresponding to inputs
:param outputs: list(str), csv column labels corresponding to outputs
:param partials: list(str), csv column labels corresponding to partials
"""
# Sample data
X_train, Y_train, J_train = load_csv(
file=train_csv, inputs=inputs, outputs=outputs, partials=partials
)
X_test, Y_test, J_test = load_csv(
file=test_csv, inputs=inputs, outputs=outputs, partials=partials
)
# Hyper-parameters
alpha = 0.05
beta1 = 0.90
beta2 = 0.99
lambd = 0.1
gamma = 1.0
deep = 2
wide = 12
mini_batch_size = None # None = use all data as one batch
num_iterations = 25
num_epochs = 50
# Training
model = Model.initialize(
n_x=X_train.shape[0], n_y=Y_train.shape[0], deep=deep, wide=wide
)
model.train(
X=X_train,
Y=Y_train,
J=J_train,
alpha=alpha,
lambd=lambd,
gamma=gamma,
beta1=beta1,
beta2=beta2,
mini_batch_size=mini_batch_size,
num_iterations=num_iterations,
num_epochs=num_epochs,
silent=False,
)
model.plot_training_history()
model.goodness_of_fit(
X_test, Y_test
) # model.goodness_of_fit(X_test, Y_test, J_test, partial=1)
if __name__ == "__main__": # pragma: no cover
run_example(
train_csv="train_data.csv",
test_csv="train_data.csv",
inputs=["X[0]", "X[1]"],
outputs=["Y[0]"],
partials=[["J[0][0]", "J[0][1]"]],
)
| 21,367 | 34.495017 | 120 | py |
smt | smt-master/smt/utils/neural_net/data.py | """
G R A D I E N T - E N H A N C E D N E U R A L N E T W O R K S (G E N N)
Author: Steven H. Berguin <steven.berguin@gtri.gatech.edu>
This package is distributed under New BSD license.
"""
import numpy as np
import os
import math
def load_csv(file=None, inputs=None, outputs=None, partials=None):
    """
    Load neural net training data from a CSV file (first row = headers).

    :param file: csv filename containing training data (with headers as first row)
    :param inputs: labels of the inputs, e.g. ["X[0]", "X[1]", "X[2]"]
    :param outputs: labels of the outputs, e.g. ["Y[0]", "Y[1]", "Y[2]"]
    :param partials: labels of the partials, listed row by row of the
                     Jacobian (left to right, top to bottom), e.g.
                     [["J[0][0]", "J[0][1]"], ["J[1][0]", "J[1][1]"]].
                     The label names themselves don't matter, only the order.
                     If None, no gradient data is loaded and J is None.
    :return: (X, Y, J) where
             X -- np.ndarray of shape (n_x, m), n_x = no. of inputs, m = no. of examples
             Y -- np.ndarray of shape (n_y, m), n_y = no. of outputs
             J -- np.ndarray of shape (n_y, n_x, m) with J[i][j] = dY_i/dX_j,
                  or None when partials were not requested
    """
    if not file:
        raise Exception("No file specified")
    if not os.path.isfile(file):
        raise Exception("The file " + file + " does not exist")
    # Check inputs/outputs before touching the file contents (also covers
    # inputs=None / outputs=None, which previously crashed in len())
    if not inputs:
        raise Exception("No inputs specified")
    if not outputs:
        raise Exception("No outputs specified")

    headers = np.genfromtxt(file, delimiter=",", max_rows=1, dtype=str).tolist()
    data = np.genfromtxt(file, delimiter=",", skip_header=1)
    index = headers.index  # column index of a given header label

    n_x = len(inputs)  # number of inputs
    n_y = len(outputs)  # number of outputs
    m = data[:, index(inputs[0])].size  # number of examples

    X = np.zeros((n_x, m))
    for i, x_label in enumerate(inputs):
        X[i, :] = data[:, index(x_label)]

    Y = np.zeros((n_y, m))
    for i, y_label in enumerate(outputs):
        Y[i, :] = data[:, index(y_label)]

    # Single branch instead of the original's duplicated "if partials" checks
    if partials:
        J = np.zeros((n_y, n_x, m))
        for i, response in enumerate(partials):
            for j, dy_label in enumerate(response):
                J[i][j] = data[:, index(dy_label)]
    else:
        J = None

    return X, Y, J
def random_mini_batches(X, Y, J, mini_batch_size=64, seed=None):
    """
    Create a list of random mini-batches from (X, Y, J).

    :param X: np.ndarray of size (n_x, m) containing input features
    :param Y: np.ndarray of size (n_y, m) containing output values
    :param J: np.ndarray of size (n_y, n_x, m) containing the Jacobian,
              or None when no gradient data is available
    :param mini_batch_size: number of examples per mini-batch (capped at m)
    :param seed: random seed for repeatable shuffling
                 (note: this reseeds numpy's global RNG)
    :return: list of synchronously shuffled tuples (mini_batch_X, mini_batch_Y, mini_batch_J)
    """
    np.random.seed(seed)
    m = X.shape[1]
    mini_batches = []

    # Step 1: shuffle (X, Y, J) with one shared permutation so examples
    # stay aligned across the three arrays
    permutations = list(np.random.permutation(m))
    shuffled_X = X[:, permutations].reshape(X.shape)
    shuffled_Y = Y[:, permutations].reshape(Y.shape)
    shuffled_J = J[:, :, permutations].reshape(J.shape) if J is not None else None

    mini_batch_size = min(mini_batch_size, m)

    # Step 2: partition into complete batches of mini_batch_size examples
    num_complete_minibatches = m // mini_batch_size
    for k in range(num_complete_minibatches):
        lo = k * mini_batch_size
        hi = lo + mini_batch_size
        mini_batch_X = shuffled_X[:, lo:hi]
        mini_batch_Y = shuffled_Y[:, lo:hi]
        mini_batch_J = shuffled_J[:, :, lo:hi] if J is not None else None
        mini_batches.append((mini_batch_X, mini_batch_Y, mini_batch_J))

    # Step 3: handle the remainder (last mini-batch < mini_batch_size).
    # Compute the start index explicitly instead of relying on the leaked
    # loop variable k, as the original code did.
    if m % mini_batch_size != 0:
        start = num_complete_minibatches * mini_batch_size
        mini_batch_X = shuffled_X[:, start:]
        mini_batch_Y = shuffled_Y[:, start:]
        mini_batch_J = shuffled_J[:, :, start:] if J is not None else None
        mini_batches.append((mini_batch_X, mini_batch_Y, mini_batch_J))

    return mini_batches
def normalize_data(X, Y, J=None, is_classification=False):
    """
    Normalize training data to help with optimization:

        X_norm = (X - mu_x) / sigma_x
        Y_norm = (Y - mu_y) / sigma_y
        J_norm = J * sigma_x / sigma_y

    Normalizing training data is essential because the neural net learns by
    minimizing a cost function; rescaling conditions the problem for the optimizer.

    :param X: np ndarray of input features, shape (n_x, m) where n_x = no. of inputs, m = no. of examples
    :param Y: np ndarray of output labels, shape (n_y, m) where n_y = no. of outputs
    :param J: np ndarray of size (n_y, n_x, m), Jacobian of Y w.r.t. X, or None.
              dYi/dXj for the k-th example is J[i][j][k].
    :param is_classification: True to leave {0, 1} class labels unnormalized
    :return: X_norm, Y_norm, J_norm, mu_x, sigma_x, mu_y, sigma_y: normalized data and scale factors
    """
    # Initialize
    X_norm = np.zeros(X.shape)
    Y_norm = np.zeros(Y.shape)
    if J is not None:
        J_norm = np.zeros(J.shape)
    else:
        J_norm = None

    # Dimensions
    n_x, m = X.shape
    n_y, _ = Y.shape

    # Normalize inputs. BUG FIX: a constant feature has zero standard
    # deviation, which previously produced a divide-by-zero and NaNs; fall
    # back to sigma = 1 so a constant row simply maps to zero.
    mu_x = np.zeros((n_x, 1))
    sigma_x = np.ones((n_x, 1))
    for i in range(0, n_x):
        mu_x[i] = np.mean(X[i])
        std = np.std(X[i])
        sigma_x[i] = std if std > 0 else 1.0
        X_norm[i] = (X[i] - mu_x[i]) / sigma_x[i]

    # Normalize outputs (same zero-variance guard as for the inputs)
    mu_y = np.zeros((n_y, 1))
    sigma_y = np.ones((n_y, 1))
    if is_classification:
        Y_norm = Y  # no need to normalize {0, 1} classes
    else:
        for i in range(0, n_y):
            mu_y[i] = np.mean(Y[i])
            std = np.std(Y[i])
            sigma_y[i] = std if std > 0 else 1.0
            Y_norm[i] = (Y[i] - mu_y[i]) / sigma_y[i]

    # Normalize partials (chain rule: d(Y_norm)/d(X_norm) = dY/dX * sigma_x / sigma_y)
    if J is not None:
        for i in range(0, n_y):
            for j in range(0, n_x):
                J_norm[i, j] = J[i, j] * sigma_x[j] / sigma_y[i]

    return X_norm, Y_norm, J_norm, mu_x, sigma_x, mu_y, sigma_y
# Manual smoke test: requires a local "train_data.csv" for this module.
# Verifies that load_csv reads known values back, that normalize_data can be
# inverted with the returned scale factors, and that random_mini_batches
# keeps X/Y/J synchronized. Not run under pytest (pragma: no cover).
if __name__ == "__main__":  # pragma: no cover
    # Check that data is read in correctly
    # NOTE(review): the local name `csv` shadows the stdlib csv module here.
    csv = "train_data.csv"
    x_labels = ["X[0]", "X[1]"]
    y_labels = ["Y[0]"]
    dy_labels = [["J[0][0]", "J[0][1]"]]
    X, Y, J = load_csv(file=csv, inputs=x_labels, outputs=y_labels, partials=dy_labels)
    # Spot-check a few hard-coded values known to be in train_data.csv
    assert X[0, 6] == 0.071429
    assert X[1, 15] == -0.821429
    assert Y[0, 21] == 7.331321
    assert J[0, 0, 57] == 51.409635
    assert J[0, 1, 209] == 59.252401
    X_norm, Y_norm, J_norm, mu_x, sigma_x, mu_y, sigma_y = normalize_data(X, Y, J)
    # Denormalizing with the returned scale factors must recover the raw data
    for i in range(X_norm.shape[1]):
        for j in range(X.shape[0]):
            assert abs(np.squeeze(X_norm[j, i] * sigma_x[j] + mu_x[j]) - X[j, i]) < 1e-6
    for i in range(Y_norm.shape[1]):
        for j in range(Y.shape[0]):
            assert abs(np.squeeze(Y_norm[j, i] * sigma_y[j] + mu_y[j]) - Y[j, i]) < 1e-6
    # Jacobian denormalization: J = J_norm * sigma_y / sigma_x
    for i in range(J_norm.shape[2]):
        for j in range(X.shape[0]):
            for k in range(Y.shape[0]):
                assert (
                    abs(
                        np.squeeze(J_norm[k, j, i] * sigma_y[k] / sigma_x[j])
                        - J[k, j, i]
                    )
                    < 1e-6
                )
    # Mini-batches must keep X, Y, J example counts in lockstep
    mini_batches = random_mini_batches(
        X_norm, Y_norm, J_norm, mini_batch_size=32, seed=1
    )
    for mini_batch in mini_batches:
        X_batch, Y_batch, J_batch = mini_batch
        assert len(mini_batch) == 3
        assert X_batch.shape[0] == X.shape[0]
        assert Y_batch.shape[0] == Y.shape[0]
        assert J_batch.shape[0:2] == J.shape[0:2]
        assert X_batch.shape[1] <= 32
        assert X_batch.shape[1] == Y_batch.shape[1]
        assert X_batch.shape[1] == J_batch.shape[2]
| 9,754 | 37.557312 | 116 | py |
smt | smt-master/smt/utils/neural_net/bwd_prop.py | """
G R A D I E N T - E N H A N C E D N E U R A L N E T W O R K S (G E N N)
Author: Steven H. Berguin <steven.berguin@gtri.gatech.edu>
This package is distributed under New BSD license.
"""
import numpy as np
EPS = np.finfo(float).eps # small number to avoid division by zero
def initialize_back_prop(AL, Y, AL_prime, Y_prime):
    """
    Seed the backward pass with the loss gradients at the output layer.

    Arguments:
    :param AL -- neural net predictions from forward propagation
                 >> a numpy array of shape (n_y, m) where n_y = no. outputs, m = no. examples
    :param Y -- true "label" (classification) or "value" (regression)
                 >> a numpy array of shape (n_y, m)
    :param AL_prime -- predicted partials of the outputs w.r.t. the inputs: AL' = d(AL)/dX
                 >> a numpy array of shape (n_y, n_x, m) where n_x = no. inputs
    :param Y_prime -- true partials of the outputs w.r.t. the inputs: Y' = d(Y)/dX
                 >> a numpy array of shape (n_y, n_x, m)

    Returns:
    :return dAL -- gradient of the loss w.r.t. the last-layer activations, d(L)/dAL
                 >> a numpy array of shape (n_y, m)
    :return dAL_prime -- gradient of the loss w.r.t. the last-layer activation
                 partials, d(L)/dAL', a numpy array of shape (n_y, n_x, m)
    """
    # For the least-squares loss both seed gradients are simply the residuals.
    residual = AL - Y.reshape(AL.shape)
    residual_prime = AL_prime - Y_prime
    return residual, residual_prime
def linear_activation_backward(dA, dA_prime, cache, J_cache, lambd, gamma):
    """
    Implement backward propagation for one LINEAR->ACTIVATION layer for the regression least squares estimation.

    Arguments:
    :param dA -- post-activation gradient w.r.t. A for current layer l, dA = d(L)/dA where L is the loss function
                 >> a numpy array of shape (n_l, m) where n_l = no. nodes in current layer, m = no. of examples
    :param dA_prime -- post-activation gradient w.r.t. A' for current layer l, dA' = d(L)/dA'
                 where A' = d(A)/dX
                 >> a numpy array of shape (n_l, n_x, m) where n_x = no. of inputs (X1, X2, ...)
    :param cache -- tuple (A_prev, Z, W, b, activation) stored by linear_activation_forward()
                 where A_prev are the previous layer's activations (n_prev, m), Z = W A_prev + b,
                 W/b are the layer parameters, and activation exposes
                 first_derivative(Z) / second_derivative(Z)
    :param J_cache -- list of per-input caches stored by L_grads_forward():
                 [..., (j, Z_prime_j, A_prime_j_prev), ...]
                 where j is the input index, Z_prime_j = d(Z)/dX_j for this layer (n_l, m),
                 and A_prime_j_prev = d(A_prev)/dX_j for the previous layer (n_prev, m)
    :param lambd: float, regularization parameter
    :param gamma: float, gradient-enhancement parameter (0 disables the gradient terms)

    :return dA_prev -- gradient of the loss w.r.t. the previous layer's activations, same shape as A_prev
    :return dW -- gradient of the cost w.r.t. W (current layer l), same shape as W
    :return db -- gradient of the cost w.r.t. b (current layer l), same shape as b
    :return dA_prime_prev -- gradient of the loss w.r.t. the previous layer's activation partials,
                 a numpy array of shape (n_prev, n_x, m)
    """
    # Extract information from current layer cache (avoids recomputing what was previously computed)
    A_prev, Z, W, b, activation = cache
    # Some dimensions that will be useful
    m = A_prev.shape[1]  # number of examples
    n = len(J_cache)  # number of inputs (one J_cache entry per input variable)
    # 1st derivative of activation function A = G(Z)
    G_prime = activation.first_derivative(Z)
    # 0th-order (value) terms; L2 regularization only affects dW, not db
    dW = (
        1.0 / m * np.dot(G_prime * dA, A_prev.T) + lambd / m * W
    )  # dW = d(J)/dW where J is the cost function
    db = 1.0 / m * np.sum(G_prime * dA, axis=1, keepdims=True)  # db = d(J)/db
    dA_prev = np.dot(
        W.T, G_prime * dA
    )  # dA_prev = d(L)/dA_prev where A_prev = previous layer activation
    # Initialize dA_prime_prev = d(L)/dA_prime_prev (zero when gamma == 0)
    dA_prime_prev = np.zeros((W.shape[1], n, m))
    # Gradient enhancement: add the 1st-order (partials) contributions
    if gamma != 0:
        # 2nd derivative of activation function A = G(Z)
        G_prime_prime = activation.second_derivative(Z)
        # Loop over partials, d()/dX_j
        for j_cache in J_cache:
            # Per-input cache for the derivative of this layer w.r.t. the j-th input
            j, Z_prime_j, A_prime_j_prev = j_cache
            # Slice out d(L)/dA'_j for this input (reshape to (n_l, m))
            dA_prime_j = dA_prime[:, j, :].reshape(Z_prime_j.shape)
            # Accumulate the gradient-enhanced contributions to dW, db,
            # dA_prev and dA_prime_prev (product/chain rule through A' = G'(Z) Z')
            dW += (
                gamma
                / m
                * (
                    np.dot(dA_prime_j * G_prime_prime * Z_prime_j, A_prev.T)
                    + np.dot(dA_prime_j * G_prime, A_prime_j_prev.T)
                )
            )
            db += (
                gamma
                / m
                * np.sum(dA_prime_j * G_prime_prime * Z_prime_j, axis=1, keepdims=True)
            )
            dA_prev += gamma * np.dot(W.T, dA_prime_j * G_prime_prime * Z_prime_j)
            dA_prime_prev[:, j, :] = gamma * np.dot(W.T, dA_prime_j * G_prime)
    return dA_prev, dW, db, dA_prime_prev
def L_model_backward(AL, Y, AL_prime, Y_prime, caches, J_caches, lambd, gamma):
    """
    Run backward propagation through the whole network.

    Arguments:
    :param AL -- neural net predictions, numpy array of shape (n_y, m)
    :param Y -- true values/labels, numpy array of shape (n_y, m)
    :param AL_prime -- predicted partials d(AL)/dX, numpy array of shape (n_y, n_x, m)
    :param Y_prime -- true partials d(Y)/dX, numpy array of shape (n_y, n_x, m)
    :param caches -- list of per-layer caches (A_prev, Z, W, b, activation)
                     stored by L_model_forward(), one per layer
    :param J_caches -- list of per-layer lists of per-input caches
                       (j, Z_prime_j, A_prime_j_prev) stored by L_grads_forward()
    :param lambd: float, regularization parameter
    :param gamma: float, gradient-enhancement parameter

    :return grads -- dict of cost-function gradients keyed per layer:
                     grads["W1"], grads["b1"], ..., grads["WL"], grads["bL"]
    """
    grads = {}
    num_layers = len(caches)
    Y = Y.reshape(AL.shape)  # make Y broadcast-compatible with AL

    # Seed the backward pass with the loss gradients at the output layer
    dA, dA_prime = initialize_back_prop(AL, Y, AL_prime, Y_prime)

    # Walk the layers from the output back to the input, threading the
    # activation gradients through each LINEAR->ACTIVATION step.
    for layer in range(num_layers, 0, -1):
        dA, dW, db, dA_prime = linear_activation_backward(
            dA, dA_prime, caches[layer - 1], J_caches[layer - 1], lambd, gamma
        )
        grads["W" + str(layer)] = dW
        grads["b" + str(layer)] = db
    return grads
| 11,439 | 49.396476 | 128 | py |
smt | smt-master/smt/utils/neural_net/metrics.py | """
G R A D I E N T - E N H A N C E D N E U R A L N E T W O R K S (G E N N)
Author: Steven H. Berguin <steven.berguin@gtri.gatech.edu>
This package is distributed under New BSD license.
"""
import numpy as np
def rsquare(Y_pred, Y_true):
    """
    Compute the coefficient of determination (R-square) for a single response.

    NOTE: the mean is taken over all of Y_true, so this is intended for one
    response at a time; wrap a loop around it for multiple responses.

    Arguments:
    Y_pred -- predictions, numpy array of shape (n_y, m) where m = no. of examples
    Y_true -- true values, numpy array of shape (n_y, m)

    Return:
    R2 -- the R-square value per row, numpy array of shape (n_y,)
    """
    eps = 1e-8  # guards against a zero total sum of squares
    y_mean = np.mean(Y_true)
    ss_residual = np.sum((Y_pred - Y_true) ** 2, axis=1)
    ss_total = np.sum((Y_true - y_mean) ** 2 + eps, axis=1)
    return 1 - ss_residual / ss_total
| 1,049 | 31.8125 | 119 | py |
smt | smt-master/smt/utils/neural_net/__init__.py | from smt.utils.neural_net import activation
from smt.utils.neural_net import bwd_prop
from smt.utils.neural_net import fwd_prop
from smt.utils.neural_net import loss
from smt.utils.neural_net import model
from smt.utils.neural_net import optimizer
from smt.utils.neural_net import data
| 286 | 34.875 | 43 | py |
smt | smt-master/smt/utils/neural_net/fwd_prop.py | """
G R A D I E N T - E N H A N C E D N E U R A L N E T W O R K S (G E N N)
Author: Steven H. Berguin <steven.berguin@gtri.gatech.edu>
This package is distributed under New BSD license.
"""
import numpy as np
def linear_activation_forward(A_prev, W, b, activation):
    """
    Forward propagation through a single layer: A = g(W @ A_prev + b).

    Arguments:
    :param A_prev -- post-activation values from the previous layer
                 >> numpy array of shape (n_prev, m) where n_prev = no. nodes in previous layer
    :param W -- weights of the current layer, numpy array of shape (n, n_prev)
    :param b -- biases of the current layer, numpy array of shape (n, 1)
    :param activation -- activation object exposing evaluate(Z)

    Return:
    :return A -- post-activation values of the current layer, shape (n, m)
    :return cache -- tuple (A_prev, Z, W, b, activation) stored for the backward pass
    """
    linear_out = np.dot(W, A_prev) + b
    post_activation = activation.evaluate(linear_out)
    return post_activation, (A_prev, linear_out, W, b, activation)
def L_model_forward(X, parameters, activations):
    """
    Forward propagation through the entire neural network.

    Arguments:
    :param X -- data, numpy array of shape (n_x, m) where n_x = no. of inputs, m = no. of examples
    :param parameters -- network parameters keyed per layer:
                 {"W1": (n[1], n[0]) array, "b1": (n[1], 1) array, ...,
                  "WL": (n[L], n[L-1]) array, "bL": (n[L], 1) array}
                 where n[0] = n_x and n[L] = n_y
    :param activations -- list of Activation objects, one per layer

    Return:
    :return AL -- last post-activation values, numpy array of shape (n_y, m)
    :return caches -- list of the (A_prev, Z, W, b, activation) caches produced by
                 linear_activation_forward(), one per layer, for use in backprop
    """
    caches = []
    A = X
    num_layers = len(activations)  # hidden + output layers (input layer excluded)
    for layer in range(1, num_layers + 1):
        A, cache = linear_activation_forward(
            A,
            parameters["W" + str(layer)],
            parameters["b" + str(layer)],
            activation=activations[layer - 1],
        )
        caches.append(cache)
    return A, caches
def L_grads_forward(X, parameters, activations):
    """
    Compute the Jacobian of the neural network outputs w.r.t. its inputs at X.

    Argument:
    :param X -- data, numpy array of shape (n_x, m) where n_x = no. of inputs, m = no. of examples
                (a 1-D array is treated as a single example and reshaped to (n_x, 1))
    :param parameters -- network parameters keyed per layer:
                 {"W1": (n[1], n[0]) array, "b1": (n[1], 1) array, ...,
                  "WL": (n[L], n[L-1]) array, "bL": (n[L], 1) array}
                 where n[0] = n_x and n[L] = n_y
    :param activations -- list of Activation objects, one per layer

    :return JL -- numpy array of size (n_y, n_x, m) containing the Jacobian of the outputs
                  w.r.t. X (squeezed to (n_y, n_x) when m == 1)
    :return J_caches -- per-layer lists of per-input caches used later by backprop:
                 J_caches[l-1] = [..., (j, Z_prime_j, A_prime_j_prev), ...]
                 where
                     j -- input index (X1, X2, ..., Xj, ...)
                     Z_prime_j -- d(Z)/dX_j for layer l, shape (n_l, m)
                     A_prime_j_prev -- d(A_prev)/dX_j for layer l-1, shape (n_prev, m)
    """
    J_caches = []
    # Dimensions
    L = len(activations)  # number of layers in network
    n_y = parameters["W" + str(L)].shape[0]  # number of outputs
    try:
        n_x, m = X.shape  # number of inputs, number of examples
    except ValueError:
        # 1-D input: treat as a single example column
        n_x = X.size
        m = 1
        X = X.reshape(n_x, m)
    # Initialize Jacobian for layer 0 (one example): d(X)/dX = identity
    I = np.eye(n_x, dtype=float)
    # Initialize Jacobian for layer 0 (all m examples)
    J0 = np.repeat(I.reshape((n_x, n_x, 1)), m, axis=2)
    # Initialize Jacobian for last layer
    JL = np.zeros((n_y, n_x, m))
    # One (initially empty) cache list per layer
    for l in range(0, L):
        J_caches.append([])
    # Loop over partials: propagate d(.)/dX_j forward one input at a time
    for j in range(0, n_x):
        # Initialize (first layer): activations are the inputs themselves
        A = np.copy(X).reshape(n_x, m)
        A_prime_j = J0[:, j, :]
        # Loop over layers
        for l in range(1, L + 1):
            # Previous layer
            A_prev = A
            A_prime_j_prev = A_prime_j
            # Get parameters for this layer
            W = parameters["W" + str(l)]
            b = parameters["b" + str(l)]
            activation = activations[l - 1]
            # Linear
            Z = np.dot(W, A_prev) + b
            # The following is not needed here, but it is needed later, during backprop.
            # We will thus compute it here and store it as a cache for later use.
            Z_prime_j = np.dot(W, A_prime_j_prev)
            # Activation
            A = activation.evaluate(Z)
            G_prime = activation.first_derivative(Z)
            # Current layer output gradient (chain rule: A'_j = G'(Z) * Z'_j)
            # NOTE(review): np.dot(W, A_prime_j_prev) is recomputed here even
            # though it equals Z_prime_j above; left as-is.
            A_prime_j = G_prime * np.dot(W, A_prime_j_prev)
            # Store cache
            J_caches[l - 1].append((j, Z_prime_j, A_prime_j_prev))
        # Store partial for the j-th input
        JL[:, j, :] = A_prime_j
    # Single example: drop the trailing example axis
    if m == 1:
        JL = JL[:, :, 0]
    return JL, J_caches
| 9,532 | 45.960591 | 120 | py |
smt | smt-master/smt/utils/neural_net/optimizer.py | """
G R A D I E N T - E N H A N C E D N E U R A L N E T W O R K S (G E N N)
Author: Steven H. Berguin <steven.berguin@gtri.gatech.edu>
This package is distributed under New BSD license.
"""
import numpy as np
EPS = np.finfo(float).eps # small number to avoid division by zero
# ------------------------------------ S U P P O R T F U N C T I O N S -----------------------------------------------
def finite_difference(parameters, fun=None, dx=1e-6):
    """
    Approximate d(fun)/d(parameters) with central differences.

    :param parameters: dict of np arrays, the point at which to evaluate the gradient
    :param fun: function handle taking the parameters dict and returning a scalar
    :param dx: finite difference step
    :return: dict with the same keys/shapes as parameters, holding the derivatives
    """
    grads = {}
    for key in parameters.keys():
        original = np.copy(parameters[key])
        rows, cols = original.shape
        derivative = np.zeros((rows, cols))
        for r in range(rows):
            for c in range(cols):
                # Forward step
                parameters[key][r, c] = original[r, c] + dx
                f_plus = fun(parameters)
                parameters[key] = np.copy(original)
                # Backward step
                parameters[key][r, c] = original[r, c] - dx
                f_minus = fun(parameters)
                parameters[key] = np.copy(original)
                # Central difference
                derivative[r, c] = np.divide(f_plus - f_minus, 2 * dx)
        grads[key] = derivative
    return grads
# ------------------------------------ O P T I M I Z E R C L A S S ---------------------------------------------------
class Optimizer(object):
    """
    Base class for the gradient-based optimizers used to train the neural net.

    Subclasses implement _update_current_design() with a specific update rule
    (steepest descent in GD, Adam in Adam). The base class provides the driver
    loop (optimize), a backtracking line search, and a finite-difference
    gradient-check utility.
    """

    @property
    def optimum(self):
        """Best design found by optimize() (dict of parameter arrays)."""
        return self._optimum_design

    @property
    def current_design(self):
        """Current design point (dict of parameter arrays)."""
        return self._current_design

    def search_direction(self):
        """Most recent search direction (gradient dict).

        NOTE(review): unlike the sibling accessors this is a plain method,
        not a property; kept as-is for backward compatibility with callers
        that invoke optimizer.search_direction().
        """
        return self._search_direction

    @property
    def cost_history(self):
        """Costs recorded at the initial point and after each iteration."""
        return self._cost_history

    @property
    def design_history(self):
        """Design dicts recorded in lockstep with cost_history."""
        return self._design_history

    @property
    def cost(self):
        """Cost at the current design."""
        return self._current_cost

    def __init__(self, **kwargs):
        # Hyper-parameters (overridable through kwargs)
        self.learning_rate = 0.1
        self.beta_1 = 0.9
        self.beta_2 = 0.99
        # User-supplied callbacks
        self.user_cost_function = None
        self.user_grad_function = None
        # Internal state
        self._current_design = None
        self._previous_design = None
        self._search_direction = None
        self._cost_history = []
        self._design_history = []
        self._optimum_design = None
        self._current_cost = None
        self._current_iteration = 0
        self.initial_guess = None
        for name, value in kwargs.items():
            setattr(self, name, value)

    @classmethod
    def initialize(
        cls,
        initial_guess,
        cost_function,
        grad_function=None,
        learning_rate=0.05,
        beta1=0.9,
        beta2=0.99,
    ):
        """
        Convenience constructor.

        :param initial_guess: dict of np arrays, the starting design
        :param cost_function: callable mapping a design dict to a scalar cost
        :param grad_function: optional callable mapping a design dict to a
                              gradient dict (finite differences if None)
        :param learning_rate: maximum step size used by the line search
        :param beta1: momentum decay rate (used by Adam)
        :param beta2: RMS decay rate (used by Adam)
        """
        attributes = {
            "user_cost_function": cost_function,
            "user_grad_function": grad_function,
            "learning_rate": learning_rate,
            "beta_1": beta1,
            "beta_2": beta2,
            "initial_guess": initial_guess,
            "_current_design": initial_guess.copy(),
        }
        return cls(**attributes)

    def _cost_function(self, x):
        """Evaluate the user cost function at design x."""
        return self.user_cost_function(x)

    def _grad_function(self, x):
        """Evaluate the gradient at x (finite differences when no analytic gradient)."""
        if self.user_grad_function is not None:
            return self.user_grad_function(x)
        else:
            return finite_difference(x, fun=self.user_cost_function)

    def _update_current_design(self, learning_rate=0.05):
        """
        Take one optimization step (no-op in the base class; subclasses
        implement the actual update rule).
        """
        pass

    def grad_check(self, parameters, tol=1e-6):  # pragma: no cover
        """
        Compare the analytical gradient against finite differences.

        :param parameters: point at which to evaluate gradient
        :param tol: acceptable error between finite difference and analytical
        """
        grads = self._grad_function(parameters)
        grads_FD = finite_difference(parameters, fun=self.user_cost_function)
        for key in parameters.keys():
            numerator = np.linalg.norm(grads[key] - grads_FD[key])
            denominator = np.linalg.norm(grads[key]) + np.linalg.norm(grads_FD[key])
            difference = numerator / (denominator + EPS)
            if difference <= tol or numerator <= tol:
                print("The gradient of {} is correct".format(key))
            else:
                print("The gradient of {} is wrong".format(key))
                print("Finite dif: grad[{}] = {}".format(key, str(grads_FD[key].squeeze())))
                print("Analytical: grad[{}] = {}".format(key, str(grads[key].squeeze())))

    def backtracking_line_search(self, tau=0.5):
        """
        Shrink the step size until the cost decreases (or the step becomes
        negligible), starting from self.learning_rate.

        :param tau: hyper-parameter in (0, 1) used to reduce the step size at
                    each backtracking pass
        """
        tau = max(0.0, min(1.0, tau))  # make sure 0 < tau < 1
        converged = False
        self._previous_design = self._current_design.copy()
        while not converged:
            self._update_current_design(learning_rate=self.learning_rate * tau)
            if self._cost_function(self._current_design) < self._cost_function(
                self._previous_design
            ):
                converged = True
            elif self.learning_rate * tau < 1e-6:
                # Step is negligible; accept and stop searching
                converged = True
            else:
                # NOTE(review): squaring tau shrinks the step super-geometrically
                # (tau, tau^2, tau^4, ...). Conventional backtracking multiplies
                # by a constant factor; kept as-is to preserve behavior.
                tau *= tau

    def optimize(self, max_iter=100, is_print=True):
        """
        Optimization logic (main driver).

        :param max_iter: maximum number of iterations
        :param is_print: True = print cost at every iteration, False = silent
        :return: optimum
        """
        # Stopping criteria (Vanderplaats, ch. 3, p. 121)
        converged = False
        N1 = 0
        N1_max = 100  # num consecutive passes over which abs convergence criterion must be satisfied before stopping
        N2 = 0
        N2_max = 100  # num of consecutive passes over which rel convergence criterion must be satisfied before stopping
        epsilon_absolute = 1e-7  # absolute error criterion
        epsilon_relative = 1e-7  # relative error criterion
        # Record the starting point
        self._current_cost = self._cost_function(self._current_design).squeeze()
        self._cost_history.append(self._current_cost)
        self._design_history.append(self._current_design.copy())
        # Iterative update
        for i in range(0, max_iter):
            self._current_iteration = i
            self._search_direction = self._grad_function(self._current_design)
            self.backtracking_line_search()
            self._current_cost = self._cost_function(self._current_design).squeeze()
            self._cost_history.append(self._current_cost)
            self._design_history.append(self._current_design.copy())
            if is_print:
                print(
                    "iteration = {:d}, cost = {:6.3f}".format(
                        i, float(self._current_cost)
                    )
                )
            # Absolute convergence criterion
            if i > 1:
                dF1 = abs(self._cost_history[-1] - self._cost_history[-2])
                if dF1 < epsilon_absolute * self._cost_history[0]:
                    N1 += 1
                else:
                    N1 = 0
                if N1 > N1_max:
                    converged = True
                    if is_print:
                        print("Absolute stopping criterion satisfied")
                # Relative convergence criterion
                dF2 = abs(self._cost_history[-1] - self._cost_history[-2]) / max(
                    abs(self._cost_history[-1]), 1e-6
                )
                if dF2 < epsilon_relative:
                    N2 += 1
                else:
                    N2 = 0
                if N2 > N2_max:
                    converged = True
                    if is_print:
                        print("Relative stopping criterion satisfied")
            # Maximum iteration convergence criterion.
            # BUG FIX: i iterates over range(max_iter), so the original test
            # `i == max_iter` was unreachable and the message never printed;
            # compare against the last loop index instead.
            if i == max_iter - 1:
                if is_print:
                    print("Maximum optimizer iterations reached")
            if converged:
                break
        self._optimum_design = self._current_design.copy()
        return self.optimum
class GD(Optimizer):
    """Plain (steepest-descent) gradient descent optimizer."""

    def _update_current_design(self, learning_rate=0.05):
        """Take one steepest-descent step from the previous design."""
        for name, value in self._previous_design.items():
            self._current_design[name] = (
                value - learning_rate * self._search_direction[name]
            )
class Adam(Optimizer):
    """Adam optimizer: gradient descent with momentum and RMS scaling."""

    def __init__(self, **kwargs):
        super(Adam, self).__init__(**kwargs)
        self.v = None  # first-moment (momentum) estimates, one array per parameter
        self.s = None  # second-moment (RMS) estimates, one array per parameter

    def _update_current_design(self, learning_rate=0.05, beta_1=0.9, beta_2=0.99):
        """Take one Adam step from the previous design."""
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        t = self._current_iteration + 1  # 1-based step count for bias correction
        # Lazily allocate the moment estimates on the first update
        if self.v is None:
            self.v = {
                name: np.zeros(value.shape)
                for name, value in self._current_design.items()
            }
        if self.s is None:
            self.s = {
                name: np.zeros(value.shape)
                for name, value in self._current_design.items()
            }
        for name in self._current_design.keys():
            grad = self._search_direction[name]
            self.v[name] = self.beta_1 * self.v[name] + (1.0 - beta_1) * grad
            self.s[name] = self.beta_2 * self.s[name] + (1.0 - beta_2) * np.square(grad)
            # Bias-corrected moment estimates
            v_hat = self.v[name] / (1.0 - self.beta_1**t)
            s_hat = self.s[name] / (1.0 - self.beta_2**t)
            self._current_design[name] = self._previous_design[
                name
            ] - learning_rate * v_hat / (np.sqrt(s_hat) + EPS)
def run_example(use_adam=True):  # pragma: no cover
    """Visual demo: minimize the 2-D Rosenbrock function (requires matplotlib).

    :param use_adam: True to run the Adam optimizer, False for plain gradient descent
    """
    import matplotlib.pyplot as plt

    # Test function: Rosenbrock y = (1 - x1)^2 + 100 (x2 - x1^2)^2,
    # returned along with its analytic gradient (minimum at (1, 1))
    def rosenbrock(parameters):
        x1 = parameters["x1"]
        x2 = parameters["x2"]
        y = (1 - x1) ** 2 + 100 * (x2 - x1**2) ** 2
        y = y.reshape(1, 1)
        dydx = dict()
        dydx["x1"] = -2 * (1 - x1) - 400 * x1 * (x2 - x1**2)
        dydx["x2"] = 200 * (x2 - x1**2)
        return y, dydx

    # Initial guess (arrays shaped (1, 1) as the optimizer expects)
    initial_guess = dict()
    initial_guess["x1"] = np.array([1.25]).reshape((1, 1))
    initial_guess["x2"] = np.array([-1.75]).reshape((1, 1))

    # Function handles to be passed to the optimizer
    f = lambda x: rosenbrock(parameters=x)[0]
    dfdx = lambda x: rosenbrock(parameters=x)[1]

    # Learning rate (maximum step size for the line search)
    alpha = 0.5

    # Optimize with the selected update rule
    if use_adam:
        optimizer = Adam.initialize(
            initial_guess=initial_guess,
            cost_function=f,
            grad_function=dfdx,
            learning_rate=alpha,
        )
    else:
        optimizer = GD.initialize(
            initial_guess=initial_guess,
            cost_function=f,
            grad_function=dfdx,
            learning_rate=alpha,
        )
    # Sanity-check the analytic gradient against finite differences, then run
    optimizer.grad_check(initial_guess)
    optimizer.optimize(max_iter=1000)

    # For plotting initial and final answer
    x0 = np.array([initial_guess["x1"].squeeze(), initial_guess["x2"].squeeze()])
    xf = np.array(
        [optimizer.optimum["x1"].squeeze(), optimizer.optimum["x2"].squeeze()]
    )

    # For plotting contours (sample the cost on a uniform grid)
    lb = -2.0
    ub = 2.0
    m = 100
    x1 = np.linspace(lb, ub, m)
    x2 = np.linspace(lb, ub, m)
    X1, X2 = np.meshgrid(x1, x2)
    Y = np.zeros(X1.shape)
    for i in range(0, m):
        for j in range(0, m):
            Y[i, j] = f({"x1": np.array([X1[i, j]]), "x2": np.array([X2[i, j]])})

    # Plot the optimization path over the contours
    x1_his = np.array([design["x1"] for design in optimizer.design_history]).squeeze()
    x2_his = np.array([design["x2"] for design in optimizer.design_history]).squeeze()
    plt.plot(x1_his, x2_his)
    plt.plot(x0[0], x0[1], "+", ms=15)
    plt.plot(xf[0], xf[1], "o")
    plt.plot(np.array([1.0]), np.array([1.0]), "x")
    plt.legend(["history", "initial guess", "predicted optimum", "true optimum"])
    plt.contour(X1, X2, Y, 50, cmap="RdGy")
    if use_adam:
        plt.title("adam")
    else:
        plt.title("gradient descent")
    plt.show()
# Manual visual demo (requires matplotlib); not exercised by the test suite.
if __name__ == "__main__":  # pragma: no cover
    run_example()
| 12,861 | 32.407792 | 120 | py |
smt | smt-master/smt/surrogate_models/rmtc.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import numpy as np
import scipy.sparse
from numbers import Integral
from smt.utils.linear_solvers import get_solver
from smt.utils.line_search import get_line_search_class
from smt.surrogate_models.rmts import RMTS
from smt.surrogate_models.rmtsclib import PyRMTC
class RMTC(RMTS):
    """
    Regularized Minimal-energy Tensor-product Cubic hermite spline (RMTC) interpolant.
    RMTC divides the n-dimensional space using n-dimensional box elements.
    Each n-D box is represented using a tensor-product of cubic functions,
    one in each dimension. The coefficients of the cubic functions are
    computed by minimizing the second derivatives of the interpolant under
    the condition that it interpolates or approximates the training points.
    Advantages:
    - Extremely fast to evaluate
    - Evaluation/training time are relatively insensitive to the number of
    training points
    - Avoids oscillations
    Disadvantages:
    - Training time scales poorly with the # dimensions (too slow beyond 4-D)
    - The user must choose the number of elements in each dimension
    """
    name = "RMTC"
    def _initialize(self):
        """Declare the RMTC-specific 'num_elements' option on top of the RMTS options."""
        super(RMTC, self)._initialize()
        declare = self.options.declare
        declare(
            "num_elements",
            4,
            types=(Integral, list, np.ndarray),
            desc="# elements in each dimension - ndarray [nx]",
        )
    def _setup(self):
        """Compute all problem sizes (self.num) and initialize the compiled backend (PyRMTC)."""
        options = self.options
        nx = self.training_points[None][0][0].shape[1]
        # Broadcast scalar per-dimension options to length-nx arrays.
        for name in ["smoothness", "num_elements"]:
            if isinstance(options[name], (int, float)):
                options[name] = [options[name]] * nx
            options[name] = np.atleast_1d(options[name])
        self.printer.max_print_depth = options["max_print_depth"]
        num = {}
        # number of inputs and outputs
        num["x"] = self.training_points[None][0][0].shape[1]
        num["y"] = self.training_points[None][0][1].shape[1]
        # number of elements
        num["elem_list"] = np.array(options["num_elements"], int)
        num["elem"] = np.prod(num["elem_list"])
        # number of terms/coefficients per element (4 per dimension, cubic Hermite)
        num["term_list"] = 4 * np.ones(num["x"], int)
        num["term"] = np.prod(num["term_list"])
        # number of nodes
        num["uniq_list"] = num["elem_list"] + 1
        num["uniq"] = np.prod(num["uniq_list"])
        # total number of training points (function values and derivatives)
        num["t"] = 0
        for kx in self.training_points[None]:
            num["t"] += self.training_points[None][kx][0].shape[0]
        # for RMT
        num["coeff"] = num["term"] * num["elem"]
        num["support"] = num["term"]
        num["dof"] = num["uniq"] * 2 ** num["x"]
        self.num = num
        self.rmtsc = PyRMTC()
        self.rmtsc.setup(
            num["x"],
            np.array(self.options["xlimits"][:, 0]),
            np.array(self.options["xlimits"][:, 1]),
            np.array(num["elem_list"], np.int32),
            np.array(num["term_list"], np.int32),
        )
    def _compute_jac_raw(self, ix1, ix2, x):
        """
        Compute the sparse Jacobian at points x in COO triplet form
        (data, rows, cols), delegating to the compiled backend.
        The ix1/ix2 indices are shifted by -1 before being passed down.
        """
        n = x.shape[0]
        nnz = n * self.num["term"]
        data = np.empty(nnz)
        rows = np.empty(nnz, np.int32)
        cols = np.empty(nnz, np.int32)
        self.rmtsc.compute_jac(ix1 - 1, ix2 - 1, n, x.flatten(), data, rows, cols)
        return data, rows, cols
    def _compute_dof2coeff(self):
        """
        Assemble the sparse matrix mapping the unique degrees of freedom
        (nodal function/derivative values) to the per-element coefficients.
        """
        num = self.num
        # This computes an num['term'] x num['term'] matrix called coeff2nodal.
        # Multiplying this matrix with the list of coefficients for an element
        # yields the list of function and derivative values at the element nodes.
        # We need the inverse, but the matrix size is small enough to invert since
        # RMTC is normally only used for 1 <= nx <= 4 in most cases.
        elem_coeff2nodal = np.zeros(num["term"] * num["term"])
        self.rmtsc.compute_coeff2nodal(elem_coeff2nodal)
        elem_coeff2nodal = elem_coeff2nodal.reshape((num["term"], num["term"]))
        elem_nodal2coeff = np.linalg.inv(elem_coeff2nodal)
        # This computes a num_coeff_elem x num_coeff_uniq permutation matrix called
        # uniq2elem. This sparse matrix maps the unique list of nodal function and
        # derivative values to the same function and derivative values, but ordered
        # by element, with repetition.
        nnz = num["elem"] * num["term"]
        data = np.empty(nnz)
        rows = np.empty(nnz, np.int32)
        cols = np.empty(nnz, np.int32)
        self.rmtsc.compute_uniq2elem(data, rows, cols)
        num_coeff_elem = num["term"] * num["elem"]
        num_coeff_uniq = num["uniq"] * 2 ** num["x"]
        full_uniq2elem = scipy.sparse.csc_matrix(
            (data, (rows, cols)), shape=(num_coeff_elem, num_coeff_uniq)
        )
        # This computes the matrix full_dof2coeff, which maps the unique
        # degrees of freedom to the list of coefficients ordered by element.
        nnz = num["term"] ** 2 * num["elem"]
        data = np.empty(nnz)
        rows = np.empty(nnz, np.int32)
        cols = np.empty(nnz, np.int32)
        self.rmtsc.compute_full_from_block(elem_nodal2coeff.flatten(), data, rows, cols)
        num_coeff = num["term"] * num["elem"]
        full_nodal2coeff = scipy.sparse.csc_matrix(
            (data, (rows, cols)), shape=(num_coeff, num_coeff)
        )
        full_dof2coeff = full_nodal2coeff * full_uniq2elem
        return full_dof2coeff
| 5,612 | 36.42 | 88 | py |
smt | smt-master/smt/surrogate_models/rmts.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import numpy as np
import scipy.sparse
from numbers import Integral
from smt.utils.linear_solvers import get_solver, LinearSolver, VALID_SOLVERS
from smt.utils.line_search import get_line_search_class, LineSearch, VALID_LINE_SEARCHES
from smt.utils.caching import cached_operation
from smt.surrogate_models.surrogate_model import SurrogateModel
class RMTS(SurrogateModel):
"""
Regularized Minimal-energy Tensor-product Spline interpolant base class for RMTC and RMTB.
"""
name = "RMTS"
    def _initialize(self):
        """Declare all RMTS options and the supported capabilities (derivatives etc.)."""
        super(RMTS, self)._initialize()
        declare = self.options.declare
        supports = self.supports
        declare(
            "xlimits",
            types=np.ndarray,
            desc="Lower/upper bounds in each dimension - ndarray [nx, 2]",
        )
        declare(
            "smoothness",
            1.0,
            types=(Integral, float, tuple, list, np.ndarray),
            desc="Smoothness parameter in each dimension - length nx. None implies uniform",
        )
        declare(
            "regularization_weight",
            1e-14,
            types=(Integral, float),
            desc="Weight of the term penalizing the norm of the spline coefficients."
            + " This is useful as an alternative to energy minimization "
            + " when energy minimization makes the training time too long.",
        )
        declare(
            "energy_weight",
            1e-4,
            types=(Integral, float),
            desc="The weight of the energy minimization terms",
        )
        declare(
            "extrapolate",
            False,
            types=bool,
            desc="Whether to perform linear extrapolation for external evaluation points",
        )
        declare(
            "min_energy",
            True,
            types=bool,
            desc="Whether to perform energy minimization",
        )
        declare(
            "approx_order", 4, types=Integral, desc="Exponent in the approximation term"
        )
        declare(
            "solver",
            "krylov",
            values=VALID_SOLVERS,
            types=LinearSolver,
            desc="Linear solver",
        )
        declare(
            "derivative_solver",
            "krylov",
            values=VALID_SOLVERS,
            types=LinearSolver,
            desc="Linear solver used for computing output derivatives (dy_dyt)",
        )
        declare(
            "grad_weight",
            0.5,
            types=(Integral, float),
            desc="Weight on gradient training data",
        )
        declare(
            "solver_tolerance",
            1e-12,
            types=(Integral, float),
            desc="Convergence tolerance for the nonlinear solver",
        )
        declare(
            "nonlinear_maxiter",
            10,
            types=Integral,
            desc="Maximum number of nonlinear solver iterations",
        )
        declare(
            "line_search",
            "backtracking",
            values=VALID_LINE_SEARCHES,
            types=LineSearch,
            desc="Line search algorithm",
        )
        declare(
            "save_energy_terms",
            False,
            types=bool,
            desc="Whether to cache energy terms in the data_dir directory",
        )
        declare(
            "data_dir",
            None,
            values=(None,),
            types=str,
            desc="Directory for loading / saving cached data; None means do not save or load",
        )
        declare(
            "max_print_depth",
            5,
            types=Integral,
            desc="Maximum depth (level of nesting) to print operation descriptions and times",
        )
        supports["training_derivatives"] = True
        supports["derivatives"] = True
        supports["output_derivatives"] = True
def _setup_hessian(self):
diag = np.ones(self.num["dof"])
arange = np.arange(self.num["dof"])
full_hess = scipy.sparse.csc_matrix((diag, (arange, arange)))
return full_hess
    def _compute_jac(self, ix1, ix2, x):
        """
        Assemble the sparse Jacobian (n x num['coeff']) at points x from the raw
        triplets, and map it to the degrees of freedom when a dof2coeff matrix exists.
        """
        data, rows, cols = self._compute_jac_raw(ix1, ix2, x)
        n = x.shape[0]
        full_jac = scipy.sparse.csc_matrix(
            (data, (rows, cols)), shape=(n, self.num["coeff"])
        )
        if self.full_dof2coeff is not None:
            full_jac = full_jac * self.full_dof2coeff
        return full_jac
def _compute_approx_terms(self):
# This computes the approximation terms for the training points.
# We loop over kx: 0 is for values and kx>0 represents.
# the 1-based index of the derivative given by the training point data.
num = self.num
xlimits = self.options["xlimits"]
full_jac_dict = {}
for kx in self.training_points[None]:
xt, yt = self.training_points[None][kx]
xmin = np.min(xt, axis=0)
xmax = np.max(xt, axis=0)
assert np.all(xlimits[:, 0] <= xmin), (
"Training points below min for %s" % kx
)
assert np.all(xlimits[:, 1] >= xmax), (
"Training points above max for %s" % kx
)
if kx == 0:
c = 1.0
else:
self.options["grad_weight"] / xlimits.shape[0]
full_jac = self._compute_jac(kx, 0, xt)
full_jac_dict[kx] = (full_jac, full_jac.T.tocsc(), c)
return full_jac_dict
    def _compute_energy_terms(self):
        """Assemble (and optionally cache) the energy-minimization Hessian contribution."""
        # This computes the energy terms that are to be minimized.
        # The quadrature points are the centroids of the multi-dimensional elements.
        num = self.num
        xlimits = self.options["xlimits"]
        # Cache key: everything that determines the quadrature/Jacobian structure.
        inputs = {}
        inputs["nx"] = xlimits.shape[0]
        inputs["elem_list"] = num["elem_list"]
        if self.__class__.__name__ == "RMTB":
            inputs["num_ctrl_list"] = num["ctrl_list"]
            inputs["order_list"] = num["order_list"]
        if self.options["save_energy_terms"]:
            cache_dir = self.options["data_dir"]
        else:
            cache_dir = None
        with cached_operation(inputs, cache_dir) as outputs:
            if outputs:
                sq_mtx = outputs["sq_mtx"]
            else:
                n = np.prod(2 * num["elem_list"])
                x = np.empty(n * num["x"])
                self.rmtsc.compute_quadrature_points(
                    n, np.array(2 * num["elem_list"], dtype=np.int32), x
                )
                x = x.reshape((n, num["x"]))
                # One squared second-derivative matrix per dimension,
                # scaled to a normalized [0, 1] domain.
                sq_mtx = [None] * num["x"]
                for kx in range(num["x"]):
                    mtx = self._compute_jac(kx + 1, kx + 1, x)
                    sq_mtx[kx] = (
                        mtx.T.tocsc() * mtx * (xlimits[kx, 1] - xlimits[kx, 0]) ** 4
                    )
                outputs["sq_mtx"] = sq_mtx
        elem_vol = np.prod((xlimits[:, 1] - xlimits[:, 0]) / (2 * num["elem_list"]))
        total_vol = np.prod(xlimits[:, 1] - xlimits[:, 0])
        full_hess = scipy.sparse.csc_matrix((num["dof"], num["dof"]))
        for kx in range(num["x"]):
            full_hess += sq_mtx[kx] * (
                elem_vol
                / total_vol
                * self.options["smoothness"][kx]
                / (xlimits[kx, 1] - xlimits[kx, 0]) ** 4
            )
        return full_hess
def _opt_func(self, sol, p, yt_dict):
full_hess = self.full_hess
full_jac_dict = self.full_jac_dict
func = 0.5 * np.dot(sol, full_hess * sol)
for kx in self.training_points[None]:
full_jac, full_jac_T, c = full_jac_dict[kx]
yt = yt_dict[kx]
func += 0.5 * c * np.sum((full_jac * sol - yt) ** p)
return func
    def _opt_grad(self, sol, p, yt_dict):
        """Gradient of _opt_func with respect to sol."""
        full_hess = self.full_hess
        full_jac_dict = self.full_jac_dict
        # Energy/regularization contribution.
        grad = full_hess * sol
        for kx in self.training_points[None]:
            full_jac, full_jac_T, c = full_jac_dict[kx]
            yt = yt_dict[kx]
            # d/dsol of 0.5 * c * sum((J sol - yt) ** p)
            grad += 0.5 * c * full_jac_T * p * (full_jac * sol - yt) ** (p - 1)
        return grad
    def _opt_dgrad_dyt(self, sol, p, yt_dict, kx):
        """
        Derivative of the gradient with respect to the training outputs yt
        for training-data key kx; returned as a dense matrix (negated).
        """
        full_hess = self.full_hess
        full_jac_dict = self.full_jac_dict
        full_jac, full_jac_T, c = full_jac_dict[kx]
        yt = yt_dict[kx]
        diag_vec = p * (p - 1) * (full_jac * sol - yt) ** (p - 2)
        diag_mtx = scipy.sparse.diags(diag_vec, format="csc")
        mtx = 0.5 * c * full_jac_T.dot(diag_mtx)
        return -mtx.todense()
    def _opt_hess(self, sol, p, yt_dict):
        """Hessian of _opt_func with respect to sol (sparse CSC)."""
        full_hess = self.full_hess
        full_jac_dict = self.full_jac_dict
        # Start from a copy of the energy/regularization Hessian.
        hess = scipy.sparse.csc_matrix(full_hess)
        for kx in self.training_points[None]:
            full_jac, full_jac_T, c = full_jac_dict[kx]
            yt = yt_dict[kx]
            diag_vec = p * (p - 1) * (full_jac * sol - yt) ** (p - 2)
            diag_mtx = scipy.sparse.diags(diag_vec, format="csc")
            hess += 0.5 * c * full_jac_T * diag_mtx * full_jac
        return hess
def _opt_norm(self, sol, p, yt_dict):
full_hess = self.full_hess
full_jac_dict = self.full_jac_dict
grad = self._opt_grad(sol, p, yt_dict)
return np.linalg.norm(grad)
def _get_yt_dict(self, ind_y):
yt_dict = {}
for kx in self.training_points[None]:
xt, yt = self.training_points[None][kx]
yt_dict[kx] = yt[:, ind_y]
return yt_dict
    def _run_newton_solver(self, sol):
        """
        Run the Newton iteration (with line search) for each output column,
        updating sol in place and storing the last assembled Hessian in self.mtx.
        """
        num = self.num
        options = self.options
        solver = get_solver(options["solver"])
        ls_class = get_line_search_class(options["line_search"])
        total_size = int(num["dof"])
        rhs = np.zeros((total_size, num["y"]))
        d_sol = np.zeros((total_size, num["y"]))
        p = self.options["approx_order"]
        for ind_y in range(rhs.shape[1]):
            with self.printer._timed_context("Solving for output %i" % ind_y):
                yt_dict = self._get_yt_dict(ind_y)
                norm = self._opt_norm(sol[:, ind_y], p, yt_dict)
                fval = self._opt_func(sol[:, ind_y], p, yt_dict)
                self.printer(
                    "Iteration (num., iy, grad. norm, func.) : %3i %3i %15.9e %15.9e"
                    % (0, ind_y, norm, fval)
                )
                iter_count = 0
                # Iterate until the gradient norm converges or the budget is spent.
                while (
                    iter_count < options["nonlinear_maxiter"]
                    and norm > options["solver_tolerance"]
                ):
                    with self.printer._timed_context():
                        with self.printer._timed_context("Assembling linear system"):
                            mtx = self._opt_hess(sol[:, ind_y], p, yt_dict)
                            rhs[:, ind_y] = -self._opt_grad(sol[:, ind_y], p, yt_dict)
                        with self.printer._timed_context("Initializing linear solver"):
                            solver._setup(mtx, self.printer)
                        with self.printer._timed_context("Solving linear system"):
                            solver._solve(rhs[:, ind_y], d_sol[:, ind_y], ind_y=ind_y)
                        func = lambda x: self._opt_func(x, p, yt_dict)
                        grad = lambda x: self._opt_grad(x, p, yt_dict)
                        # sol[:, ind_y] += d_sol[:, ind_y]
                        ls = ls_class(sol[:, ind_y], d_sol[:, ind_y], func, grad)
                        with self.printer._timed_context("Performing line search"):
                            sol[:, ind_y] = ls(1.0)
                    norm = self._opt_norm(sol[:, ind_y], p, yt_dict)
                    fval = self._opt_func(sol[:, ind_y], p, yt_dict)
                    self.printer(
                        "Iteration (num., iy, grad. norm, func.) : %3i %3i %15.9e %15.9e"
                        % (iter_count, ind_y, norm, fval)
                    )
                    # Keep the last Hessian for output-derivative computations.
                    self.mtx = mtx
                    iter_count += 1
    def _solve(self):
        """
        Solve for the degrees of freedom: first a single quadratic (order-2)
        startup iteration, then the full nonlinear problem at the user's order.
        """
        num = self.num
        options = self.options
        solver = get_solver(options["solver"])
        ls_class = get_line_search_class(options["line_search"])
        total_size = int(num["dof"])
        rhs = np.zeros((total_size, num["y"]))
        sol = np.zeros((total_size, num["y"]))
        d_sol = np.zeros((total_size, num["y"]))
        with self.printer._timed_context(
            "Solving initial startup problem (n=%i)" % total_size
        ):
            # Temporarily force a single quadratic iteration for a good start point.
            approx_order = options["approx_order"]
            nonlinear_maxiter = options["nonlinear_maxiter"]
            options["approx_order"] = 2
            options["nonlinear_maxiter"] = 1
            self._run_newton_solver(sol)
            options["approx_order"] = approx_order
            options["nonlinear_maxiter"] = nonlinear_maxiter
        with self.printer._timed_context(
            "Solving nonlinear problem (n=%i)" % total_size
        ):
            self._run_newton_solver(sol)
        return sol
    def _new_train(self):
        """
        Train the model: assemble the dof2coeff, Hessian, and approximation
        matrices, then solve for the degrees of freedom and coefficients.
        """
        with self.printer._timed_context("Pre-computing matrices", "assembly"):
            with self.printer._timed_context("Computing dof2coeff", "dof2coeff"):
                self.full_dof2coeff = self._compute_dof2coeff()
            with self.printer._timed_context("Initializing Hessian", "init_hess"):
                self.full_hess = (
                    self._setup_hessian() * self.options["regularization_weight"]
                )
            if self.options["min_energy"]:
                with self.printer._timed_context("Computing energy terms", "energy"):
                    self.full_hess += (
                        self._compute_energy_terms() * self.options["energy_weight"]
                    )
            with self.printer._timed_context("Computing approximation terms", "approx"):
                self.full_jac_dict = self._compute_approx_terms()
        with self.printer._timed_context(
            "Solving for degrees of freedom", "total_solution"
        ):
            self.sol = self._solve()
        if self.full_dof2coeff is not None:
            self.sol_coeff = self.full_dof2coeff * self.sol
        else:
            self.sol_coeff = self.sol
    def _train(self):
        """
        Train the model, loading/saving the result via the data_dir cache
        when configured. The C++ backend handle (rmtsc) is temporarily
        detached because it cannot be pickled for the cache key.
        """
        self._setup()
        tmp = self.rmtsc
        self.rmtsc = None
        inputs = {"self": self}
        with cached_operation(inputs, self.options["data_dir"]) as outputs:
            self.rmtsc = tmp
            if outputs:
                self.sol_coeff = outputs["sol_coeff"]
                self.sol = outputs["sol"]
                self.mtx = outputs["mtx"]
                self.full_dof2coeff = outputs["full_dof2coeff"]
                self.full_hess = outputs["full_hess"]
                self.full_jac_dict = outputs["full_jac_dict"]
            else:
                self._new_train()
                outputs["sol_coeff"] = self.sol_coeff
                outputs["sol"] = self.sol
                outputs["mtx"] = self.mtx
                outputs["full_dof2coeff"] = self.full_dof2coeff
                outputs["full_hess"] = self.full_hess
                outputs["full_jac_dict"] = self.full_jac_dict
def _predict_values(self, x):
"""
Evaluates the model at a set of points.
Arguments
---------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
Returns
-------
y : np.ndarray
Evaluation point output variable values
"""
mtx = self._compute_prediction_mtx(x, 0)
y = mtx.dot(self.sol_coeff)
return y
def _predict_derivatives(self, x, kx):
"""
Evaluates the derivatives at a set of points.
Arguments
---------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
kx : int
The 0-based index of the input variable with respect to which derivatives are desired.
Returns
-------
y : np.ndarray
Derivative values.
"""
mtx = self._compute_prediction_mtx(x, kx + 1)
y = mtx.dot(self.sol_coeff)
return y
    def _compute_prediction_mtx(self, x, kx):
        """
        Build the sparse matrix mapping coefficients to predicted values
        (kx=0) or derivatives (kx>0) at points x, with optional C0 linear
        extrapolation outside the domain.
        """
        n = x.shape[0]
        num = self.num
        options = self.options
        data, rows, cols = self._compute_jac_raw(kx, 0, x)
        # In the explanation below, n is the number of dimensions, and
        # a_k and b_k are the lower and upper bounds for x_k.
        #
        # A C1 extrapolation can get very tricky, so we implement a simple C0
        # extrapolation. We basically linarly extrapolate from the nearest
        # domain point. For example, if n = 4 and x2 > b2 and x3 > b3:
        #    f(x1,x2,x3,x4) = f(x1,b2,b3,x4) + dfdx2 (x2-b2) + dfdx3 (x3-b3)
        # where the derivatives are evaluated at x1,b2,b3,x4 (called b) and
        #    dfdx1|x = dfdx1|b + d2fdx1dx2|b (x2-b2) + d2fdx1dx3|b (x3-b3)
        #    dfdx2|x = dfdx2|b.
        # The dfdx2|x derivative is what it is because f and all derivatives
        # evaluated at x1,b2,b3,x4 are constant with respect to changes in x2.
        # On the other hand, the dfdx1|x derivative is what it is because
        # f and all derivatives evaluated at x1,b2,b3,x4 change with x1.
        # The extrapolation function is non-differentiable at boundaries:
        # i.e., where x_k = a_k or x_k = b_k for at least one k.
        if options["extrapolate"]:
            # First we evaluate the vector pointing to each evaluation points
            # from the nearest point on the domain, in a matrix called dx.
            # If the ith evaluation point is not external, dx[i, :] = 0.
            dx = np.empty(n * num["support"] * num["x"])
            self.rmtsc.compute_ext_dist(n, num["support"], x.flatten(), dx)
            dx = dx.reshape((n * num["support"], num["x"]))
            isexternal = np.array(np.array(dx, bool), float)
            for ix in range(num["x"]):
                # Now we compute the first order term where we have a
                # derivative times (x_k - b_k) or (x_k - a_k).
                data_tmp, rows, cols = self._compute_jac_raw(kx, ix + 1, x)
                data_tmp *= dx[:, ix]
                # If we are evaluating a derivative (with index kx),
                # we zero the first order terms for which dx_k = 0.
                if kx != 0:
                    data_tmp *= 1 - isexternal[:, kx - 1]
                data += data_tmp
        mtx = scipy.sparse.csc_matrix((data, (rows, cols)), shape=(n, num["coeff"]))
        return mtx
    def _predict_output_derivatives(self, x):
        """
        Compute derivatives of predicted outputs with respect to the training
        outputs, via the adjoint relation dy_dyt = dy_dw * (dR_dw)^{-1} * dR_dyt.
        """
        # dy_dyt = dy_dw * (dR_dw)^{-1} * dR_dyt
        n = x.shape[0]
        nw = self.mtx.shape[0]
        nx = x.shape[1]
        ny = self.sol.shape[1]
        p = self.options["approx_order"]
        dy_dw = self._compute_prediction_mtx(x, 0)
        if self.full_dof2coeff is not None:
            dy_dw = dy_dw * self.full_dof2coeff
        dy_dw = dy_dw.todense()
        dR_dw = self.mtx
        dy_dyt = {}
        for kx in self.training_points[None]:
            nt = self.training_points[None][kx][0].shape[0]
            dR_dyt = np.zeros((nw, nt, ny))
            for ind_y in range(ny):
                yt_dict = self._get_yt_dict(ind_y)
                dR_dyt[:, :, ind_y] = self._opt_dgrad_dyt(
                    self.sol[:, ind_y], p, yt_dict, kx
                )
            solver = get_solver(self.options["derivative_solver"])
            solver._setup(dR_dw, self.printer)
            dw_dyt = np.zeros((nw, nt, ny))
            # One linear solve per (training point, output) pair.
            for ind_t in range(nt):
                for ind_y in range(ny):
                    solver._solve(
                        dR_dyt[:, ind_t, ind_y], dw_dyt[:, ind_t, ind_y], ind_y=ind_y
                    )
                    dw_dyt[:, ind_t, ind_y] *= -1.0
            if kx == 0:
                dy_dyt[None] = np.einsum("ij,jkl->ikl", dy_dw, dw_dyt)
            else:
                dy_dyt[kx - 1] = np.einsum("ij,jkl->ikl", dy_dw, dw_dyt)
        return dy_dyt
| 20,336 | 34.003442 | 98 | py |
smt | smt-master/smt/surrogate_models/gekpls.py | """
Author: Dr. Mohamed A. Bouhlel <mbouhlel@umich.edu>
This package is distributed under New BSD license.
"""
import numpy as np
from smt.surrogate_models import KPLS
from smt.utils.kriging import ge_compute_pls
class GEKPLS(KPLS):
    """Gradient-Enhanced KPLS: KPLS that also exploits training derivatives via PLS."""
    name = "GEKPLS"
    def _initialize(self):
        """Declare GEKPLS options on top of the KPLS options."""
        super(GEKPLS, self)._initialize()
        declare = self.options.declare
        # Re-declare n_comp as default value should be at least 2
        declare("n_comp", 2, types=int, desc="Number of principal components")
        # Like KPLS, GEKPLS used only with "abs_exp" and "squar_exp" correlations
        declare(
            "corr",
            "squar_exp",
            values=("abs_exp", "squar_exp"),
            desc="Correlation function type",
            types=(str),
        )
        declare("delta_x", 1e-4, types=(int, float), desc="Step used in the FOTA")
        declare(
            "extra_points",
            0,
            types=int,
            desc="Number of extra points per training point",
        )
        self.supports["training_derivatives"] = True
    def _check_param(self):
        """Validate options; GEKPLS requires at least 2 PLS components."""
        super()._check_param()
        if self.options["n_comp"] < 2:
            raise ValueError(
                f"GEKPLS needs at least 2 components, got {self.options['n_comp']}"
            )
    def _compute_pls(self, X, y):
        """
        Compute the gradient-enhanced PLS coefficients and append the extra
        (derivative-based) points to the training set when requested.
        """
        if 0 in self.training_points[None]:
            self.coeff_pls, XX, yy = ge_compute_pls(
                X.copy(),
                y.copy(),
                self.options["n_comp"],
                self.training_points,
                self.options["delta_x"],
                self.design_space.get_num_bounds(),
                self.options["extra_points"],
            )
            if self.options["extra_points"] != 0:
                self.nt *= self.options["extra_points"] + 1
                X = np.vstack((X, XX))
                y = np.vstack((y, yy))
        return X, y
| 1,924 | 30.048387 | 83 | py |
smt | smt-master/smt/surrogate_models/idw.py | """
Author: Dr. Mohamed A. Bouhlel <mbouhlel@umich.edu>
Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import numpy as np
from scipy.sparse import csc_matrix
from smt.surrogate_models.surrogate_model import SurrogateModel
from smt.utils.caching import cached_operation
from smt.surrogate_models.idwclib import PyIDW
class IDW(SurrogateModel):
    """
    Inverse distance weighting interpolant
    This model uses the inverse distance between the unknown and training
    points to predict the unknown point.
    We do not need to fit this model because the response of an unknown point x
    is computed with respect to the distance between x and the training points.
    """
    name = "IDW"
    def _initialize(self):
        """Declare IDW options and supported capabilities."""
        super(IDW, self)._initialize()
        declare = self.options.declare
        supports = self.supports
        declare("p", 2.5, types=(int, float), desc="order of distance norm")
        declare(
            "data_dir",
            values=None,
            types=str,
            desc="Directory for loading / saving cached data; None means do not save or load",
        )
        supports["derivatives"] = True
        supports["output_derivatives"] = True
    def _setup(self):
        """Initialize the compiled backend (PyIDW) with the training inputs."""
        xt = self.training_points[None][0][0]
        nt = xt.shape[0]
        nx = xt.shape[1]
        self.idwc = PyIDW()
        self.idwc.setup(nx, nt, self.options["p"], xt.flatten())
    ############################################################################
    # Model functions
    ############################################################################
    def _new_train(self):
        """
        Train the model (no-op: IDW needs no fitting; predictions use
        distances to the training points directly).
        """
        pass
    def _train(self):
        """
        Train the model
        """
        self._setup()
        # The backend handle is detached while hashing for the cache.
        tmp = self.idwc
        self.idwc = None
        inputs = {"self": self}
        with cached_operation(inputs, self.options["data_dir"]) as outputs:
            self.idwc = tmp
            if outputs:
                self.sol = outputs["sol"]
            else:
                self._new_train()
                # outputs['sol'] = self.sol
    def _predict_values(self, x):
        """
        This function is used by _predict function. See _predict for more details.
        """
        n = x.shape[0]
        nt = self.nt
        yt = self.training_points[None][0][1]
        # jac holds the inverse-distance weights of each training point.
        jac = np.empty(n * nt)
        self.idwc.compute_jac(n, x.flatten(), jac)
        jac = jac.reshape((n, nt))
        y = jac.dot(yt)
        return y
    def _predict_derivatives(self, x, kx):
        """
        Evaluates the derivatives at a set of points.
        Arguments
        ---------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values
        kx : int
            The 0-based index of the input variable with respect to which derivatives are desired.
        Returns
        -------
        dy_dx : np.ndarray
            Derivative values.
        """
        n = x.shape[0]
        nt = self.nt
        yt = self.training_points[None][0][1]
        jac = np.empty(n * nt)
        self.idwc.compute_jac_derivs(n, kx, x.flatten(), jac)
        jac = jac.reshape((n, nt))
        dy_dx = jac.dot(yt)
        return dy_dx
    def _predict_output_derivatives(self, x):
        """Derivatives of predictions w.r.t. training outputs (the weight matrix itself)."""
        n = x.shape[0]
        nt = self.nt
        ny = self.training_points[None][0][1].shape[1]
        jac = np.empty(n * nt)
        self.idwc.compute_jac(n, x.flatten(), jac)
        jac = jac.reshape((n, nt))
        jac = np.einsum("ij,k->ijk", jac, np.ones(ny))
        dy_dyt = {None: jac}
        return dy_dyt
| 3,670 | 25.992647 | 98 | py |
smt | smt-master/smt/surrogate_models/rbf.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import numpy as np
from scipy.sparse import csc_matrix
from smt.surrogate_models.surrogate_model import SurrogateModel
from smt.utils.linear_solvers import get_solver
from smt.utils.caching import cached_operation
from smt.surrogate_models.rbfclib import PyRBF
class RBF(SurrogateModel):
    """
    Radial basis function interpolant with global polynomial trend.
    """
    name = "RBF"
    def _initialize(self):
        """Declare RBF options and supported capabilities."""
        super(RBF, self)._initialize()
        declare = self.options.declare
        supports = self.supports
        declare(
            "d0",
            1.0,
            types=(int, float, list, np.ndarray),
            desc="basis function scaling parameter in exp(-d^2 / d0^2)",
        )
        declare(
            "poly_degree",
            -1,
            types=int,
            values=(-1, 0, 1),
            desc="-1 means no global polynomial, 0 means constant, 1 means linear trend",
        )
        declare(
            "data_dir",
            values=None,
            types=str,
            desc="Directory for loading / saving cached data; None means do not save or load",
        )
        declare("reg", 1e-10, types=(int, float), desc="Regularization coeff.")
        declare(
            "max_print_depth",
            5,
            types=int,
            desc="Maximum depth (level of nesting) to print operation descriptions and times",
        )
        supports["derivatives"] = True
        supports["output_derivatives"] = True
    def _setup(self):
        """Compute problem sizes (self.num) and initialize the compiled backend (PyRBF)."""
        options = self.options
        nx = self.training_points[None][0][0].shape[1]
        # Broadcast a scalar d0 to one value per dimension.
        if isinstance(options["d0"], (int, float)):
            options["d0"] = [options["d0"]] * nx
        options["d0"] = np.array(np.atleast_1d(options["d0"]), dtype=float)
        self.printer.max_print_depth = options["max_print_depth"]
        num = {}
        # number of inputs and outputs
        num["x"] = self.training_points[None][0][0].shape[1]
        num["y"] = self.training_points[None][0][1].shape[1]
        # number of radial function terms
        num["radial"] = self.training_points[None][0][0].shape[0]
        # number of polynomial terms
        if options["poly_degree"] == -1:
            num["poly"] = 0
        elif options["poly_degree"] == 0:
            num["poly"] = 1
        elif options["poly_degree"] == 1:
            num["poly"] = 1 + num["x"]
        num["dof"] = num["radial"] + num["poly"]
        self.num = num
        nt = self.training_points[None][0][0].shape[0]
        xt, yt = self.training_points[None][0]
        self.rbfc = PyRBF()
        self.rbfc.setup(
            num["x"],
            nt,
            num["dof"],
            options["poly_degree"],
            options["d0"],
            xt.flatten(),
        )
    def _new_train(self):
        """Assemble and solve the (symmetric) RBF interpolation system for self.sol."""
        num = self.num
        xt, yt = self.training_points[None][0]
        jac = np.empty(num["radial"] * num["dof"])
        self.rbfc.compute_jac(num["radial"], xt.flatten(), jac)
        jac = jac.reshape((num["radial"], num["dof"]))
        mtx = np.zeros((num["dof"], num["dof"]))
        mtx[: num["radial"], :] = jac
        mtx[:, : num["radial"]] = jac.T
        # Tikhonov regularization on the radial block's diagonal.
        mtx[np.arange(num["radial"]), np.arange(num["radial"])] += self.options["reg"]
        self.mtx = mtx
        rhs = np.zeros((num["dof"], num["y"]))
        rhs[: num["radial"], :] = yt
        sol = np.zeros((num["dof"], num["y"]))
        solver = get_solver("dense-lu")
        with self.printer._timed_context("Initializing linear solver"):
            solver._setup(mtx, self.printer)
        for ind_y in range(rhs.shape[1]):
            with self.printer._timed_context("Solving linear system (col. %i)" % ind_y):
                solver._solve(rhs[:, ind_y], sol[:, ind_y], ind_y=ind_y)
        self.sol = sol
    def _train(self):
        """
        Train the model
        """
        self._setup()
        # The backend handle is detached while hashing for the cache.
        tmp = self.rbfc
        self.rbfc = None
        inputs = {"self": self}
        with cached_operation(inputs, self.options["data_dir"]) as outputs:
            self.rbfc = tmp
            if outputs:
                self.sol = outputs["sol"]
            else:
                self._new_train()
                outputs["sol"] = self.sol
    def _predict_values(self, x):
        """
        Evaluates the model at a set of points.
        Arguments
        ---------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values
        Returns
        -------
        y : np.ndarray
            Evaluation point output variable values
        """
        n = x.shape[0]
        num = self.num
        jac = np.empty(n * num["dof"])
        self.rbfc.compute_jac(n, x.flatten(), jac)
        jac = jac.reshape((n, num["dof"]))
        y = jac.dot(self.sol)
        return y
    def _predict_derivatives(self, x, kx):
        """
        Evaluates the derivatives at a set of points.
        Arguments
        ---------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values
        kx : int
            The 0-based index of the input variable with respect to which derivatives are desired.
        Returns
        -------
        dy_dx : np.ndarray
            Derivative values.
        """
        n = x.shape[0]
        num = self.num
        jac = np.empty(n * num["dof"])
        self.rbfc.compute_jac_derivs(n, kx, x.flatten(), jac)
        jac = jac.reshape((n, num["dof"]))
        dy_dx = jac.dot(self.sol)
        return dy_dx
    def _predict_output_derivatives(self, x):
        """Derivatives of predictions w.r.t. training outputs via the solved linear system."""
        n = x.shape[0]
        nt = self.nt
        ny = self.training_points[None][0][1].shape[1]
        num = self.num
        dy_dstates = np.empty(n * num["dof"])
        self.rbfc.compute_jac(n, x.flatten(), dy_dstates)
        dy_dstates = dy_dstates.reshape((n, num["dof"]))
        dstates_dytl = np.linalg.inv(self.mtx)
        ones = np.ones(self.nt)
        arange = np.arange(self.nt)
        # Selector mapping training outputs into the rhs of the linear system.
        dytl_dyt = csc_matrix((ones, (arange, arange)), shape=(num["dof"], self.nt))
        dy_dyt = (dytl_dyt.T.dot(dstates_dytl.T).dot(dy_dstates.T)).T
        dy_dyt = np.einsum("ij,k->ijk", dy_dyt, np.ones(ny))
        return {None: dy_dyt}
| 6,314 | 28.101382 | 98 | py |
smt | smt-master/smt/surrogate_models/mgp.py | """
Author: Remy Priem (remy.priem@onera.fr)
This package is distributed under New BSD license.
"""
from __future__ import division
import numpy as np
from scipy import linalg
from smt.utils.kriging import differences, componentwise_distance
from smt.surrogate_models.krg_based import KrgBased
from smt.utils.checks import check_support, check_nx, ensure_2d_array
"""
The Active kriging class.
"""
class MGP(KrgBased):
name = "MGP"
    def _initialize(self):
        """
        Initialize MGP: declare its options and force the TNC optimizer
        and the active-subspace ('act_exp') correlation kernel.
        """
        super(MGP, self)._initialize()
        declare = self.options.declare
        declare("n_comp", 1, types=int, desc="Number of active dimensions")
        declare(
            "prior",
            {"mean": [0.0], "var": 5.0 / 4.0},
            types=dict,
            desc="Parameters for Gaussian prior of the Hyperparameters",
        )
        # MGP only works with these settings; they override any user choice.
        self.options["hyper_opt"] = "TNC"
        self.options["corr"] = "act_exp"
def _componentwise_distance(self, dx, small=False, opt=0):
"""
Compute the componentwise distance with respect to the correlation kernel
Parameters
----------
dx : numpy.ndarray
Distance matrix.
small : bool, optional
Compute the componentwise distance in small (n_components) dimension
or in initial dimension. The default is False.
opt : int, optional
useless for MGP
Returns
-------
d : numpy.ndarray
Component wise distance.
"""
if small:
d = componentwise_distance(dx, self.options["corr"], self.options["n_comp"])
else:
d = componentwise_distance(dx, self.options["corr"], self.nx)
return d
    def _predict_values(self, x, is_acting=None):
        """
        Predict the value of the MGP for a given point
        Parameters
        ----------
        x : numpy.ndarray
            Point to compute, either in the full input space (nx columns) or
            in the reduced space (n_comp columns).
        Raises
        ------
        ValueError
            The number of columns matches neither nx nor n_comp.
        Returns
        -------
        y : numpy.ndarray
            Value of the MGP at the given point x.
        """
        n_eval, n_features = x.shape
        if n_features < self.nx:
            if n_features != self.options["n_comp"]:
                raise ValueError(
                    "dim(u) should be equal to %i" % self.options["n_comp"]
                )
            # In the reduced space the correlation uses an identity "theta".
            theta = np.eye(self.options["n_comp"]).reshape(
                (self.options["n_comp"] ** 2,)
            )
            # Get pairwise componentwise L1-distances to the input training set
            u = x
            x = self.get_x_from_u(u)
            u = u * self.embedding["norm"] - self.U_mean
            du = differences(u, Y=self.U_norma.copy())
            d = self._componentwise_distance(du, small=True)
            # Get an approximation of x
            x = (x - self.X_offset) / self.X_scale
            dx = differences(x, Y=self.X_norma.copy())
            d_x = self._componentwise_distance(dx)
        else:
            if n_features != self.nx:
                raise ValueError("dim(x) should be equal to %i" % self.X_scale.shape[0])
            theta = self.optimal_theta
            # Get pairwise componentwise L1-distances to the input training set
            x = (x - self.X_offset) / self.X_scale
            dx = differences(x, Y=self.X_norma.copy())
            d = self._componentwise_distance(dx)
            d_x = None
        # Compute the correlation function
        r = self._correlation_types[self.options["corr"]](theta, d, d_x=d_x).reshape(
            n_eval, self.nt
        )
        f = self._regression_types[self.options["poly"]](x)
        # Scaled predictor
        y_ = np.dot(f, self.optimal_par["beta"]) + np.dot(r, self.optimal_par["gamma"])
        # Predictor
        y = (self.y_mean + self.y_std * y_).ravel()
        return y
    def _predict_mgp_variances_base(self, x):
        """
        Base computation of MGP MSE used by predict_variances and
        predict_variances_no_uq; returns (MSE, dy, dMSE) where dy and dMSE
        are derivatives with respect to the hyperparameters.
        """
        _, n_features = x.shape
        if n_features < self.nx:
            if n_features != self.options["n_comp"]:
                raise ValueError(
                    "dim(u) should be equal to %i" % self.options["n_comp"]
                )
            u = x
            x = self.get_x_from_u(u)
            u = u * self.embedding["norm"] - self.U_mean
            x = (x - self.X_offset) / self.X_scale
        else:
            if n_features != self.nx:
                raise ValueError("dim(x) should be equal to %i" % self.X_scale.shape[0])
            u = None
            x = (x - self.X_offset) / self.X_scale
        dy = self._predict_value_derivatives_hyper(x, u)
        dMSE, MSE = self._predict_variance_derivatives_hyper(x, u)
        return MSE, dy, dMSE
    def _predict_variances(self, x: np.ndarray, is_acting=None) -> np.ndarray:
        """
        Predict the variance of a specific point, accounting for the
        uncertainty of the hyperparameters.
        Parameters
        ----------
        x : numpy.ndarray
            Point to compute.
        Raises
        ------
        ValueError
            The number of columns matches neither nx nor n_comp.
        Returns
        -------
        numpy.ndarray
            MSE.
        """
        MSE, dy, dMSE = self._predict_mgp_variances_base(x)
        # First-order propagation of the hyperparameter covariance.
        arg_1 = np.einsum("ij,ij->i", dy.T, linalg.solve(self.inv_sigma_R, dy).T)
        arg_2 = np.einsum("ij,ij->i", dMSE.T, linalg.solve(self.inv_sigma_R, dMSE).T)
        MGPMSE = np.zeros(x.shape[0])
        MGPMSE[MSE != 0] = (
            (4.0 / 3.0) * MSE[MSE != 0]
            + arg_1[MSE != 0]
            + (1.0 / (3.0 * MSE[MSE != 0])) * arg_2[MSE != 0]
        )
        # Clip tiny negative values arising from numerical error.
        MGPMSE[MGPMSE < 0.0] = 0.0
        return MGPMSE
def predict_variances_no_uq(self, x):
"""Like predict_variances but without taking account hyperparameters uncertainty"""
check_support(self, "variances")
x = ensure_2d_array(x, "x")
self._check_xdim(x)
n = x.shape[0]
x2 = np.copy(x)
s2, _, _ = self._predict_mgp_variances_base(x)
s2[s2 < 0.0] = 0.0
return s2.reshape((n, self.ny))
def _check_xdim(self, x):
_, n_features = x.shape
nx = self.nx
if n_features < self.nx:
nx = self.options["n_comp"]
check_nx(nx, x)
def _reduced_log_prior(self, theta, grad=False, hessian=False):
"""
Compute the reduced log prior at given hyperparameters
Parameters
----------
theta : numpy.ndarray
Hyperparameters.
grad : bool, optional
True to compuyte gradient. The default is False.
hessian : bool, optional
True to compute hessian. The default is False.
Returns
-------
res : numpy.ndarray
Value, gradient, hessian of the reduced log prior.
"""
nb_theta = len(theta)
if theta.ndim < 2:
theta = np.atleast_2d(theta).T
mean = np.ones((nb_theta, 1)) * self.options["prior"]["mean"]
sig_inv = np.eye(nb_theta) / self.options["prior"]["var"]
if grad:
sig_inv_m = np.atleast_2d(np.sum(sig_inv, axis=0)).T
res = -2.0 * (theta - mean) * sig_inv_m
elif hessian:
res = -2.0 * np.atleast_2d(np.sum(sig_inv, axis=0)).T
else:
res = -np.dot((theta - mean).T, sig_inv.dot(theta - mean))
return res
    def _predict_value_derivatives_hyper(self, x, u=None):
        """
        Compute the derivatives of the GP mean prediction with respect to the
        hyperparameters.

        Parameters
        ----------
        x : numpy.ndarray
            Points to compute, in the original input space.
        u : numpy.ndarray, optional
            Same points in the reduced (embedded) space; when given, the
            correlation is evaluated on the embedded distances with a
            flattened identity as the hyperparameter vector.
            The default is None.

        Returns
        -------
        dy : numpy.ndarray
            Shape (n_theta, n_eval): derivative of the predicted mean at each
            point with respect to each hyperparameter.
        """
        # Initialization
        n_eval, _ = x.shape
        # Get pairwise componentwise L1-distances to the input training set
        dx = differences(x, Y=self.X_norma.copy())
        d_x = self._componentwise_distance(dx)
        if u is not None:
            # Embedded-space evaluation: the (n_comp x n_comp) identity,
            # flattened, plays the role of the hyperparameter vector.
            theta = np.eye(self.options["n_comp"]).reshape(
                (self.options["n_comp"] ** 2,)
            )
            # Get pairwise componentwise L1-distances to the input training set
            du = differences(u, Y=self.U_norma.copy())
            d = self._componentwise_distance(du, small=True)
        else:
            theta = self.optimal_theta
            # Reuse the full-space distances computed above.
            d = d_x
            d_x = None
        # Compute the correlation function
        r = self._correlation_types[self.options["corr"]](theta, d, d_x=d_x).reshape(
            n_eval, self.nt
        )
        # Compute the regression function
        f = self._regression_types[self.options["poly"]](x)
        dy = np.zeros((len(self.optimal_theta), n_eval))
        gamma = self.optimal_par["gamma"]
        Rinv_dR_gamma = self.optimal_par["Rinv_dR_gamma"]
        Rinv_dmu = self.optimal_par["Rinv_dmu"]
        for omega in range(len(self.optimal_theta)):
            # Derivative of the correlation vector w.r.t. hyperparameter omega.
            drdomega = self._correlation_types[self.options["corr"]](
                theta, d, grad_ind=omega, d_x=d_x
            ).reshape(n_eval, self.nt)
            dbetadomega = self.optimal_par["dbeta_all"][omega]
            # Combine the regression, correlation, and precomputed
            # (Rinv_dR_gamma, Rinv_dmu) contributions for this hyperparameter.
            dy_omega = (
                f.dot(dbetadomega)
                + drdomega.dot(gamma)
                - r.dot(Rinv_dR_gamma[omega] + Rinv_dmu[omega])
            )
            dy[omega, :] = dy_omega[:, 0]
        return dy
    def _predict_variance_derivatives_hyper(self, x, u=None):
        """
        Compute the derivatives of the variance of the GP with respect to the
        hyperparameters, together with the variance itself.

        Parameters
        ----------
        x : numpy.ndarray
            Points to compute, in the original input space.
        u : numpy.ndarray, optional
            Same points in the reduced (embedded) space. The default is None.

        Returns
        -------
        dMSE : numpy.ndarray
            Shape (n_theta, n_eval): derivatives of the GP variance with
            respect to each hyperparameter.
        MSE : numpy.ndarray
            Variance of the GP at each point.
        """
        # Initialization
        n_eval, n_features_x = x.shape
        # Get pairwise componentwise L1-distances to the input training set
        dx = differences(x, Y=self.X_norma.copy())
        d_x = self._componentwise_distance(dx)
        if u is not None:
            # Embedded-space evaluation: flattened identity as hyperparameters.
            theta = np.eye(self.options["n_comp"]).reshape(
                (self.options["n_comp"] ** 2,)
            )
            # Get pairwise componentwise L1-distances to the input training set
            du = differences(u, Y=self.U_norma.copy())
            d = self._componentwise_distance(du, small=True)
        else:
            theta = self.optimal_theta
            # Reuse the full-space distances computed above.
            d = d_x
            d_x = None
        # Compute the correlation function (shape (nt, n_eval) after transpose)
        r = (
            self._correlation_types[self.options["corr"]](theta, d, d_x=d_x)
            .reshape(n_eval, self.nt)
            .T
        )
        f = self._regression_types[self.options["poly"]](x).T
        C = self.optimal_par["C"]
        G = self.optimal_par["G"]
        Ft = self.optimal_par["Ft"]
        sigma2 = self.optimal_par["sigma2"]
        # rt solves C rt = r with the lower-triangular factor C.
        rt = linalg.solve_triangular(C, r, lower=True)
        F_Rinv_r = np.dot(Ft.T, rt)
        # NOTE(review): no `lower` flag here, so SciPy's default (upper) is
        # used for G.T — confirm this is intended.
        u_ = linalg.solve_triangular(G.T, f - F_Rinv_r)
        MSE = self.optimal_par["sigma2"] * (
            1.0 - (rt**2.0).sum(axis=0) + (u_**2.0).sum(axis=0)
        )
        # Mean Squared Error might be slightly negative depending on
        # machine precision: force to zero!
        MSE[MSE < 0.0] = 0.0
        Ginv_u = linalg.solve_triangular(G, u_, lower=False)
        Rinv_F = linalg.solve_triangular(C.T, Ft, lower=False)
        Rinv_r = linalg.solve_triangular(C.T, rt, lower=False)
        Rinv_F_Ginv_u = Rinv_F.dot(Ginv_u)
        dMSE = np.zeros((len(self.optimal_theta), n_eval))
        dr_all = self.optimal_par["dr"]
        dsigma = self.optimal_par["dsigma"]
        for omega in range(len(self.optimal_theta)):
            # Derivative of the correlation vector w.r.t. hyperparameter omega.
            drdomega = (
                self._correlation_types[self.options["corr"]](
                    theta, d, grad_ind=omega, d_x=d_x
                )
                .reshape(n_eval, self.nt)
                .T
            )
            # Rebuild the symmetric dR/domega matrix from its packed form.
            dRdomega = np.zeros((self.nt, self.nt))
            dRdomega[self.ij[:, 0], self.ij[:, 1]] = dr_all[omega][:, 0]
            dRdomega[self.ij[:, 1], self.ij[:, 0]] = dr_all[omega][:, 0]
            # Compute du2dtheta
            dRdomega_Rinv_F_Ginv_u = dRdomega.dot(Rinv_F_Ginv_u)
            r_Rinv_dRdomega_Rinv_F_Ginv_u = np.einsum(
                "ij,ij->i", Rinv_r.T, dRdomega_Rinv_F_Ginv_u.T
            )
            drdomega_Rinv_F_Ginv_u = np.einsum("ij,ij->i", drdomega.T, Rinv_F_Ginv_u.T)
            u_Ginv_F_Rinv_dRdomega_Rinv_F_Ginv_u = np.einsum(
                "ij,ij->i", Rinv_F_Ginv_u.T, dRdomega_Rinv_F_Ginv_u.T
            )
            du2domega = (
                2.0 * r_Rinv_dRdomega_Rinv_F_Ginv_u
                - 2.0 * drdomega_Rinv_F_Ginv_u
                + u_Ginv_F_Rinv_dRdomega_Rinv_F_Ginv_u
            )
            du2domega = np.atleast_2d(du2domega)
            # Compute drt2dtheta
            drdomega_Rinv_r = np.einsum("ij,ij->i", drdomega.T, Rinv_r.T)
            r_Rinv_dRdomega_Rinv_r = np.einsum(
                "ij,ij->i", Rinv_r.T, dRdomega.dot(Rinv_r).T
            )
            drt2domega = 2.0 * drdomega_Rinv_r - r_Rinv_dRdomega_Rinv_r
            drt2domega = np.atleast_2d(drt2domega)
            # Assemble dMSE/domega from the sigma2 term and the two
            # correlation terms computed above.
            dMSE[omega] = dsigma[omega] * MSE / sigma2 + sigma2 * (
                -drt2domega + du2domega
            )
        return dMSE, MSE
def get_x_from_u(self, u):
"""
Compute the point in initial dimension from a point in low dimension
Parameters
----------
u : numpy.ndarray
Point in low dimension.
Returns
-------
res : numpy.ndarray
point in initial dimension.
"""
u = np.atleast_2d(u)
self.embedding["Q_C"], self.embedding["R_C"]
x_temp = np.dot(
self.embedding["Q_C"],
linalg.solve_triangular(self.embedding["R_C"].T, u.T, lower=True),
).T
res = np.atleast_2d(x_temp)
return res
def get_u_from_x(self, x):
"""
Compute the point in low dimension from a point in initial dimension
Parameters
----------
x : numpy.ndarray
Point in initial dimension.
Returns
-------
u : numpy.ndarray
Point in low dimension.
"""
u = x.dot(self.embedding["C"])
return u
    def _specific_train(self):
        """
        Compute the specific training values necessary for MGP (Hessian)
        """
        # Compute covariance matrix of hyperparameters: rebuild the symmetric
        # Hessian of the reduced likelihood from its packed (value, index) form.
        var_R = np.zeros((len(self.optimal_theta), len(self.optimal_theta)))
        r, r_ij, par = self._reduced_likelihood_hessian(self.optimal_theta)
        var_R[r_ij[:, 0], r_ij[:, 1]] = r[:, 0]
        var_R[r_ij[:, 1], r_ij[:, 0]] = r[:, 0]
        self.inv_sigma_R = -var_R

        # Compute normalise embedding: A holds the optimal hyperparameters
        # reshaped as a map from the input space to the n_comp space; C is
        # its scaled, unit-norm version.
        self.optimal_par = par
        A = np.reshape(self.optimal_theta, (self.options["n_comp"], self.nx)).T
        B = (A.T / self.X_scale).T
        norm_B = np.linalg.norm(B)
        C = B / norm_B
        self.embedding = {}
        self.embedding["A"] = A
        self.embedding["C"] = C
        self.embedding["norm"] = norm_B
        # QR factorization of C, used by get_x_from_u to invert the embedding.
        self.embedding["Q_C"], self.embedding["R_C"] = linalg.qr(C, mode="economic")

        # Compute normalisation in embeding base
        self.U_norma = self.X_norma.dot(A)
        self.U_mean = self.X_offset.dot(C) * norm_B

        # Compute best number of Components for Active Kriging: smallest
        # count whose cumulative singular-value sum exceeds 99% of the total.
        svd = linalg.svd(A)
        svd_cumsum = np.cumsum(svd[1])
        svd_sum = np.sum(svd[1])
        self.best_ncomp = min(np.argwhere(svd_cumsum > 0.99 * svd_sum)) + 1
def _check_param(self):
"""
Overrides KrgBased implementation
This function checks some parameters of the model.
"""
d = self.options["n_comp"] * self.nx
if self.options["corr"] != "act_exp":
raise ValueError("MGP must be used with act_exp correlation function")
if self.options["hyper_opt"] != "TNC":
raise ValueError("MGP must be used with TNC hyperparameters optimizer")
if len(self.options["theta0"]) != d:
if len(self.options["theta0"]) == 1:
self.options["theta0"] *= np.ones(d)
else:
raise ValueError(
"the number of dim %s should be equal to the length of theta0 %s."
% (d, len(self.options["theta0"]))
)
| 17,186 | 31.367232 | 95 | py |
smt | smt-master/smt/surrogate_models/ls.py | """
Author: Dr. Mohamed Amine Bouhlel <mbouhlel@umich.edu>
Dr. Nathalie.bartoli <nathalie@onera.fr>
This package is distributed under New BSD license.
TO DO:
- define outputs['sol'] = self.sol
"""
import numpy as np
from sklearn import linear_model
from smt.surrogate_models.surrogate_model import SurrogateModel
from smt.utils.caching import cached_operation
class LS(SurrogateModel):
    """
    Least-squares (linear regression) surrogate model.

    Thin wrapper around scikit-learn's ``linear_model.LinearRegression``
    class, used with its default parameters.
    """

    name = "LS"

    def _initialize(self):
        """Declare the LS-specific options and supported features."""
        super(LS, self)._initialize()
        declare = self.options.declare
        supports = self.supports
        declare(
            "data_dir",
            values=None,
            types=str,
            desc="Directory for loading / saving cached data; None means do not save or load",
        )
        supports["derivatives"] = True

    ############################################################################
    # Model functions
    ############################################################################

    def _new_train(self):
        """
        Fit the underlying linear regression on the stored training points.
        """
        xt, yt = self.training_points[None][0]
        self.mod = linear_model.LinearRegression()
        self.mod.fit(xt, yt)

    def _train(self):
        """
        Train the model, going through the caching layer when a data
        directory is configured.
        """
        inputs = {"self": self}
        with cached_operation(inputs, self.options["data_dir"]) as outputs:
            if outputs:
                self.sol = outputs["sol"]
            else:
                self._new_train()

    def _predict_values(self, x):
        """
        Evaluates the model at a set of points.

        Arguments
        ---------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values

        Returns
        -------
        y : np.ndarray
            Evaluation point output variable values
        """
        return self.mod.predict(x)

    def _predict_derivatives(self, x, kx):
        """
        Evaluates the derivatives at a set of points.

        Arguments
        ---------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values
        kx : int
            The 0-based index of the input variable with respect to which derivatives are desired.

        Returns
        -------
        y : np.ndarray
            Derivative values.
        """
        n_eval = x.shape[0]
        # A linear model has constant partials: broadcast the kx-th coefficient.
        return np.ones((n_eval, self.ny)) * self.mod.coef_[:, kx]
| 2,682 | 25.303922 | 98 | py |
smt | smt-master/smt/surrogate_models/genn.py | """
G R A D I E N T - E N H A N C E D N E U R A L N E T W O R K S (G E N N)
Author: Steven H. Berguin <steven.berguin@gtri.gatech.edu>
This package is distributed under New BSD license.
"""
from smt.surrogate_models.surrogate_model import SurrogateModel
from smt.utils.neural_net.model import Model
import numpy as np
# ------------------------------------ S U P P O R T F U N C T I O N S -----------------------------------------------
def load_smt_data(model, xt, yt, dyt_dxt=None):
    """
    Convenience helper to feed SMT-style training data into a surrogate model.

    :param model: SurrogateModel object for which to load training data
    :param xt: data points at which the response is evaluated
    :param yt: response values at xt
    :param dyt_dxt: gradient of the response at xt (optional)
    """
    # Infer the number of samples (m) and of input variables (n_x).
    if xt.ndim == 1:
        m, n_x = xt.size, 1
    else:
        m, n_x = xt.shape
    # Infer the number of output variables (n_y).
    n_y = 1 if yt.ndim == 1 else yt.shape[1]

    # Force the 2-D layouts expected by the SMT API.
    xt = xt.reshape((m, n_x))
    yt = yt.reshape((m, n_y))
    model.set_training_values(xt, yt)

    # Register each partial-derivative column separately.
    if dyt_dxt is not None:
        dyt_dxt = dyt_dxt.reshape((m, n_x))
        for i in range(n_x):
            model.set_training_derivatives(xt, dyt_dxt[:, i].reshape((m, 1)), i)
def smt_to_genn(training_points):
    """
    Translate from the SMT training-data structure to the GENN structure.

    This neural-net module works with numpy arrays (X, Y, J) as defined
    below, whereas SMT stores training data in a nested dict.

    :param training_points: dict, training data in the format used by
        surrogate_model.py (see SMT API)

    Returns:
    :return X -- numpy matrix of input features of shape (n_x, m) where
        n_x = no. of inputs, m = no. of training examples
    :return Y -- numpy matrix of output labels of shape (n_y, m) where
        n_y = no. of outputs
    :return J -- numpy array of shape (n_y, n_x, m) representing the Jacobian
        of Y w.r.t. X:
            dY1/dX1 = J[0][0][:]
            dY1/dX2 = J[0][1][:]
            ...
            dY2/dX1 = J[1][0][:]
            dY2/dX2 = J[1][1][:]
            ...
        N.B. To retrieve the i^th example for dY2/dX1: J[1][0][i], i = 1,...,m
    """
    # Retrieve training data: training_points[name][0] = [np.array(xt), np.array(yt)]
    xt, yt = training_points[None][0]
    # Deduce number of dimensions and training examples
    m, n_x = xt.shape
    _, n_y = yt.shape
    X = xt
    Y = yt
    # Retrieve each partial derivative: training_points[name][k+1] = [xt, dyt_dxt]
    J = np.zeros((m, n_x, n_y))
    for k in range(n_x):
        xt_k, dyt_dxt = training_points[None][k + 1]
        # Derivative data must match the value data in shape...
        assert dyt_dxt.shape == (m, n_y)
        # ...and must be given at the same training points.
        # (BUGFIX: the previous check `xt.all() == X.all()` only compared two
        # booleans and validated nothing.)
        assert xt_k.shape == (m, n_x) and np.array_equal(xt_k, X)
        J[:, k, :] = dyt_dxt
    # Transpose to match the neural-net implementation's layout
    return X.T, Y.T, J.T
# ------------------------------------ C L A S S -----------------------------------------------------------------------
class GENN(SurrogateModel):
    """
    Gradient-Enhanced Neural Network (GENN) surrogate model.

    Wraps the neural-net ``Model`` class and exposes it through the SMT
    SurrogateModel API, exploiting training derivatives (gradient
    enhancement) when they are provided.
    """

    name = "GENN"

    def _initialize(self):
        """API function: set default values for user options"""
        declare = self.options.declare
        declare("alpha", 0.5, types=(int, float), desc="optimizer learning rate")
        declare(
            "beta1", 0.9, types=(int, float), desc="Adam optimizer tuning parameter"
        )
        declare(
            "beta2", 0.99, types=(int, float), desc="Adam optimizer tuning parameter"
        )
        declare("lambd", 0.1, types=(int, float), desc="regularization coefficient")
        declare(
            "gamma", 1.0, types=(int, float), desc="gradient-enhancement coefficient"
        )
        declare("deep", 2, types=int, desc="number of hidden layers")
        declare("wide", 2, types=int, desc="number of nodes per hidden layer")
        declare(
            "mini_batch_size",
            64,
            types=int,
            desc="split data into batches of specified size",
        )
        declare(
            "num_epochs", 10, types=int, desc="number of random passes through the data"
        )
        declare(
            "num_iterations",
            100,
            types=int,
            desc="number of optimizer iterations per mini-batch",
        )
        declare(
            "seed",
            None,
            types=int,
            desc="random seed to ensure repeatability of results when desired",
        )
        declare("is_print", True, types=bool, desc="print progress (or not)")
        self.supports["derivatives"] = True
        self.supports["training_derivatives"] = True
        self.model = Model()
        self._is_trained = False

    def _train(self):
        """
        API function: train the neural net
        """
        # Convert training data to the format expected by the neural-net module
        X, Y, J = smt_to_genn(self.training_points)

        # If there are no training derivatives, turn off gradient-enhancement.
        # (isinstance is the idiomatic type check, replacing `type(J) == ...`.)
        if isinstance(J, np.ndarray) and J.size == 0:
            self.options["gamma"] = 0.0

        # Get hyperparameters from SMT API
        alpha = self.options["alpha"]
        beta1 = self.options["beta1"]
        beta2 = self.options["beta2"]
        lambd = self.options["lambd"]
        gamma = self.options["gamma"]
        deep = self.options["deep"]
        wide = self.options["wide"]
        mini_batch_size = self.options["mini_batch_size"]
        num_iterations = self.options["num_iterations"]
        num_epochs = self.options["num_epochs"]
        seed = self.options["seed"]
        is_print = self.options["is_print"]

        # Number of inputs and outputs
        n_x = X.shape[0]
        n_y = Y.shape[0]

        # Train neural net
        self.model = Model.initialize(n_x, n_y, deep, wide)
        self.model.train(
            X=X,
            Y=Y,
            J=J,
            num_iterations=num_iterations,
            mini_batch_size=mini_batch_size,
            num_epochs=num_epochs,
            alpha=alpha,
            beta1=beta1,
            beta2=beta2,
            lambd=lambd,
            gamma=gamma,
            seed=seed,
            silent=not is_print,
        )
        self._is_trained = True

    def _predict_values(self, x):
        """
        API method: predict values using appropriate methods from the neural_network.py module

        :param x: np.ndarray[n, nx] -- Input values for the prediction points
        :return y: np.ndarray[n, ny] -- Output values at the prediction points
        """
        return self.model.evaluate(x.T).T

    def _predict_derivatives(self, x, kx):
        """
        API method: predict partials using appropriate methods from the neural_network.py module

        :param x: np.ndarray[n, nx] -- Input values for the prediction points
        :param kx: int -- The 0-based index of the input variable with respect to which derivatives are desired
        :return: dy_dx: np.ndarray[n, ny] -- partial derivatives
        """
        return self.model.gradient(x.T)[:, kx, :].T

    def plot_training_history(self):
        """Plot the optimizer's training history (only once trained)."""
        if self._is_trained:
            self.model.plot_training_history()

    def goodness_of_fit(self, xv, yv, dyv_dxv):
        """
        Compute metrics to evaluate goodness of fit and show actual by predicted plot

        :param xv: np.ndarray[n, nx], x validation points
        :param yv: np.ndarray[n, 1], y validation response
        :param dyv_dxv: np.ndarray[n, ny], dydx validation derivatives
        """
        # Store current training points so they can be restored afterwards
        training_points = self.training_points

        # Temporarily replace training points with the validation points
        load_smt_data(self, xv, yv, dyv_dxv)

        # Convert from SMT format to the format expected by GENN
        X, Y, J = smt_to_genn(self.training_points)

        # Generate goodness-of-fit plots
        self.model.goodness_of_fit(X, Y)

        # Restore the original training points
        self.training_points = training_points
def run_example(is_gradient_enhancement=True):  # pragma: no cover
    """Test and demonstrate GENN using a 1D example"""
    import matplotlib.pyplot as plt

    # Test function and its analytical derivative
    def f(x):
        return x * np.sin(x)

    def df_dx(x):
        return np.sin(x) + x * np.cos(x)

    # Domain
    lb, ub = -np.pi, np.pi

    # Training data
    m = 4
    xt = np.linspace(lb, ub, m)
    yt = f(xt)
    dyt_dxt = df_dx(xt)

    # Validation data (random points in the domain)
    xv = lb + np.random.rand(30, 1) * (ub - lb)
    yv = f(xv)
    dyv_dxv = df_dx(xv)

    # Initialize GENN object and configure it
    genn = GENN()
    settings = {
        "alpha": 0.1,
        "beta1": 0.9,
        "beta2": 0.99,
        "lambd": 0.1,
        "gamma": int(is_gradient_enhancement),
        "deep": 2,
        "wide": 6,
        "mini_batch_size": 64,
        "num_epochs": 20,
        "num_iterations": 100,
        "is_print": True,
    }
    for key, value in settings.items():
        genn.options[key] = value

    # Load data, train, and report fit quality
    load_smt_data(genn, xt, yt, dyt_dxt)
    genn.train()
    genn.plot_training_history()
    genn.goodness_of_fit(xv, yv, dyv_dxv)

    # Plot comparison of predicted vs. true response
    if genn.options["gamma"] == 1.0:
        title = "with gradient enhancement"
    else:
        title = "without gradient enhancement"
    x = np.arange(lb, ub, 0.01)
    y = f(x)
    y_pred = genn.predict_values(x)
    fig, ax = plt.subplots()
    ax.plot(x, y_pred)
    ax.plot(x, y, "k--")
    ax.plot(xv, yv, "ro")
    ax.plot(xt, yt, "k+", mew=3, ms=10)
    ax.set(xlabel="x", ylabel="y", title=title)
    ax.legend(["Predicted", "True", "Test", "Train"])
    plt.show()
# Script entry point: run the gradient-enhanced 1D demonstration.
if __name__ == "__main__": # pragma: no cover
    run_example(is_gradient_enhancement=True)
| 10,170 | 30.489164 | 120 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.