repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
smt | smt-master/smt/surrogate_models/krg.py | """
Author: Dr. Mohamed A. Bouhlel <mbouhlel@umich.edu>
This package is distributed under New BSD license.
"""
from smt.surrogate_models.krg_based import KrgBased
from smt.utils.kriging import componentwise_distance
class KRG(KrgBased):
    name = "Kriging"

    def _initialize(self):
        """Declare the KRG-specific options on top of the KrgBased ones."""
        super()._initialize()
        # Plain Kriging restricts the correlation kernel to these choices.
        self.options.declare(
            "corr",
            "squar_exp",
            values=("pow_exp", "abs_exp", "squar_exp", "matern52", "matern32"),
            desc="Correlation function type",
            types=str,
        )

    def _componentwise_distance(self, dx, opt=0, theta=None, return_derivative=False):
        """Return the componentwise distances used by the correlation kernel."""
        kernel_name = self.options["corr"]
        kernel_power = self.options["pow_exp_power"]
        return componentwise_distance(
            dx,
            kernel_name,
            self.nx,
            kernel_power,
            theta=theta,
            return_derivative=return_derivative,
        )
| 921 | 25.342857 | 86 | py |
smt | smt-master/smt/surrogate_models/kplsk.py | """
Author: Dr. Mohamed A. Bouhlel <mbouhlel@umich.edu>
This package is distributed under New BSD license.
"""
from smt.surrogate_models import KPLS
from smt.utils.kriging import componentwise_distance_PLS, componentwise_distance
class KPLSK(KPLS):
    name = "KPLSK"

    def _initialize(self):
        """Declare KPLSK-specific options on top of the KPLS ones."""
        super()._initialize()
        declare = self.options.declare
        # KPLSK used only with "squar_exp" correlations.
        # Bug fix: the allowed values must form a one-element tuple. A bare
        # ("squar_exp") is just the string itself, so a membership check
        # against it would wrongly accept any substring (e.g. "exp").
        declare(
            "corr",
            "squar_exp",
            values=("squar_exp",),
            desc="Correlation function type",
            types=str,
        )

    def _componentwise_distance(self, dx, opt=0, theta=None, return_derivative=False):
        """
        Compute componentwise distances for the two-step KPLSK scheme.

        Parameters
        ----------
        dx : np.ndarray
            Pairwise componentwise differences between points.
        opt : int
            0 selects the Kriging step (distances in the full input space);
            any other value selects the KPLS step (distances projected on
            the PLS components).
        theta : np.ndarray or None
            Hyperparameters forwarded to the distance helpers.
        return_derivative : bool
            Whether the helpers should also return derivative information.

        Returns
        -------
        d : np.ndarray
            Componentwise distances.
        """
        if opt == 0:
            # Kriging step
            d = componentwise_distance(
                dx,
                self.options["corr"],
                self.nx,
                power=self.options["pow_exp_power"],
                theta=theta,
                return_derivative=return_derivative,
            )
        else:
            # KPLS step
            d = componentwise_distance_PLS(
                dx,
                self.options["corr"],
                self.options["n_comp"],
                self.coeff_pls,
                power=self.options["pow_exp_power"],
                theta=theta,
                return_derivative=return_derivative,
            )
        return d
| 1,388 | 27.346939 | 86 | py |
smt | smt-master/smt/surrogate_models/rmtb.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import numpy as np
import scipy.sparse
from numbers import Integral
from smt.utils.linear_solvers import get_solver
from smt.utils.line_search import get_line_search_class
from smt.surrogate_models.rmts import RMTS
from smt.surrogate_models.rmtsclib import PyRMTB
class RMTB(RMTS):
    """
    Regularized Minimal-energy Tensor-product B-spline (RMTB) interpolant.

    RMTB builds an approximation from a tensor product of B-spline curves.
    In 1-D it is a B-spline curve, in 2-D it is a B-spline surface, in 3-D
    it is a B-spline volume, and so on - it works for any arbitrary number
    of dimensions. However, the training points should preferably be
    arranged in a structured fashion.

    Advantages:
    - Evaluation time is independent of the number of training points
    - The smoothness can be tuned by adjusting the B-spline order and the
      number of B-spline control points (the latter also affects performance)

    Disadvantages:
    - Training time scales poorly with the # dimensions
    - The data should be structured - RMTB does not handle track data well
    - RMTB approximates, not interpolates - it does not pass through the
      training points
    """

    name = "RMTB"

    def _initialize(self):
        # Declare the B-spline-specific options on top of the RMTS ones.
        super(RMTB, self)._initialize()
        declare = self.options.declare

        declare(
            "order",
            3,
            types=(Integral, tuple, list, np.ndarray),
            desc="B-spline order in each dimension - length [nx]",
        )
        declare(
            "num_ctrl_pts",
            15,
            types=(Integral, tuple, list, np.ndarray),
            desc="# B-spline control points in each dimension - length [nx]",
        )

    def _setup(self):
        # Precompute problem sizes and initialize the compiled RMTB backend.
        options = self.options
        nx = self.training_points[None][0][0].shape[1]

        # Broadcast scalar options to one value per input dimension.
        for name in ["smoothness", "num_ctrl_pts", "order"]:
            if isinstance(options[name], (int, float)):
                options[name] = [options[name]] * nx
            options[name] = np.atleast_1d(options[name])

        self.printer.max_print_depth = options["max_print_depth"]

        num = {}
        # number of inputs and outputs
        num["x"] = self.training_points[None][0][0].shape[1]
        num["y"] = self.training_points[None][0][1].shape[1]
        # per-dimension B-spline order and control-point counts, plus
        # derived element and knot counts for the tensor-product basis
        num["order_list"] = np.array(options["order"], int)
        num["order"] = np.prod(num["order_list"])
        num["ctrl_list"] = np.array(options["num_ctrl_pts"], int)
        num["ctrl"] = np.prod(num["ctrl_list"])
        num["elem_list"] = np.array(num["ctrl_list"] - num["order_list"] + 1, int)
        num["elem"] = np.prod(num["elem_list"])
        num["knots_list"] = num["order_list"] + num["ctrl_list"]
        num["knots"] = np.sum(num["knots_list"])
        # total number of training points (function values and derivatives)
        num["t"] = 0
        for kx in self.training_points[None]:
            num["t"] += self.training_points[None][kx][0].shape[0]
        # for RMT
        num["coeff"] = num["ctrl"]
        num["support"] = num["order"]
        num["dof"] = num["ctrl"]

        self.num = num

        # Compiled backend that evaluates the B-spline basis and its Jacobians.
        self.rmtsc = PyRMTB()
        self.rmtsc.setup(
            num["x"],
            np.array(self.options["xlimits"][:, 0]),
            np.array(self.options["xlimits"][:, 1]),
            np.array(num["order_list"], np.int32),
            np.array(num["ctrl_list"], np.int32),
        )

    def _compute_jac_raw(self, ix1, ix2, x):
        # Jacobian of the (possibly differentiated) basis at points x, in
        # COO sparse form (data, rows, cols). ix1/ix2 are 1-based derivative
        # directions; 0 means no differentiation in that slot.
        xlimits = self.options["xlimits"]

        # Map x onto the unit hypercube, clipping just inside (0, 1) so the
        # B-spline evaluation stays well-defined at the domain boundaries.
        t = np.zeros(x.shape)
        for kx in range(self.num["x"]):
            t[:, kx] = (x[:, kx] - xlimits[kx, 0]) / (xlimits[kx, 1] - xlimits[kx, 0])
        t = np.maximum(t, 0.0 + 1e-15)
        t = np.minimum(t, 1.0 - 1e-15)

        n = x.shape[0]
        # Each evaluation point touches num["order"] basis functions.
        nnz = n * self.num["order"]

        # data, rows, cols = RMTBlib.compute_jac(ix1, ix2, self.num['x'], n, nnz,
        #     self.num['order_list'], self.num['ctrl_list'], t)
        data = np.empty(nnz)
        rows = np.empty(nnz, dtype=np.int32)
        cols = np.empty(nnz, dtype=np.int32)
        self.rmtsc.compute_jac(ix1 - 1, ix2 - 1, n, t.flatten(), data, rows, cols)

        # Chain rule: account for the [0,1] rescaling of each differentiated
        # direction.
        if ix1 != 0:
            data /= xlimits[ix1 - 1, 1] - xlimits[ix1 - 1, 0]
        if ix2 != 0:
            data /= xlimits[ix2 - 1, 1] - xlimits[ix2 - 1, 0]

        return data, rows, cols

    def _compute_dof2coeff(self):
        # For RMTB the degrees of freedom are the control-point coefficients
        # themselves, so no mapping matrix is needed.
        return None
| 4,507 | 33.945736 | 86 | py |
smt | smt-master/smt/surrogate_models/surrogate_model.py | """
Author: Dr. Mohamed A. Bouhlel <mbouhlel@umich.edu>
Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
Paul Saves : Mixed Integer
"""
from typing import Optional
import numpy as np
from collections import defaultdict
from abc import ABCMeta, abstractmethod
from smt.utils.printer import Printer
from smt.utils.options_dictionary import OptionsDictionary
from smt.utils.checks import check_support, check_nx, ensure_2d_array
class SurrogateModel(metaclass=ABCMeta):
    """
    Base class for all surrogate models.

    Attributes
    ----------
    options : OptionsDictionary
        Dictionary of options. Options values can be set on this attribute directly
        or they can be passed in as keyword arguments during instantiation.
    supports : dict
        Dictionary containing information about what this surrogate model supports.

    Examples
    --------
    >>> from smt.surrogate_models import RBF
    >>> sm = RBF(print_training=False)
    >>> sm.options['print_prediction'] = False
    """

    def __init__(self, **kwargs):
        """
        Constructor where values of options can be passed in.

        For the list of options, see the documentation for the surrogate model being used.

        Parameters
        ----------
        **kwargs : named arguments
            Set of options that can be optionally set; each option must have been declared.

        Examples
        --------
        >>> from smt.surrogate_models import RBF
        >>> sm = RBF(print_global=False)
        """
        self.options = OptionsDictionary()

        # Capability flags: subclasses flip the relevant ones to True in
        # their _initialize() implementation.
        self.supports = supports = {}
        supports["training_derivatives"] = False
        supports["derivatives"] = False
        supports["output_derivatives"] = False
        supports["adjoint_api"] = False
        supports["variances"] = False
        supports["variance_derivatives"] = False
        supports["x_hierarchy"] = False

        declare = self.options.declare
        declare(
            "print_global",
            True,
            types=bool,
            desc="Global print toggle. If False, all printing is suppressed",
        )
        declare(
            "print_training",
            True,
            types=bool,
            desc="Whether to print training information",
        )
        declare(
            "print_prediction",
            True,
            types=bool,
            desc="Whether to print prediction information",
        )
        declare(
            "print_problem",
            True,
            types=bool,
            desc="Whether to print problem information",
        )
        declare(
            "print_solver", True, types=bool, desc="Whether to print solver information"
        )

        # Let the subclass declare its own options before user kwargs are applied.
        self._initialize()
        self.options.update(kwargs)

        self.training_points = defaultdict(dict)
        self.printer = Printer()
        self._final_initialize()

    @property
    @abstractmethod
    def name(self):
        pass

    def set_training_values(self, xt: np.ndarray, yt: np.ndarray, name=None) -> None:
        """
        Set training data (values).

        Parameters
        ----------
        xt : np.ndarray[nt, nx] or np.ndarray[nt]
            The input values for the nt training points.
        yt : np.ndarray[nt, ny] or np.ndarray[nt]
            The output values for the nt training points.
        name : str or None
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications).
        """
        xt = ensure_2d_array(xt, "xt")
        yt = ensure_2d_array(yt, "yt")

        if xt.shape[0] != yt.shape[0]:
            raise ValueError(
                "the first dimension of xt and yt must have the same length"
            )

        self.nt = xt.shape[0]
        self.nx = xt.shape[1]
        self.ny = yt.shape[1]
        # Key 0 holds function values; keys kx + 1 hold derivative data.
        kx = 0
        self.training_points[name][kx] = [np.array(xt), np.array(yt)]

    def update_training_values(
        self, yt: np.ndarray, name: Optional[str] = None
    ) -> None:
        """
        Update the training data (values) at the previously set input values.

        Parameters
        ----------
        yt : np.ndarray[nt, ny] or np.ndarray[nt]
            The output values for the nt training points.
        name : str or None, optional
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications). The default is None.

        Raises
        ------
        ValueError
            The training points must be set first with set_training_values before calling update_training_values.
            The number of training points does not agree with the earlier call of set_training_values.
        """
        yt = ensure_2d_array(yt, "yt")
        kx = 0

        if kx not in self.training_points[name]:
            raise ValueError(
                "The training points must be set first with set_training_values "
                + "before calling update_training_values."
            )

        xt = self.training_points[name][kx][0]
        if xt.shape[0] != yt.shape[0]:
            raise ValueError(
                "The number of training points does not agree with the earlier call of "
                + "set_training_values."
            )

        self.training_points[name][kx][1] = np.array(yt)

    def set_training_derivatives(
        self, xt: np.ndarray, dyt_dxt: np.ndarray, kx: int, name: Optional[str] = None
    ) -> None:
        """
        Set training data (derivatives).

        Parameters
        ----------
        xt : np.ndarray[nt, nx] or np.ndarray[nt]
            The input values for the nt training points.
        dyt_dxt : np.ndarray[nt, ny] or np.ndarray[nt]
            The derivatives values for the nt training points.
        kx : int
            0-based index of the derivatives being set.
        name : str or None
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications).
        """
        check_support(self, "training_derivatives")

        xt = ensure_2d_array(xt, "xt")
        dyt_dxt = ensure_2d_array(dyt_dxt, "dyt_dxt")

        if xt.shape[0] != dyt_dxt.shape[0]:
            raise ValueError(
                "the first dimension of xt and dyt_dxt must have the same length"
            )

        if not isinstance(kx, int):
            raise ValueError("kx must be an int")

        # Derivatives w.r.t. input kx are stored under key kx + 1
        # (key 0 is reserved for the function values).
        self.training_points[name][kx + 1] = [np.array(xt), np.array(dyt_dxt)]

    def update_training_derivatives(
        self, dyt_dxt: np.ndarray, kx: int, name: Optional[str] = None
    ) -> None:
        """
        Update the training data (derivatives) at the previously set input values.

        Parameters
        ----------
        dyt_dxt : np.ndarray[nt, ny] or np.ndarray[nt]
            The derivatives values for the nt training points.
        kx : int
            0-based index of the derivatives being set.
        name : str or None, optional
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications).

        Raises
        ------
        ValueError
            The training derivatives must be set first with set_training_derivatives.
            The number of training points does not agree with the earlier call of set_training_derivatives.
        """
        check_support(self, "training_derivatives")
        dyt_dxt = ensure_2d_array(dyt_dxt, "dyt_dxt")

        # Bug fix: derivatives are stored under key kx + 1 (see
        # set_training_derivatives), so both the existence check and the xt
        # lookup must use kx + 1. The previous code checked key kx, which
        # only worked by accident when the function values (key 0) existed.
        if kx + 1 not in self.training_points[name]:
            raise ValueError(
                "The training derivatives must be set first with set_training_derivatives "
                + "before calling update_training_derivatives."
            )

        xt = self.training_points[name][kx + 1][0]
        if xt.shape[0] != dyt_dxt.shape[0]:
            raise ValueError(
                "The number of training points does not agree with the earlier call of "
                + "set_training_derivatives."
            )

        self.training_points[name][kx + 1][1] = np.array(dyt_dxt)

    def train(self) -> None:
        """
        Train the model
        """
        n_exact = self.training_points[None][0][0].shape[0]

        self.printer.active = self.options["print_global"]
        self.printer._line_break()
        self.printer._center(self.name)

        self.printer.active = (
            self.options["print_global"] and self.options["print_problem"]
        )
        self.printer._title("Problem size")
        self.printer("   %-25s : %i" % ("# training points.", n_exact))
        self.printer()

        self.printer.active = (
            self.options["print_global"] and self.options["print_training"]
        )
        if self.name == "MixExp":
            # Mixture of experts model
            self.printer._title("Training of the Mixture of experts")
        else:
            self.printer._title("Training")

        # Train the model using the specified model-method
        with self.printer._timed_context("Training", "training"):
            self._train()

    def predict_values(self, x: np.ndarray) -> np.ndarray:
        """
        Predict the output values at a set of points.

        Parameters
        ----------
        x : np.ndarray[nt, nx] or np.ndarray[nt]
            Input values for the prediction points.

        Returns
        -------
        y : np.ndarray[nt, ny]
            Output values at the prediction points.
        """
        x = ensure_2d_array(x, "x")
        self._check_xdim(x)
        n = x.shape[0]
        x2 = np.copy(x)
        self.printer.active = (
            self.options["print_global"] and self.options["print_prediction"]
        )

        if self.name == "MixExp":
            # Mixture of experts model
            self.printer._title("Evaluation of the Mixture of experts")
        else:
            self.printer._title("Evaluation")
        self.printer("   %-12s : %i" % ("# eval points.", n))
        self.printer()

        # Evaluate the unknown points using the specified model-method
        with self.printer._timed_context("Predicting", key="prediction"):
            y = self._predict_values(x2)

        time_pt = self.printer._time("prediction")[-1] / n
        self.printer()
        self.printer("Prediction time/pt. (sec) : %10.7f" % time_pt)
        self.printer()
        return y.reshape((n, self.ny))

    def predict_derivatives(self, x: np.ndarray, kx: int) -> np.ndarray:
        """
        Predict the dy_dx derivatives at a set of points.

        Parameters
        ----------
        x : np.ndarray[nt, nx] or np.ndarray[nt]
            Input values for the prediction points.
        kx : int
            The 0-based index of the input variable with respect to which derivatives are desired.

        Returns
        -------
        dy_dx : np.ndarray[nt, ny]
            Derivatives.
        """
        check_support(self, "derivatives")
        x = ensure_2d_array(x, "x")
        self._check_xdim(x)
        n = x.shape[0]
        self.printer.active = (
            self.options["print_global"] and self.options["print_prediction"]
        )

        if self.name == "MixExp":
            # Mixture of experts model
            self.printer._title("Evaluation of the Mixture of experts")
        else:
            self.printer._title("Evaluation")
        self.printer("   %-12s : %i" % ("# eval points.", n))
        self.printer()

        # Evaluate the unknown points using the specified model-method
        with self.printer._timed_context("Predicting", key="prediction"):
            y = self._predict_derivatives(x, kx)

        time_pt = self.printer._time("prediction")[-1] / n
        self.printer()
        self.printer("Prediction time/pt. (sec) : %10.7f" % time_pt)
        self.printer()
        return y.reshape((n, self.ny))

    def predict_output_derivatives(self, x: np.ndarray) -> dict:
        """
        Predict the derivatives dy_dyt at a set of points.

        Parameters
        ----------
        x : np.ndarray[nt, nx] or np.ndarray[nt]
            Input values for the prediction points.

        Returns
        -------
        dy_dyt : dict of np.ndarray[nt, nt]
            Dictionary of output derivatives.
            Key is None for derivatives wrt yt and kx for derivatives wrt dyt_dxt.
        """
        check_support(self, "output_derivatives")
        x = ensure_2d_array(x, "x")
        self._check_xdim(x)

        dy_dyt = self._predict_output_derivatives(x)
        return dy_dyt

    def predict_variances(self, x: np.ndarray) -> np.ndarray:
        """
        Predict the variances at a set of points.

        Parameters
        ----------
        x : np.ndarray[nt, nx] or np.ndarray[nt]
            Input values for the prediction points.

        Returns
        -------
        s2 : np.ndarray[nt, ny]
            Variances.
        """
        check_support(self, "variances")
        x = ensure_2d_array(x, "x")
        self._check_xdim(x)
        n = x.shape[0]
        x2 = np.copy(x)

        s2 = self._predict_variances(x2)
        return s2.reshape((n, self.ny))

    def predict_variance_derivatives(self, x, kx):
        """
        Predict the derivatives of the variance at a set of points.

        Parameters
        ----------
        x : np.ndarray
            Input values for the prediction points.
        kx : int
            The 0-based index of the input variable with respect to which
            derivatives are desired.

        Returns
        -------
        derived_variance : np.ndarray
            The jacobian of the variance
        """
        x = ensure_2d_array(x, "x")
        check_support(self, "variance_derivatives")
        self._check_xdim(x)
        n = x.shape[0]
        self.printer.active = (
            self.options["print_global"] and self.options["print_prediction"]
        )

        if self.name == "MixExp":
            # Mixture of experts model
            self.printer._title("Evaluation of the Mixture of experts")
        else:
            self.printer._title("Evaluation")
        self.printer("   %-12s : %i" % ("# eval points.", n))
        self.printer()

        # Evaluate the unknown points using the specified model-method
        with self.printer._timed_context("Predicting", key="prediction"):
            y = self._predict_variance_derivatives(x, kx)

        time_pt = self.printer._time("prediction")[-1] / n
        self.printer()
        self.printer("Prediction time/pt. (sec) : %10.7f" % time_pt)
        self.printer()
        return y

    def _initialize(self):
        """
        Implemented by surrogate models to declare options and declare what they support (optional).

        Examples
        --------
        self.options.declare('option_name', default_value, types=(bool, int), desc='description')
        self.supports['derivatives'] = True
        """
        pass

    def _train(self) -> None:
        """
        Implemented by surrogate models to perform training (optional, but typically implemented).
        """
        pass

    def _final_initialize(self):
        """
        Implemented by surrogate models to complete the initialization after options are declared and possibly updated by the user.
        """
        pass

    @abstractmethod
    def _predict_values(self, x: np.ndarray) -> np.ndarray:
        """
        Implemented by surrogate models to predict the output values.

        Parameters
        ----------
        x : np.ndarray[nt, nx]
            Input values for the prediction points.

        Returns
        -------
        y : np.ndarray[nt, ny]
            Output values at the prediction points.
        """
        raise Exception("This surrogate model is incorrectly implemented")

    def _predict_derivatives(self, x: np.ndarray, kx: int) -> np.ndarray:
        """
        Implemented by surrogate models to predict the dy_dx derivatives (optional).

        If this method is implemented, the surrogate model should have

        ::
            self.supports['derivatives'] = True

        in the _initialize() implementation.

        Parameters
        ----------
        x : np.ndarray[nt, nx]
            Input values for the prediction points.
        kx : int
            The 0-based index of the input variable with respect to which derivatives are desired.

        Returns
        -------
        dy_dx : np.ndarray[nt, ny]
            Derivatives.
        """
        check_support(self, "derivatives", fail=True)

    def _predict_output_derivatives(self, x: np.ndarray) -> dict:
        """
        Implemented by surrogate models to predict the dy_dyt derivatives (optional).

        If this method is implemented, the surrogate model should have

        ::
            self.supports['output_derivatives'] = True

        in the _initialize() implementation.

        Parameters
        ----------
        x : np.ndarray[nt, nx]
            Input values for the prediction points.

        Returns
        -------
        dy_dyt : dict of np.ndarray[nt, nt]
            Dictionary of output derivatives.
            Key is None for derivatives wrt yt and kx for derivatives wrt dyt_dxt.
        """
        check_support(self, "output_derivatives", fail=True)
        return {}

    def _predict_variances(self, x: np.ndarray) -> np.ndarray:
        """
        Implemented by surrogate models to predict the variances at a set of points (optional).

        If this method is implemented, the surrogate model should have

        ::
            self.supports['variances'] = True

        in the _initialize() implementation.

        Parameters
        ----------
        x : np.ndarray[nt, nx]
            Input values for the prediction points.

        Returns
        -------
        s2 : np.ndarray[nt, ny]
            Variances.
        """
        check_support(self, "variances", fail=True)

    def _predict_variance_derivatives(self, x, kx):
        """
        Implemented by surrogate models to predict the derivatives of the
        variance at a set of points (optional).

        Parameters
        ----------
        x : np.ndarray
            Input values for the prediction points.
        kx : int
            The 0-based index of the input variable with respect to which
            derivatives are desired.

        Returns
        -------
        derived_variance : np.ndarray
            The jacobian of the variance
        """
        # Bug fix: signature now matches the call in
        # predict_variance_derivatives(), which invokes
        # self._predict_variance_derivatives(x, kx). The previous
        # single-argument stub raised TypeError instead of the intended
        # "not supported" error for models without this capability.
        check_support(self, "variance_derivatives", fail=True)

    def _check_xdim(self, x):
        """Raise a ValueError if x dimension is not consistent with surrogate model training data dimension.
        This method is used as a guard in preamble of predict methods"""
        check_nx(self.nx, x)
| 18,858 | 31.571675 | 131 | py |
smt | smt-master/smt/surrogate_models/kpls.py | """
Author: Dr. Mohamed A. Bouhlel <mbouhlel@umich.edu>
This package is distributed under New BSD license.
"""
import numpy as np
from sklearn.cross_decomposition import PLSRegression as pls
from smt.surrogate_models.krg_based import KrgBased
from smt.utils.kriging import componentwise_distance_PLS
import warnings
import sys
class KPLS(KrgBased):
    name = "KPLS"

    def _initialize(self):
        """Declare the KPLS-specific options (PLS components, kernels, ...)."""
        super(KPLS, self)._initialize()
        declare = self.options.declare
        declare("n_comp", 1, types=int, desc="Number of principal components")
        # KPLS used only with "abs_exp" and "squar_exp" correlations
        declare(
            "corr",
            "squar_exp",
            values=("abs_exp", "squar_exp", "pow_exp"),
            desc="Correlation function type",
            types=(str),
        )
        declare(
            "eval_n_comp",
            False,
            types=(bool),
            values=(True, False),
            desc="n_comp evaluation flag",
        )
        declare(
            "eval_comp_treshold",
            1.0,
            types=(float),
            desc="n_comp evaluation treshold for Wold's R criterion",
        )
        declare(
            "cat_kernel_comps",
            None,
            types=list,
            desc="Number of components for PLS categorical kernel",
        )

    def _compute_pls(self, X, y):
        """Fit a PLS regression on (X, y) and store the absolute rotations.

        Sets self.coeff_pls to |x_rotations_| of the fitted PLS model, used
        later to weight componentwise distances. Requires at least
        n_comp + 1 training points; a 1-D input gets the trivial projection.
        """
        _pls = pls(self.options["n_comp"])
        self.coeff_pls = 0
        if np.shape(X)[0] < self.options["n_comp"] + 1:
            raise ValueError(
                "ValueError: The database should be at least "
                + str(self.options["n_comp"] + 1)
                + " points (currently "
                + str(np.shape(X)[0])
                + ")."
            )
        else:
            if np.shape(X)[1] == 1:
                self.coeff_pls = np.atleast_2d(np.array([1]))
            else:
                # Absolute values of the PLS rotation define the
                # per-component weights used by the kernel distance.
                self.coeff_pls = abs(_pls.fit(X.copy(), y.copy()).x_rotations_)
        return X, y

    def _componentwise_distance(self, dx, opt=0, theta=None, return_derivative=False):
        """Componentwise distances projected on the PLS components."""
        d = componentwise_distance_PLS(
            dx,
            self.options["corr"],
            self.options["n_comp"],
            self.coeff_pls,
            power=self.options["pow_exp_power"],
            theta=theta,
            return_derivative=return_derivative,
        )
        return d

    def _estimate_number_of_components(self):
        """
        self.options[n_comp] value from user is ignored and replaced by an estimated one wrt Wold's R criterion.
        """
        # k-fold cross-validation (k = 4): keep increasing n_comp while the
        # PRESS ratio between consecutive models (Wold's R criterion) stays
        # below the threshold. NOTE: temporarily overwrites the training
        # points and self.nt; both are restored at the end.
        eval_comp_treshold = self.options["eval_comp_treshold"]
        X = self.training_points[None][0][0]
        y = self.training_points[None][0][1]
        k_fold = 4
        nbk = int(self.nt / k_fold)
        press_m = 0.0
        press_m1 = 0.0
        self.options["n_comp"] = 0
        nextcomp = True
        while nextcomp:
            self.options["n_comp"] += 1
            press_m = press_m1
            press_m1 = 0
            self.options["theta0"] = [0.1]
            for fold in range(k_fold):
                self.nt = len(X) - nbk
                todel = np.arange(fold * nbk, (fold + 1) * nbk)
                Xfold = np.copy(X)
                Xfold = np.delete(X, todel, axis=0)
                yfold = np.copy(y)
                yfold = np.delete(y, todel, axis=0)
                Xtest = np.copy(X)[fold * nbk : (fold + 1) * nbk, :]
                ytest = np.copy(y)[fold * nbk : (fold + 1) * nbk, :]
                self.training_points[None][0][0] = Xfold
                self.training_points[None][0][1] = yfold
                try:
                    self._new_train()
                except ValueError:
                    # Not enough points for this many components: back off.
                    self.options["n_comp"] -= 1
                    nextcomp = False
                    break
                ye = self._predict_values(Xtest)
                press_m1 = press_m1 + np.sum(np.power((1 / len(X)) * (ye - ytest), 2))
            if self.options["n_comp"] > 1 and press_m1 / press_m > eval_comp_treshold:
                self.options["n_comp"] -= 1
                nextcomp = False
        # Restore the full training set after cross-validation.
        self.training_points[None][0][0] = X
        self.training_points[None][0][1] = y
        self.nt = len(X)
        self.options["theta0"] = [0.1]

    def _train(self):
        """
        Train the model
        """
        # outputs['sol'] = self.sol
        if self.options["eval_n_comp"]:
            self._estimate_number_of_components()
        self._new_train()
| 4,500 | 31.854015 | 112 | py |
smt | smt-master/smt/surrogate_models/__init__.py | from .ls import LS
from .qp import QP
from .krg import KRG
from .kpls import KPLS
from .gekpls import GEKPLS
from .kplsk import KPLSK
from .genn import GENN
from .mgp import MGP
from .krg_based import MixIntKernelType
from smt.utils.design_space import (
DesignSpace,
FloatVariable,
IntegerVariable,
OrdinalVariable,
CategoricalVariable,
)
from smt.utils.kriging import MixHrcKernelType
# These surrogates rely on compiled C++ extensions; skip them cleanly when
# the extensions have not been built. Bug fix: catch ImportError only,
# instead of a bare `except:` that would silently mask unrelated errors
# (syntax errors, keyboard interrupts, ...).
try:
    from .idw import IDW
    from .rbf import RBF
    from .rmtc import RMTC
    from .rmtb import RMTB
except ImportError:
    pass
| 535 | 18.851852 | 46 | py |
smt | smt-master/smt/surrogate_models/krg_based.py | """
Author: Dr. Mohamed Amine Bouhlel <mbouhlel@umich.edu>
Some functions are copied from gaussian_process submodule (Scikit-learn 0.14)
This package is distributed under New BSD license.
"""
import numpy as np
from enum import Enum
from scipy import linalg, optimize
from copy import deepcopy
import warnings
from smt.surrogate_models.surrogate_model import SurrogateModel
from smt.utils.kriging import (
differences,
constant,
linear,
quadratic,
pow_exp,
squar_exp,
abs_exp,
act_exp,
cross_distances,
matern52,
matern32,
gower_componentwise_distances,
componentwise_distance,
componentwise_distance_PLS,
compute_X_cont,
cross_levels,
compute_X_cross,
cross_levels_homo_space,
MixHrcKernelType,
matrix_data_corr_levels_cat_matrix,
matrix_data_corr_levels_cat_mod,
matrix_data_corr_levels_cat_mod_comps,
)
from smt.utils.misc import standardization
from smt.utils.checks import ensure_2d_array, check_support
from scipy.stats import multivariate_normal as m_norm
from smt.sampling_methods import LHS
from smt.utils.design_space import (
BaseDesignSpace,
ensure_design_space,
CategoricalVariable,
)
class MixIntKernelType(Enum):
    """Available kernels for handling categorical (mixed-integer) inputs."""

    EXP_HOMO_HSPHERE = "EXP_HOMO_HSPHERE"
    HOMO_HSPHERE = "HOMO_HSPHERE"
    CONT_RELAX = "CONT_RELAX"
    GOWER = "GOWER"
class KrgBased(SurrogateModel):
_regression_types = {"constant": constant, "linear": linear, "quadratic": quadratic}
_correlation_types = {
"pow_exp": pow_exp,
"abs_exp": abs_exp,
"squar_exp": squar_exp,
"act_exp": act_exp,
"matern52": matern52,
"matern32": matern32,
}
name = "KrigingBased"
    def _initialize(self):
        """Declare options and capabilities shared by all Kriging-based models."""
        super(KrgBased, self)._initialize()
        declare = self.options.declare
        supports = self.supports
        declare(
            "poly",
            "constant",
            values=("constant", "linear", "quadratic"),
            desc="Regression function type",
            types=(str),
        )
        declare(
            "corr",
            "squar_exp",
            values=(
                "pow_exp",
                "abs_exp",
                "squar_exp",
                "act_exp",
                "matern52",
                "matern32",
            ),
            desc="Correlation function type",
        )
        declare(
            "pow_exp_power",
            1.9,
            types=(float),
            desc="Power for the pow_exp kernel function (valid values in (0.0, 2.0]), This option is set automatically when corr option is squar, abs, or matern.",
        )
        declare(
            "categorical_kernel",
            MixIntKernelType.CONT_RELAX,
            values=[
                MixIntKernelType.CONT_RELAX,
                MixIntKernelType.GOWER,
                MixIntKernelType.EXP_HOMO_HSPHERE,
                MixIntKernelType.HOMO_HSPHERE,
            ],
            desc="The kernel to use for categorical inputs. Only for non continuous Kriging",
        )
        declare(
            "hierarchical_kernel",
            MixHrcKernelType.ALG_KERNEL,
            values=[
                MixHrcKernelType.ALG_KERNEL,
                MixHrcKernelType.ARC_KERNEL,
            ],
            desc="The kernel to use for mixed hierarchical inputs. Only for non continuous Kriging",
        )
        declare(
            "nugget",
            100.0 * np.finfo(np.double).eps,
            types=(float),
            desc="a jitter for numerical stability",
        )
        declare(
            "theta0", [1e-2], types=(list, np.ndarray), desc="Initial hyperparameters"
        )
        # In practice, in 1D and for X in [0,1], theta^{-2} in [1e-2,infty), i.e.
        # theta in (0,1e1], is a good choice to avoid overfitting. By standardising
        # X in R, X_norm = (X-X_mean)/X_std, then X_norm in [-1,1] if considering
        # one std intervals. This leads to theta in (0,2e1]
        declare(
            "theta_bounds",
            [1e-6, 2e1],
            types=(list, np.ndarray),
            desc="bounds for hyperparameters",
        )
        declare(
            "hyper_opt",
            "Cobyla",
            values=("Cobyla", "TNC"),
            desc="Optimiser for hyperparameters optimisation",
            types=str,
        )
        declare(
            "eval_noise",
            False,
            types=bool,
            values=(True, False),
            desc="noise evaluation flag",
        )
        declare(
            "noise0",
            [0.0],
            types=(list, np.ndarray),
            desc="Initial noise hyperparameters",
        )
        declare(
            "noise_bounds",
            [100.0 * np.finfo(np.double).eps, 1e10],
            types=(list, np.ndarray),
            desc="bounds for noise hyperparameters",
        )
        declare(
            "use_het_noise",
            False,
            types=bool,
            values=(True, False),
            desc="heteroscedastic noise evaluation flag",
        )
        declare(
            "n_start",
            10,
            types=int,
            desc="number of optimizer runs (multistart method)",
        )
        declare(
            "xlimits",
            None,
            types=(list, np.ndarray),
            desc="definition of a design space of float (continuous) variables: "
            "array-like of size nx x 2 (lower, upper bounds)",
        )
        declare(
            "design_space",
            None,
            types=(BaseDesignSpace, list, np.ndarray),
            desc="definition of the (hierarchical) design space: "
            "use `smt.utils.design_space.DesignSpace` as the main API. Also accepts list of float variable bounds",
        )
        # Internal state used by the hyperparameter optimization loop.
        self.best_iteration_fail = None
        self.nb_ill_matrix = 5
        # Per-set is_acting matrices for hierarchical design spaces.
        self.is_acting_points = {}
        supports["derivatives"] = True
        supports["variances"] = True
        supports["variance_derivatives"] = True
        supports["x_hierarchy"] = True
def _final_initialize(self):
# initialize default power values
if self.options["corr"] == "squar_exp":
self.options["pow_exp_power"] = 2.0
elif self.options["corr"] in ["abs_exp", "matern32", "matern52"]:
self.options["pow_exp_power"] = 1.0
# Check the pow_exp_power is >0 and <=2
assert (
self.options["pow_exp_power"] > 0 and self.options["pow_exp_power"] <= 2
), (
"The power value for exponential power function can only be >0 and <=2, but %s was given"
% self.options["pow_exp_power"]
)
    @property
    def design_space(self) -> BaseDesignSpace:
        """Lazily resolve and return the (possibly hierarchical) design space.

        If the "design_space" option is unset or given as plain bounds, it is
        converted to a BaseDesignSpace, using the training inputs (if any) to
        infer variable definitions. The resolved object is cached back into
        the option.
        """
        xt = self.training_points.get(None)
        if xt is not None:
            xt = xt[0][0]

        if self.options["design_space"] is None:
            self.options["design_space"] = ensure_design_space(xt=xt)

        elif not isinstance(self.options["design_space"], BaseDesignSpace):
            ds_input = self.options["design_space"]
            self.options["design_space"] = ensure_design_space(
                xt=xt, xlimits=ds_input, design_space=ds_input
            )

        return self.options["design_space"]
@property
def is_continuous(self) -> bool:
return self.design_space.is_all_cont
    def set_training_values(
        self, xt: np.ndarray, yt: np.ndarray, name=None, is_acting=None
    ) -> None:
        """
        Set training data (values).

        Parameters
        ----------
        xt : np.ndarray[nt, nx] or np.ndarray[nt]
            The input values for the nt training points.
        yt : np.ndarray[nt, ny] or np.ndarray[nt]
            The output values for the nt training points.
        name : str or None
            An optional label for the group of training points being set.
            This is only used in special situations (e.g., multi-fidelity applications).
        is_acting : np.ndarray[nt, nx] or np.ndarray[nt]
            Matrix specifying which of the design variables is acting in a hierarchical design space
        """
        super().set_training_values(xt, yt, name=name)
        # Store the acting matrix per point set; when absent it is derived
        # later from the design space during training.
        if is_acting is not None:
            self.is_acting_points[name] = is_acting
    def _new_train(self):
        """Fit the model on the stored training set.

        Builds the componentwise distance matrix (Gower distances for mixed
        variables, cross-distances for the continuous case), standardizes
        inputs/outputs, sets up the regression matrix F, and optimizes the
        correlation hyperparameters. Results are stored as attributes
        (``optimal_rlf_value``, ``optimal_par``, ``optimal_theta`` and, when
        noise is estimated, ``optimal_noise``).
        """
        # Sampling points X and y
        X = self.training_points[None][0][0]
        y = self.training_points[None][0][1]

        # Get is_acting status from design space model if needed (might correct training points)
        is_acting = self.is_acting_points.get(None)
        if is_acting is None:
            X, is_acting = self.design_space.correct_get_acting(X)
            self.training_points[None][0][0] = X
            self.is_acting_points[None] = is_acting

        # Compute PLS-coefficients (attr of self) and modified X and y (if GEKPLS is used)
        if self.name not in ["Kriging", "MGP"]:
            if self.is_continuous:
                X, y = self._compute_pls(X.copy(), y.copy())

        self._check_param()
        self.X_train = X
        self.is_acting_train = is_acting
        # Invalidate the cached theta-slicing bookkeeping (see _initialize_theta).
        self._corr_params = None
        if not (self.is_continuous):
            # Mixed (categorical/continuous) inputs: Gower componentwise
            # distances handle the mixture and hierarchical activeness.
            D, self.ij, X = gower_componentwise_distances(
                X=X,
                x_is_acting=is_acting,
                design_space=self.design_space,
                hierarchical_kernel=self.options["hierarchical_kernel"],
            )
            if self.options["categorical_kernel"] == MixIntKernelType.CONT_RELAX:
                # Continuous relaxation: unfold categorical variables into
                # continuous dimensions and recompute distances in that
                # (separately standardized) unfolded space.
                X2, _ = self.design_space.unfold_x(self.training_points[None][0][0])
                (
                    self.X2_norma,
                    _,
                    self.X2_offset,
                    _,
                    self.X2_scale,
                    _,
                ) = standardization(X2, self.training_points[None][0][1])
                D, _ = cross_distances(self.X2_norma)
            self.Lij, self.n_levels = cross_levels(
                X=self.X_train, ij=self.ij, design_space=self.design_space
            )
            _, self.cat_features = compute_X_cont(self.X_train, self.design_space)

        # Center and scale X and y
        (
            self.X_norma,
            self.y_norma,
            self.X_offset,
            self.y_mean,
            self.X_scale,
            self.y_std,
        ) = standardization(X, y)

        if not self.options["eval_noise"]:
            # Fixed user-supplied noise variance(s).
            self.optimal_noise = np.array(self.options["noise0"])
        elif self.options["use_het_noise"]:
            # hetGP works with unique design variables when noise variance are not given
            (
                self.X_norma,
                index_unique,
                nt_reps,
            ) = np.unique(self.X_norma, return_inverse=True, return_counts=True, axis=0)
            self.nt = self.X_norma.shape[0]

            # computing the mean of the output per unique design variable (see Binois et al., 2018)
            y_norma_unique = []
            for i in range(self.nt):
                y_norma_unique.append(np.mean(self.y_norma[index_unique == i]))
            # pointwise sensible estimates of the noise variances (see Ankenman et al., 2010)
            self.optimal_noise = self.options["noise0"] * np.ones(self.nt)
            for i in range(self.nt):
                diff = self.y_norma[index_unique == i] - y_norma_unique[i]
                if np.sum(diff**2) != 0.0:
                    self.optimal_noise[i] = np.std(diff, ddof=1) ** 2
            self.optimal_noise = self.optimal_noise / nt_reps
            self.y_norma = y_norma_unique
        if self.is_continuous:
            # Calculate matrix of distances D between samples
            D, self.ij = cross_distances(self.X_norma)

        # Two identical rows make the correlation matrix singular.
        if np.min(np.sum(np.abs(D), axis=1)) == 0.0:
            warnings.warn(
                "Warning: multiple x input features have the same value (at least same row twice)."
            )
        ####
        # Regression matrix and parameters
        self.F = self._regression_types[self.options["poly"]](self.X_norma)
        n_samples_F = self.F.shape[0]
        if self.F.ndim > 1:
            p = self.F.shape[1]
        else:
            p = 1
        self._check_F(n_samples_F, p)

        # Optimization
        (
            self.optimal_rlf_value,
            self.optimal_par,
            self.optimal_theta,
        ) = self._optimize_hyperparam(D)
        if self.name in ["MGP"]:
            self._specific_train()
        else:
            if self.options["eval_noise"] and not self.options["use_het_noise"]:
                # The last optimized component is the (homoscedastic) noise level.
                self.optimal_noise = self.optimal_theta[-1]
                self.optimal_theta = self.optimal_theta[:-1]
        # if self.name != "MGP":
        # del self.y_norma, self.D
def _train(self):
"""
Train the model
"""
# outputs['sol'] = self.sol
self._new_train()
    def _initialize_theta(self, theta, n_levels, cat_features, cat_kernel):
        """Build (and cache) the bookkeeping needed to slice the flat
        hyperparameter vector ``theta`` into continuous and categorical parts.

        Parameters
        ----------
        theta : np.ndarray
            Flat vector of hyperparameters.
        n_levels : np.ndarray
            Number of levels of each categorical variable.
        cat_features : np.ndarray[dim] of bool
            True for each categorical input dimension.
        cat_kernel : MixIntKernelType
            Kernel used for the categorical inputs.

        Returns
        -------
        tuple
            ``(cat_kernel_comps, ncomp, theta_cat_features,
            theta_cont_features, nx, n_levels)``; cached in
            ``self._corr_params`` after the first call.
        """
        self.n_levels_origin = n_levels
        if self._corr_params is not None:
            # Already computed for this training session (reset in _new_train).
            return self._corr_params
        nx = self.nx
        try:
            cat_kernel_comps = self.options["cat_kernel_comps"]
            if cat_kernel_comps is not None:
                n_levels = np.array(cat_kernel_comps)
        except KeyError:
            cat_kernel_comps = None
        try:
            ncomp = self.options["n_comp"]
            try:
                self.pls_coeff_cont
            except AttributeError:
                self.pls_coeff_cont = []
        except KeyError:
            # NOTE(review): this branch also resets cat_kernel_comps when
            # "n_comp" is missing, even if "cat_kernel_comps" was set above —
            # confirm whether that coupling is intended.
            cat_kernel_comps = None
            ncomp = 1e5
        # Boolean masks mapping each theta position to either a continuous
        # dimension or one categorical variable.
        theta_cont_features = np.zeros((len(theta), 1), dtype=bool)
        theta_cat_features = np.zeros((len(theta), len(n_levels)), dtype=bool)
        i = 0
        j = 0
        n_theta_cont = 0
        for feat in cat_features:
            if feat:
                if cat_kernel in [
                    MixIntKernelType.EXP_HOMO_HSPHERE,
                    MixIntKernelType.HOMO_HSPHERE,
                ]:
                    # A categorical variable with L levels consumes
                    # L*(L-1)/2 hyperparameters (hypersphere parametrization).
                    theta_cat_features[
                        j : j + int(n_levels[i] * (n_levels[i] - 1) / 2), i
                    ] = [True] * int(n_levels[i] * (n_levels[i] - 1) / 2)
                    j += int(n_levels[i] * (n_levels[i] - 1) / 2)
                i += 1
            else:
                # Continuous dims consume one theta each, capped at ncomp
                # components when PLS reduction is active.
                if n_theta_cont < ncomp:
                    theta_cont_features[j] = True
                    j += 1
                    n_theta_cont += 1
        # Repack as (per-variable index arrays, any-categorical mask).
        theta_cat_features = (
            [
                np.where(theta_cat_features[:, i_lvl])[0]
                for i_lvl in range(len(n_levels))
            ],
            np.any(theta_cat_features, axis=1) if len(n_levels) > 0 else None,
        )

        self._corr_params = params = (
            cat_kernel_comps,
            ncomp,
            theta_cat_features,
            theta_cont_features,
            nx,
            n_levels,
        )
        return params
def _matrix_data_corr(
self,
corr,
design_space,
power,
theta,
theta_bounds,
dx,
Lij,
n_levels,
cat_features,
cat_kernel,
x=None,
):
"""
matrix kernel correlation model.
Parameters
----------
corr: correlation_types
- The autocorrelation model
design_space: BaseDesignSpace
- The design space definition
theta : list[small_d * n_comp]
Hyperparameters of the correlation model
dx: np.ndarray[n_obs * (n_obs - 1) / 2, n_comp]
- The gower_componentwise_distances between the samples.
Lij: np.ndarray [n_obs * (n_obs - 1) / 2, 2]
- The levels corresponding to the indices i and j of the vectors in X.
n_levels: np.ndarray
- The number of levels for every categorical variable.
cat_features: np.ndarray [dim]
- Indices of the categorical input dimensions.
cat_kernel : string
- The kernel to use for categorical inputs. Only for non continuous Kriging",
x : np.ndarray[n_obs , n_comp]
- The input instead of dx for homo_hs prediction
Returns
-------
r: np.ndarray[n_obs * (n_obs - 1) / 2,1]
An array containing the values of the autocorrelation model.
"""
_correlation_types = {
"pow_exp": pow_exp,
"abs_exp": abs_exp,
"squar_exp": squar_exp,
"act_exp": act_exp,
"matern52": matern52,
"matern32": matern32,
}
# Initialize static parameters
(
cat_kernel_comps,
ncomp,
theta_cat_features,
theta_cont_features,
nx,
n_levels,
) = self._initialize_theta(theta, n_levels, cat_features, cat_kernel)
# Sampling points X and y
X = self.training_points[None][0][0]
y = self.training_points[None][0][1]
if cat_kernel == MixIntKernelType.CONT_RELAX:
X_pls_space, _ = design_space.unfold_x(X)
nx = len(theta)
elif cat_kernel == MixIntKernelType.GOWER:
X_pls_space = np.copy(X)
else:
X_pls_space, _ = compute_X_cont(X, design_space)
if cat_kernel_comps is not None or ncomp < 1e5:
###Modifier la condition : if PLS cont
if self.pls_coeff_cont == []:
X, y = self._compute_pls(X_pls_space.copy(), y.copy())
self.pls_coeff_cont = self.coeff_pls
if cat_kernel in [MixIntKernelType.GOWER, MixIntKernelType.CONT_RELAX]:
d = componentwise_distance_PLS(
dx,
corr,
self.options["n_comp"],
self.pls_coeff_cont,
power,
theta=None,
return_derivative=False,
)
r = _correlation_types[corr](theta, d)
return r
else:
d_cont = componentwise_distance_PLS(
dx[:, np.logical_not(cat_features)],
corr,
self.options["n_comp"],
self.pls_coeff_cont,
power,
theta=None,
return_derivative=False,
)
else:
d = componentwise_distance(
dx,
corr,
nx,
power,
theta=None,
return_derivative=False,
)
if cat_kernel in [MixIntKernelType.GOWER, MixIntKernelType.CONT_RELAX]:
r = _correlation_types[corr](theta, d)
return r
else:
d_cont = d[:, np.logical_not(cat_features)]
theta_cont = theta[theta_cont_features[:, 0]]
r_cont = _correlation_types[corr](theta_cont, d_cont)
r_cat = np.copy(r_cont) * 0
r = np.copy(r_cont)
##Theta_cat_i loop
try:
self.coeff_pls_cat
except AttributeError:
self.coeff_pls_cat = []
theta_cat_kernel = theta
if len(n_levels) > 0:
theta_cat_kernel = theta.copy()
if cat_kernel == MixIntKernelType.EXP_HOMO_HSPHERE:
theta_cat_kernel[theta_cat_features[1]] *= 0.5 * np.pi / theta_bounds[1]
elif cat_kernel == MixIntKernelType.HOMO_HSPHERE:
theta_cat_kernel[theta_cat_features[1]] *= 2.0 * np.pi / theta_bounds[1]
for i in range(len(n_levels)):
theta_cat = theta_cat_kernel[theta_cat_features[0][i]]
T = matrix_data_corr_levels_cat_matrix(
i,
n_levels,
theta_cat,
theta_bounds,
is_ehh=cat_kernel == MixIntKernelType.EXP_HOMO_HSPHERE,
)
if cat_kernel_comps is not None:
# Sampling points X and y
X = self.training_points[None][0][0]
y = self.training_points[None][0][1]
X_icat = X[:, cat_features]
X_icat = X_icat[:, i]
old_n_comp = (
self.options["n_comp"] if "n_comp" in self.options else None
)
self.options["n_comp"] = int(n_levels[i] / 2 * (n_levels[i] - 1))
X_full_space = compute_X_cross(X_icat, self.n_levels_origin[i])
try:
self.coeff_pls = self.coeff_pls_cat[i]
except IndexError:
_, _ = self._compute_pls(X_full_space.copy(), y.copy())
self.coeff_pls_cat.append(self.coeff_pls)
if x is not None:
x_icat = x[:, cat_features]
x_icat = x_icat[:, i]
x_full_space = compute_X_cross(x_icat, self.n_levels_origin[i])
dx_cat_i = cross_levels_homo_space(
x_full_space, self.ij, y=X_full_space
)
else:
dx_cat_i = cross_levels_homo_space(X_full_space, self.ij)
d_cat_i = componentwise_distance_PLS(
dx_cat_i,
"squar_exp",
self.options["n_comp"],
self.coeff_pls,
power=self.options["pow_exp_power"],
theta=None,
return_derivative=False,
)
matrix_data_corr_levels_cat_mod_comps(
i,
Lij,
r_cat,
n_levels,
T,
d_cat_i,
has_cat_kernel=cat_kernel
in [
MixIntKernelType.EXP_HOMO_HSPHERE,
MixIntKernelType.HOMO_HSPHERE,
],
)
else:
matrix_data_corr_levels_cat_mod(
i,
Lij,
r_cat,
T,
has_cat_kernel=cat_kernel
in [
MixIntKernelType.EXP_HOMO_HSPHERE,
MixIntKernelType.HOMO_HSPHERE,
],
)
r = np.multiply(r, r_cat)
if cat_kernel_comps is not None:
if old_n_comp == None:
self.options._dict.pop("n_comp", None)
else:
self.options["n_comp"] = old_n_comp
return r
    def _reduced_likelihood_function(self, theta):
        """
        This function determines the BLUP parameters and evaluates the reduced
        likelihood function for the given autocorrelation parameters theta.

        Maximizing this function wrt the autocorrelation parameters theta is
        equivalent to maximizing the likelihood of the assumed joint Gaussian
        distribution of the observations y evaluated onto the design of
        experiments X.

        Parameters
        ----------
        theta: list(n_comp), optional
            - An array containing the autocorrelation parameters at which the
              Gaussian Process model parameters should be determined.

        Returns
        -------
        reduced_likelihood_function_value: real
            - The value of the reduced likelihood function associated to the
              given autocorrelation parameters theta.
        par: dict()
            - A dictionary containing the requested Gaussian Process model
              parameters:
            sigma2
                Gaussian Process variance.
            beta
                Generalized least-squares regression weights for
                Universal Kriging or for Ordinary Kriging.
            gamma
                Gaussian Process weights.
            C
                Cholesky decomposition of the correlation matrix [R].
            Ft
                Solution of the linear equation system : [R] x Ft = F
            Q, G
                QR decomposition of the matrix Ft.
        """
        # Initialize output
        reduced_likelihood_function_value = -np.inf
        par = {}
        # Set up R
        nugget = self.options["nugget"]
        if self.options["eval_noise"]:
            # Noise is estimated: no extra nugget needed on the diagonal.
            nugget = 0

        noise = self.noise0
        tmp_var = theta
        if self.options["use_het_noise"]:
            noise = self.optimal_noise
        if self.options["eval_noise"] and not self.options["use_het_noise"]:
            # Last component of theta carries the noise level being optimized.
            theta = tmp_var[0:-1]
            noise = tmp_var[-1]

        if not (self.is_continuous):
            dx = self.D
            r = self._matrix_data_corr(
                corr=self.options["corr"],
                design_space=self.design_space,
                power=self.options["pow_exp_power"],
                theta=theta,
                theta_bounds=self.options["theta_bounds"],
                dx=dx,
                Lij=self.Lij,
                n_levels=self.n_levels,
                cat_features=self.cat_features,
                cat_kernel=self.options["categorical_kernel"],
            ).reshape(-1, 1)
        else:
            r = self._correlation_types[self.options["corr"]](theta, self.D).reshape(
                -1, 1
            )
        # Assemble the symmetric correlation matrix from the condensed form.
        R = np.eye(self.nt) * (1.0 + nugget + noise)
        R[self.ij[:, 0], self.ij[:, 1]] = r[:, 0]
        R[self.ij[:, 1], self.ij[:, 0]] = r[:, 0]

        # Cholesky decomposition of R
        try:
            C = linalg.cholesky(R, lower=True)
        except (linalg.LinAlgError, ValueError) as e:
            # R not positive definite for this theta: report and bail out
            # with -inf so the optimizer tries other hyperparameters.
            print("exception : ", e)
            print(np.linalg.eig(R)[0])
            return reduced_likelihood_function_value, par

        # Get generalized least squared solution
        Ft = linalg.solve_triangular(C, self.F, lower=True)
        Q, G = linalg.qr(Ft, mode="economic")
        sv = linalg.svd(G, compute_uv=False)
        rcondG = sv[-1] / sv[0]
        if rcondG < 1e-10:
            # Check F
            sv = linalg.svd(self.F, compute_uv=False)
            condF = sv[0] / sv[-1]
            if condF > 1e15:
                raise Exception(
                    "F is too ill conditioned. Poor combination "
                    "of regression model and observations."
                )

            else:
                # Ft is too ill conditioned, get out (try different theta)
                return reduced_likelihood_function_value, par

        Yt = linalg.solve_triangular(C, self.y_norma, lower=True)
        beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
        rho = Yt - np.dot(Ft, beta)

        # detR below is actually det(R)**(1/nt): the squared diagonal of C
        # gives det(R), and the 1/nt exponent keeps the product from
        # overflowing; nt*log10(detR) == log10(det(R)) in the likelihood.
        detR = (np.diag(C) ** (2.0 / self.nt)).prod()

        # Compute/Organize output
        p = 0
        q = 0
        if self.name in ["MFK", "MFKPLS", "MFKPLSK"]:
            p = self.p
            q = self.q
        sigma2 = (rho**2.0).sum(axis=0) / (self.nt - p - q)
        reduced_likelihood_function_value = -(self.nt - p - q) * np.log10(
            sigma2.sum()
        ) - self.nt * np.log10(detR)
        par["sigma2"] = sigma2 * self.y_std**2.0
        par["beta"] = beta
        par["gamma"] = linalg.solve_triangular(C.T, rho)
        par["C"] = C
        par["Ft"] = Ft
        par["G"] = G
        par["Q"] = Q

        if self.name in ["MGP"]:
            reduced_likelihood_function_value += self._reduced_log_prior(theta)

        # A particular case when f_min_cobyla fail
        if (self.best_iteration_fail is not None) and (
            not np.isinf(reduced_likelihood_function_value)
        ):
            if reduced_likelihood_function_value > self.best_iteration_fail:
                self.best_iteration_fail = reduced_likelihood_function_value
                self._thetaMemory = np.array(tmp_var)

        elif (self.best_iteration_fail is None) and (
            not np.isinf(reduced_likelihood_function_value)
        ):
            self.best_iteration_fail = reduced_likelihood_function_value
            self._thetaMemory = np.array(tmp_var)
        # Cap to avoid returning unbounded values to the optimizer.
        if reduced_likelihood_function_value > 1e15:
            reduced_likelihood_function_value = 1e15
        return reduced_likelihood_function_value, par
    def _reduced_likelihood_gradient(self, theta):
        """
        Evaluates the reduced_likelihood_gradient at a set of hyperparameters.

        Parameters
        ---------
        theta : list(n_comp), optional
            - An array containing the autocorrelation parameters at which the
              Gaussian Process model parameters should be determined.

        Returns
        -------
        grad_red : np.ndarray (dim,1)
            Derivative of the reduced_likelihood
        par: dict()
            - A dictionary containing the requested Gaussian Process model
              parameters:
            sigma2
                Gaussian Process variance.
            beta
                Generalized least-squares regression weights for
                Universal Kriging or for Ordinary Kriging.
            gamma
                Gaussian Process weights.
            C
                Cholesky decomposition of the correlation matrix [R].
            Ft
                Solution of the linear equation system : [R] x Ft = F
            Q, G
                QR decomposition of the matrix Ft.
            dr
                List of all the correlation matrix derivative
            tr
                List of all the trace part in the reduce likelihood derivatives
            dmu
                List of all the mean derivatives
            arg
                List of all minus_Cinv_dRdomega_gamma
            dsigma
                List of all sigma derivatives
        """
        red, par = self._reduced_likelihood_function(theta)

        C = par["C"]
        gamma = par["gamma"]
        Q = par["Q"]
        G = par["G"]
        sigma_2 = par["sigma2"]

        nb_theta = len(theta)
        grad_red = np.zeros(nb_theta)

        # Per-component intermediates, also returned for reuse by the hessian.
        dr_all = []
        tr_all = []
        dmu_all = []
        arg_all = []
        dsigma_all = []
        dbeta_all = []
        for i_der in range(nb_theta):
            # Compute R derivatives
            dr = self._correlation_types[self.options["corr"]](
                theta, self.D, grad_ind=i_der
            )
            dr_all.append(dr)

            dR = np.zeros((self.nt, self.nt))
            dR[self.ij[:, 0], self.ij[:, 1]] = dr[:, 0]
            dR[self.ij[:, 1], self.ij[:, 0]] = dr[:, 0]

            # Compute beta derivatives
            Cinv_dR_gamma = linalg.solve_triangular(C, np.dot(dR, gamma), lower=True)
            dbeta = -linalg.solve_triangular(G, np.dot(Q.T, Cinv_dR_gamma))
            arg_all.append(Cinv_dR_gamma)
            dbeta_all.append(dbeta)

            # Compute mu derivatives
            dmu = np.dot(self.F, dbeta)
            dmu_all.append(dmu)

            # Compute log(detR) derivatives: d log det R = trace(R^-1 dR).
            tr_1 = linalg.solve_triangular(C, dR, lower=True)
            tr = linalg.solve_triangular(C.T, tr_1)
            tr_all.append(tr)

            # Compute Sigma2 Derivatives
            dsigma_2 = (
                (1 / self.nt)
                * (
                    -dmu.T.dot(gamma)
                    - gamma.T.dot(dmu)
                    - np.dot(gamma.T, dR.dot(gamma))
                )
                * self.y_std**2.0
            )
            dsigma_all.append(dsigma_2)

            # Compute reduced log likelihood derivatives
            grad_red[i_der] = (
                -self.nt / np.log(10) * (dsigma_2 / sigma_2 + np.trace(tr) / self.nt)
            )

        par["dr"] = dr_all
        par["tr"] = tr_all
        par["dmu"] = dmu_all
        par["arg"] = arg_all
        par["dsigma"] = dsigma_all
        par["dbeta_all"] = dbeta_all

        grad_red = np.atleast_2d(grad_red).T

        if self.name in ["MGP"]:
            grad_red += self._reduced_log_prior(theta, grad=True)
        return grad_red, par
    def _reduced_likelihood_hessian(self, theta):
        """
        Evaluates the reduced_likelihood_gradient at a set of hyperparameters.

        Parameters
        ----------
        theta : list(n_comp), optional
            - An array containing the autocorrelation parameters at which the
              Gaussian Process model parameters should be determined.

        Returns
        -------
        hess : np.ndarray
            Hessian values.
        hess_ij: np.ndarray [nb_theta * (nb_theta + 1) / 2, 2]
            - The indices i and j of the vectors in theta associated to the hessian in hess.
        par: dict()
            - A dictionary containing the requested Gaussian Process model
              parameters:
            sigma2
                Gaussian Process variance.
            beta
                Generalized least-squared regression weights for
                Universal Kriging or for Ordinary Kriging.
            gamma
                Gaussian Process weights.
            C
                Cholesky decomposition of the correlation matrix [R].
            Ft
                Solution of the linear equation system : [R] x Ft = F
            Q, G
                QR decomposition of the matrix Ft.
            dr
                List of all the correlation matrix derivative
            tr
                List of all the trace part in the reduce likelihood derivatives
            dmu
                List of all the mean derivatives
            arg
                List of all minus_Cinv_dRdomega_gamma
            dsigma
                List of all sigma derivatives
        """
        dred, par = self._reduced_likelihood_gradient(theta)

        C = par["C"]
        gamma = par["gamma"]
        Q = par["Q"]
        G = par["G"]
        sigma_2 = par["sigma2"]

        nb_theta = len(theta)

        # First-order intermediates computed by the gradient pass.
        dr_all = par["dr"]
        tr_all = par["tr"]
        dmu_all = par["dmu"]
        arg_all = par["arg"]
        dsigma = par["dsigma"]

        Rinv_dRdomega_gamma_all = []
        Rinv_dmudomega_all = []

        # Only the upper triangle (omega <= eta) is stored; hess_ij maps each
        # flat entry back to its (omega, eta) pair.
        n_val_hess = nb_theta * (nb_theta + 1) // 2
        hess_ij = np.zeros((n_val_hess, 2), dtype=np.int32)
        hess = np.zeros((n_val_hess, 1))
        ind_1 = 0
        if self.name in ["MGP"]:
            log_prior = self._reduced_log_prior(theta, hessian=True)

        for omega in range(nb_theta):
            ind_0 = ind_1
            ind_1 = ind_0 + nb_theta - omega
            hess_ij[ind_0:ind_1, 0] = omega
            hess_ij[ind_0:ind_1, 1] = np.arange(omega, nb_theta)

            dRdomega = np.zeros((self.nt, self.nt))
            dRdomega[self.ij[:, 0], self.ij[:, 1]] = dr_all[omega][:, 0]
            dRdomega[self.ij[:, 1], self.ij[:, 0]] = dr_all[omega][:, 0]

            dmudomega = dmu_all[omega]

            Cinv_dmudomega = linalg.solve_triangular(C, dmudomega, lower=True)
            Rinv_dmudomega = linalg.solve_triangular(C.T, Cinv_dmudomega)
            Rinv_dmudomega_all.append(Rinv_dmudomega)
            Rinv_dRdomega_gamma = linalg.solve_triangular(C.T, arg_all[omega])
            Rinv_dRdomega_gamma_all.append(Rinv_dRdomega_gamma)

            for i, eta in enumerate(hess_ij[ind_0:ind_1, 1]):
                dRdeta = np.zeros((self.nt, self.nt))
                dRdeta[self.ij[:, 0], self.ij[:, 1]] = dr_all[eta][:, 0]
                dRdeta[self.ij[:, 1], self.ij[:, 0]] = dr_all[eta][:, 0]

                # Second derivative of the correlation matrix wrt (omega, eta).
                dr_eta_omega = self._correlation_types[self.options["corr"]](
                    theta, self.D, grad_ind=omega, hess_ind=eta
                )
                dRdetadomega = np.zeros((self.nt, self.nt))
                dRdetadomega[self.ij[:, 0], self.ij[:, 1]] = dr_eta_omega[:, 0]
                dRdetadomega[self.ij[:, 1], self.ij[:, 0]] = dr_eta_omega[:, 0]

                # Compute beta second derivatives
                dRdeta_Rinv_dmudomega = np.dot(dRdeta, Rinv_dmudomega)

                dmudeta = dmu_all[eta]
                Cinv_dmudeta = linalg.solve_triangular(C, dmudeta, lower=True)
                Rinv_dmudeta = linalg.solve_triangular(C.T, Cinv_dmudeta)
                dRdomega_Rinv_dmudeta = np.dot(dRdomega, Rinv_dmudeta)

                dRdeta_Rinv_dRdomega_gamma = np.dot(dRdeta, Rinv_dRdomega_gamma)

                Rinv_dRdeta_gamma = linalg.solve_triangular(C.T, arg_all[eta])
                dRdomega_Rinv_dRdeta_gamma = np.dot(dRdomega, Rinv_dRdeta_gamma)

                dRdetadomega_gamma = np.dot(dRdetadomega, gamma)

                beta_sum = (
                    dRdeta_Rinv_dmudomega
                    + dRdomega_Rinv_dmudeta
                    + dRdeta_Rinv_dRdomega_gamma
                    + dRdomega_Rinv_dRdeta_gamma
                    - dRdetadomega_gamma
                )

                Qt_Cinv_beta_sum = np.dot(
                    Q.T, linalg.solve_triangular(C, beta_sum, lower=True)
                )
                dbetadetadomega = linalg.solve_triangular(G, Qt_Cinv_beta_sum)

                # Compute mu second derivatives
                dmudetadomega = np.dot(self.F, dbetadetadomega)

                # Compute sigma2 second derivatives
                sigma_arg_1 = (
                    -np.dot(dmudetadomega.T, gamma)
                    + np.dot(dmudomega.T, Rinv_dRdeta_gamma)
                    + np.dot(dmudeta.T, Rinv_dRdomega_gamma)
                )

                sigma_arg_2 = (
                    -np.dot(gamma.T, dmudetadomega)
                    + np.dot(gamma.T, dRdeta_Rinv_dmudomega)
                    + np.dot(gamma.T, dRdomega_Rinv_dmudeta)
                )

                sigma_arg_3 = np.dot(dmudeta.T, Rinv_dmudomega) + np.dot(
                    dmudomega.T, Rinv_dmudeta
                )

                sigma_arg_4_in = (
                    -dRdetadomega_gamma
                    + dRdeta_Rinv_dRdomega_gamma
                    + dRdomega_Rinv_dRdeta_gamma
                )
                sigma_arg_4 = np.dot(gamma.T, sigma_arg_4_in)

                dsigma2detadomega = (
                    (1 / self.nt)
                    * (sigma_arg_1 + sigma_arg_2 + sigma_arg_3 + sigma_arg_4)
                    * self.y_std**2.0
                )

                # Compute Hessian
                dreddetadomega_tr_1 = np.trace(np.dot(tr_all[eta], tr_all[omega]))

                dreddetadomega_tr_2 = np.trace(
                    linalg.solve_triangular(
                        C.T, linalg.solve_triangular(C, dRdetadomega, lower=True)
                    )
                )

                dreddetadomega_arg1 = (self.nt / sigma_2) * (
                    dsigma2detadomega - (1 / sigma_2) * dsigma[omega] * dsigma[eta]
                )

                dreddetadomega = (
                    -(dreddetadomega_arg1 - dreddetadomega_tr_1 + dreddetadomega_tr_2)
                    / self.nt
                )

                hess[ind_0 + i, 0] = self.nt / np.log(10) * dreddetadomega

                if self.name in ["MGP"] and eta == omega:
                    hess[ind_0 + i, 0] += log_prior[eta]
            par["Rinv_dR_gamma"] = Rinv_dRdomega_gamma_all
            par["Rinv_dmu"] = Rinv_dmudomega_all

        return hess, hess_ij, par
def predict_values(self, x: np.ndarray, is_acting=None) -> np.ndarray:
"""
Predict the output values at a set of points.
Parameters
----------
x : np.ndarray[nt, nx] or np.ndarray[nt]
Input values for the prediction points.
is_acting : np.ndarray[nt, nx] or np.ndarray[nt]
Matrix specifying for each design variable whether it is acting or not (for hierarchical design spaces)
Returns
-------
y : np.ndarray[nt, ny]
Output values at the prediction points.
"""
x = ensure_2d_array(x, "x")
self._check_xdim(x)
if is_acting is not None:
is_acting = ensure_2d_array(is_acting, "is_acting")
if is_acting.shape != x.shape:
raise ValueError(
f"is_acting should have the same dimensions as x: {is_acting.shape} != {x.shape}"
)
n = x.shape[0]
x2 = np.copy(x)
self.printer.active = (
self.options["print_global"] and self.options["print_prediction"]
)
if self.name == "MixExp":
# Mixture of experts model
self.printer._title("Evaluation of the Mixture of experts")
else:
self.printer._title("Evaluation")
self.printer(" %-12s : %i" % ("# eval points.", n))
self.printer()
# Evaluate the unknown points using the specified model-method
with self.printer._timed_context("Predicting", key="prediction"):
y = self._predict_values(x2, is_acting=is_acting)
time_pt = self.printer._time("prediction")[-1] / n
self.printer()
self.printer("Prediction time/pt. (sec) : %10.7f" % time_pt)
self.printer()
return y.reshape((n, self.ny))
    def _predict_values(self, x: np.ndarray, is_acting=None) -> np.ndarray:
        """
        Evaluates the model at a set of points.

        Parameters
        ----------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values
        is_acting : np.ndarray[nt, nx] or np.ndarray[nt]
            Matrix specifying for each design variable whether it is acting or not
            (for hierarchical design spaces)

        Returns
        -------
        y : np.ndarray
            Evaluation point output variable values
        """
        # Initialization
        if is_acting is None:
            x, is_acting = self.design_space.correct_get_acting(x)
        n_eval, n_features_x = x.shape
        if not (self.is_continuous):
            # Mixed inputs: distances between prediction points and the
            # training set, then the mixed-kernel correlation vector.
            dx = gower_componentwise_distances(
                x,
                x_is_acting=is_acting,
                design_space=self.design_space,
                hierarchical_kernel=self.options["hierarchical_kernel"],
                y=np.copy(self.X_train),
                y_is_acting=self.is_acting_train,
            )
            if self.options["categorical_kernel"] == MixIntKernelType.CONT_RELAX:
                # Continuous relaxation: work in the unfolded, standardized space.
                Xpred, _ = self.design_space.unfold_x(x)
                Xpred_norma = (Xpred - self.X2_offset) / self.X2_scale
                dx = differences(Xpred_norma, Y=self.X2_norma.copy())
            _, ij = cross_distances(x, self.X_train)
            Lij, _ = cross_levels(
                X=x, ij=ij, design_space=self.design_space, y=self.X_train
            )
            self.ij = ij
            r = self._matrix_data_corr(
                corr=self.options["corr"],
                design_space=self.design_space,
                power=self.options["pow_exp_power"],
                theta=self.optimal_theta,
                theta_bounds=self.options["theta_bounds"],
                dx=dx,
                Lij=Lij,
                n_levels=self.n_levels,
                cat_features=self.cat_features,
                cat_kernel=self.options["categorical_kernel"],
                x=x,
            ).reshape(n_eval, self.nt)
            X_cont, _ = compute_X_cont(x, self.design_space)
            X_cont = (X_cont - self.X_offset) / self.X_scale
        else:
            X_cont = (x - self.X_offset) / self.X_scale
            # Get pairwise componentwise L1-distances to the input training set
            dx = differences(X_cont, Y=self.X_norma.copy())
            d = self._componentwise_distance(dx)
            # Compute the correlation function
            r = self._correlation_types[self.options["corr"]](
                self.optimal_theta, d
            ).reshape(n_eval, self.nt)
        y = np.zeros(n_eval)
        # Compute the regression function
        f = self._regression_types[self.options["poly"]](X_cont)
        # Scaled predictor: trend term + correlation-weighted residual term.
        y_ = np.dot(f, self.optimal_par["beta"]) + np.dot(r, self.optimal_par["gamma"])
        # Predictor: undo the output standardization.
        y = (self.y_mean + self.y_std * y_).ravel()
        return y
    def _predict_derivatives(self, x, kx):
        """
        Evaluates the derivatives at a set of points.

        Parameters
        ---------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values
        kx : int
            The 0-based index of the input variable with respect to which derivatives are desired.

        Returns
        -------
        y : np.ndarray
            Derivative values.
        """
        # Initialization
        n_eval, n_features_x = x.shape

        x = (x - self.X_offset) / self.X_scale
        # Get pairwise componentwise L1-distances to the input training set
        dx = differences(x, Y=self.X_norma.copy())
        d = self._componentwise_distance(dx)
        dd = self._componentwise_distance(
            dx, theta=self.optimal_theta, return_derivative=True
        )
        # Compute the correlation function
        derivative_dic = {"dx": dx, "dd": dd}
        r, dr = self._correlation_types[self.options["corr"]](
            self.optimal_theta, d, derivative_params=derivative_dic
        )
        r = r.reshape(n_eval, self.nt)

        drx = dr[:, kx].reshape(n_eval, self.nt)

        # Derivative of the regression (trend) term wrt the inputs.
        if self.options["poly"] == "constant":
            df = np.zeros((1, self.nx))
        elif self.options["poly"] == "linear":
            df = np.zeros((self.nx + 1, self.nx))
            df[1:, :] = np.eye(self.nx)
        else:
            raise ValueError(
                "The derivative is only available for ordinary kriging or "
                + "universal kriging using a linear trend"
            )

        # Beta and gamma = R^-1(y-FBeta)
        beta = self.optimal_par["beta"]
        gamma = self.optimal_par["gamma"]
        df_dx = np.dot(df.T, beta)
        # Chain rule: rescale the derivative back from normalized to
        # original input/output space.
        y = (df_dx[kx] + np.dot(drx, gamma)) * self.y_std / self.X_scale[kx]
        return y
def predict_variances(self, x: np.ndarray, is_acting=None) -> np.ndarray:
"""
Predict the variances at a set of points.
Parameters
----------
x : np.ndarray[nt, nx] or np.ndarray[nt]
Input values for the prediction points.
is_acting : np.ndarray[nt, nx] or np.ndarray[nt]
Matrix specifying for each design variable whether it is acting or not (for hierarchical design spaces)
Returns
-------
s2 : np.ndarray[nt, ny]
Variances.
"""
check_support(self, "variances")
x = ensure_2d_array(x, "x")
self._check_xdim(x)
if is_acting is not None:
is_acting = ensure_2d_array(is_acting, "is_acting")
if is_acting.shape != x.shape:
raise ValueError(
f"is_acting should have the same dimensions as x: {is_acting.shape} != {x.shape}"
)
n = x.shape[0]
x2 = np.copy(x)
s2 = self._predict_variances(x2, is_acting=is_acting)
return s2.reshape((n, self.ny))
    def _predict_variances(self, x: np.ndarray, is_acting=None) -> np.ndarray:
        """
        Provide uncertainty of the model at a set of points

        Parameters
        ----------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values
        is_acting : np.ndarray[nt, nx] or np.ndarray[nt]
            Matrix specifying for each design variable whether it is acting or not
            (for hierarchical design spaces)

        Returns
        -------
        MSE : np.ndarray
            Evaluation point output variable MSE
        """
        # Initialization
        if is_acting is None:
            x, is_acting = self.design_space.correct_get_acting(x)
        n_eval, n_features_x = x.shape

        X_cont = x
        if not (self.is_continuous):
            # Mixed inputs: build the mixed-kernel correlation vector between
            # prediction points and the training set (same path as
            # _predict_values).
            dx = gower_componentwise_distances(
                x,
                x_is_acting=is_acting,
                design_space=self.design_space,
                hierarchical_kernel=self.options["hierarchical_kernel"],
                y=np.copy(self.X_train),
                y_is_acting=self.is_acting_train,
            )
            if self.options["categorical_kernel"] == MixIntKernelType.CONT_RELAX:
                Xpred, _ = self.design_space.unfold_x(x)
                Xpred_norma = (Xpred - self.X2_offset) / self.X2_scale
                dx = differences(Xpred_norma, Y=self.X2_norma.copy())
            _, ij = cross_distances(x, self.X_train)
            Lij, _ = cross_levels(
                X=x, ij=ij, design_space=self.design_space, y=self.X_train
            )
            self.ij = ij
            r = self._matrix_data_corr(
                corr=self.options["corr"],
                design_space=self.design_space,
                power=self.options["pow_exp_power"],
                theta=self.optimal_theta,
                theta_bounds=self.options["theta_bounds"],
                dx=dx,
                Lij=Lij,
                n_levels=self.n_levels,
                cat_features=self.cat_features,
                cat_kernel=self.options["categorical_kernel"],
                x=x,
            ).reshape(n_eval, self.nt)

            X_cont, _ = compute_X_cont(x, self.design_space)
            X_cont = (X_cont - self.X_offset) / self.X_scale
        else:
            x = (x - self.X_offset) / self.X_scale
            X_cont = np.copy(x)
            # Get pairwise componentwise L1-distances to the input training set
            dx = differences(x, Y=self.X_norma.copy())
            d = self._componentwise_distance(dx)

            # Compute the correlation function
            r = self._correlation_types[self.options["corr"]](
                self.optimal_theta, d
            ).reshape(n_eval, self.nt)
        C = self.optimal_par["C"]
        rt = linalg.solve_triangular(C, r.T, lower=True)

        u = linalg.solve_triangular(
            self.optimal_par["G"].T,
            np.dot(self.optimal_par["Ft"].T, rt)
            - self._regression_types[self.options["poly"]](X_cont).T,
        )
        # Universal-Kriging MSE: sigma2 * (1 - r' R^-1 r + u' (F' R^-1 F)^-1 u).
        A = self.optimal_par["sigma2"]
        B = 1.0 - (rt**2.0).sum(axis=0) + (u**2.0).sum(axis=0)
        MSE = np.einsum("i,j -> ji", A, B)

        # Mean Squared Error might be slightly negative depending on
        # machine precision: force to zero!
        MSE[MSE < 0.0] = 0.0
        return MSE
    def _predict_variance_derivatives(self, x, kx):
        """
        Provide the derivative of the variance of the model at a set of points

        Parameters
        -----------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values
        kx : int
            The 0-based index of the input variable with respect to which derivatives are desired.

        Returns
        -------
        derived_variance:  np.ndarray
            The jacobian of the variance of the kriging model (the kx-th derivative)
        """
        # Initialization
        n_eval, n_features_x = x.shape
        x = (x - self.X_offset) / self.X_scale
        theta = self.optimal_theta

        # Get pairwise componentwise L1-distances to the input training set
        dx = differences(x, Y=self.X_norma.copy())
        d = self._componentwise_distance(dx)
        dd = self._componentwise_distance(
            dx, theta=self.optimal_theta, return_derivative=True
        )
        derivative_dic = {"dx": dx, "dd": dd}
        sigma2 = self.optimal_par["sigma2"]
        C = self.optimal_par["C"]

        # p1 : derivative of (rt**2.0).sum(axis=0)
        r, dr = self._correlation_types[self.options["corr"]](
            theta, d, derivative_params=derivative_dic
        )
        r = r.reshape(n_eval, self.nt)
        drx = dr[:, kx]
        drx = drx.reshape(n_eval, self.nt)
        rt = linalg.solve_triangular(C, r.T, lower=True)
        invKr = linalg.solve_triangular(C.T, rt)
        p1 = 2 * np.dot(drx, invKr).T

        # p2 : derivative of (u**2.0).sum(axis=0)
        f_x = self._regression_types[self.options["poly"]](x).T
        F = self.F
        rho2 = linalg.solve_triangular(C, F, lower=True)
        invKF = linalg.solve_triangular(C.T, rho2)
        A = f_x.T - np.dot(r, invKF)
        B = np.dot(F.T, invKF)
        rho3 = linalg.cholesky(B, lower=True)
        invBAt = linalg.solve_triangular(rho3, A.T, lower=True)
        D = linalg.solve_triangular(rho3.T, invBAt)
        # Derivative of the trend basis wrt the inputs (only constant and
        # linear trends are supported).
        if self.options["poly"] == "constant":
            df = np.zeros((1, self.nx))
        elif self.options["poly"] == "linear":
            df = np.zeros((self.nx + 1, self.nx))
            df[1:, :] = np.eye(self.nx)
        else:
            raise ValueError(
                "The derivative is only available for ordinary kriging or "
                + "universal kriging using a linear trend"
            )
        dA = df[:, kx].T - np.dot(drx, invKF)
        p3 = 2 * np.dot(dA, D).T

        # prime : derivative of MSE
        # MSE ~1.0 - (rt**2.0).sum(axis=0) + (u**2.0).sum(axis=0)
        prime = 0 - p1 + p3

        ## scaling factors: back to the original input space.
        x_std = self.X_scale[kx]
        derived_variance = np.array((np.outer(sigma2, np.diag(prime.T)) / x_std))
        return np.atleast_2d(derived_variance.T)
    def _optimize_hyperparam(self, D):
        """
        Optimize the hyperparameters of the Gaussian Process model by
        maximizing the reduced likelihood function over theta (and, when
        ``eval_noise`` is requested, the noise level), using a multi-start
        COBYLA or TNC search.
        Parameters
        ----------
        D: np.ndarray [n_obs * (n_obs - 1) / 2, dim]
            - The componentwise cross-spatial-correlation-distance between the
            vectors in X.
        Returns
        -------
        best_optimal_rlf_value: real
            - The value of the reduced likelihood function associated to the
            best autocorrelation parameters theta.
        best_optimal_par: dict()
            - A dictionary containing the requested Gaussian Process model
            parameters.
        best_optimal_theta: list(n_comp) or list(dim)
            - The best hyperparameters found by the optimization.
        """
        # reinitialize optimization best values
        self.best_iteration_fail = None
        self._thetaMemory = None
        # Initialize the hyperparameter-optimization.
        # MGP optimizes theta directly; all other models work in log10(theta)
        # space, which is better conditioned for the likelihood search.
        if self.name in ["MGP"]:

            def minus_reduced_likelihood_function(theta):
                res = -self._reduced_likelihood_function(theta)[0]
                return res

            def grad_minus_reduced_likelihood_function(theta):
                grad = -self._reduced_likelihood_gradient(theta)[0]
                return grad

        else:

            def minus_reduced_likelihood_function(log10t):
                return -self._reduced_likelihood_function(theta=10.0**log10t)[0]

            def grad_minus_reduced_likelihood_function(log10t):
                log10t_2d = np.atleast_2d(log10t).T
                # Chain rule: d/d(log10 t) = ln(10) * t * d/dt
                res = (
                    -np.log(10.0)
                    * (10.0**log10t_2d)
                    * (self._reduced_likelihood_gradient(10.0**log10t_2d)[0])
                )
                return res

        limit, _rhobeg = max(10 * len(self.options["theta0"]), 25), 0.5
        exit_function = False
        # KPLSK runs two passes: first in the PLS-reduced space (ii=1), then in
        # the full-dimensional space (ii=0) starting from the back-mapped theta.
        if "KPLSK" in self.name:
            n_iter = 1
        else:
            n_iter = 0
        for ii in range(n_iter, -1, -1):
            (
                best_optimal_theta,
                best_optimal_rlf_value,
                best_optimal_par,
                constraints,
            ) = (
                [],
                [],
                [],
                [],
            )
            bounds_hyp = []
            self.theta0 = deepcopy(self.options["theta0"])
            for i in range(len(self.theta0)):
                # In practice, in 1D and for X in [0,1], theta^{-2} in [1e-2,infty),
                # i.e. theta in (0,1e1], is a good choice to avoid overfitting.
                # By standardising X in R, X_norm = (X-X_mean)/X_std, then
                # X_norm in [-1,1] if considering one std intervals. This leads
                # to theta in (0,2e1]
                theta_bounds = self.options["theta_bounds"]
                if self.theta0[i] < theta_bounds[0] or self.theta0[i] > theta_bounds[1]:
                    # Out-of-bounds initial guess: replace it with a uniform
                    # random draw inside the feasible interval.
                    self.theta0[i] = np.random.rand()
                    self.theta0[i] = (
                        self.theta0[i] * (theta_bounds[1] - theta_bounds[0])
                        + theta_bounds[0]
                    )
                    warnings.warn(
                        "Warning: theta0 is out the feasible bounds. A random initialisation is used instead."
                    )
                if self.name in ["MGP"]:  # to be discussed with R. Priem
                    constraints.append(lambda theta, i=i: theta[i] + theta_bounds[1])
                    constraints.append(lambda theta, i=i: theta_bounds[1] - theta[i])
                    bounds_hyp.append((-theta_bounds[1], theta_bounds[1]))
                else:
                    # Box constraints expressed in log10 space; i=i binds the
                    # loop index so each lambda keeps its own component.
                    log10t_bounds = np.log10(theta_bounds)
                    constraints.append(lambda log10t, i=i: log10t[i] - log10t_bounds[0])
                    constraints.append(lambda log10t, i=i: log10t_bounds[1] - log10t[i])
                    bounds_hyp.append(log10t_bounds)
            # Two initial starting points: the user-provided theta0 and one
            # random draw inside the bounds.
            if self.name in ["MGP"]:
                theta0_rand = m_norm.rvs(
                    self.options["prior"]["mean"] * len(self.theta0),
                    self.options["prior"]["var"],
                    1,
                )
                theta0 = self.theta0
            else:
                theta_bounds = self.options["theta_bounds"]
                log10t_bounds = np.log10(theta_bounds)
                theta0_rand = np.random.rand(len(self.theta0))
                theta0_rand = (
                    theta0_rand * (log10t_bounds[1] - log10t_bounds[0])
                    + log10t_bounds[0]
                )
                theta0 = np.log10(self.theta0)
            if not (self.is_continuous):
                self.D = D
            else:
                ##from abs distance to kernel distance
                self.D = self._componentwise_distance(D, opt=ii)
            # Initialization
            k, incr, stop, best_optimal_rlf_value, max_retry = 0, 0, 1, -1e20, 10
            while k < stop:
                # Use specified starting point as first guess
                self.noise0 = np.array(self.options["noise0"])
                noise_bounds = self.options["noise_bounds"]
                if self.options["eval_noise"] and not self.options["use_het_noise"]:
                    # Noise is optimized jointly with theta: append its log10
                    # value, constraints and bounds to the hyperparameter vector.
                    self.noise0[self.noise0 == 0.0] = noise_bounds[0]
                    for i in range(len(self.noise0)):
                        if (
                            self.noise0[i] < noise_bounds[0]
                            or self.noise0[i] > noise_bounds[1]
                        ):
                            self.noise0[i] = noise_bounds[0]
                            warnings.warn(
                                "Warning: noise0 is out the feasible bounds. The lowest possible value is used instead."
                            )
                    theta0 = np.concatenate(
                        [theta0, np.log10(np.array([self.noise0]).flatten())]
                    )
                    theta0_rand = np.concatenate(
                        [
                            theta0_rand,
                            np.log10(np.array([self.noise0]).flatten()),
                        ]
                    )
                    for i in range(len(self.noise0)):
                        noise_bounds = np.log10(noise_bounds)
                        constraints.append(
                            lambda log10t: log10t[i + len(self.theta0)]
                            - noise_bounds[0]
                        )
                        constraints.append(
                            lambda log10t: noise_bounds[1]
                            - log10t[i + len(self.theta0)]
                        )
                        bounds_hyp.append(noise_bounds)
                theta_limits = np.repeat(
                    np.log10([theta_bounds]), repeats=len(theta0), axis=0
                )
                theta_all_loops = np.vstack((theta0, theta0_rand))
                if self.options["n_start"] > 1:
                    # Add LHS-sampled starting points for the multi-start search.
                    sampling = LHS(
                        xlimits=theta_limits, criterion="maximin", random_state=41
                    )
                    theta_lhs_loops = sampling(self.options["n_start"])
                    theta_all_loops = np.vstack((theta_all_loops, theta_lhs_loops))
                optimal_theta_res = {"fun": float("inf")}
                optimal_theta_res_loop = None
                try:
                    if self.options["hyper_opt"] == "Cobyla":
                        for theta0_loop in theta_all_loops:
                            optimal_theta_res_loop = optimize.minimize(
                                minus_reduced_likelihood_function,
                                theta0_loop,
                                constraints=[
                                    {"fun": con, "type": "ineq"} for con in constraints
                                ],
                                method="COBYLA",
                                options={
                                    "rhobeg": _rhobeg,
                                    "tol": 1e-4,
                                    "maxiter": limit,
                                },
                            )
                            if optimal_theta_res_loop["fun"] < optimal_theta_res["fun"]:
                                optimal_theta_res = optimal_theta_res_loop
                    elif self.options["hyper_opt"] == "TNC":
                        # TNC works in theta space directly, hence the 10**.
                        theta_all_loops = 10**theta_all_loops
                        for theta0_loop in theta_all_loops:
                            optimal_theta_res_loop = optimize.minimize(
                                minus_reduced_likelihood_function,
                                theta0_loop,
                                method="TNC",
                                jac=grad_minus_reduced_likelihood_function,
                                bounds=bounds_hyp,
                                options={"maxiter": 100},
                            )
                            if optimal_theta_res_loop["fun"] < optimal_theta_res["fun"]:
                                optimal_theta_res = optimal_theta_res_loop
                    if "x" not in optimal_theta_res:
                        raise ValueError(
                            f"Optimizer encountered a problem: {optimal_theta_res_loop!s}"
                        )
                    optimal_theta = optimal_theta_res["x"]
                    if self.name not in ["MGP"]:
                        # Map back from log10 space.
                        optimal_theta = 10**optimal_theta
                    optimal_rlf_value, optimal_par = self._reduced_likelihood_function(
                        theta=optimal_theta
                    )
                    # Compare the new optimizer to the best previous one
                    if k > 0:
                        if np.isinf(optimal_rlf_value):
                            stop += 1
                            if incr != 0:
                                return
                            if stop > max_retry:
                                raise ValueError(
                                    "%d attempts to train the model failed" % max_retry
                                )
                        else:
                            if optimal_rlf_value >= self.best_iteration_fail:
                                if optimal_rlf_value > best_optimal_rlf_value:
                                    best_optimal_rlf_value = optimal_rlf_value
                                    best_optimal_par = optimal_par
                                    best_optimal_theta = optimal_theta
                            else:
                                # A previously-failed iteration reached a better
                                # likelihood: recover its theta from memory.
                                if (
                                    self.best_iteration_fail
                                    > best_optimal_rlf_value
                                ):
                                    best_optimal_theta = self._thetaMemory
                                    (
                                        best_optimal_rlf_value,
                                        best_optimal_par,
                                    ) = self._reduced_likelihood_function(
                                        theta=best_optimal_theta
                                    )
                    else:
                        if np.isinf(optimal_rlf_value):
                            stop += 1
                        else:
                            best_optimal_rlf_value = optimal_rlf_value
                            best_optimal_par = optimal_par
                            best_optimal_theta = optimal_theta
                    k += 1
                except ValueError as ve:
                    # raise ve
                    # If iteration is max when fmin_cobyla fail is not reached
                    if self.nb_ill_matrix > 0:
                        # Tolerate a limited number of ill-conditioned
                        # correlation matrices and retry.
                        self.nb_ill_matrix -= 1
                        k += 1
                        stop += 1
                        # One evaluation objectif function is done at least
                        if self.best_iteration_fail is not None:
                            if self.best_iteration_fail > best_optimal_rlf_value:
                                best_optimal_theta = self._thetaMemory
                                (
                                    best_optimal_rlf_value,
                                    best_optimal_par,
                                ) = self._reduced_likelihood_function(
                                    theta=best_optimal_theta
                                )
                    # Optimization fail
                    elif best_optimal_par == []:
                        print("Optimization failed. Try increasing the ``nugget``")
                        raise ve
                    # Break the while loop
                    else:
                        k = stop + 1
                        print("fmin_cobyla failed but the best value is retained")
            if "KPLSK" in self.name:
                if self.options["eval_noise"]:
                    # best_optimal_theta contains [theta, noise] if eval_noise = True
                    theta = best_optimal_theta[:-1]
                else:
                    # best_optimal_theta contains [theta] if eval_noise = False
                    theta = best_optimal_theta
                if exit_function:
                    return best_optimal_rlf_value, best_optimal_par, best_optimal_theta
                # Back-map the PLS-space theta to the full input space to seed
                # the second (full-dimensional) optimization pass.
                if self.options["corr"] == "squar_exp":
                    self.options["theta0"] = (theta * self.coeff_pls**2).sum(1)
                else:
                    self.options["theta0"] = (theta * np.abs(self.coeff_pls)).sum(1)
                self.options["n_comp"] = int(self.nx)
                limit = 10 * self.options["n_comp"]
                self.best_iteration_fail = None
                exit_function = True
        return best_optimal_rlf_value, best_optimal_par, best_optimal_theta
    def _check_param(self):
        """
        This function checks some parameters of the model
        and amend theta0 if possible (see _amend_theta0_option).
        """
        # d is the number of dimensions theta must span: the PLS-reduced
        # dimension when "n_comp" is declared, otherwise the input dimension.
        d = self.options["n_comp"] if "n_comp" in self.options else self.nx
        if self.name in ["KPLS"]:
            if self.options["corr"] not in ["pow_exp", "squar_exp", "abs_exp"]:
                raise ValueError(
                    "KPLS only works with a squared exponential, or an absolute exponential kernel with variable power"
                )
        # cat_kernel_comps is only meaningful for the homoscedastic
        # hypersphere categorical kernels.
        if (
            self.options["categorical_kernel"]
            not in [
                MixIntKernelType.EXP_HOMO_HSPHERE,
                MixIntKernelType.HOMO_HSPHERE,
            ]
            and self.name == "KPLS"
        ):
            if self.options["cat_kernel_comps"] is not None:
                raise ValueError(
                    "cat_kernel_comps option is for homoscedastic kernel."
                )
        # Broadcast theta0 to the number of hyperparameters required by the
        # (possibly categorical / PLS-reduced) kernel.
        mat_dim = (
            self.options["cat_kernel_comps"]
            if "cat_kernel_comps" in self.options
            else None
        )
        n_comp = self.options["n_comp"] if "n_comp" in self.options else None
        n_param = compute_n_param(
            self.design_space,
            self.options["categorical_kernel"],
            d,
            n_comp,
            mat_dim,
        )
        self.options["theta0"] *= np.ones(n_param)
        # For continuous (or Gower-kernel) models a scalar theta0 is broadcast
        # to d components; any other mismatching length is an error.
        if len(self.options["theta0"]) != d and (
            self.options["categorical_kernel"] == MixIntKernelType.GOWER
            or self.is_continuous
        ):
            if len(self.options["theta0"]) == 1:
                self.options["theta0"] *= np.ones(d)
            else:
                raise ValueError(
                    "the length of theta0 (%s) should be equal to the number of dim (%s)."
                    % (len(self.options["theta0"]), d)
                )
        if self.options["use_het_noise"] and not self.options["eval_noise"]:
            # Heteroscedastic case: one noise level per training point.
            if len(self.options["noise0"]) != self.nt:
                if len(self.options["noise0"]) == 1:
                    self.options["noise0"] *= np.ones(self.nt)
                else:
                    raise ValueError(
                        "for the heteroscedastic case, the length of noise0 (%s) should be equal to the number of observations (%s)."
                        % (len(self.options["noise0"]), self.nt)
                    )
        if not self.options["use_het_noise"]:
            # Homoscedastic case: a single shared noise level.
            if len(self.options["noise0"]) != 1:
                raise ValueError(
                    "for the homoscedastic noise case, the length of noise0 (%s) should be equal to one."
                    % (len(self.options["noise0"]))
                )
        if self.supports["training_derivatives"]:
            # Gradient-enhanced models need derivative training data.
            if not (1 in self.training_points[None]):
                raise Exception("Derivative values are needed for using the GEK model.")
def _check_F(self, n_samples_F, p):
"""
This function check the F-parameters of the model.
"""
if n_samples_F != self.nt:
raise Exception(
"Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model."
)
if p > n_samples_F:
raise Exception(
(
"Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d."
)
% (self.nt, p)
)
def compute_n_param(design_space, cat_kernel, d, n_comp, mat_dim):
    """
    Return the number of hyperparameters needed for a homoscedastic or full
    group kernel.
    Parameters
    ----------
    design_space: BaseDesignSpace
        - design space definition
    cat_kernel : string
        - the kernel to use for categorical inputs (non-continuous Kriging only)
    d: int
        - n_comp or nx
    n_comp : int
        - number of PLS components when PLS is used, else None
    mat_dim : int
        - number of components for the matrix kernel (mixed integer) when PLS
          is used, else None
    Returns
    -------
    n_param: int
        - The number of parameters.
    """
    n_param = design_space.n_dv
    if n_comp is not None:
        # With PLS the parameter count starts from the reduced dimension.
        n_param = d
        if cat_kernel == MixIntKernelType.CONT_RELAX:
            return n_param
        if mat_dim is not None:
            # One lower-triangular block of size m*(m-1)/2 per matrix-kernel group.
            extra = sum(m * (m - 1) // 2 for m in mat_dim)
            return int(extra + n_param)
    if cat_kernel == MixIntKernelType.GOWER:
        return n_param
    # Adjust the count for every categorical design variable.
    for dv in design_space.design_variables:
        if not isinstance(dv, CategoricalVariable):
            continue
        n_values = dv.n_values
        if design_space.n_dv == d:
            # The scalar theta of this variable is replaced by the terms below.
            n_param -= 1
        if cat_kernel in [
            MixIntKernelType.EXP_HOMO_HSPHERE,
            MixIntKernelType.HOMO_HSPHERE,
        ]:
            n_param += int(n_values * (n_values - 1) / 2)
        if cat_kernel == MixIntKernelType.CONT_RELAX:
            n_param += int(n_values)
    return n_param
| 72,518 | 36.987952 | 163 | py |
smt | smt-master/smt/surrogate_models/qp.py | """
Author: Dr. Mohamed Amine Bouhlel <mbouhlel@umich.edu>
Dr. Nathalie.bartoli <nathalie@onera.fr>
This package is distributed under New BSD license.
TO DO:
- define outputs['sol'] = self.sol
"""
import numpy as np
import scipy
from smt.surrogate_models.surrogate_model import SurrogateModel
from smt.utils.caching import cached_operation
from smt.utils.misc import standardization
class QP(SurrogateModel):
    """
    Square polynomial approach

    Fits a full second-order polynomial (constant, linear, pure quadratic and
    pairwise cross terms) to the training data by ordinary least squares on
    standardized inputs/outputs.
    """

    name = "QP"

    def _initialize(self):
        # Declare QP-specific options and capabilities on top of the base class.
        super(QP, self)._initialize()
        declare = self.options.declare
        supports = self.supports
        declare(
            "data_dir",
            values=None,
            types=str,
            desc="Directory for loading / saving cached data; None means do not save or load",
        )
        supports["derivatives"] = True

    ############################################################################
    # Model functions
    ############################################################################

    def _new_train(self):
        """
        Train the model
        """
        X = self.training_points[None][0][0]
        y = self.training_points[None][0][1]
        # Standardize inputs and outputs (zero mean, unit scale).
        (
            self.X_norma,
            self.y_norma,
            self.X_offset,
            self.y_mean,
            self.X_scale,
            self.y_std,
        ) = standardization(X, y)
        # A full quadratic in nx variables has (nx+1)(nx+2)/2 coefficients, so
        # at least that many samples are required for a determined fit.
        if X.shape[0] < (self.nx + 1) * (self.nx + 2) / 2.0:
            raise Exception(
                "Number of training points should be greater or equal to %d."
                % ((self.nx + 1) * (self.nx + 2) / 2.0)
            )
        X = self._response_surface(self.X_norma)
        # Ordinary least squares via the normal equations:
        # coef = (X^T X)^{-1} X^T y
        self.coef = np.dot(np.linalg.inv(np.dot(X.T, X)), (np.dot(X.T, self.y_norma)))

    def _train(self):
        """
        Train the model
        """
        inputs = {"self": self}
        # Reuse a cached solution from data_dir when one is available.
        with cached_operation(inputs, self.options["data_dir"]) as outputs:
            if outputs:
                self.sol = outputs["sol"]
            else:
                self._new_train()

    def _response_surface(self, x):
        """
        Build the response surface of degree 2
        argument
        -----------
        x : np.ndarray [nt, nx]
            Training points
        Returns
        -------
        M : np.ndarray
            Matrix of the surface
        """
        dim = self.nx
        n = x.shape[0]
        # Number of monomials of a full quadratic: C(dim + 2, dim).
        n_app = int(scipy.special.binom(dim + 2, dim))
        M = np.zeros((n_app, n))
        x = x.T
        # Basis ordering: [1, x_1..x_d, x_1^2..x_d^2, cross terms x_i*x_j, i<j].
        M[0, :] = np.ones((1, n))
        for i in range(1, dim + 1):
            M[i, :] = x[i - 1, :]
        for i in range(dim + 1, 2 * dim + 1):
            M[i, :] = x[i - (dim + 1), :] ** 2
        for i in range(dim - 1):
            for j in range(i + 1, dim):
                # Row index of the cross term x_i * x_j in the basis ordering.
                k = int(2 * dim + 2 + (i) * dim - ((i + 1) * (i)) / 2 + (j - (i + 2)))
                M[k, :] = x[i, :] * x[j, :]
        return M.T

    def _predict_derivatives(self, x, kx):
        """
        Evaluates the derivatives at a set of points.
        Arguments
        ---------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values
        kx : int
            The 0-based index of the input variable with respect to which derivatives are desired.
        Returns
        -------
        y : np.ndarray
            Derivative values.
        """
        dim = self.nx
        x = (x - self.X_offset) / self.X_scale
        # d/dx_kx of the quadratic: linear + pure quadratic + cross terms.
        linear_coef = self.coef[1 + kx, :]
        quad_coef = 2 * self.coef[1 + dim + kx, :] * x[:, kx]
        neval = np.size(quad_coef, 0)
        cross_coef = np.zeros(neval)
        for i in range(dim):
            if i > kx:
                # Cross term x_kx * x_i with kx < i.
                k = int(
                    2 * dim + 2 + (kx) * dim - ((kx + 1) * (kx)) / 2 + (i - (kx + 2))
                )
                cross_coef += self.coef[k, :] * x[:, i]
            elif i < kx:
                # Cross term x_i * x_kx with i < kx.
                k = int(2 * dim + 2 + (i) * dim - ((i + 1) * (i)) / 2 + (kx - (i + 2)))
                cross_coef += self.coef[k, :] * x[:, i]
        # Undo the output scaling and the input scaling of variable kx.
        y = (
            (linear_coef + quad_coef + cross_coef).reshape((x.shape[0], self.ny))
            * self.y_std
            / self.X_scale[kx]
        )
        return y

    def _predict_values(self, x):
        """
        Evaluates the model at a set of points.
        Arguments
        ---------
        x : np.ndarray [n_evals, nx]
            Evaluation point input variable values
        Returns
        -------
        y : np.ndarray [n_evals, ny]
            Evaluation point output variable values
        """
        x = (x - self.X_offset) / self.X_scale
        M = self._response_surface(x)
        y_ = np.dot(M, self.coef)
        # De-standardize the prediction.
        y = (self.y_mean + self.y_std * y_).ravel()
        return y
| 4,775 | 28.300613 | 98 | py |
smt | smt-master/smt/surrogate_models/tests/test_krg_het_noise.py | """
Author: Andres Lopez-Lopera <<andres.lopez_lopera@onera.fr>>
This package is distributed under New BSD license.
"""
import unittest
import numpy as np
from smt.surrogate_models import KRG
from smt.utils.sm_test_case import SMTestCase
from smt.utils import compute_rms_error
class Test(SMTestCase):
    def test_predict_output(self):
        """Heteroscedastic-noise KRG should recover the mean of noisy replicates."""
        x_train = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
        y_train = np.array([0.0, 1.0, 1.5, 1.1, 1.0])
        # Build three noisy repetitions of the same training set; the seed is
        # fixed so the perturbation sequence stays reproducible.
        np.random.seed(6)
        noise = np.std(y_train) * np.random.uniform(size=y_train.shape)
        x_rep = np.array(3 * x_train.tolist())
        y_rep = np.concatenate(
            (y_train, y_train + 0.2 * noise, y_train - 0.2 * noise)
        )
        model = KRG(theta0=[1.0], eval_noise=True, use_het_noise=True, n_start=1)
        model.set_training_values(x_rep, y_rep)
        model.train()
        prediction = model.predict_values(x_train)
        reference = y_train.reshape(-1, 1)
        rel_error = np.linalg.norm(prediction - reference) / np.linalg.norm(reference)
        self.assert_error(rel_error, 0.0, 1e-2)
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    unittest.main()
| 1,072 | 27.236842 | 86 | py |
smt | smt-master/smt/surrogate_models/tests/test_kpls.py | """
Author: Remi Lafage <<remi.lafage@onera.fr>>
This package is distributed under New BSD license.
"""
import unittest
import numpy as np
from smt.surrogate_models import KPLS
from smt.problems import Sphere
from smt.sampling_methods import FullFactorial, LHS
class TestKPLS(unittest.TestCase):
    @staticmethod
    def _sample(n_dims, n_points):
        """Draw an LHS sample on [0, 1]^n_dims with a fixed seed."""
        limits = np.repeat(np.atleast_2d([0.0, 1.0]), n_dims, axis=0)
        sampler = LHS(xlimits=limits, criterion="m", random_state=42)
        return sampler(n_points)

    def test_predict_output(self):
        """Shape checks and input validation for trained KPLS models."""
        for corr_str in [
            "pow_exp",
            "abs_exp",
            "squar_exp",
        ]:
            n_dims, n_points = 3, 3
            x = self._sample(n_dims, n_points)
            y = self._sample(1, n_points)
            model = KPLS(n_comp=2, corr=corr_str)
            model.set_training_values(x, y)
            model.train()
            # Inputs with the wrong dimensionality must be rejected.
            too_wide = np.asarray([[0, 0, 0, 0]])
            too_narrow = np.asarray([[0]])
            self.assertRaises(ValueError, lambda: model.predict_values(too_wide))
            self.assertRaises(ValueError, lambda: model.predict_values(too_narrow))
            variances = model.predict_variances(x)
            self.assertEqual(y.shape[0], variances.shape[0])
            # Training with n_comp=3 on this data must raise.
            model = KPLS(n_comp=3)
            model.set_training_values(x, y)
            self.assertRaises(ValueError, lambda: model.train())

    def test_kpls_training_with_zeroed_outputs(self):
        # Test scikit-learn 0.24 regression cf. https://github.com/SMTorg/smt/issues/274
        for corr_str in [
            "pow_exp",
            "abs_exp",
            "squar_exp",
        ]:
            x = np.random.rand(50, 3)
            y = np.zeros(50)
            model = KPLS()
            model.options["corr"] = corr_str
            model.set_training_values(x, y)
            model.train()
            probe = np.asarray([[0, 0, 0], [0.5, 0.5, 0.5], [1, 1, 1]])
            predicted = model.predict_values(probe)
            # KPLS training fails anyway but not due to PLS exception StopIteration
            self.assertEqual(np.linalg.norm(predicted - np.asarray([[0, 0, 0]])), 0)
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    unittest.main()
| 2,294 | 30.875 | 88 | py |
smt | smt-master/smt/surrogate_models/tests/test_mgp.py | """
Author: Remi Lafage <<remi.lafage@onera.fr>>
This package is distributed under New BSD license.
"""
import unittest
import numpy as np
from smt.surrogate_models import MGP
from smt.problems import Sphere
from smt.sampling_methods import FullFactorial, LHS
class TestMGP(unittest.TestCase):
    def test_predict_output_shapes(self):
        """MGP predictions and variances are (n_samples, 1) column vectors."""
        n_dims, n_points = 3, 10
        x_limits = np.repeat(np.atleast_2d([0.0, 1.0]), n_dims, axis=0)
        x = LHS(xlimits=x_limits, criterion="m", random_state=42)(n_points)
        y_limits = np.repeat(np.atleast_2d([0.0, 1.0]), 1, axis=0)
        y = LHS(xlimits=y_limits, criterion="m", random_state=42)(n_points)
        y = y.flatten()
        model = MGP(n_comp=2)
        model.set_training_values(x, y)
        model.train()
        # 1-D inputs of the wrong size are rejected by both prediction APIs.
        bad_wide = np.asarray([0, 0, 0, 0])
        bad_narrow = np.asarray([0])
        self.assertRaises(ValueError, lambda: model.predict_values(bad_wide))
        self.assertRaises(ValueError, lambda: model.predict_values(bad_narrow))
        self.assertRaises(ValueError, lambda: model.predict_variances(bad_wide))
        self.assertRaises(ValueError, lambda: model.predict_variances(bad_narrow))
        x_query = np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
        n_queries = x_query.shape[0]
        self.assertEqual(model.predict_values(x_query).shape, (n_queries, 1))
        self.assertEqual(model.predict_variances(x_query).shape, (n_queries, 1))
        self.assertEqual(
            model.predict_variances_no_uq(x_query).shape, (n_queries, 1)
        )
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    unittest.main()
| 1,659 | 28.122807 | 78 | py |
smt | smt-master/smt/surrogate_models/tests/test_krg_based.py | """
Author: Remi Lafage <<remi.lafage@onera.fr>>
This package is distributed under New BSD license.
"""
import unittest
import numpy as np
from smt.surrogate_models.krg_based import KrgBased
class TestKrgBased(unittest.TestCase):
    def test_theta0_default_init(self):
        """The default scalar theta0 is broadcast to one value per dimension."""
        model = KrgBased()
        model.set_training_values(np.array([[1, 2, 3]]), np.array([[1]]))
        model._check_param()
        self.assertTrue(np.array_equal(model.options["theta0"], [1e-2, 1e-2, 1e-2]))

    def test_theta0_one_dim_init(self):
        """A user-supplied single theta0 is broadcast to every dimension."""
        model = KrgBased(theta0=[2e-2])
        model.set_training_values(np.array([[1, 2, 3]]), np.array([[1]]))
        model._check_param()
        self.assertTrue(np.array_equal(model.options["theta0"], [2e-2, 2e-2, 2e-2]))

    def test_theta0_erroneous_init(self):
        """A theta0 length matching neither 1 nor the dimension is rejected."""
        model = KrgBased(theta0=[2e-2, 1e-2])
        model.set_training_values(np.array([[1, 2]]), np.array([[1]]))  # correct
        model._check_param()
        model.set_training_values(np.array([[1, 2, 3]]), np.array([[1]]))  # erroneous
        self.assertRaises(ValueError, model._check_param)
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    unittest.main()
| 1,127 | 30.333333 | 84 | py |
smt | smt-master/smt/surrogate_models/tests/test_rmts.py | """
Author: Remi Lafage <<remi.lafage@onera.fr>>
This package is distributed under New BSD license.
"""
import unittest
import numpy as np
import matplotlib.pyplot as plt
from smt.utils.silence import Silence
from smt.utils.sm_test_case import SMTestCase
from smt.utils import compute_rms_error
from smt.surrogate_models import RMTB, RMTC
def function_test_1d(x):
    """1-D test function ``y = (x - 3.5) * sin((x - 3.5) / pi)``.

    Parameters
    ----------
    x : array_like
        Input values (any shape); flattened before evaluation.

    Returns
    -------
    np.ndarray [n, 1]
        Function values as a column vector.
    """
    # function xsinx
    x = np.reshape(x, (-1,))
    # The original pre-allocated y with np.zeros and immediately overwrote it;
    # the dead allocation is removed.
    y = (x - 3.5) * np.sin((x - 3.5) / (np.pi))
    return y.reshape((-1, 1))
class TestRMTS(SMTestCase):
    def setUp(self):
        """Build a reference RMTB model whose predictions seed the tests."""
        # Five fixed 1-D training points (pre-drawn from rand(5, 1) * 10).
        self.xt = np.array(
            [[3.6566495, 4.64266046, 7.23645433, 6.04862594, 8.85571712]]
        ).T
        self.yt = function_test_1d(self.xt)
        self.xlimits = np.array([[0.0, 25.0]])
        reference = RMTB(xlimits=self.xlimits, print_global=False)
        reference.set_training_values(self.xt, self.yt)
        with Silence():
            reference.train()
        self.xref = np.array([[0.0, 6.25, 12.5, 18.75, 25.0]]).T
        self.yref = reference.predict_values(self.xref)
        self.sms = {}

    def test_linear_search(self):
        """Every line-search strategy must reproduce the reference model."""
        for strategy in ["bracketed", "cubic", "quadratic", "null"]:
            self.sms[strategy] = RMTB(
                xlimits=self.xlimits, line_search=strategy, print_global=False
            )
            self.sms[strategy].set_training_values(self.xt, self.yt)
            with Silence():
                self.sms[strategy].train()
            error = compute_rms_error(self.sms[strategy], self.xref, self.yref)
            self.assert_error(error, 0.0, 1e-1)

    def test_linear_solver(self):
        """Every linear solver must train without diverging from the reference."""
        solvers = [
            "krylov-dense",
            "dense-chol",
            "lu",
            "ilu",
            "krylov",
            "krylov-lu",
            "krylov-mg",
            "gs",
            "jacobi",
            "mg",
            "null",
        ]
        for solver in solvers:
            self.sms[solver] = RMTB(
                xlimits=self.xlimits, solver=solver, print_global=False
            )
            self.sms[solver].set_training_values(self.xt, self.yt)
            with Silence():
                self.sms[solver].train()
            error = compute_rms_error(self.sms[solver], self.xref, self.yref)
            self.assert_error(error, 0.0, 1.1)
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    unittest.main()
| 2,291 | 26.614458 | 84 | py |
smt | smt-master/smt/surrogate_models/tests/test_krg_training.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 23 15:20:29 2020
@author: ninamoello
"""
from __future__ import print_function, division
import numpy as np
import unittest
from smt.utils.sm_test_case import SMTestCase
from smt.utils.kriging import (
pow_exp,
abs_exp,
squar_exp,
act_exp,
cross_distances,
componentwise_distance,
matern52,
matern32,
)
from smt.utils.misc import standardization
from smt.sampling_methods.lhs import LHS
from smt.surrogate_models import KRG, MGP
# Module-level flag, flipped to True when this file is executed as a script.
print_output = False
class Test(SMTestCase):
    def setUp(self):
        """Build a shared fixture: LHS samples, cross-distances and a theta."""
        eps = 1e-8
        xlimits = np.asarray([[0, 1], [0, 1]])
        # Seeded RandomState so every test sees the same sample/theta draws.
        self.random = np.random.RandomState(42)
        lhs = LHS(xlimits=xlimits, random_state=self.random)
        X = lhs(8)
        y = LHS(xlimits=np.asarray([[0, 1]]), random_state=self.random)(8)
        X_norma, y_norma, X_offset, y_mean, X_scale, y_std = standardization(X, y)
        D, ij = cross_distances(X_norma)
        theta = self.random.rand(2)
        corr_str = [
            "pow_exp",
            "abs_exp",
            "squar_exp",
            "act_exp",
            "matern32",
            "matern52",
        ]
        corr_def = [pow_exp, abs_exp, squar_exp, act_exp, matern32, matern52]
        # Exponent used by the pow_exp kernel; 1.0 placeholder for the others.
        power_val = {
            "pow_exp": 1.9,
            "abs_exp": 1.0,
            "squar_exp": 2.0,
            "act_exp": 1.0,
            "matern32": 1.0,
            "matern52": 1.0,
        }
        self.eps = eps
        self.X = X
        self.y = y
        (
            self.X_norma,
            self.y_norma,
            self.X_offset,
            self.y_mean,
            self.X_scale,
            self.y_std,
        ) = (
            X_norma,
            y_norma,
            X_offset,
            y_mean,
            X_scale,
            y_std,
        )
        self.D, self.ij = D, ij
        self.theta = theta
        self.corr_str = corr_str
        self.corr_def = corr_def
        self.power_val = power_val

    def test_noise_estimation(self):
        """Noise-evaluating KRG recovers the expected theta on toy 1-D data."""
        xt = np.array([[0.0], [1.0], [2.0], [3.0], [4.0]])
        yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
        sm = KRG(hyper_opt="Cobyla", eval_noise=True, noise0=[1e-4])
        sm.set_training_values(xt, yt)
        sm.train()
        self.assert_error(np.array(sm.optimal_theta), np.array([1.6]), 1e-1, 1e-1)

    def test_corr_derivatives(self):
        """Compare analytic kernel gradients wrt theta to finite differences."""
        for ind, corr in enumerate(self.corr_def):  # For every kernel
            # self.corr_str[ind] = self.corr_def[ind]
            D = componentwise_distance(
                self.D,
                self.corr_str[ind],
                self.X.shape[1],
                self.power_val[self.corr_str[ind]],
            )
            # Assemble the full symmetric correlation matrix from the
            # condensed pairwise values.
            k = corr(self.theta, D)
            K = np.eye(self.X.shape[0])
            K[self.ij[:, 0], self.ij[:, 1]] = k[:, 0]
            K[self.ij[:, 1], self.ij[:, 0]] = k[:, 0]
            grad_norm_all = []
            diff_norm_all = []
            ind_theta = []
            for i, theta_i in enumerate(self.theta):
                eps_theta = np.zeros(self.theta.shape)
                eps_theta[i] = self.eps
                k_dk = corr(self.theta + eps_theta, D)
                K_dk = np.eye(self.X.shape[0])
                K_dk[self.ij[:, 0], self.ij[:, 1]] = k_dk[:, 0]
                K_dk[self.ij[:, 1], self.ij[:, 0]] = k_dk[:, 0]
                # Forward-difference approximation of dK/dtheta_i.
                grad_eps = (K_dk - K) / self.eps
                dk = corr(self.theta, D, grad_ind=i)
                dK = np.zeros((self.X.shape[0], self.X.shape[0]))
                dK[self.ij[:, 0], self.ij[:, 1]] = dk[:, 0]
                dK[self.ij[:, 1], self.ij[:, 0]] = dk[:, 0]
                grad_norm_all.append(np.linalg.norm(dK))
                diff_norm_all.append(np.linalg.norm(grad_eps))
                ind_theta.append(r"$x_%d$" % i)
            self.assert_error(
                np.array(grad_norm_all), np.array(diff_norm_all), 1e-5, 1e-5
            )  # from utils/smt_test_case.py

    def test_corr_hessian(self):
        """Compare analytic kernel second derivatives to finite differences."""
        for ind, corr in enumerate(self.corr_def):  # For every kernel
            # self.corr_str[ind] = self.corr_def[ind]
            D = componentwise_distance(
                self.D,
                self.corr_str[ind],
                self.X.shape[1],
                self.power_val[self.corr_str[ind]],
            )
            grad_norm_all = []
            diff_norm_all = []
            for i, theta_i in enumerate(self.theta):
                k = corr(self.theta, D, grad_ind=i)
                K = np.eye(self.X.shape[0])
                K[self.ij[:, 0], self.ij[:, 1]] = k[:, 0]
                K[self.ij[:, 1], self.ij[:, 0]] = k[:, 0]
                for j, omega_j in enumerate(self.theta):
                    eps_omega = np.zeros(self.theta.shape)
                    eps_omega[j] = self.eps
                    k_dk = corr(self.theta + eps_omega, D, grad_ind=i)
                    K_dk = np.eye(self.X.shape[0])
                    K_dk[self.ij[:, 0], self.ij[:, 1]] = k_dk[:, 0]
                    K_dk[self.ij[:, 1], self.ij[:, 0]] = k_dk[:, 0]
                    # Forward difference of the gradient gives the (i, j)
                    # Hessian slice.
                    grad_eps = (K_dk - K) / self.eps
                    dk = corr(self.theta, D, grad_ind=i, hess_ind=j)
                    dK = np.zeros((self.X.shape[0], self.X.shape[0]))
                    dK[self.ij[:, 0], self.ij[:, 1]] = dk[:, 0]
                    dK[self.ij[:, 1], self.ij[:, 0]] = dk[:, 0]
                    grad_norm_all.append(np.linalg.norm(dK))
                    diff_norm_all.append(np.linalg.norm(grad_eps))
            self.assert_error(
                np.array(grad_norm_all), np.array(diff_norm_all), 1e-5, 1e-5
            )  # from utils/smt_test_case.py

    def test_likelihood_derivatives(self):
        """Check gradients of the reduced likelihood by finite differences."""
        for corr_str in [
            "pow_exp",
            "abs_exp",
            "squar_exp",
            "act_exp",
            "matern32",
            "matern52",
        ]:  # For every kernel
            for poly_str in ["constant", "linear", "quadratic"]:  # For every method
                # act_exp is only available through MGP (4 hyperparameters).
                if corr_str == "act_exp":
                    kr = MGP(print_global=False)
                    theta = self.random.rand(4)
                else:
                    kr = KRG(print_global=False)
                    theta = self.theta
                kr.options["poly"] = poly_str
                kr.options["corr"] = corr_str
                kr.options["pow_exp_power"] = self.power_val[corr_str]
                kr.set_training_values(self.X, self.y)
                kr.train()
                grad_red, dpar = kr._reduced_likelihood_gradient(theta)
                red, par = kr._reduced_likelihood_function(theta)
                grad_norm_all = []
                diff_norm_all = []
                ind_theta = []
                for i, theta_i in enumerate(theta):
                    eps_theta = theta.copy()
                    eps_theta[i] = eps_theta[i] + self.eps
                    red_dk, par_dk = kr._reduced_likelihood_function(eps_theta)
                    # Forward-difference estimate of d(likelihood)/dtheta_i.
                    dred_dk = (red_dk - red) / self.eps
                    grad_norm_all.append(grad_red[i])
                    diff_norm_all.append(float(dred_dk))
                    ind_theta.append(r"$x_%d$" % i)
                grad_norm_all = np.atleast_2d(grad_norm_all)
                diff_norm_all = np.atleast_2d(diff_norm_all).T
                self.assert_error(
                    grad_norm_all, diff_norm_all, atol=1e-5, rtol=1e-3
                )  # from utils/smt_test_case.py

    def test_likelihood_hessian(self):
        """Check the reduced-likelihood Hessian by differencing gradients."""
        for corr_str in [
            "pow_exp",
            "abs_exp",
            "squar_exp",
            "act_exp",
            "matern32",
            "matern52",
        ]:  # For every kernel
            for poly_str in ["constant", "linear", "quadratic"]:  # For every method
                if corr_str == "act_exp":
                    kr = MGP(print_global=False)
                    theta = self.random.rand(4)
                else:
                    kr = KRG(print_global=False)
                    theta = self.theta
                kr.options["poly"] = poly_str
                kr.options["corr"] = corr_str
                kr.options["pow_exp_power"] = self.power_val[corr_str]
                kr.set_training_values(self.X, self.y)
                kr.train()
                grad_red, dpar = kr._reduced_likelihood_gradient(theta)
                hess, hess_ij, _ = kr._reduced_likelihood_hessian(theta)
                # Rebuild the symmetric Hessian from its condensed form.
                Hess = np.zeros((theta.shape[0], theta.shape[0]))
                Hess[hess_ij[:, 0], hess_ij[:, 1]] = hess[:, 0]
                Hess[hess_ij[:, 1], hess_ij[:, 0]] = hess[:, 0]
                grad_norm_all = []
                diff_norm_all = []
                ind_theta = []
                for j, omega_j in enumerate(theta):
                    eps_omega = theta.copy()
                    eps_omega[j] += self.eps
                    grad_red_eps, _ = kr._reduced_likelihood_gradient(eps_omega)
                    for i, theta_i in enumerate(theta):
                        # Relative comparison: both sides are normalized by
                        # the full Hessian norm.
                        hess_eps = (grad_red_eps[i] - grad_red[i]) / self.eps
                        grad_norm_all.append(
                            np.linalg.norm(Hess[i, j]) / np.linalg.norm(Hess)
                        )
                        diff_norm_all.append(
                            np.linalg.norm(hess_eps) / np.linalg.norm(Hess)
                        )
                        ind_theta.append(r"$x_%d,x_%d$" % (j, i))
                self.assert_error(
                    np.array(grad_norm_all),
                    np.array(diff_norm_all),
                    atol=1e-5,
                    rtol=1e-3,
                )  # from utils/smt_test_case.py

    def test_variance_derivatives(self):
        """Check predicted-variance derivatives against central differences."""
        for corr_str in [
            "abs_exp",
            "squar_exp",
            "matern32",
            "matern52",
            "pow_exp",
        ]:
            kr = KRG(print_global=False)
            kr.options["poly"] = "constant"
            kr.options["corr"] = corr_str
            kr.options["pow_exp_power"] = self.power_val[corr_str]
            kr.set_training_values(self.X, self.y)
            kr.train()
            e = 1e-6
            xa = self.random.random()
            xb = self.random.random()
            # Central-difference stencil around (xa, xb) in both directions.
            x_valid = [[xa, xb], [xa + e, xb], [xa - e, xb], [xa, xb + e], [xa, xb - e]]
            y_predicted = kr.predict_variances(np.array(x_valid))
            y_jacob = np.zeros((2, 5))
            for i in range(np.shape(x_valid)[0]):
                l0 = kr.predict_variance_derivatives(np.atleast_2d(x_valid[i]), 0)[0]
                l1 = kr.predict_variance_derivatives(np.atleast_2d(x_valid[i]), 1)[0]
                y_jacob[0, i] = l0
                y_jacob[1, i] = l1
            diff_g = (y_predicted[1] - y_predicted[2]) / (2 * e)
            diff_d = (y_predicted[3] - y_predicted[4]) / (2 * e)
            jac_rel_error1 = abs((y_jacob[0][0] - diff_g) / y_jacob[0][0])
            self.assert_error(jac_rel_error1, 1e-3, atol=0.01, rtol=0.01)
            jac_rel_error2 = abs((y_jacob[1][0] - diff_d) / y_jacob[1][1])
            self.assert_error(jac_rel_error2, 1e-3, atol=0.01, rtol=0.01)
if __name__ == "__main__":
    # Enable verbose output and run the tests when executed directly.
    print_output = True
    unittest.main()
| 11,319 | 35.050955 | 88 | py |
smt | smt-master/smt/surrogate_models/tests/test_surrogate_model_examples.py | """
Author: John Hwang <<hwangjt@umich.edu>>
This package is distributed under New BSD license.
"""
import unittest
import matplotlib
matplotlib.use("Agg")
# The compiled surrogates require C extensions built at install time; when
# their import fails for any reason the dependent tests are skipped via the
# compiled_available flag (the bare except is a deliberate best-effort probe).
try:
    from smt.surrogate_models import IDW, RBF, RMTB, RMTC

    compiled_available = True
except:
    compiled_available = False
class Test(unittest.TestCase):
    """Runnable documentation examples for the SMT surrogate models.

    Each test trains a surrogate on a small data set and plots the result.
    matplotlib is configured with the non-interactive "Agg" backend at module
    level, so plt.show() is a no-op under the test runner.
    """

    @unittest.skipIf(not compiled_available, "C compilation failed")
    def test_idw(self):
        # Inverse-distance weighting on 1-D toy data.
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models import IDW

        xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
        yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
        sm = IDW(p=2)
        sm.set_training_values(xt, yt)
        sm.train()
        num = 100
        x = np.linspace(0.0, 4.0, num)
        y = sm.predict_values(x)
        plt.plot(xt, yt, "o")
        plt.plot(x, y)
        plt.xlabel("x")
        plt.ylabel("y")
        plt.legend(["Training data", "Prediction"])
        plt.show()

    @unittest.skipIf(not compiled_available, "C compilation failed")
    def test_rbf(self):
        # Radial basis function interpolation on 1-D toy data.
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models import RBF

        xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
        yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
        sm = RBF(d0=5)
        sm.set_training_values(xt, yt)
        sm.train()
        num = 100
        x = np.linspace(0.0, 4.0, num)
        y = sm.predict_values(x)
        plt.plot(xt, yt, "o")
        plt.plot(x, y)
        plt.xlabel("x")
        plt.ylabel("y")
        plt.legend(["Training data", "Prediction"])
        plt.show()

    @unittest.skipIf(not compiled_available, "C compilation failed")
    def test_rmtb(self):
        # Regularized minimal-energy tensor-product B-spline surrogate.
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models import RMTB

        xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
        yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
        xlimits = np.array([[0.0, 4.0]])
        sm = RMTB(
            xlimits=xlimits,
            order=4,
            num_ctrl_pts=20,
            energy_weight=1e-15,
            regularization_weight=0.0,
        )
        sm.set_training_values(xt, yt)
        sm.train()
        num = 100
        x = np.linspace(0.0, 4.0, num)
        y = sm.predict_values(x)
        plt.plot(xt, yt, "o")
        plt.plot(x, y)
        plt.xlabel("x")
        plt.ylabel("y")
        plt.legend(["Training data", "Prediction"])
        plt.show()

    @unittest.skipIf(not compiled_available, "C compilation failed")
    def test_rmtc(self):
        # Regularized minimal-energy tensor-product cubic-Hermite surrogate.
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models import RMTC

        xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
        yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
        xlimits = np.array([[0.0, 4.0]])
        sm = RMTC(
            xlimits=xlimits,
            num_elements=20,
            energy_weight=1e-15,
            regularization_weight=0.0,
        )
        sm.set_training_values(xt, yt)
        sm.train()
        num = 100
        x = np.linspace(0.0, 4.0, num)
        y = sm.predict_values(x)
        plt.plot(xt, yt, "o")
        plt.plot(x, y)
        plt.xlabel("x")
        plt.ylabel("y")
        plt.legend(["Training data", "Prediction"])
        plt.show()

    def test_ls(self):
        # Least-squares (linear regression) surrogate.
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models import LS

        xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
        yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
        sm = LS()
        sm.set_training_values(xt, yt)
        sm.train()
        num = 100
        x = np.linspace(0.0, 4.0, num)
        y = sm.predict_values(x)
        plt.plot(xt, yt, "o")
        plt.plot(x, y)
        plt.xlabel("x")
        plt.ylabel("y")
        plt.legend(["Training data", "Prediction"])
        plt.show()

    def test_qp(self):
        # Quadratic-polynomial surrogate, trained on a 2-output data set.
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models import QP

        xt = np.array([[0.0, 1.0, 2.0, 3.0, 4.0]]).T
        yt = np.array([[0.2, 1.4, 1.5, 0.9, 1.0], [0.0, 1.0, 2.0, 4, 3]]).T
        sm = QP()
        sm.set_training_values(xt, yt)
        sm.train()
        num = 100
        x = np.linspace(0.0, 4.0, num)
        y = sm.predict_values(x)
        t1, _ = plt.plot(xt, yt[:, 0], "o", "C0")
        p1 = plt.plot(x, y[:, 0], "C0", label="Prediction 1")
        t2, _ = plt.plot(xt, yt[:, 1], "o", "C1")
        p2 = plt.plot(x, y[:, 1], "C1", label="Prediction 2")
        plt.xlabel("x")
        plt.ylabel("y")
        plt.legend()
        plt.show()

    def test_krg(self):
        # Kriging example, including variance and derivative predictions.
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models import KRG

        xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
        yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
        sm = KRG(theta0=[1e-2])
        sm.set_training_values(xt, yt)
        sm.train()
        num = 100
        x = np.linspace(0.0, 4.0, num)
        y = sm.predict_values(x)
        # estimated variance
        s2 = sm.predict_variances(x)
        # derivative according to the first variable
        dydx = sm.predict_derivatives(xt, 0)
        fig, axs = plt.subplots(1)
        # add a plot with variance
        axs.plot(xt, yt, "o")
        axs.plot(x, y)
        axs.fill_between(
            np.ravel(x),
            np.ravel(y - 3 * np.sqrt(s2)),
            np.ravel(y + 3 * np.sqrt(s2)),
            color="lightgrey",
        )
        axs.set_xlabel("x")
        axs.set_ylabel("y")
        axs.legend(
            ["Training data", "Prediction", "Confidence Interval 99%"],
            loc="lower right",
        )
        plt.show()

    def test_mixed_int_krg(self):
        # Kriging over a single integer design variable.
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models import KRG
        from smt.applications.mixed_integer import MixedIntegerKrigingModel
        from smt.utils.design_space import DesignSpace, IntegerVariable

        xt = np.array([0.0, 2.0, 3.0])
        yt = np.array([0.0, 1.5, 0.9])
        design_space = DesignSpace(
            [
                IntegerVariable(0, 4),
            ]
        )
        sm = MixedIntegerKrigingModel(
            surrogate=KRG(design_space=design_space, theta0=[1e-2])
        )
        sm.set_training_values(xt, yt)
        sm.train()
        num = 500
        x = np.linspace(0.0, 4.0, num)
        y = sm.predict_values(x)
        # estimated variance
        s2 = sm.predict_variances(x)
        fig, axs = plt.subplots(1)
        axs.plot(xt, yt, "o")
        axs.plot(x, y)
        axs.fill_between(
            np.ravel(x),
            np.ravel(y - 3 * np.sqrt(s2)),
            np.ravel(y + 3 * np.sqrt(s2)),
            color="lightgrey",
        )
        axs.set_xlabel("x")
        axs.set_ylabel("y")
        axs.legend(
            ["Training data", "Prediction", "Confidence Interval 99%"],
            loc="lower right",
        )
        plt.show()

    def test_mixed_gower_krg(self):
        # Kriging with the Gower-distance kernel over a categorical variable.
        from smt.surrogate_models import (
            MixIntKernelType,
            KRG,
            DesignSpace,
            CategoricalVariable,
        )
        from smt.applications.mixed_integer import (
            MixedIntegerKrigingModel,
        )
        import matplotlib.pyplot as plt
        import numpy as np

        xt = np.array([0, 3, 4])
        yt = np.array([0.0, 1.0, 1.5])
        design_space = DesignSpace(
            [
                CategoricalVariable(["0.0", "1.0", " 2.0", "3.0", "4.0"]),
            ]
        )
        # Surrogate
        sm = MixedIntegerKrigingModel(
            surrogate=KRG(
                design_space=design_space,
                theta0=[1e-2],
                categorical_kernel=MixIntKernelType.GOWER,
            ),
        )
        sm.set_training_values(xt, yt)
        sm.train()
        # DOE for validation
        x = np.linspace(0, 5, 5)
        y = sm.predict_values(x)
        plt.plot(xt, yt, "o", label="data")
        plt.plot(x, y, "d", color="red", markersize=3, label="pred")
        plt.xlabel("x")
        plt.ylabel("y")
        plt.legend()
        plt.show()

    def test_kpls_auto(self):
        # KPLS with automatic selection of the number of PLS components.
        import numpy as np
        from smt.surrogate_models import KPLS
        from smt.problems import TensorProduct
        from smt.sampling_methods import LHS

        # The problem is the exponential problem with dimension 10
        ndim = 10
        prob = TensorProduct(ndim=ndim, func="exp")
        sm = KPLS(eval_n_comp=True)
        samp = LHS(xlimits=prob.xlimits, random_state=42)
        np.random.seed(0)
        xt = samp(50)
        yt = prob(xt)
        np.random.seed(1)
        sm.set_training_values(xt, yt)
        sm.train()
        ## The model automatically choose a dimension of 3
        l = sm.options["n_comp"]
        print("\n The model automatically choose " + str(l) + " components.")
        ## You can predict a 10-dimension point from the 3-dimensional model
        print(sm.predict_values(np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]])))
        print(sm.predict_variances(np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]])))

    def test_kpls(self):
        # Kriging with partial least squares (KPLS) on 1-D toy data.
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models import KPLS

        xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
        yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
        sm = KPLS(theta0=[1e-2])
        sm.set_training_values(xt, yt)
        sm.train()
        num = 100
        x = np.linspace(0.0, 4.0, num)
        y = sm.predict_values(x)
        # estimated variance
        # add a plot with variance
        s2 = sm.predict_variances(x)
        # to compute the derivative according to the first variable
        dydx = sm.predict_derivatives(xt, 0)
        plt.plot(xt, yt, "o")
        plt.plot(x, y)
        plt.xlabel("x")
        plt.ylabel("y")
        plt.legend(["Training data", "Prediction"])
        plt.show()
        plt.plot(xt, yt, "o")
        plt.plot(x, y)
        plt.fill_between(
            np.ravel(x),
            np.ravel(y - 3 * np.sqrt(s2)),
            np.ravel(y + 3 * np.sqrt(s2)),
            color="lightgrey",
        )
        plt.xlabel("x")
        plt.ylabel("y")
        plt.legend(["Training data", "Prediction", "Confidence Interval 99%"])
        plt.show()

    def test_kplsk(self):
        # KPLSK: KPLS followed by a Kriging step in the original space.
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models import KPLSK

        xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
        yt = np.array([0.0, 1.0, 1.5, 0.9, 1.0])
        sm = KPLSK(theta0=[1e-2])
        sm.set_training_values(xt, yt)
        sm.train()
        num = 100
        x = np.linspace(0.0, 4.0, num)
        y = sm.predict_values(x)
        # estimated variance
        s2 = sm.predict_variances(x)
        # derivative according to the first variable
        dydx = sm.predict_derivatives(xt, 0)
        plt.plot(xt, yt, "o")
        plt.plot(x, y)
        plt.xlabel("x")
        plt.ylabel("y")
        plt.legend(["Training data", "Prediction"])
        plt.show()
        # add a plot with variance
        plt.plot(xt, yt, "o")
        plt.plot(x, y)
        plt.fill_between(
            np.ravel(x),
            np.ravel(y - 3 * np.sqrt(s2)),
            np.ravel(y + 3 * np.sqrt(s2)),
            color="lightgrey",
        )
        plt.xlabel("x")
        plt.ylabel("y")
        plt.legend(["Training data", "Prediction", "Confidence Interval 99%"])
        plt.show()

    def test_gekpls(self):
        # Gradient-enhanced KPLS on the 2-D Sphere problem.
        import numpy as np
        from mpl_toolkits.mplot3d import Axes3D
        import matplotlib.pyplot as plt

        from smt.surrogate_models import GEKPLS, DesignSpace
        from smt.problems import Sphere
        from smt.sampling_methods import LHS

        # Construction of the DOE
        fun = Sphere(ndim=2)
        sampling = LHS(xlimits=fun.xlimits, criterion="m")
        xt = sampling(20)
        yt = fun(xt)
        # Compute the gradient
        for i in range(2):
            yd = fun(xt, kx=i)
            yt = np.concatenate((yt, yd), axis=1)
        design_space = DesignSpace(fun.xlimits)
        # Build the GEKPLS model
        n_comp = 2
        sm = GEKPLS(
            design_space=design_space,
            theta0=[1e-2] * n_comp,
            extra_points=1,
            print_prediction=False,
            n_comp=n_comp,
        )
        sm.set_training_values(xt, yt[:, 0])
        for i in range(2):
            sm.set_training_derivatives(xt, yt[:, 1 + i].reshape((yt.shape[0], 1)), i)
        sm.train()
        # Test the model
        X = np.arange(fun.xlimits[0, 0], fun.xlimits[0, 1], 0.25)
        Y = np.arange(fun.xlimits[1, 0], fun.xlimits[1, 1], 0.25)
        X, Y = np.meshgrid(X, Y)
        Z = np.zeros((X.shape[0], X.shape[1]))
        for i in range(X.shape[0]):
            for j in range(X.shape[1]):
                Z[i, j] = sm.predict_values(
                    np.hstack((X[i, j], Y[i, j])).reshape((1, 2))
                )
        fig = plt.figure()
        ax = fig.add_subplot(projection="3d")
        surf = ax.plot_surface(X, Y, Z)
        plt.show()

    def test_genn(self):
        # Gradient-enhanced neural network on f(x) = x*sin(x).
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models.genn import GENN, load_smt_data

        # Training data
        lower_bound = -np.pi
        upper_bound = np.pi
        number_of_training_points = 4
        xt = np.linspace(lower_bound, upper_bound, number_of_training_points)
        yt = xt * np.sin(xt)
        dyt_dxt = np.sin(xt) + xt * np.cos(xt)
        # Validation data
        number_of_validation_points = 30
        xv = np.linspace(lower_bound, upper_bound, number_of_validation_points)
        yv = xv * np.sin(xv)
        dyv_dxv = np.sin(xv) + xv * np.cos(xv)
        # Truth model
        x = np.arange(lower_bound, upper_bound, 0.01)
        y = x * np.sin(x)
        # GENN
        genn = GENN()
        genn.options["alpha"] = 0.1  # learning rate that controls optimizer step size
        genn.options["beta1"] = 0.9  # tuning parameter to control ADAM optimization
        genn.options["beta2"] = 0.99  # tuning parameter to control ADAM optimization
        genn.options[
            "lambd"
        ] = 0.1  # lambd = 0. = no regularization, lambd > 0 = regularization
        genn.options[
            "gamma"
        ] = 1.0  # gamma = 0. = no grad-enhancement, gamma > 0 = grad-enhancement
        genn.options["deep"] = 2  # number of hidden layers
        genn.options["wide"] = 6  # number of nodes per hidden layer
        genn.options[
            "mini_batch_size"
        ] = 64  # used to divide data into training batches (use for large data sets)
        genn.options["num_epochs"] = 20  # number of passes through data
        genn.options[
            "num_iterations"
        ] = 100  # number of optimizer iterations per mini-batch
        genn.options["is_print"] = True  # print output (or not)
        load_smt_data(
            genn, xt, yt, dyt_dxt
        )  # convenience function to read in data that is in SMT format
        genn.train()  # API function to train model
        genn.plot_training_history()  # non-API function to plot training history (to check convergence)
        genn.goodness_of_fit(
            xv, yv, dyv_dxv
        )  # non-API function to check accuracy of regression
        y_pred = genn.predict_values(
            x
        )  # API function to predict values at new (unseen) points
        # Plot
        fig, ax = plt.subplots()
        ax.plot(x, y_pred)
        ax.plot(x, y, "k--")
        ax.plot(xv, yv, "ro")
        ax.plot(xt, yt, "k+", mew=3, ms=10)
        ax.set(xlabel="x", ylabel="y", title="GENN")
        ax.legend(["Predicted", "True", "Test", "Train"])
        plt.show()

    def test_mgp(self):
        # Marginal Gaussian process with an active (embedded) subspace.
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.surrogate_models import MGP
        from smt.sampling_methods import LHS

        # Construction of the DOE
        dim = 3

        def fun(x):
            import numpy as np

            res = (
                np.sum(x, axis=1) ** 2
                - np.sum(x, axis=1)
                + 0.2 * (np.sum(x, axis=1) * 1.2) ** 3
            )
            return res

        sampling = LHS(
            xlimits=np.asarray([(-1, 1)] * dim), criterion="m", random_state=42
        )
        xt = sampling(8)
        yt = np.atleast_2d(fun(xt)).T
        # Build the MGP model
        sm = MGP(
            theta0=[1e-2],
            print_prediction=False,
            n_comp=1,
        )
        sm.set_training_values(xt, yt[:, 0])
        sm.train()
        # Get the transfert matrix A
        emb = sm.embedding["C"]
        # Compute the smallest box containing all points of A
        upper = np.sum(np.abs(emb), axis=0)
        lower = -upper
        # Test the model
        u_plot = np.atleast_2d(np.arange(lower, upper, 0.01)).T
        x_plot = sm.get_x_from_u(u_plot)  # Get corresponding points in Omega
        y_plot_true = fun(x_plot)
        y_plot_pred = sm.predict_values(u_plot)
        sigma_MGP = sm.predict_variances(u_plot)
        sigma_KRG = sm.predict_variances_no_uq(u_plot)
        u_train = sm.get_u_from_x(xt)  # Get corresponding points in A
        # Plots
        fig, ax = plt.subplots()
        ax.plot(u_plot, y_plot_pred, label="Predicted")
        ax.plot(u_plot, y_plot_true, "k--", label="True")
        ax.plot(u_train, yt, "k+", mew=3, ms=10, label="Train")
        ax.fill_between(
            u_plot[:, 0],
            y_plot_pred[:, 0] - 3 * sigma_MGP[:, 0],
            y_plot_pred[:, 0] + 3 * sigma_MGP[:, 0],
            color="r",
            alpha=0.5,
            label="Variance with hyperparameters uncertainty",
        )
        ax.fill_between(
            u_plot[:, 0],
            y_plot_pred[:, 0] - 3 * sigma_KRG[:, 0],
            y_plot_pred[:, 0] + 3 * sigma_KRG[:, 0],
            color="b",
            alpha=0.5,
            label="Variance without hyperparameters uncertainty",
        )
        ax.set(xlabel="x", ylabel="y", title="MGP")
        fig.legend(loc="upper center", ncol=2)
        fig.tight_layout()
        fig.subplots_adjust(top=0.74)
        plt.show()
if __name__ == "__main__":
    # Run this module's tests when executed directly.
    unittest.main()
| 18,375 | 28.354633 | 104 | py |
smt | smt-master/smt/surrogate_models/tests/test_qp.py | """
Author: Remi Lafage <<remi.lafage@onera.fr>>, Frederic Zahle
This package is distributed under New BSD license.
"""
import unittest
import numpy as np
from smt.surrogate_models import QP, KRG
from smt.examples.rans_crm_wing.rans_crm_wing import (
get_rans_crm_wing,
plot_rans_crm_wing,
)
class TestQP(unittest.TestCase):
    """Regression tests for the QP (quadratic polynomial) surrogate."""

    def test_ny(self):
        # Predicting points one at a time must agree with the batched call.
        xt, yt, _ = get_rans_crm_wing()
        model = QP()
        model.set_training_values(xt, yt)
        model.train()
        sample_rows = [10, 11, 12, 13]
        one_by_one = np.zeros((4, 2))
        for out_row, sample in enumerate(sample_rows):
            one_by_one[out_row, :] = model.predict_values(
                np.atleast_2d(xt[sample, :])
            )
        batched = model.predict_values(np.atleast_2d(xt[10:14, :]))
        np.testing.assert_allclose(batched - one_by_one, np.zeros((4, 2)), atol=1e-15)
| 818 | 25.419355 | 70 | py |
smt | smt-master/smt/surrogate_models/tests/__init__.py | 0 | 0 | 0 | py | |
smt | smt-master/smt/surrogate_models/tests/test_krg_predictions.py | """
Authors: Nathalie Bartoli, Paul Saves
This package is distributed under New BSD license.
"""
import unittest
import numpy as np
from smt.surrogate_models import KRG
from smt.sampling_methods import LHS
from smt.utils.sm_test_case import SMTestCase
class Test(SMTestCase):
    """Finite-difference validation of KRG predictions and their derivatives.

    A 12-point LHS sample of a sin-plus-linear-trend function is fitted with
    every supported trend/kernel combination; each fit is then checked for
    zero variance at training points, derivative consistency, and
    vectorization consistency of the prediction APIs.
    """

    def setUp(self):
        def pb(x):
            # sin + linear trend
            y = (
                np.atleast_2d(np.sin(x[:, 0])).T
                + np.atleast_2d(2 * x[:, 0] + 5 * x[:, 1]).T
                + 10
            )  # + linear trend
            return y

        xlimits = np.array([[-5, 10], [-5, 10]])
        sampling = LHS(xlimits=xlimits, random_state=42)
        self.xt = sampling(12)
        self.yt = pb(self.xt)

    def test_predictions(self):
        # Sweep every trend/kernel combination; "pow_exp" is additionally
        # swept over its power parameter.
        trends = ["constant", "linear"]
        kernels = ["pow_exp", "squar_exp", "abs_exp", "matern32", "matern52"]
        powers = [1.0, 1.5, 2.0]
        for trend in trends:
            for kernel in kernels:
                if kernel == "pow_exp":
                    for power in powers:
                        sm = KRG(
                            theta0=[0.01],
                            print_global=False,
                            poly=trend,
                            corr=kernel,
                            pow_exp_power=power,
                        )  # ,eval_noise=True)
                        sm.set_training_values(self.xt, self.yt)
                        sm.train()
                        print(f"\n*** TREND = {trend} & kernel = {kernel} ***\n")
                        # quality of the surrogate on validation points
                        Test._check_prediction_variances(self, sm)
                        Test._check_prediction_derivatives(self, sm)
                else:
                    sm = KRG(
                        theta0=[0.01], print_global=False, poly=trend, corr=kernel
                    )  # ,eval_noise=True)
                    sm.set_training_values(self.xt, self.yt)
                    sm.train()
                    print(f"\n*** TREND = {trend} & kernel = {kernel} ***\n")
                    # quality of the surrogate on validation points
                    Test._check_prediction_variances(self, sm)
                    Test._check_prediction_derivatives(self, sm)

    # NOTE: declared @staticmethod but written with an explicit `self` first
    # argument; it is always invoked as Test._check_prediction_variances(self, sm).
    @staticmethod
    def _check_prediction_variances(self, sm):
        # An interpolating Kriging model must have zero variance at its
        # training inputs.
        y_predicted = sm.predict_variances(self.xt)
        variance_at_training_inputs = np.sum(y_predicted**2)
        np.testing.assert_allclose(variance_at_training_inputs, 0, atol=1e-9)

    @staticmethod
    def _check_prediction_derivatives(self, sm):
        # Central finite differences with step e around (xa, xb), compared
        # against the analytic derivative APIs.
        e = 5e-6
        xa = -1.3
        xb = 2.5
        x_valid = np.array(
            [[xa, xb], [xa + e, xb], [xa - e, xb], [xa, xb + e], [xa, xb - e]]
        )
        y_predicted = sm.predict_variances(x_valid)
        x = np.atleast_2d(x_valid[0])
        diff_g = (y_predicted[1, 0] - y_predicted[2, 0]) / (2 * e)
        diff_d = (y_predicted[3, 0] - y_predicted[4, 0]) / (2 * e)
        deriv = np.array(
            [
                sm.predict_variance_derivatives(x, 0)[0],
                sm.predict_variance_derivatives(x, 1)[0],
            ]
        ).T
        pred_errors = np.array(
            [
                np.abs((diff_g - deriv[0][0]) / diff_g),
                np.abs((diff_d - deriv[0][1]) / diff_d),
            ]
        )
        total_error = np.sum(pred_errors**2)
        # Variance derivatives use a looser tolerance than value derivatives.
        np.testing.assert_allclose(total_error, 0, atol=5e-3)
        y_predicted = sm.predict_values(x_valid)
        x = np.atleast_2d(x_valid[0])
        diff_g = (y_predicted[1, 0] - y_predicted[2, 0]) / (2 * e)
        diff_d = (y_predicted[3, 0] - y_predicted[4, 0]) / (2 * e)
        deriv = np.array(
            [sm.predict_derivatives(x, 0)[0], sm.predict_derivatives(x, 1)[0]]
        ).T
        pred_errors = np.array(
            [
                np.abs((diff_g - deriv[0][0]) / diff_g),
                np.abs((diff_d - deriv[0][1]) / diff_d),
            ]
        )
        total_error = np.sum(pred_errors**2)
        np.testing.assert_allclose(total_error, 0, atol=1e-9)
        ### VECTORIZATION TESTS
        # Add a sixth point so batched and per-point calls can be compared.
        x_valid = np.concatenate(
            (
                x_valid,
                np.atleast_2d(np.array([x_valid[0][0] + 1.0, x_valid[0][1] + 1.0])),
            )
        )
        # test predict values & variances vectorization
        all_vals1 = np.zeros((6, 2))
        for i, x in enumerate(x_valid):
            all_vals1[i, 0] = sm.predict_values(np.atleast_2d(x))
            all_vals1[i, 1] = sm.predict_variances(np.atleast_2d(x))
        all_vals2x = sm.predict_values(np.atleast_2d(x_valid)).flatten()
        all_vals2y = sm.predict_variances(np.atleast_2d(x_valid)).flatten()
        total_error = np.sum(
            [
                np.power(all_vals1[:, 0] - all_vals2x, 2),
                np.power(all_vals1[:, 1] - all_vals2y, 2),
            ]
        )
        np.testing.assert_allclose(total_error, 0, atol=1e-9)
        # test predict_derivatives vectorization
        all_vals1 = np.zeros((6, 2))
        for i, x in enumerate(x_valid):
            all_vals1[i, 0] = sm.predict_derivatives(np.atleast_2d(x), 0)
            all_vals1[i, 1] = sm.predict_derivatives(np.atleast_2d(x), 1)
        all_vals2x = sm.predict_derivatives(np.atleast_2d(x_valid), 0).flatten()
        all_vals2y = sm.predict_derivatives(np.atleast_2d(x_valid), 1).flatten()
        total_error = np.sum(
            [
                np.power(all_vals1[:, 0] - all_vals2x, 2),
                np.power(all_vals1[:, 1] - all_vals2y, 2),
            ]
        )
        np.testing.assert_allclose(total_error, 0, atol=1e-9)
        # test predict_variance_derivatives vectorization
        all_vals1 = np.zeros((6, 2))
        for i, x in enumerate(x_valid):
            all_vals1[i, 0] = sm.predict_variance_derivatives(np.atleast_2d(x), 0)
            all_vals1[i, 1] = sm.predict_variance_derivatives(np.atleast_2d(x), 1)
        all_vals2x = sm.predict_variance_derivatives(
            np.atleast_2d(x_valid), 0
        ).flatten()
        all_vals2y = sm.predict_variance_derivatives(
            np.atleast_2d(x_valid), 1
        ).flatten()
        total_error = np.sum(
            [
                np.power(all_vals1[:, 0] - all_vals2x, 2),
                np.power(all_vals1[:, 1] - all_vals2y, 2),
            ]
        )
        np.testing.assert_allclose(total_error, 0, atol=1e-9)
if __name__ == "__main__":
    # Run this module's tests when executed directly.
    unittest.main()
| 6,514 | 34.026882 | 84 | py |
smt | smt-master/smt/surrogate_models/tests/test_krg_outputs.py | """
Author: Remi Lafage <<remi.lafage@onera.fr>>
This package is distributed under New BSD license.
"""
import unittest
import numpy as np
from smt.surrogate_models import KRG
from smt.problems import Sphere
from smt.sampling_methods import FullFactorial, LHS
class TestKRG(unittest.TestCase):
    """Shape checks for the KRG prediction APIs with multi-output training."""

    def test_predict_output_shape(self):
        # d input dimensions, n training samples.
        d, n = (3, 10)
        sx = LHS(
            xlimits=np.repeat(np.atleast_2d([0.0, 1.0]), d, axis=0),
            criterion="m",
            random_state=42,
        )
        x = sx(n)
        # 2-dimensional output
        n_s = 2
        sy = LHS(
            xlimits=np.repeat(np.atleast_2d([0.0, 1.0]), n_s, axis=0),
            criterion="m",
            random_state=42,
        )
        y = sy(n)
        kriging = KRG(poly="linear")
        kriging.set_training_values(x, y)
        kriging.train()
        # Values and variances must mirror the training output shape (n, n_s).
        val = kriging.predict_values(x)
        self.assertEqual(y.shape, val.shape)
        var = kriging.predict_variances(x)
        self.assertEqual(y.shape, var.shape)
        for kx in range(d):
            val_deriv = kriging.predict_derivatives(x, kx)
            self.assertEqual(y.shape, val_deriv.shape)
        # NOTE(review): only derivative direction 0 is shape-checked here —
        # presumably the other directions share the same shape; consider
        # looping over kx as above. TODO confirm.
        var_deriv = kriging.predict_variance_derivatives(x, 0)
        self.assertEqual((n, n_s), var_deriv.shape)
if __name__ == "__main__":
    # Run this module's tests when executed directly.
    unittest.main()
| 1,347 | 25.431373 | 70 | py |
smt | smt-master/smt/applications/mfk.py | # -*- coding: utf-8 -*-
"""
Created on Fri May 04 10:26:49 2018
@author: Mostafa Meliani <melimostafa@gmail.com>
Multi-Fidelity co-Kriging: recursive formulation with autoregressive model of
order 1 (AR1)
Adapted on January 2021 by Andres Lopez-Lopera to the new SMT version
"""
from copy import deepcopy
import numpy as np
from scipy.linalg import solve_triangular
from scipy import linalg
from scipy.spatial.distance import cdist
from sklearn.cross_decomposition import PLSRegression as pls
from smt.surrogate_models.krg_based import KrgBased
from smt.sampling_methods import LHS
from smt.utils.kriging import (
cross_distances,
componentwise_distance,
differences,
)
from smt.utils.misc import standardization
class NestedLHS(object):
    def __init__(self, nlevel, xlimits, random_state=None):
        """
        Constructor where values of options can be passed in.

        Parameters
        ----------
        nlevel : integer.
            The number of designs of experiments to be built
        xlimits : ndarray
            The interval of the domain in each dimension with shape (nx, 2)
        random_state : Numpy RandomState object or seed number which controls random draws
        """
        self.nlevel = nlevel
        self.xlimits = xlimits
        self.random_state = random_state

    def __call__(self, nb_samples_hifi):
        """
        Builds nlevel nested designs of experiments of dimension dim and size n_samples.
        Each doe is built with the optimized lhs procedure.
        Builds the highest level first; nested properties are ensured by deleting
        the nearest neighbours in lower levels of fidelity.

        Parameters
        ----------
        nb_samples_hifi: The number of samples of the highest fidelity model.
            nb_samples_fi(n-1) = 2 * nb_samples_fi(n)

        Returns
        -------
        list of length nlevel of designs of experiments from low to high fidelity level.
        """
        # Sizes double per level downward: [2^(nlevel-1)*n, ..., 2*n, n].
        nt = [pow(2, i - 1) * nb_samples_hifi for i in range(self.nlevel, 0, -1)]
        if len(nt) != self.nlevel:
            raise ValueError("nt must be a list of nlevel elements")
        # Idiom fix: `if not ...` instead of comparing to False (PEP 8 E712).
        if not np.allclose(np.sort(nt)[::-1], nt):
            raise ValueError("nt must be a list of decreasing integers")
        doe = []
        p0 = LHS(xlimits=self.xlimits, criterion="ese", random_state=self.random_state)
        doe.append(p0(nt[0]))
        for i in range(1, self.nlevel):
            p = LHS(
                xlimits=self.xlimits, criterion="ese", random_state=self.random_state
            )
            doe.append(p(nt[i]))
        # Enforce nestedness from high to low fidelity: for each point of the
        # higher level, remove its (not-yet-claimed) nearest neighbour in the
        # lower level, then append the higher-level points to the lower level.
        for i in range(1, self.nlevel)[::-1]:
            ind = []
            d = cdist(doe[i], doe[i - 1], "euclidean")
            for j in range(doe[i].shape[0]):
                dj = np.sort(d[j, :])
                dist = dj[0]
                nearest = (np.where(d[j, :] == dist))[0][0]
                rank = 0
                # Walk to the next-nearest neighbour if this one was already
                # claimed by a previous high-fidelity point.
                while nearest in ind:
                    rank = rank + 1
                    dist = dj[rank]
                    nearest = (np.where(d[j, :] == dist))[0][0]
                ind.append(nearest)
            doe[i - 1] = np.delete(doe[i - 1], ind, axis=0)
            doe[i - 1] = np.vstack((doe[i - 1], doe[i]))
        return doe
class MFK(KrgBased):
def _initialize(self):
super()._initialize()
declare = self.options.declare
declare(
"rho_regr",
"constant",
values=("constant", "linear", "quadratic"),
desc="Regression function type for rho",
)
declare(
"optim_var",
False,
types=bool,
values=(True, False),
desc="If True, the variance at HF samples is forced to zero",
)
declare(
"propagate_uncertainty",
True,
types=bool,
values=(True, False),
desc="If True, the variance cotribution of lower fidelity levels are considered",
)
self.name = "MFK"
    def _differences(self, X, Y):
        """
        Compute the componentwise differences between the rows of X and the
        rows of Y (delegates to smt.utils.kriging.differences).
        """
        return differences(X, Y)
def _check_list_structure(self, X, y):
"""
checks if the data structure is compatible with MFK.
sets class attributes such as (number of levels of Fidelity, training points in each level, ...)
Arguments :
X : list of arrays, each array corresponds to a fidelity level. starts from lowest to highest
y : same as X
"""
if type(X) is not list:
nlevel = 1
X = [X]
else:
nlevel = len(X)
if type(y) is not list:
y = [y]
if len(X) != len(y):
raise ValueError("X and y must have the same length.")
n_samples = np.zeros(nlevel, dtype=int)
n_features = np.zeros(nlevel, dtype=int)
n_samples_y = np.zeros(nlevel, dtype=int)
for i in range(nlevel):
n_samples[i], n_features[i] = X[i].shape
if i > 1 and n_features[i] != n_features[i - 1]:
raise ValueError("All X must have the same number of columns.")
y[i] = np.asarray(y[i]).ravel()[:, np.newaxis]
n_samples_y[i] = y[i].shape[0]
if n_samples[i] != n_samples_y[i]:
raise ValueError("X and y must have the same number of rows.")
self.nx = n_features[0]
self.nt_all = n_samples
self.nlvl = nlevel
self.ny = y[0].shape[1]
self.X = X[:]
self.y = y[:]
def _new_train(self):
"""
Overrides KrgBased implementation
Trains the Multi-Fidelity model
"""
self._new_train_init()
theta0 = self.options["theta0"].copy()
noise0 = self.options["noise0"].copy()
for lvl in range(self.nlvl):
self._new_train_iteration(lvl)
self.options["theta0"] = theta0
self.options["noise0"] = noise0
self._reinterpolate(lvl)
def _new_train_init(self):
if self.name in ["MFKPLS", "MFKPLSK"]:
_pls = pls(self.options["n_comp"])
# As of sklearn 0.24.1 PLS with zeroed outputs raises an exception while sklearn 0.23 returns zeroed x_rotations
# For now the try/except below is a workaround to restore the 0.23 behaviour
try:
# PLS is done on the highest fidelity identified by the key None
self.m_pls = _pls.fit(
self.training_points[None][0][0].copy(),
self.training_points[None][0][1].copy(),
)
self.coeff_pls = self.m_pls.x_rotations_
except StopIteration:
self.coeff_pls = np.zeros(
self.training_points[None][0][0].shape[1], self.options["n_comp"]
)
xt = []
yt = []
i = 0
while self.training_points.get(i, None) is not None:
xt.append(self.training_points[i][0][0])
yt.append(self.training_points[i][0][1])
i = i + 1
xt.append(self.training_points[None][0][0])
yt.append(self.training_points[None][0][1])
self._check_list_structure(xt, yt)
self._check_param()
X = self.X
y = self.y
_, _, self.X_offset, self.y_mean, self.X_scale, self.y_std = standardization(
np.concatenate(xt, axis=0), np.concatenate(yt, axis=0)
)
nlevel = self.nlvl
# initialize lists
self.optimal_noise_all = nlevel * [0]
self.D_all = nlevel * [0]
self.F_all = nlevel * [0]
self.p_all = nlevel * [0]
self.q_all = nlevel * [0]
self.optimal_rlf_value = nlevel * [0]
self.optimal_par = nlevel * [{}]
self.optimal_theta = nlevel * [0]
self.X_norma_all = [(x - self.X_offset) / self.X_scale for x in X]
self.y_norma_all = [(f - self.y_mean) / self.y_std for f in y]
    def _new_train_iteration(self, lvl):
        """Fit the Gaussian process of one fidelity level.

        Selects the level's normalized data, estimates (or reuses) the noise
        variance, builds the regression matrix F (augmented for lvl > 0 with
        the autoregressive rho terms based on the previous level's
        prediction), and optimizes the hyperparameters. Results are stored in
        the per-level containers allocated by _new_train_init. Note that this
        method overwrites the "theta0"/"noise0" options with level-local
        values; the caller restores them afterwards.
        """
        n_samples = self.nt_all
        # Narrow the options to this level's entries.
        self.options["noise0"] = np.array([self.options["noise0"][lvl]]).flatten()
        self.options["theta0"] = self.options["theta0"][lvl, :]
        self.X_norma = self.X_norma_all[lvl]
        self.y_norma = self.y_norma_all[lvl]
        if self.options["eval_noise"]:
            if self.options["use_het_noise"]:
                # hetGP works with unique design variables
                (
                    self.X_norma,
                    self.index_unique,  # do we need to store it?
                    self.nt_reps,  # do we need to store it?
                ) = np.unique(
                    self.X_norma, return_inverse=True, return_counts=True, axis=0
                )
                self.nt_all[lvl] = self.X_norma.shape[0]
                # computing the mean of the output per unique design variable (see Binois et al., 2018)
                y_norma_unique = []
                for i in range(self.nt_all[lvl]):
                    y_norma_unique.append(np.mean(self.y_norma[self.index_unique == i]))
                y_norma_unique = np.array(y_norma_unique).reshape(-1, 1)
                # pointwise sensible estimates of the noise variances (see Ankenman et al., 2010)
                self.optimal_noise = self.options["noise0"] * np.ones(self.nt_all[lvl])
                for i in range(self.nt_all[lvl]):
                    diff = self.y_norma[self.index_unique == i] - y_norma_unique[i]
                    if np.sum(diff**2) != 0.0:
                        self.optimal_noise[i] = np.std(diff, ddof=1) ** 2
                self.optimal_noise = self.optimal_noise / self.nt_reps
                self.optimal_noise_all[lvl] = self.optimal_noise
                self.y_norma = y_norma_unique
                self.X_norma_all[lvl] = self.X_norma
                self.y_norma_all[lvl] = self.y_norma
        else:
            # Noise not evaluated: use the user-provided value, rescaled to
            # the normalized output space.
            self.optimal_noise = self.options["noise0"] / self.y_std**2
            self.optimal_noise_all[lvl] = self.optimal_noise
        # Calculate matrix of distances D between samples
        self.D_all[lvl] = cross_distances(self.X_norma)
        # Regression matrix and parameters
        self.F_all[lvl] = self._regression_types[self.options["poly"]](self.X_norma)
        self.p_all[lvl] = self.F_all[lvl].shape[1]
        # Concatenate the autoregressive part for levels > 0
        if lvl > 0:
            F_rho = self._regression_types[self.options["rho_regr"]](self.X_norma)
            self.q_all[lvl] = F_rho.shape[1]
            # Columns: rho basis scaled by the previous level's prediction at
            # the current level's points, followed by the trend basis.
            self.F_all[lvl] = np.hstack(
                (
                    F_rho
                    * np.dot(
                        self._predict_intermediate_values(
                            self.X_norma, lvl, descale=False
                        ),
                        np.ones((1, self.q_all[lvl])),
                    ),
                    self.F_all[lvl],
                )
            )
        else:
            self.q_all[lvl] = 0
        n_samples_F_i = self.F_all[lvl].shape[0]
        if n_samples_F_i != n_samples[lvl]:
            raise Exception(
                "Number of rows in F and X do not match. Most "
                "likely something is going wrong with the "
                "regression model."
            )
        if int(self.p_all[lvl] + self.q_all[lvl]) >= n_samples_F_i:
            raise Exception(
                (
                    "Ordinary least squares problem is undetermined "
                    "n_samples=%d must be greater than the regression"
                    " model size p+q=%d."
                )
                % (n_samples_F_i, self.p_all[lvl] + self.q_all[lvl])
            )
        # Determine Gaussian Process model parameters: expose this level's
        # data through the attributes that _optimize_hyperparam reads.
        self.F = self.F_all[lvl]
        D, self.ij = self.D_all[lvl]
        self._lvl = lvl
        self.nt = self.nt_all[lvl]
        self.q = self.q_all[lvl]
        self.p = self.p_all[lvl]
        (
            self.optimal_rlf_value[lvl],
            self.optimal_par[lvl],
            self.optimal_theta[lvl],
        ) = self._optimize_hyperparam(D)
        if self.options["eval_noise"] and not self.options["use_het_noise"]:
            # The estimated noise is appended to theta by the optimizer;
            # split it back out.
            tmp_list = self.optimal_theta[lvl]
            self.optimal_theta[lvl] = tmp_list[:-1]
            self.optimal_noise = tmp_list[-1]
            self.optimal_noise_all[lvl] = self.optimal_noise
        del self.y_norma, self.D, self.optimal_noise
    def _reinterpolate(self, lvl):
        """Optionally retrain on noise-free reinterpolated data.

        When noise is evaluated and optim_var is set, each level's training
        outputs are replaced by the model's own (denoised) predictions and
        the model is retrained with noise evaluation disabled, forcing zero
        variance at the HF samples.

        NOTE(review): the `lvl` parameter is immediately shadowed by the loop
        variable below and its incoming value is never read — presumably kept
        for call-site symmetry; confirm before relying on it.
        """
        if self.options["eval_noise"] and self.options["optim_var"]:
            X = self.X
            for lvl in range(self.nlvl - 1):
                self.set_training_values(
                    X[lvl], self._predict_intermediate_values(X[lvl], lvl + 1), name=lvl
                )
            self.set_training_values(
                X[-1], self._predict_intermediate_values(X[-1], self.nlvl)
            )
            self.options["eval_noise"] = False
            self._new_train()
            self.options["eval_noise"] = True
def _componentwise_distance(self, dx, opt=0):
d = componentwise_distance(
dx, self.options["corr"], self.nx, power=self.options["pow_exp_power"]
)
return d
    def _predict_intermediate_values(self, X, lvl, descale=True):
        """
        Evaluates the model at a set of points.
        Used for training the model at level lvl.
        Allows to relax the order problem.

        Arguments
        ---------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values
        lvl : level at which the prediction is made
        descale : bool
            If True, X is given in the original space and the returned values
            are de-normalized; if False, everything stays in normalized space.

        Returns
        -------
        y : np.ndarray
            Evaluation point output variable values
        """
        n_eval, _ = X.shape
        # if n_features_X != self.n_features:
        #    raise ValueError("Design must be an array of n_features columns.")

        # Calculate kriging mean and variance at level 0
        mu = np.zeros((n_eval, lvl))
        if descale:
            X = (X - self.X_offset) / self.X_scale
        f = self._regression_types[self.options["poly"]](X)
        f0 = self._regression_types[self.options["poly"]](X)
        dx = self._differences(X, Y=self.X_norma_all[0])
        d = self._componentwise_distance(dx)
        beta = self.optimal_par[0]["beta"]
        r_ = self._correlation_types[self.options["corr"]](
            self.optimal_theta[0], d
        ).reshape(n_eval, self.nt_all[0])
        gamma = self.optimal_par[0]["gamma"]
        # Scaled predictor: trend + correlation correction.
        mu[:, 0] = (np.dot(f, beta) + np.dot(r_, gamma)).ravel()
        # Calculate recursively kriging mean and variance at level i:
        # each level's regression combines rho * previous-level mean with the
        # polynomial trend.
        for i in range(1, lvl):
            g = self._regression_types[self.options["rho_regr"]](X)
            dx = self._differences(X, Y=self.X_norma_all[i])
            d = self._componentwise_distance(dx)
            r_ = self._correlation_types[self.options["corr"]](
                self.optimal_theta[i], d
            ).reshape(n_eval, self.nt_all[i])
            f = np.vstack((g.T * mu[:, i - 1], f0.T))
            beta = self.optimal_par[i]["beta"]
            gamma = self.optimal_par[i]["gamma"]
            # scaled predictor
            mu[:, i] = (np.dot(f.T, beta) + np.dot(r_, gamma)).ravel()
        # scaled predictor
        if descale:
            mu = mu * self.y_std + self.y_mean
        # Only the requested level's prediction is returned, as a column.
        return mu[:, -1].reshape((n_eval, 1))
def _predict_values(self, X, is_acting=None):
"""
Evaluates the model at a set of points.
Arguments
---------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
Returns
-------
y : np.ndarray
Evaluation point output variable values
"""
return self._predict_intermediate_values(X, self.nlvl)
def _predict_variances(self, X: np.ndarray, is_acting=None) -> np.ndarray:
"""
Evaluates the model at a set of points.
Arguments
---------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
Returns
-------
y : np.ndarray
Evaluation point output variable values
"""
return self.predict_variances_all_levels(X)[0][:, -1]
    def predict_variances_all_levels(self, X):
        """
        Evaluates the model variance at a set of points, for every fidelity level.

        Arguments
        ---------
        X : np.ndarray [n_evals, dim]
            Evaluation point input variable values

        Returns
        -------
        MSE : np.ndarray [n_evals, nlevel]
            Predictive variance at each fidelity level, rescaled to the
            original output units.
        sigma2_rhos : list
            Per-level variance contribution of the rho scaling term
            (one entry per level 1..nlevel-1).
        """
        # Initialization X = atleast_2d(X)
        nlevel = self.nlvl
        sigma2_rhos = []
        n_eval, n_features_X = X.shape
        # if n_features_X != self.n_features:
        # raise ValueError("Design must be an array of n_features columns.")
        # Normalize inputs with the training offset/scale
        X = (X - self.X_offset) / self.X_scale
        # Calculate kriging mean and variance at level 0
        mu = np.zeros((n_eval, nlevel))
        f = self._regression_types[self.options["poly"]](X)
        f0 = self._regression_types[self.options["poly"]](X)
        dx = self._differences(X, Y=self.X_norma_all[0])
        d = self._componentwise_distance(dx)
        # Get regression function and correlation
        F = self.F_all[0]
        C = self.optimal_par[0]["C"]
        beta = self.optimal_par[0]["beta"]
        Ft = solve_triangular(C, F, lower=True)
        # yt = solve_triangular(C, self.y_norma_all[0], lower=True)
        r_ = self._correlation_types[self.options["corr"]](
            self.optimal_theta[0], d
        ).reshape(n_eval, self.nt_all[0])
        gamma = self.optimal_par[0]["gamma"]
        # Scaled predictor
        mu[:, 0] = (np.dot(f, beta) + np.dot(r_, gamma)).ravel()
        self.sigma2_rho = nlevel * [None]
        MSE = np.zeros((n_eval, nlevel))
        r_t = solve_triangular(C, r_.T, lower=True)
        G = self.optimal_par[0]["G"]
        u_ = solve_triangular(G.T, f.T - np.dot(Ft.T, r_t), lower=True)
        # Process variance of level 0, expressed in normalized output space
        sigma2 = self.optimal_par[0]["sigma2"] / self.y_std**2
        MSE[:, 0] = sigma2 * (
            # 1 + self.optimal_noise_all[0] - (r_t ** 2).sum(axis=0) + (u_ ** 2).sum(axis=0)
            1
            - (r_t**2).sum(axis=0)
            + (u_**2).sum(axis=0)
        )
        # Calculate recursively kriging variance at level i
        for i in range(1, nlevel):
            F = self.F_all[i]
            C = self.optimal_par[i]["C"]
            g = self._regression_types[self.options["rho_regr"]](X)
            dx = self._differences(X, Y=self.X_norma_all[i])
            d = self._componentwise_distance(dx)
            r_ = self._correlation_types[self.options["corr"]](
                self.optimal_theta[i], d
            ).reshape(n_eval, self.nt_all[i])
            # Regression basis: rho-scaled lower-level mean stacked over f0
            f = np.vstack((g.T * mu[:, i - 1], f0.T))
            Ft = solve_triangular(C, F, lower=True)
            yt = solve_triangular(C, self.y_norma_all[i], lower=True)
            r_t = solve_triangular(C, r_.T, lower=True)
            G = self.optimal_par[i]["G"]
            beta = self.optimal_par[i]["beta"]
            # scaled predictor
            sigma2 = self.optimal_par[i]["sigma2"] / self.y_std**2
            q = self.q_all[i]
            u_ = solve_triangular(G.T, f - np.dot(Ft.T, r_t), lower=True)
            # Variance contribution of the rho scaling factor at this level
            sigma2_rho = np.dot(
                g,
                sigma2 * linalg.inv(np.dot(G.T, G))[:q, :q]
                + np.dot(beta[:q], beta[:q].T),
            )
            sigma2_rho = (sigma2_rho * g).sum(axis=1)
            sigma2_rhos.append(sigma2_rho)
            if self.name in ["MFKPLS", "MFKPLSK"]:
                # PLS-based variants use a quadratic-form estimator of the
                # residual variance instead of the stored sigma2
                p = self.p_all[i]
                Q_ = (np.dot((yt - np.dot(Ft, beta)).T, yt - np.dot(Ft, beta)))[0, 0]
                MSE[:, i] = (
                    # sigma2_rho * MSE[:, i - 1]
                    +Q_ / (2 * (self.nt_all[i] - p - q))
                    # * (1 + self.optimal_noise_all[i] - (r_t ** 2).sum(axis=0))
                    * (1 - (r_t**2).sum(axis=0))
                    + sigma2 * (u_**2).sum(axis=0)
                )
            else:
                MSE[:, i] = sigma2 * (
                    # 1 + self.optimal_noise_all[i] - (r_t ** 2).sum(axis=0) + (u_ ** 2).sum(axis=0)
                    1
                    - (r_t**2).sum(axis=0)
                    + (u_**2).sum(axis=0)
                )  # + sigma2_rho * MSE[:, i - 1]
            if self.options["propagate_uncertainty"]:
                # Propagate the lower-level variance through the rho scaling
                MSE[:, i] = MSE[:, i] + sigma2_rho * MSE[:, i - 1]
        # scaled predictor: back to original output units
        MSE *= self.y_std**2
        return MSE, sigma2_rhos
    def _predict_derivatives(self, x, kx):
        """
        Evaluates the derivatives at a set of points.

        Arguments
        ---------
        x : np.ndarray [n_evals, dim]
            Evaluation point input variable values
        kx : int
            The 0-based index of the input variable with respect to which derivatives are desired.

        Returns
        -------
        y : np.ndarray
            Derivative values at the highest fidelity level, descaled by
            y_std / X_scale[kx].
        """
        lvl = self.nlvl
        # Initialization
        n_eval, n_features_x = x.shape
        x = (x - self.X_offset) / self.X_scale
        dy_dx = np.zeros((n_eval, lvl))
        # The analytic derivative below is specific to the squared-exponential kernel
        if self.options["corr"] != "squar_exp":
            raise ValueError(
                "The derivative is only available for square exponential kernel"
            )
        if self.options["poly"] == "constant":
            df = np.zeros([n_eval, 1])
        elif self.options["poly"] == "linear":
            df = np.zeros((n_eval, self.nx + 1))
            df[:, 1:] = 1
        else:
            raise ValueError(
                "The derivative is only available for ordinary kriging or "
                + "universal kriging using a linear trend"
            )
        # Keep an unmodified copy of the trend derivative for the level recursion
        df0 = deepcopy(df)
        if self.options["rho_regr"] != "constant":
            raise ValueError(
                "The derivative is only available for regression rho constant"
            )
        # Get pairwise componentwise L1-distances to the input training set
        dx = self._differences(x, Y=self.X_norma_all[0])
        d = self._componentwise_distance(dx)
        # Compute the correlation function
        r_ = self._correlation_types[self.options["corr"]](
            self.optimal_theta[0], d
        ).reshape(n_eval, self.nt_all[0])
        # Beta and gamma = R^-1(y-FBeta)
        beta = self.optimal_par[0]["beta"]
        gamma = self.optimal_par[0]["gamma"]
        df_dx = np.dot(df, beta)
        # Signed distances along dimension kx to the level-0 training points
        d_dx = x[:, kx].reshape((n_eval, 1)) - self.X_norma_all[0][:, kx].reshape(
            (1, self.nt_all[0])
        )
        theta = self._get_theta(0)
        dy_dx[:, 0] = np.ravel((df_dx - 2 * theta[kx] * np.dot(d_dx * r_, gamma)))
        # Calculate recursively derivative at level i
        for i in range(1, lvl):
            g = self._regression_types[self.options["rho_regr"]](x)
            dx = self._differences(x, Y=self.X_norma_all[i])
            d = self._componentwise_distance(dx)
            r_ = self._correlation_types[self.options["corr"]](
                self.optimal_theta[i], d
            ).reshape(n_eval, self.nt_all[i])
            # Regression derivative: rho-scaled lower-level derivative over df0
            df = np.vstack((g.T * dy_dx[:, i - 1], df0.T))
            beta = self.optimal_par[i]["beta"]
            gamma = self.optimal_par[i]["gamma"]
            df_dx = np.dot(df.T, beta)
            d_dx = x[:, kx].reshape((n_eval, 1)) - self.X_norma_all[i][:, kx].reshape(
                (1, self.nt_all[i])
            )
            theta = self._get_theta(i)
            # scaled predictor
            dy_dx[:, i] = np.ravel(df_dx - 2 * theta[kx] * np.dot(d_dx * r_, gamma))
        # Descale: derivative of the de-normalized output wrt the raw input
        return dy_dx[:, -1] * self.y_std / self.X_scale[kx]
def _get_theta(self, i):
return self.optimal_theta[i]
    def _check_param(self):
        """
        Overrides KrgBased implementation
        This function checks some parameters of the model.

        Validates correlation-kernel compatibility and broadcasts theta0 and
        noise0 so there is one entry per fidelity level (mutates options).
        """
        # Effective hyperparameter dimension: number of PLS components for the
        # PLS-based variants, full input dimension otherwise.
        if self.name in ["MFKPLS", "MFKPLSK"]:
            d = self.options["n_comp"]
        else:
            d = self.nx
        if self.options["corr"] == "act_exp":
            raise ValueError("act_exp correlation function must be used with MGP")
        if self.name in ["MFKPLS"]:
            if self.options["corr"] not in ["squar_exp", "abs_exp"]:
                raise ValueError(
                    "MFKPLS only works with a squared exponential or an absolute exponential kernel"
                )
        elif self.name in ["MFKPLSK"]:
            if self.options["corr"] not in ["squar_exp"]:
                raise ValueError(
                    "MFKPLSK only works with a squared exponential kernel (until we prove the contrary)"
                )
        # Broadcast theta0 to shape (nlvl, d): one hyperparameter row per level
        if isinstance(self.options["theta0"], np.ndarray):
            if self.options["theta0"].shape != (self.nlvl, d):
                raise ValueError(
                    "the dimensions of theta0 %s should coincide to the number of dim %s"
                    % (self.options["theta0"].shape, (self.nlvl, d))
                )
        else:
            if len(self.options["theta0"]) != d:
                if len(self.options["theta0"]) == 1:
                    # Single scalar: replicate over all levels and dimensions
                    self.options["theta0"] *= np.ones((self.nlvl, d))
                elif len(self.options["theta0"]) == self.nlvl:
                    # One value per level: replicate over dimensions
                    self.options["theta0"] = np.array(self.options["theta0"]).reshape(
                        -1, 1
                    )
                    self.options["theta0"] *= np.ones((1, d))
                else:
                    raise ValueError(
                        "the length of theta0 (%s) should be equal to the number of dim (%s) or levels of fidelity (%s)."
                        % (len(self.options["theta0"]), d, self.nlvl)
                    )
            else:
                # One value per dimension: replicate over levels
                self.options["theta0"] *= np.ones((self.nlvl, 1))
        # Broadcast noise0 so that there is one noise spec per fidelity level
        if len(self.options["noise0"]) != self.nlvl:
            if len(self.options["noise0"]) == 1:
                self.options["noise0"] = self.nlvl * [self.options["noise0"]]
            else:
                raise ValueError(
                    "the length of noise0 (%s) should be equal to the number of levels of fidelity (%s)."
                    % (len(self.options["noise0"]), self.nlvl)
                )
        for i in range(self.nlvl):
            if self.options["use_het_noise"]:
                # Heteroscedastic noise: one value per training point expected
                # (only checked when training points of this level are unique)
                if len(self.X[i]) == len(np.unique(self.X[i])):
                    if len(self.options["noise0"][i]) != self.nt_all[i]:
                        if len(self.options["noise0"][i]) == 1:
                            self.options["noise0"][i] *= np.ones(self.nt_all[i])
                        else:
                            raise ValueError(
                                "for the level of fidelity %s, the length of noise0 (%s) should be equal to the number of observations (%s)."
                                % (i, len(self.options["noise0"][i]), self.nt_all[i])
                            )
            else:
                # Homoscedastic noise: a single scalar per level
                if np.size(self.options["noise0"][i]) != 1:
                    raise ValueError(
                        "for the level of fidelity %s, the length of noise0 (%s) should be equal to one."
                        % (i, len(self.options["noise0"][i]))
                    )
| 27,471 | 35.776439 | 141 | py |
smt | smt-master/smt/applications/application.py | """
Author: Dr. Mohamed Amine Bouhlel <mbouhlel@umich.edu>, R. Lafage
This package is distributed under New BSD license.
"""
from smt.utils.options_dictionary import OptionsDictionary
from smt.surrogate_models import LS, QP, KPLS, KRG, KPLSK, GEKPLS, MGP
# Optional compiled surrogates (IDW, RBF, RMTC, RMTB) require the compiled
# extensions to be built; fall back gracefully when they are unavailable.
try:
    from smt.surrogate_models import IDW, RBF, RMTC, RMTB

    compiled_available = True
except ImportError:
    # Narrowed from a bare `except:` so unrelated errors (SystemExit,
    # KeyboardInterrupt, typos at import time) are no longer swallowed.
    compiled_available = False
class SurrogateBasedApplication:
    """Base class for surrogate-based applications (VFM, MOE, ...)."""

    # Mapping from surrogate-model names to their classes. The compiled
    # surrogates (RBF, RMTC, RMTB, IDW) are inserted only when the compiled
    # extensions could be imported; key order matches the historical layout.
    _surrogate_type = {
        "KRG": KRG,
        "LS": LS,
        "QP": QP,
        "KPLS": KPLS,
        "KPLSK": KPLSK,
        "GEKPLS": GEKPLS,
    }
    if compiled_available:
        _surrogate_type.update(
            {"RBF": RBF, "RMTC": RMTC, "RMTB": RMTB, "IDW": IDW}
        )
    _surrogate_type["MGP"] = MGP

    def __init__(self, **kwargs):
        """
        Constructor where values of options can be passed in.

        For the list of options, see the documentation for the surrogate model being used.

        Parameters
        ----------
        **kwargs : named arguments
            Set of options that can be optionally set; each option must have been declared.

        Examples
        --------
        >>> from smt.applications import VFM
        >>> extension = VFM(type_bridge = 'Additive', name_model_LF = QP, name_model_bridge =
        LS, X_LF = xLF, y_LF = yLF, X_HF = xHF, y_HF = yHF, options_LF =
        dictOptionLFModel, options_bridge = dictOptionBridgeModel)
        """
        # Options must exist before _initialize() declares entries onto them
        self.options = OptionsDictionary()
        self._initialize()
        self.options.update(kwargs)

    def _initialize(self):
        """
        Hook implemented by the application to declare options and declare what
        they support (optional).

        Examples
        --------
        self.options.declare('option_name', default_value, types=(bool, int), desc='description')
        """
        pass
| 2,127 | 27 | 99 | py |
smt | smt-master/smt/applications/vfm.py | """
Author: Dr. Mohamed Amine Bouhlel <mbouhlel@umich.edu>, R. Lafage
This package is distributed under New BSD license.
Variable-fidelity modeling: two types of bridge functions are available; i.e.,
additive and multiplicative
"""
import numpy as np
from smt.utils.options_dictionary import OptionsDictionary
from types import FunctionType
from smt.applications.application import SurrogateBasedApplication
class VFM(SurrogateBasedApplication):
    """
    Variable-Fidelity Modeling application.

    Builds a high-fidelity predictor by combining a low-fidelity surrogate
    with a bridge surrogate trained on the discrepancy (additive) or the
    ratio (multiplicative) between high- and low-fidelity data.
    """

    def _initialize(self):
        """Declare VFM options and initialize internal state."""
        super(VFM, self)._initialize()

        declare = self.options.declare

        declare(
            "name_model_LF",
            types=object,
            values=(
                "KRG",
                "LS",
                "QP",
                "KPLS",
                "KPLSK",
                "GEKPLS",
                "RBF",
                "RMTC",
                "RMTB",
                "IDW",
            ),
            desc="Name of the low-fidelity model",
        )
        declare("options_LF", {}, types=dict, desc="Options for the low-fidelity model")
        declare(
            "name_model_bridge",
            types=object,
            values=(
                "KRG",
                "LS",
                "QP",
                "KPLS",
                "KPLSK",
                "GEKPLS",
                "RBF",
                "RMTC",
                "RMTB",
                "IDW",
            ),
            desc="Name of the bridge model",
        )
        declare("options_bridge", {}, types=dict, desc="Options for the bridge model")
        declare(
            "type_bridge",
            "Additive",
            types=str,
            values=("Additive", "Multiplicative"),
            desc="Bridge function type",
        )
        declare("X_LF", None, types=np.ndarray, desc="Low-fidelity inputs")
        declare("y_LF", None, types=np.ndarray, desc="Low-fidelity output")
        declare("X_HF", None, types=np.ndarray, desc="High-fidelity inputs")
        declare("y_HF", None, types=np.ndarray, desc="High-fidelity output")
        declare("dy_LF", None, types=np.ndarray, desc="Low-fidelity derivatives")
        declare("dy_HF", None, types=np.ndarray, desc="High-fidelity derivatives")

        # Internal state populated by _apply()
        self.nx = None
        self.ny = None
        self.sm_HF = None
        self._trained = False

    def predict_values(self, x):
        """
        Predict the output values at a set of points x.

        Parameters
        ----------
        x: np.ndarray[n, nx] or np.ndarray[n]
            Input values for the prediction result analysis.

        return
        ------
        y: np.ndarray
            Output values at the prediction points.
        """
        if not self._trained:
            self._apply()
        y = self.sm_HF["predict_values"](x)
        return y

    def predict_derivatives(self, x, kx):
        """
        Predict the dy_dx derivatives at a set of points.

        Parameters
        ----------
        x: np.ndarray[n, nx] or np.ndarray[n]
            Input values for the prediction result analysis.
        kx : int
            The 0-based index of the input variable with respect to which derivatives are desired.
            When None, derivatives wrt every input variable are returned.

        return
        ------
        y: np.ndarray
            Derivatives at the prediction points.
        """
        if not self._trained:
            self._apply()
        if kx is None:
            # Stack derivatives wrt every input variable, one column each
            y = np.zeros(x.shape)
            for i in range(x.shape[1]):
                y[:, i] = self.sm_HF["predict_derivatives"][i](x).reshape((x.shape[0]))
        else:
            y = self.sm_HF["predict_derivatives"][kx](x).reshape((x.shape[0], self.ny))
        return y

    def _apply(self):
        """
        Algorithm of the VFM method: trains the LF model, computes the bridge
        data, trains the bridge model and assembles the composed HF predictor.
        """
        # For seek of readability
        if (
            self.options["X_LF"] is not None
            and self.options["y_LF"] is not None
            and self.options["X_HF"] is not None
            and self.options["y_HF"] is not None
        ):
            X_LF = self.options["X_LF"]
            y_LF = self.options["y_LF"]
            X_HF = self.options["X_HF"]
            y_HF = self.options["y_HF"]
        else:
            # BUGFIX: message previously referenced a non-existent "y_FH" option
            raise ValueError("Check X_LF, y_LF, X_HF, and y_HF")
        self.nx = X_LF.shape[1]
        self.ny = y_LF.shape[1]
        # NOTE(review): if deriv is requested below while dy_LF/dy_HF were not
        # provided, the code raises NameError — callers must supply them together.
        if self.options["dy_LF"] is not None:
            dy_LF = self.options["dy_LF"]
        if self.options["dy_HF"] is not None:
            dy_HF = self.options["dy_HF"]

        # Check parameters
        self._check_param()

        # Train the low fidelity model
        self.LF_deriv = self.options["options_LF"]["deriv"]
        # `deriv` is a VFM-level flag, not a surrogate option: remove it
        # before forwarding the options to the surrogate constructor.
        del self.options["options_LF"]["deriv"]
        sm_LF = self.options["name_model_LF"](**self.options["options_LF"])
        sm_LF.options["print_global"] = False
        sm_LF.set_training_values(X_LF, y_LF)
        if self.LF_deriv:
            for i in range(sm_LF.nx):
                sm_LF.set_training_derivatives(X_LF, dy_LF[:, i], i)
        sm_LF.train()

        # compute the bridge data
        if self.options["type_bridge"] == "Multiplicative":
            # Bridge is the ratio HF / LF
            y_bridge = y_HF / sm_LF.predict_values(X_HF)
            self.B_deriv = self.options["options_bridge"]["deriv"]
            del self.options["options_bridge"]["deriv"]
            if self.B_deriv:
                # Quotient rule: d(y_HF/y_LF) = (dy_HF*y_LF - y_HF*dy_LF) / y_LF^2
                dy_bridge = np.zeros(dy_HF.shape)
                for i in range(sm_LF.nx):
                    dy_bridge[:, i] = (
                        (
                            (
                                dy_HF[:, i].reshape((y_HF.shape[0], 1))
                                * sm_LF.predict_values(X_HF)
                            )
                            - (y_HF * sm_LF.predict_derivatives(X_HF, kx=i))
                        )
                        / sm_LF.predict_values(X_HF) ** 2
                    ).reshape(y_HF.shape[0])
        elif self.options["type_bridge"] == "Additive":
            # Bridge is the discrepancy HF - LF
            y_bridge = y_HF - sm_LF.predict_values(X_HF)
            self.B_deriv = self.options["options_bridge"]["deriv"]
            del self.options["options_bridge"]["deriv"]
            if self.B_deriv:
                dy_bridge = np.zeros(dy_HF.shape)
                for i in range(sm_LF.nx):
                    dy_bridge[:, i] = (
                        dy_HF[:, i].reshape((y_HF.shape[0], 1))
                        - sm_LF.predict_derivatives(X_HF, kx=i)
                    ).reshape((y_HF.shape[0]))
        else:
            raise ValueError("Only Additive and Multiplicative bridges are available")

        # Construct of the bridge function
        sm_bridge = self.options["name_model_bridge"](**self.options["options_bridge"])
        sm_bridge.set_training_values(X_HF, y_bridge)
        if self.B_deriv and self.options["dy_HF"] is not None:
            for i in range(sm_bridge.nx):
                sm_bridge.set_training_derivatives(X_HF, dy_bridge[:, i], i)
        sm_bridge.train()

        # Construct the final model as closures combining LF and bridge
        sm_HF = {}
        if self.options["type_bridge"] == "Multiplicative":
            sm_HF["predict_values"] = lambda x: sm_bridge.predict_values(
                x
            ) * sm_LF.predict_values(x)
            if sm_bridge.supports["derivatives"] and sm_LF.supports["derivatives"]:
                sm_HF["predict_derivatives"] = []
                for i in range(sm_LF.nx):
                    # Product rule; bind i as a default to avoid late binding
                    sm_HF["predict_derivatives"].append(
                        lambda x, i=i: sm_bridge.predict_derivatives(x, i)
                        * sm_LF.predict_values(x)
                        + sm_bridge.predict_values(x) * sm_LF.predict_derivatives(x, i)
                    )
        else:
            sm_HF["predict_values"] = lambda x: sm_bridge.predict_values(
                x
            ) + sm_LF.predict_values(x)
            if sm_bridge.supports["derivatives"] and sm_LF.supports["derivatives"]:
                sm_HF["predict_derivatives"] = []
                for i in range(sm_LF.nx):
                    sm_HF["predict_derivatives"].append(
                        lambda x, i=i: sm_bridge.predict_derivatives(x, i)
                        + sm_LF.predict_derivatives(x, i)
                    )
        self._trained = True
        self.sm_HF = sm_HF

    def _check_param(self):
        """
        This function check some parameters of the model.
        """
        # Check surrogates: resolve string names to surrogate classes
        if not callable(self.options["name_model_LF"]):
            if self.options["name_model_LF"] in self._surrogate_type:
                self.options["name_model_LF"] = self._surrogate_type[
                    self.options["name_model_LF"]
                ]
            else:
                raise ValueError(
                    "The LF surrogate should be one of %s, "
                    "%s was given."
                    % (self._surrogate_type.keys(), self.options["name_model_LF"])
                )
        if not callable(self.options["name_model_bridge"]):
            if self.options["name_model_bridge"] in self._surrogate_type:
                self.options["name_model_bridge"] = self._surrogate_type[
                    self.options["name_model_bridge"]
                ]
            else:
                raise ValueError(
                    "The bridge surrogate should be one of %s, "
                    "%s was given."
                    % (self._surrogate_type.keys(), self.options["name_model_bridge"])
                )
        # Initialize the parameter deriv
        if "deriv" not in self.options["options_LF"].keys():
            self.options["options_LF"]["deriv"] = False
        if "deriv" not in self.options["options_bridge"].keys():
            self.options["options_bridge"]["deriv"] = False
| 9,813 | 34.557971 | 97 | py |
smt | smt-master/smt/applications/moe.py | """
Author: Remi Lafage <remi.lafage@onera.fr>
This package is distributed under New BSD license.
Mixture of Experts
"""
# TODO : support for best number of clusters
# TODO : implement verbosity 'print_global'
# TODO : documentation
import numpy as np
import warnings
# Compatibility shim: scikit-learn < 0.20.0 exposed GMM instead of
# GaussianMixture; detect which API is available at import time.
OLD_SKLEARN = False
try:  # scikit-learn < 0.20.0
    from sklearn.mixture import GMM as GaussianMixture

    OLD_SKLEARN = True
except ImportError:
    # Narrowed from a bare `except:` so only the expected import failure
    # triggers the modern-API fallback.
    from sklearn.mixture import GaussianMixture
from scipy.stats import multivariate_normal
from smt.utils.options_dictionary import OptionsDictionary
from smt.applications.application import SurrogateBasedApplication
from smt.utils.misc import compute_rms_error
from smt.surrogate_models.surrogate_model import SurrogateModel
# Silence DeprecationWarnings raised by (possibly old) scikit-learn APIs.
warnings.filterwarnings("ignore", category=DeprecationWarning)

# Surrogate-model names eligible to serve as local experts in the mixture.
MOE_EXPERT_NAMES = ["KRG", "KPLS", "KPLSK", "LS", "QP", "RBF", "IDW", "RMTB", "RMTC"]
class MOESurrogateModel(SurrogateModel):
    """SurrogateModel adapter around MOE, so that a mixture of experts can be
    used wherever a plain surrogate model is expected."""

    name = "MOE"

    def _initialize(self):
        super()._initialize()
        # Mirror every MOE option declaration (and any non-None default value)
        # onto this wrapper's own options dictionary.
        self.moe = moe = MOE()
        for opt_name, declaration in moe.options._declared_entries.items():
            self.options._declared_entries[opt_name] = declaration
            current = moe.options[opt_name]
            if current is not None:
                self.options[opt_name] = current

    def _setup(self):
        # Push the option values set on the wrapper back into the wrapped MOE
        for opt_name in self.moe.options._declared_entries:
            if opt_name in self.options:
                self.moe.options[opt_name] = self.options[opt_name]
        # self.supports['derivatives'] = self.options['derivatives_support'] # Interface not yet implemented
        self.supports["variances"] = self.options["variances_support"]

    def train(self):
        if len(self.training_points) == 0:
            # No explicit training points: fall back to the xt/yt options
            self.set_training_values(self.options["xt"], self.options["yt"])
        super().train()

    def _train(self):
        self._setup()
        for name in self.training_points:
            xt, yt = self.training_points[name][0]
            self.moe.set_training_values(xt, yt, name=name)
        self.moe.train()

    def _predict_values(self, x: np.ndarray, is_acting=None) -> np.ndarray:
        return self.moe.predict_values(x)

    def _predict_variances(self, x: np.ndarray, is_acting=None) -> np.ndarray:
        return self.moe.predict_variances(x)
class MOE(SurrogateBasedApplication):
# Names of experts available to be part of the mixture
AVAILABLE_EXPERTS = [
name
for name in MOE_EXPERT_NAMES
if name in SurrogateBasedApplication._surrogate_type
]
    def _initialize(self):
        """Declare MOE options and initialize internal state attributes."""
        super(MOE, self)._initialize()
        declare = self.options.declare

        declare("xt", None, types=np.ndarray, desc="Training inputs")
        declare("yt", None, types=np.ndarray, desc="Training outputs")
        declare(
            "ct",
            None,
            types=np.ndarray,
            desc="Training derivative outputs used for clustering",
        )
        declare("xtest", None, types=np.ndarray, desc="Test inputs")
        declare("ytest", None, types=np.ndarray, desc="Test outputs")
        declare(
            "ctest",
            None,
            types=np.ndarray,
            desc="Derivatives test outputs used for clustering",
        )
        declare("n_clusters", 2, types=int, desc="Number of clusters")
        declare(
            "smooth_recombination",
            True,
            types=bool,
            desc="Continuous cluster transition",
        )
        declare(
            "heaviside_optimization",
            False,
            types=bool,
            desc="Optimize Heaviside scaling factor when smooth recombination is used",
        )
        declare(
            "derivatives_support",
            False,
            types=bool,
            desc="Use only experts that support derivatives prediction",
        )
        declare(
            "variances_support",
            False,
            types=bool,
            desc="Use only experts that support variance prediction",
        )
        declare(
            "allow",
            [],
            desc="Names of allowed experts to be possibly part of the mixture. "
            "Empty list corresponds to all surrogates allowed.",
        )
        declare(
            "deny",
            [],
            desc="Names of forbidden experts",
        )

        # Internal state; populated by train()
        self.x = None
        self.y = None
        self.c = None
        self.n_clusters = None
        self.smooth_recombination = None
        self.heaviside_optimization = None
        self.heaviside_factor = 1.0
        # dictionary {name: class} of possible experts wrt to options
        self._enabled_expert_types = self._get_enabled_expert_types()
        # list of experts after MOE training
        self._experts = []
        # training data set through set_training_values (takes precedence
        # over the xt/yt options in train())
        self.xt = None
        self.yt = None
@property
def enabled_experts(self):
"""
Returns the names of enabled experts after taking into account MOE options
"""
self._enabled_expert_types = self._get_enabled_expert_types()
return list(self._enabled_expert_types.keys())
def set_training_values(self, xt, yt, name=None):
"""
Set training data (values).
Parameters
----------
xt : np.ndarray[nt, nx] or np.ndarray[nt]
The input values for the nt training points.
yt : np.ndarray[nt, ny] or np.ndarray[nt]
The output values for the nt training points.
name : str or None
An optional label for the group of training points being set.
This is only used in special situations (e.g., multi-fidelity applications).
"""
self.xt = xt
self.yt = yt
def train(self):
"""
Supports for surrogate model API.
Build and train the mixture of experts surrogate.
"""
if self.xt is not None and self.yt is not None:
# set_training_values has been called
self.x = x = self.xt
self.y = y = self.yt
else:
self.x = x = self.options["xt"]
self.y = y = self.options["yt"]
self.c = c = self.options["ct"]
if not self.c:
self.c = c = y
self.n_clusters = self.options["n_clusters"]
self.smooth_recombination = self.options["smooth_recombination"]
self.heaviside_optimization = (
self.options["smooth_recombination"]
and self.options["heaviside_optimization"]
)
self.heaviside_factor = 1.0
self._check_inputs()
self._enabled_expert_types = self._get_enabled_expert_types()
self._experts = []
# Set test values and trained values
xtest = self.options["xtest"]
ytest = self.options["ytest"]
ctest = self.options["ctest"]
if not ctest:
ctest = ytest
values = np.c_[x, y, c]
self.test_data_present = xtest is not None and ytest is not None
if self.test_data_present:
self._test_values = np.c_[xtest, ytest, ctest]
self._training_values = values
else:
self._test_values, self._training_values = self._extract_part(values, 10)
self.ndim = nx = x.shape[1]
xt = self._training_values[:, 0:nx]
yt = self._training_values[:, nx : nx + 1]
ct = self._training_values[:, nx + 1 :]
# Clustering
self.cluster = GaussianMixture(
n_components=self.n_clusters, covariance_type="full", n_init=20
)
self.cluster.fit(np.c_[xt, ct])
if not self.cluster.converged_:
raise Exception("Clustering not converged")
# Choice of the experts and training
self._fit()
xtest = self._test_values[:, 0:nx]
ytest = self._test_values[:, nx : nx + 1]
# Heaviside factor
if self.heaviside_optimization and self.n_clusters > 1:
self.heaviside_factor = self._find_best_heaviside_factor(xtest, ytest)
print("Best Heaviside factor = {}".format(self.heaviside_factor))
self.distribs = self._create_clusters_distributions(self.heaviside_factor)
if not self.test_data_present:
# if we have used part of data to validate, fit on overall data
self._training_values = values
self._fit(new_model=False)
def predict_values(self, x):
"""
Predict the output values at a set of points.
Parameters
----------
x : np.ndarray[nt, nx] or np.ndarray[nt]
Input values for the prediction points.
Returns
-------
y : np.ndarray[nt, ny]
Output values at the prediction points.
"""
if self.smooth_recombination:
y = self._predict_smooth_output(x)
else:
y = self._predict_hard_output(x)
return y
def predict_variances(self, x):
"""
Predict the output variances at a set of points.
Parameters
----------
x : np.ndarray[nt, nx] or np.ndarray[nt]
Input values for the prediction points.
Returns
-------
y : np.ndarray[nt, ny]
Output variances at the prediction points.
"""
if not self.options["variances_support"]:
raise RuntimeError(
"Experts not selected taking variance support into account: use variances_support=True "
"when creating MOE"
)
if self.smooth_recombination:
y = self._predict_smooth_output(x, output_variances=True)
else:
y = self._predict_hard_output(x, output_variances=True)
return y
def _check_inputs(self):
"""
Check the input data given by the client is correct.
raise Value error with relevant message
"""
if self.x is None or self.y is None:
raise ValueError("check x and y values")
if self.x.shape[0] != self.y.shape[0]:
raise ValueError(
"The number of input points %d doesn t match with the number of output points %d."
% (self.x.shape[0], self.y.shape[0])
)
if self.y.shape[0] != self.c.shape[0]:
raise ValueError(
"The number of output points %d doesn t match with the number of criterion weights %d."
% (self.y.shape[0], self.c.shape[0])
)
# choice of number of cluster
max_n_clusters = int(len(self.x) / 10) + 1
if self.n_clusters > max_n_clusters:
print("Number of clusters should be inferior to {0}".format(max_n_clusters))
raise ValueError(
"The number of clusters is too high considering the number of points"
)
def _get_enabled_expert_types(self):
"""
Select relevant surrogate models (experts) regarding MOE feature options
"""
prototypes = {
name: smclass()
for name, smclass in self._surrogate_type.items()
if name in MOE_EXPERT_NAMES
}
if self.options["derivatives_support"]:
prototypes = {
name: proto
for name, proto in prototypes.items()
if proto.supports["derivatives"]
}
if self.options["variances_support"]:
prototypes = {
name: proto
for name, proto in prototypes.items()
if proto.supports["variances"]
}
if self.options["allow"]:
prototypes = {
name: proto
for name, proto in prototypes.items()
if name in self.options["allow"]
}
if self.options["deny"]:
prototypes = {
name: proto
for name, proto in prototypes.items()
if name not in self.options["deny"]
}
if not prototypes:
raise ValueError(
f"List of experts is empty: check support, allow and deny options wrt "
f"possible experts: {MOE_EXPERT_NAMES}"
)
return {name: self._surrogate_type[name] for name in prototypes}
    def _fit(self, new_model=True):
        """
        Find the best model for each cluster (clustering already done) and train it if new_model is True
        otherwise train the points given (choice of best models by cluster already done)

        Arguments
        ---------
        - new_model : bool (optional)
            Set true to search the best local model
        """
        self.distribs = self._create_clusters_distributions(self.heaviside_factor)

        nx = self.ndim
        xt = self._training_values[:, 0:nx]
        yt = self._training_values[:, nx : nx + 1]
        ct = self._training_values[:, nx + 1 :]
        xtest = self._test_values[:, 0:nx]
        ytest = self._test_values[:, nx : nx + 1]
        ctest = self._test_values[:, nx + 1 :]

        # sort trained_values for each cluster
        cluster_classifier = self.cluster.predict(np.c_[xt, ct])
        clustered_values = self._cluster_values(np.c_[xt, yt], cluster_classifier)

        # sort test_values for each cluster only used in case of new model
        if new_model:
            test_cluster_classifier = self.cluster.predict(np.c_[xtest, ctest])
            clustered_test_values = self._cluster_values(
                np.c_[xtest, ytest], test_cluster_classifier
            )

        # find model for each cluster
        for i in range(self.n_clusters):
            if new_model:
                # Select the best expert for this cluster using its test slice
                model = self._find_best_model(
                    clustered_values[i], clustered_test_values[i]
                )
                self._experts.append(model)
            else:
                # retrain the experts
                # used when self._training_values changed with expert best models already found
                training_values = np.array(clustered_values[i])
                xtrain = training_values[:, 0 : self.ndim]
                ytrain = training_values[:, self.ndim]
                self._experts[i].set_training_values(xtrain, ytrain)
                self._experts[i].train()
def _predict_hard_output(self, x, output_variances=False):
"""
This method predicts the output of a x samples for a
discontinuous recombination.
Arguments
---------
- x : array_like
x samples
Return
------
- predicted_values : array_like
predicted output
"""
predicted_values = []
probs = self._proba_cluster(x)
sort_cluster = np.apply_along_axis(np.argmax, 1, probs)
for i in range(len(sort_cluster)):
model = self._experts[sort_cluster[i]]
if output_variances:
predicted_values.append(model.predict_variances(np.atleast_2d(x[i]))[0])
else:
predicted_values.append(model.predict_values(np.atleast_2d(x[i]))[0])
predicted_values = np.array(predicted_values)
return predicted_values
    def _predict_smooth_output(self, x, distribs=None, output_variances=False):
        """
        This method predicts the output of x with a smooth recombination.

        Arguments:
        ----------
        - x: np.ndarray
            x samples
        - distribs: distribution list (optional)
            array of membership distributions (use self ones if None)
        - output_variances: bool (optional)
            when True, recombine expert variances instead of values

        Returns
        -------
        - predicted_values : array_like
            predicted output
        """
        predicted_values = []
        if distribs is None:
            distribs = self.distribs
        # Membership probability of each sample wrt each cluster
        sort_proba = self._proba_cluster(x, distribs)

        for i in range(len(sort_proba)):
            recombined_value = 0
            for j in range(len(self._experts)):
                if output_variances:
                    # Variances combine with squared membership weights
                    expert_value = (
                        self._experts[j].predict_variances(np.atleast_2d(x[i]))[0]
                        * sort_proba[i][j] ** 2
                    )
                else:
                    expert_value = (
                        self._experts[j].predict_values(np.atleast_2d(x[i]))[0]
                        * sort_proba[i][j]
                    )
                recombined_value += expert_value
            predicted_values.append(recombined_value)
        predicted_values = np.array(predicted_values)
        return predicted_values
@staticmethod
def _extract_part(values, quantile):
"""
Divide the values list in quantile parts to return one part
of (num/quantile) values out of num values.
Arguments
----------
- values : np.ndarray[num, -1]
the values list to extract from
- quantile : int
the quantile
Returns
-------
- extracted, remaining : np.ndarray, np.ndarray
the extracted values part, the remaining values
"""
num = values.shape[0]
indices = np.arange(0, num, quantile) # uniformly distributed
mask = np.zeros(num, dtype=bool)
mask[indices] = True
return values[mask], values[~mask]
    def _find_best_model(self, clustered_values, clustered_test_values):
        """
        Find the best model which minimizes the errors.

        Arguments :
        ------------
        - clustered_values: array_like
            training samples [[X1,X2, ..., Xn, Y], ... ]
        - clustered_test_values: array_like
            held-out samples of the same cluster used to score candidates

        Returns :
        ---------
        - model : surrogate model
            best trained surrogate model
        """
        dim = self.ndim
        scores = {}
        sms = {}
        training_values = np.array(clustered_values)
        test_values = np.array(clustered_test_values)

        # Train each enabled expert on the cluster and score it on test data
        for name, sm_class in self._enabled_expert_types.items():
            kwargs = {}
            if name in ["RMTB", "RMTC"]:
                # Note: RMTS checks for xlimits,
                # we take limits on all x (not just the trained_values ones) as
                # the surrogate is finally re-trained on the whole x set.
                xlimits = np.zeros((dim, 2))
                for i in range(dim):
                    xlimits[i][0] = np.amin(self.x[:, i])
                    xlimits[i][1] = np.amax(self.x[:, i])
                kwargs = {"xlimits": xlimits}
            sm = sm_class(**kwargs)
            sm.options["print_global"] = False
            sm.set_training_values(training_values[:, 0:dim], training_values[:, dim])
            sm.train()
            expected = test_values[:, dim]
            actual = sm.predict_values(test_values[:, 0:dim]).reshape(-1)
            # L2 error on the held-out points is the selection score
            l_two = np.linalg.norm(expected - actual, 2)
            # l_two_rel = l_two / np.linalg.norm(expected, 2)
            # mse = (l_two**2) / len(expected)
            # rmse = mse ** 0.5
            scores[sm.name] = l_two
            print(sm.name, l_two)
            sms[sm.name] = sm

        # Keep the expert with the lowest score
        best_name = None
        best_score = None
        for name, rmse in scores.items():
            if best_score is None or rmse < best_score:
                best_name, best_score = name, rmse
        print("Best expert = {}".format(best_name))
        return sms[best_name]
def _find_best_heaviside_factor(self, x, y):
"""
Find the best heaviside factor to smooth approximated values.
Arguments
---------
- x: array_like
input training samples
- y: array_like
output training samples
Returns
-------
hfactor : float
best heaviside factor wrt given samples
"""
heaviside_factor = 1.0
if self.n_clusters > 1:
hfactors = np.linspace(0.1, 2.1, num=21)
errors = []
for hfactor in hfactors:
distribs = self._create_clusters_distributions(hfactor)
ypred = self._predict_smooth_output(x, distribs)
err_rel = np.linalg.norm(y - ypred, 2) / np.linalg.norm(y, 2)
errors.append(err_rel)
if max(errors) < 1e-6:
heaviside_factor = 1.0
else:
min_error_index = errors.index(min(errors))
heaviside_factor = hfactors[min_error_index]
return heaviside_factor
"""
Functions related to clustering
"""
def _create_clusters_distributions(self, heaviside_factor=1.0):
"""
Create an array of frozen multivariate normal distributions (distribs).
Arguments
---------
- heaviside_factor: float
Heaviside factor used to scale covariance matrices
Returns:
--------
- distribs: array_like
Array of frozen multivariate normal distributions
with clusters means and covariances
"""
distribs = []
dim = self.ndim
means = self.cluster.means_
if OLD_SKLEARN:
cov = heaviside_factor * self.cluster.covars_
else:
cov = heaviside_factor * self.cluster.covariances_
for k in range(self.n_clusters):
meansk = means[k][0:dim]
covk = cov[k][0:dim, 0:dim]
mvn = multivariate_normal(meansk, covk, allow_singular=True)
distribs.append(mvn)
return distribs
def _cluster_values(self, values, classifier):
"""
Classify values regarding the given classifier info.
Arguments
---------
- values: array_like
values to cluster
- classifier: array_like
Cluster corresponding to each point of value in the same order
Returns
-------
- clustered: array_like
Samples sort by cluster
Example:
---------
values:
[[ 1.67016597e-01 5.42927264e-01 9.25779645e+00]
[ 5.20618344e-01 9.88223010e-01 1.51596837e+02]
[ 6.09979830e-02 2.66824984e-01 1.17890707e+02]
[ 9.62783472e-01 7.36979149e-01 7.37641826e+01]
[ 3.01194132e-01 8.58084068e-02 4.88696602e+01]
[ 6.40398203e-01 6.91090937e-01 8.91963162e+01]
[ 7.90710374e-01 1.40464471e-01 1.89390766e+01]
[ 4.64498124e-01 3.61009635e-01 1.04779656e+01]]
cluster_classifier:
[1 0 0 2 1 2 1 1]
clustered:
[[array([ 0.52061834, 0.98822301, 151.59683723]),
array([ 6.09979830e-02, 2.66824984e-01, 1.17890707e+02])]
[array([ 0.1670166 , 0.54292726, 9.25779645]),
array([ 0.30119413, 0.08580841, 48.86966023]),
array([ 0.79071037, 0.14046447, 18.93907662]),
array([ 0.46449812, 0.36100964, 10.47796563])]
[array([ 0.96278347, 0.73697915, 73.76418261]),
array([ 0.6403982 , 0.69109094, 89.19631619])]]
"""
num = len(classifier)
assert values.shape[0] == num
clusters = [[] for n in range(self.n_clusters)]
for i in range(num):
clusters[classifier[i]].append(values[i])
return clusters
def _proba_cluster_one_sample(self, x, distribs):
"""
Compute membership probabilities to each cluster for one sample.
Arguments
---------
- x: array_like
a sample for which probabilities must be calculated
- distribs: multivariate_normal objects list
array of normal distributions
Returns
-------
- prob: array_like
x membership probability for each cluster
"""
weights = np.array(self.cluster.weights_)
rvs = np.array([distribs[k].pdf(x) for k in range(len(weights))])
probs = weights * rvs
rad = np.sum(probs)
if rad > 0:
probs = probs / rad
return probs
def _proba_cluster(self, x, distribs=None):
"""
Calculate membership probabilities to each cluster for each sample
Arguments
---------
- x: array_like
samples where probabilities must be calculated
- distribs : multivariate_normal objects list (optional)
array of membership distributions. If None, use self ones.
Returns
-------
- probs: array_like
x membership probabilities to each cluster.
Examples :
----------
x:
[[ 0. 0.]
[ 0. 1.]
[ 1. 0.]
[ 1. 1.]]
prob:
[[ 1.49050563e-02 9.85094944e-01]
[ 9.90381299e-01 9.61870088e-03]
[ 9.99208990e-01 7.91009759e-04]
[ 1.48949963e-03 9.98510500e-01]]
"""
if distribs is None:
distribs = self.distribs
if self.n_clusters == 1:
probs = np.ones((x.shape[0], 1))
else:
probs = np.array(
[self._proba_cluster_one_sample(x[i], distribs) for i in range(len(x))]
)
return probs
| 25,371 | 31.695876 | 109 | py |
smt | smt-master/smt/applications/mfkplsk.py | # -*- coding: utf-8 -*-
"""
Created on Fri May 04 10:26:49 2018
@author: Mostafa Meliani <melimostafa@gmail.com>
Multi-Fidelity co-Kriging: recursive formulation with autoregressive model of order 1 (AR1)
Partial Least Square decomposition added on highest fidelity level
KPLSK model combined PLS followed by a Krging model in the initial dimension
Adapted on March 2020 by Nathalie Bartoli to the new SMT version
Adapted on January 2021 by Andres Lopez-Lopera to the new SMT version
"""
from smt.utils.kriging import componentwise_distance
from smt.applications import MFKPLS
class MFKPLSK(MFKPLS):
    """
    Multi-fidelity KPLSK: PLS decomposition on the highest fidelity level
    followed by a Kriging step back in the initial dimension.
    """

    def _initialize(self):
        """Declare MFKPLSK-specific options."""
        super()._initialize()
        # Like KPLSK, MFKPLSK is used only with "squar_exp" correlations
        self.options.declare(
            "corr",
            "squar_exp",
            values=("squar_exp"),
            desc="Correlation function type",
            types=(str),
        )
        self.name = "MFKPLSK"

    def _componentwise_distance(self, dx, opt=0):
        """Componentwise distance: Kriging flavor when opt == 0, KPLS flavor otherwise."""
        # Modif for KPLSK model
        if opt == 0:
            # Kriging step: distances in the original space
            return componentwise_distance(
                dx, self.options["corr"], self.nx, power=self.options["pow_exp_power"]
            )
        # KPLS step: delegate to the PLS-aware distance of MFKPLS
        return super()._componentwise_distance(dx, opt)

    def _new_train(self):
        """
        Overrides KrgBased implementation.

        Trains the Multi-Fidelity model + PLS (done on the highest fidelity
        level) + Kriging (MFKPLSK), restoring the options mutated by each
        per-level training iteration.
        """
        self._new_train_init()
        self.n_comp = self.options["n_comp"]
        theta0 = self.options["theta0"].copy()
        noise0 = self.options["noise0"].copy()
        for lvl in range(self.nlvl):
            self._new_train_iteration(lvl)
            # each iteration may overwrite these options: restore them
            self.options["n_comp"] = self.n_comp
            self.options["theta0"] = theta0
            self.options["noise0"] = noise0
        # re-interpolation is done on the last (highest fidelity) level only
        self._reinterpolate(lvl)

    def _get_theta(self, i):
        """Return the optimal hyperparameters of fidelity level ``i``."""
        return self.optimal_theta[i]
| 2,074 | 31.421875 | 103 | py |
smt | smt-master/smt/applications/__init__.py | from .vfm import VFM
from .moe import MOE, MOESurrogateModel
from .mfk import MFK, NestedLHS
from .mfkpls import MFKPLS
from .mfkplsk import MFKPLSK
from .ego import EGO, Evaluator
| 181 | 25 | 39 | py |
smt | smt-master/smt/applications/mixed_integer.py | """
Author: Remi Lafage <remi.lafage@onera.fr>
This package is distributed under New BSD license.
"""
import numpy as np
from smt.surrogate_models.surrogate_model import SurrogateModel
from smt.sampling_methods.sampling_method import SamplingMethod
from smt.utils.checks import ensure_2d_array
from smt.surrogate_models.krg_based import KrgBased, MixIntKernelType
from smt.utils.design_space import (
BaseDesignSpace,
CategoricalVariable,
ensure_design_space,
)
import warnings
class MixedIntegerSamplingMethod(SamplingMethod):
    """
    Sampling method decorator that takes an SMT continuous sampling method and
    cast values according x types specification to implement a sampling method
    handling integer (ORD) or categorical (ENUM) features.

    Deprecated: use DesignSpace.sample_valid_x instead.
    """

    def __init__(self, sampling_method_class, design_space, **kwargs):
        """
        Parameters
        ----------
        sampling_method_class: class name
            SMT sampling method class
        design_space: BaseDesignSpace
            design space definition
        kwargs: options of the given sampling method
            options used to instanciate the SMT sampling method
            with the additional 'output_in_folded_space' boolean option
            specifying if doe output should be in folded space (enum indexes)
            or not (enum masks)
        """
        warnings.warn(
            "MixedIntegerSamplingMethod has been deprecated, use DesignSpace.sample_valid_x instead!",
            category=DeprecationWarning,
        )
        self._design_space = design_space
        self._unfolded_xlimits = design_space.get_unfolded_num_bounds()
        # 'output_in_folded_space' is consumed here and NOT forwarded to the
        # wrapped sampling method (default: folded output)
        self._output_in_folded_space = kwargs.get("output_in_folded_space", True)
        kwargs.pop("output_in_folded_space", None)
        # the wrapped continuous sampling method works in the unfolded space
        self._sampling_method = sampling_method_class(
            xlimits=self._unfolded_xlimits, **kwargs
        )
        super().__init__()

    def _compute(self, nt):
        # Sample in the continuous (unfolded) space, then correct/impute the
        # values so they are valid wrt the mixed design space
        doe = self._sampling_method(nt)
        x_doe, _ = self._design_space.correct_get_acting(doe)
        if self._output_in_folded_space:
            x_doe, _ = self._design_space.fold_x(x_doe)
        return x_doe

    def __call__(self, nt):
        return self._compute(nt)

    def expand_lhs(self, x, nt, method="basic"):
        # NOTE(review): 'x' and 'method' are ignored here — this draws a fresh
        # doe of nt points rather than expanding the given LHS; confirm intent.
        doe = self._sampling_method(nt)
        x_doe, _ = self._design_space.correct_get_acting(doe)
        if self._output_in_folded_space:
            x_doe, _ = self._design_space.fold_x(x_doe)
        return x_doe
class MixedIntegerSurrogateModel(SurrogateModel):
    """
    Surrogate model (not Kriging) decorator that takes an SMT continuous surrogate model and
    cast values according x types specification to implement a surrogate model
    handling integer (ORD) or categorical (ENUM) features
    """

    def __init__(
        self,
        design_space,
        surrogate,
        input_in_folded_space=True,
    ):
        """
        Parameters
        ----------
        design_space: BaseDesignSpace
            design space definition
        surrogate: SMT surrogate model (not Kriging)
            instance of a SMT surrogate model
        input_in_folded_space: bool
            whether x data are in given in folded space (enum indexes) or not (enum masks)

        Raises
        ------
        ValueError
            if the wrapped surrogate is Kriging-based (use
            MixedIntegerKrigingModel in that case) or if a non-constant
            regression polynomial is configured
        """
        super().__init__()
        self._surrogate = surrogate
        # Kriging-based surrogates must go through MixedIntegerKrigingModel
        if isinstance(self._surrogate, KrgBased):
            raise ValueError(
                "Using MixedIntegerSurrogateModel integer model with "
                + str(self._surrogate.name)
                + " is not supported. Please use MixedIntegerKrigingModel instead."
            )
        self.design_space = ensure_design_space(design_space=design_space)

        self._input_in_folded_space = input_in_folded_space
        # share the wrapped surrogate capability flags
        self.supports = self._surrogate.supports
        self.options["print_global"] = False

        if "poly" in self._surrogate.options:
            if self._surrogate.options["poly"] != "constant":
                raise ValueError("constant regression must be used with mixed integer")

    @property
    def name(self):
        # e.g. "MixedIntegerRBF" for a wrapped RBF model
        return "MixedInteger" + self._surrogate.name

    def _initialize(self):
        # derivatives are not supported through the discrete casting layer
        self.supports["derivatives"] = False

    def set_training_values(self, xt, yt, name=None) -> None:
        xt = ensure_2d_array(xt, "xt")
        # Round inputs
        design_space = self.design_space
        xt, _ = design_space.correct_get_acting(xt)
        if self._input_in_folded_space:
            xt_apply, _ = design_space.unfold_x(xt)
        else:
            xt_apply = xt
        # keep both the decorator and the wrapped model in sync
        super().set_training_values(xt_apply, yt)
        self._surrogate.set_training_values(xt_apply, yt, name)

    def update_training_values(self, yt, name=None):
        super().update_training_values(yt, name)
        self._surrogate.update_training_values(yt, name)

    def _train(self):
        # training is fully delegated to the wrapped surrogate
        self._surrogate._train()

    def predict_values(self, x: np.ndarray) -> np.ndarray:
        x_corr, is_acting = self._get_x_for_surrogate_model(x)
        return self._surrogate.predict_values(x_corr)

    def predict_variances(self, x: np.ndarray) -> np.ndarray:
        x_corr, is_acting = self._get_x_for_surrogate_model(x)
        return self._surrogate.predict_variances(x_corr)

    def _get_x_for_surrogate_model(self, x):
        # Correct/round the inputs and unfold them if needed, so the wrapped
        # continuous surrogate always sees valid unfolded values
        xp = ensure_2d_array(x, "xp")
        x_corr, is_acting = self.design_space.correct_get_acting(xp)
        if self._input_in_folded_space:
            x_corr, is_acting = self.design_space.unfold_x(x_corr, is_acting=is_acting)
        return x_corr, is_acting

    def _predict_values(self, x: np.ndarray) -> np.ndarray:
        # abstract hook of SurrogateModel; prediction is handled by the
        # overridden public predict_values above
        pass
class MixedIntegerKrigingModel(KrgBased):
    """
    Kriging model decorator that takes an SMT continuous surrogate model and
    cast values according x types specification to implement a surrogate model
    handling integer (ORD) or categorical (ENUM) features
    """

    def __init__(
        self,
        surrogate,
        input_in_folded_space=True,
    ):
        """
        Parameters
        ----------
        surrogate: SMT Kriging surrogate model
            instance of a SMT Kriging surrogate model
        input_in_folded_space: bool
            whether x data are given in folded space (enum indexes) or not
            (enum masks); forced to False when a categorical kernel is used

        Raises
        ------
        ValueError
            if the wrapped surrogate is not Kriging-based or if a
            non-constant regression polynomial is configured
        """
        super().__init__()
        self._surrogate = surrogate
        if not (isinstance(self._surrogate, KrgBased)):
            raise ValueError(
                "Using MixedIntegerKrigingModel integer model with "
                + str(self._surrogate.name)
                + " is not supported. Please use MixedIntegerSurrogateModel instead."
            )
        # share the wrapped model design space with the decorator
        self.options["design_space"] = self._surrogate.design_space
        self._input_in_folded_space = input_in_folded_space
        self.supports = self._surrogate.supports
        self.options["print_global"] = False
        if "poly" in self._surrogate.options:
            if self._surrogate.options["poly"] != "constant":
                raise ValueError("constant regression must be used with mixed integer")

        design_space = self.design_space
        # Categorical variables cannot be handled by continuous relaxation:
        # silently mutates the wrapped surrogate's options to a default kernel
        if (
            any(
                isinstance(dv, CategoricalVariable)
                for dv in design_space.design_variables
            )
            and self._surrogate.options["categorical_kernel"] is None
        ):
            self._surrogate.options[
                "categorical_kernel"
            ] = MixIntKernelType.HOMO_HSPHERE
            warnings.warn(
                "Using MixedIntegerSurrogateModel integer model with Continuous Relaxation is not supported. Switched to homoscedastic hypersphere kernel instead."
            )
        # categorical kernels work directly in the folded space
        if self._surrogate.options["categorical_kernel"] is not None:
            self._input_in_folded_space = False

    @property
    def name(self):
        # e.g. "MixedIntegerKRG" for a wrapped KRG model
        return "MixedInteger" + self._surrogate.name

    def _initialize(self):
        super()._initialize()
        # derivatives are not supported through the discrete casting layer
        self.supports["derivatives"] = False

    def set_training_values(self, xt, yt, name=None, is_acting=None):
        xt = ensure_2d_array(xt, "xt")
        # If the is_acting matrix is not given, assume input is not corrected (rounding, imputation, etc.) yet
        design_space = self.design_space
        if is_acting is None:
            xt, is_acting = design_space.correct_get_acting(xt)
        if self._input_in_folded_space:
            xt_apply, is_acting_apply = design_space.unfold_x(xt, is_acting)
        else:
            xt_apply, is_acting_apply = xt, is_acting
        # keep both the decorator and the wrapped model in sync
        super().set_training_values(xt_apply, yt, is_acting=is_acting_apply)
        self._surrogate.set_training_values(
            xt_apply, yt, name, is_acting=is_acting_apply
        )

    def update_training_values(self, yt, name=None):
        super().update_training_values(yt, name)
        self._surrogate.update_training_values(yt, name)

    def _train(self):
        # training is fully delegated to the wrapped surrogate
        self._surrogate._train()

    def predict_values(self, x: np.ndarray, is_acting=None) -> np.ndarray:
        # NOTE: the is_acting argument is recomputed from x; the caller-given
        # value is overwritten by _get_x_for_surrogate_model
        x_corr, is_acting = self._get_x_for_surrogate_model(x)
        return self._surrogate.predict_values(x_corr, is_acting=is_acting)

    def predict_variances(self, x: np.ndarray, is_acting=None) -> np.ndarray:
        x_corr, is_acting = self._get_x_for_surrogate_model(x)
        return self._surrogate.predict_variances(x_corr, is_acting=is_acting)

    def _get_x_for_surrogate_model(self, x):
        # Correct/round the inputs and unfold them if needed, so the wrapped
        # Kriging model always sees valid values
        xp = ensure_2d_array(x, "xp")
        x_corr, is_acting = self.design_space.correct_get_acting(xp)
        if self._input_in_folded_space:
            x_corr, is_acting = self.design_space.unfold_x(x_corr, is_acting=is_acting)
        return x_corr, is_acting

    def _predict_values(self, x: np.ndarray, is_acting=None) -> np.ndarray:
        # abstract hook of KrgBased; prediction is handled by the overridden
        # public predict_values above
        pass
class MixedIntegerContext(object):
    """
    Factory acting as sampling method and surrogate model builder, handling
    integer and categorical variables consistently through one design space.
    """

    def __init__(self, design_space, work_in_folded_space=True):
        """
        Parameters
        ----------
        design_space: BaseDesignSpace
            the design space definition (includes mixed-discrete and/or hierarchical specifications)
        work_in_folded_space: bool
            whether x data are in given in folded space (enum indexes) or not (enum masks)
        """
        self._design_space = ensure_design_space(design_space=design_space)
        self._unfold_space = not work_in_folded_space
        self._unfolded_xlimits = self._design_space.get_unfolded_num_bounds()
        self._work_in_folded_space = work_in_folded_space

    @property
    def design_space(self) -> BaseDesignSpace:
        return self._design_space

    def build_sampling_method(self, *_, **__):
        """
        Return a callable sampling n valid design points from the design
        space (legacy positional/keyword arguments are accepted and ignored).
        """
        unfolded = not self._work_in_folded_space

        def sample(n):
            x_sampled, _ = self._design_space.sample_valid_x(n, unfolded=unfolded)
            return x_sampled

        return sample

    def build_kriging_model(self, surrogate):
        """Wrap the given SMT Kriging surrogate in a MixedIntegerKrigingModel."""
        surrogate.options["design_space"] = self._design_space
        return MixedIntegerKrigingModel(
            surrogate=surrogate,
            input_in_folded_space=self._work_in_folded_space,
        )

    def build_surrogate_model(self, surrogate):
        """Wrap the given (non-Kriging) SMT surrogate in a MixedIntegerSurrogateModel."""
        return MixedIntegerSurrogateModel(
            self._design_space,
            surrogate=surrogate,
            input_in_folded_space=self._work_in_folded_space,
        )

    def get_unfolded_xlimits(self):
        """
        Return relaxed xlimits: each level of an enumerate gives a new
        continuous dimension in [0, 1], and integer dimensions are relaxed
        continuously.
        """
        return self._unfolded_xlimits

    def get_unfolded_dimension(self):
        """Return the x dimension (int) taking into account unfolded categorical features."""
        return len(self._unfolded_xlimits)
| 12,226 | 34.44058 | 163 | py |
smt | smt-master/smt/applications/mfkpls.py | # -*- coding: utf-8 -*-
"""
Created on Fri May 04 10:26:49 2018
@author: Mostafa Meliani <melimostafa@gmail.com>
Multi-Fidelity co-Kriging: recursive formulation with autoregressive model of order 1 (AR1)
Partial Least Square decomposition added on highest fidelity level
Adapted on March 2020 by Nathalie Bartoli to the new SMT version
Adapted on January 2021 by Andres Lopez-Lopera to the new SMT version
"""
import numpy as np
from sklearn.cross_decomposition import PLSRegression as pls
from sklearn.metrics.pairwise import manhattan_distances
from smt.applications import MFK
from smt.utils.kriging import componentwise_distance_PLS
class MFKPLS(MFK):
    """
    Multi-Fidelity model + PLS (done on the highest fidelity level)
    """

    def _initialize(self):
        """Declare MFKPLS-specific options."""
        super()._initialize()
        declare = self.options.declare
        # Like KPLS, MFKPLS is used only with "abs_exp" and "squar_exp" correlations
        declare(
            "corr",
            "squar_exp",
            values=("abs_exp", "squar_exp"),
            desc="Correlation function type",
            types=(str),
        )
        declare("n_comp", 1, types=int, desc="Number of principal components")
        self.name = "MFKPLS"

    def _differences(self, X, Y):
        """
        Overrides the MFK differences function: componentwise Manhattan
        distances between the rows of X and Y.
        """
        return manhattan_distances(X, Y, sum_over_features=False)

    def _componentwise_distance(self, dx, opt=0):
        """PLS-aware componentwise distance."""
        return componentwise_distance_PLS(
            dx,
            self.options["corr"],
            self.options["n_comp"],
            self.coeff_pls,
            power=self.options["pow_exp_power"],
        )

    def _compute_pls(self, X, y):
        """Fit a PLS regression and store its x-rotations as the PLS coefficients."""
        _pls = pls(self.options["n_comp"])
        # As of sklearn 0.24.1 PLS with zeroed outputs raises an exception while
        # sklearn 0.23 returns zeroed x_rotations.
        # For now the try/except below is a workaround to restore the 0.23 behaviour.
        try:
            self.coeff_pls = _pls.fit(X.copy(), y.copy()).x_rotations_
        except StopIteration:
            self.coeff_pls = np.zeros((X.shape[1], self.options["n_comp"]))
        return X, y

    def _get_theta(self, i):
        """Project the optimal theta of level ``i`` through the squared PLS coefficients."""
        return np.sum(self.optimal_theta[i] * self.coeff_pls**2, axis=1)
| 2,313 | 32.057143 | 120 | py |
smt | smt-master/smt/applications/ego.py | """
Authors: Nathalie Bartoli, Remy Priem, Remi Lafage, Emile Roux <emile.roux@univ-smb.fr>
This package is distributed under New BSD license.
"""
import numpy as np
from types import FunctionType
from scipy.stats import norm
from scipy.optimize import minimize
from smt.surrogate_models import KPLS, KRG, KPLSK, MGP, GEKPLS
from smt.applications.application import SurrogateBasedApplication
from smt.applications.mixed_integer import MixedIntegerContext
from smt.utils.design_space import (
BaseDesignSpace,
DesignSpace,
FloatVariable,
CategoricalVariable,
)
class Evaluator(object):
    """
    An interface for evaluation of a function at x points (nsamples of dimension nx).
    User can derive this interface and override the run() method to implement custom multiprocessing.
    """

    def run(self, fun, x):
        """
        Evaluates fun at x.

        Parameters
        ---------

        fun : function to evaluate: (nsamples, nx) -> (nsample, 1)

        x : np.ndarray[nsamples, nx]
            nsamples points of nx dimensions.

        Returns
        -------

        np.ndarray[nsample, 1]
            fun evaluations at the nsamples points.
        """
        # Default implementation: plain sequential evaluation
        return fun(x)
class EGO(SurrogateBasedApplication):
    """
    Efficient Global Optimization: Bayesian optimization driver based on an
    SMT Kriging surrogate and an acquisition criterion (EI, SBO or LCB).
    """

    def _initialize(self):
        """Declare all EGO options."""
        super(EGO, self)._initialize()
        declare = self.options.declare

        declare("fun", None, types=FunctionType, desc="Function to minimize")
        declare(
            "criterion",
            "EI",
            types=str,
            values=["EI", "SBO", "LCB"],
            desc="criterion for next evaluation point determination: Expected Improvement, \
            Surrogate-Based Optimization or Lower Confidence Bound",
        )
        declare("n_iter", None, types=int, desc="Number of optimizer steps")
        declare(
            "n_max_optim",
            20,
            types=int,
            desc="Maximum number of internal optimizations",
        )
        declare("n_start", 20, types=int, desc="Number of optimization start points")
        declare(
            "n_parallel",
            1,
            types=int,
            desc="Number of parallel samples to compute using qEI criterion",
        )
        declare(
            "qEI",
            "KBLB",
            types=str,
            values=["KB", "KBLB", "KBUB", "KBRand", "CLmin"],
            desc="Approximated q-EI maximization strategy",
        )
        declare(
            "evaluator",
            default=Evaluator(),
            types=Evaluator,
            desc="Object used to run function fun to optimize at x points (nsamples, nxdim)",
        )
        declare(
            "n_doe",
            None,
            types=int,
            desc="Number of points of the initial LHS doe, only used if xdoe is not given",
        )
        declare("xdoe", None, types=np.ndarray, desc="Initial doe inputs")
        declare("ydoe", None, types=np.ndarray, desc="Initial doe outputs")
        declare("verbose", False, types=bool, desc="Print computation information")
        declare(
            "enable_tunneling",
            False,
            types=bool,
            desc="Enable the penalization of points that have been already evaluated in EI criterion",
        )
        declare(
            "surrogate",
            KRG(print_global=False),
            types=(KRG, KPLS, KPLSK, GEKPLS, MGP),
            desc="SMT kriging-based surrogate model used internaly",
        )
        self.options.declare(
            "random_state",
            types=(type(None), int, np.random.RandomState),
            desc="Numpy RandomState object or seed number which controls random draws",
        )

    def optimize(self, fun):
        """
        Optimizes fun.

        Parameters
        ----------

        fun: function to optimize: ndarray[n, nx] or ndarray[n] -> ndarray[n, 1]

        Returns
        -------

        [nx, 1]: x optimum
        [1, 1]: y optimum
        int: index of optimum in data arrays
        [ndoe + n_iter, nx]: coord-x data
        [ndoe + n_iter, 1]: coord-y data
        """
        x_data, y_data = self._setup_optimizer(fun)
        n_iter = self.options["n_iter"]
        n_parallel = self.options["n_parallel"]

        for k in range(n_iter):
            # Virtual enrichement loop
            for p in range(n_parallel):
                # find next best x-coord point to evaluate
                x_et_k, success = self._find_best_point(
                    x_data, y_data, self.options["enable_tunneling"]
                )
                if not success:
                    self.log(
                        "Internal optimization failed at EGO iter = {}.{}".format(k, p)
                    )
                    break
                elif success:
                    self.log(
                        "Internal optimization succeeded at EGO iter = {}.{}".format(
                            k, p
                        )
                    )
                # Set temporaly the y-coord point based on the kriging prediction
                x_et_k = np.atleast_2d(x_et_k)
                if self.mixint:
                    # impute/round the candidate so it is valid in the mixed space
                    x_et_k, _ = self.design_space.correct_get_acting(x_et_k)
                y_et_k = self._get_virtual_point(x_et_k, y_data)

                # Update y_data with predicted value
                y_data = y_data.reshape(y_data.shape[0], self.gpr.ny)
                y_data = np.vstack((y_data, y_et_k))
                x_data = np.atleast_2d(np.append(x_data, x_et_k, axis=0))

            # Compute the real values of y_data: replace the virtual points of
            # this batch with true function evaluations
            x_to_compute = np.atleast_2d(x_data[-n_parallel:])
            y = self._evaluator.run(fun, x_to_compute)
            y_data[-n_parallel:] = y

        # Find the optimal point
        ind_best = np.argmin(y_data if y_data.ndim == 1 else y_data[:, 0])
        x_opt = x_data[ind_best]
        y_opt = y_data[ind_best]

        return x_opt, y_opt, ind_best, x_data, y_data

    def log(self, msg):
        """Print msg when the 'verbose' option is set."""
        if self.options["verbose"]:
            print(msg)

    def EI(self, points, enable_tunneling=False, x_data=None):
        """Expected improvement of the given points wrt the current best training value."""
        y_data = np.atleast_2d(self.gpr.training_points[None][0][1])
        f_min = y_data[np.argmin(y_data[:, 0])]
        pred = self.gpr.predict_values(points)
        sig = np.sqrt(self.gpr.predict_variances(points))

        # NOTE(review): the division below happens BEFORE the sig == 0 guard,
        # so a zero variance still produces a divide-by-zero warning here
        args0 = (f_min - pred) / sig
        args1 = (f_min - pred) * norm.cdf(args0)
        args2 = sig * norm.pdf(args0)
        if sig.size == 1 and sig == 0.0:  # can be use only if one point is computed
            return 0.0
        ei = args1 + args2
        # penalize the points already evaluated with tunneling
        if enable_tunneling:
            for i in range(len(points)):
                p = np.atleast_2d(points[i])
                EIp = self.EI(p, enable_tunneling=False)
                for x in x_data:
                    x = np.atleast_2d(x)
                    # if np.abs(p-x)<1:
                    # ei[i]=ei[i]*np.reciprocal(1+100*np.exp(-np.reciprocal(1-np.square(p-x))))
                    # distance-weighted penalty wrt already evaluated points
                    pena = (EIp - self.EI(x, enable_tunneling=False)) / (
                        1e-9 + np.power(np.linalg.norm(p - x), 4)
                    )
                    if pena > 0:
                        ei[i] = ei[i] - pena
                    ei[i] = max(ei[i], 0)
        return ei

    def SBO(self, point):
        """Surrogate based optimization: min the surrogate model by using the mean mu"""
        res = self.gpr.predict_values(point)
        return res

    def LCB(self, point):
        """Lower confidence bound optimization: minimize by using mu - 3*sigma"""
        pred = self.gpr.predict_values(point)
        var = self.gpr.predict_variances(point)
        res = pred - 3.0 * np.sqrt(var)
        return res

    def _setup_optimizer(self, fun):
        """
        Instanciate internal surrogate used for optimization
        and setup function evaluator wrt options.

        Parameters
        ----------

        fun: function to optimize: ndarray[n, nx] or ndarray[n] -> ndarray[n, 1]

        Returns
        -------

        ndarray: initial coord-x doe
        ndarray: initial coord-y doe = fun(xdoe)
        """
        # Set the model
        self.gpr = self.options["surrogate"]
        self.design_space: BaseDesignSpace = self.gpr.design_space
        if isinstance(self.design_space, DesignSpace):
            self.design_space.seed = self.options["random_state"]

        # Handle mixed integer optimization
        is_continuous = self.design_space.is_all_cont
        if not is_continuous:
            self.categorical_kernel = self.gpr.options["categorical_kernel"]
            self.mixint = MixedIntegerContext(
                self.design_space,
                work_in_folded_space=True,
            )
            underlying_gpr = self.gpr
            self.gpr = self.mixint.build_kriging_model(self.gpr)
            # NOTE(review): categorical_kernel and mixint are assigned twice in
            # this branch (above and below) — looks redundant, confirm intent
            self.categorical_kernel = underlying_gpr.options["categorical_kernel"]
            self.mixint = MixedIntegerContext(
                self.design_space,
                work_in_folded_space=True,
            )
            self._sampling = self.mixint.build_sampling_method()
        else:
            self.mixint = None
            self._sampling = lambda n: self.design_space.sample_valid_x(n)[0]
            self.categorical_kernel = None

        # Build DOE
        self._evaluator = self.options["evaluator"]
        xdoe = self.options["xdoe"]
        if xdoe is None:
            self.log("Build initial DOE with LHS")
            n_doe = self.options["n_doe"]
            x_doe = self._sampling(n_doe)
        else:
            self.log("Initial DOE given")
            x_doe = np.atleast_2d(xdoe)

        ydoe = self.options["ydoe"]
        if ydoe is None:
            y_doe = self._evaluator.run(fun, x_doe)
        else:  # to save time if y_doe is already given to EGO
            y_doe = ydoe

        return x_doe, y_doe

    def _train_gpr(self, x_data, y_data):
        """(Re)train the internal surrogate on the given data (and derivatives if supported)."""
        self.gpr.set_training_values(x_data, y_data)
        if self.gpr.supports["training_derivatives"]:
            # columns 1..nx of y_data hold the training derivatives
            for kx in range(self.gpr.nx):
                self.gpr.set_training_derivatives(
                    x_data, y_data[:, 1 + kx].reshape((y_data.shape[0], 1)), kx
                )
        self.gpr.train()

    def _find_best_point(self, x_data=None, y_data=None, enable_tunneling=False):
        """
        Function that analyse a set of x_data and y_data and give back the
        more interesting point to evaluates according to the selected criterion.

        Parameters
        ----------

        x_data: ndarray(n_points, nx)
        y_data: ndarray(n_points, 1)

        Returns
        -------

        ndarray(nx, 1): the next best point to evaluate
        boolean: success flag
        """
        self._train_gpr(x_data, y_data)

        criterion = self.options["criterion"]
        n_start = self.options["n_start"]
        n_max_optim = self.options["n_max_optim"]

        method = "SLSQP"
        options = {"maxiter": 200}
        if self.mixint:
            # mixed case: express the numerical bounds as inequality
            # constraints (SLSQP bounds are dropped)
            bounds = self.design_space.get_num_bounds()
            cons = []
            for j in range(len(bounds)):
                lower, upper = bounds[j]
                # default args lb/ub/i freeze the loop values in the lambdas
                l = {"type": "ineq", "fun": lambda x, lb=lower, i=j: x[i] - lb}
                u = {"type": "ineq", "fun": lambda x, ub=upper, i=j: ub - x[i]}
                cons.append(l)
                cons.append(u)
            bounds = None
            options = {"maxiter": 300}
        else:
            bounds = self.design_space.get_num_bounds()
            cons = ()

        # acquisition function to minimize (EI is negated for minimization)
        if criterion == "EI":
            self.obj_k = lambda x: -self.EI(np.atleast_2d(x), enable_tunneling, x_data)
        elif criterion == "SBO":
            self.obj_k = lambda x: self.SBO(np.atleast_2d(x))
        elif criterion == "LCB":
            self.obj_k = lambda x: self.LCB(np.atleast_2d(x))

        success = False
        n_optim = 1  # in order to have some success optimizations with SLSQP
        while not success and n_optim <= n_max_optim:
            opt_all = []
            x_start = self._sampling(n_start)

            for ii in range(n_start):
                try:
                    opt_all.append(
                        minimize(
                            lambda x: float(np.array(self.obj_k(x)).flat[0]),
                            x_start[ii, :],
                            method=method,
                            bounds=bounds,
                            constraints=cons,
                            options=options,
                        )
                    )

                except ValueError:  # in case "x0 violates bound constraints" error
                    print("warning: `x0` violates bound constraints")
                    print("x0={}".format(x_start[ii, :]))
                    print("bounds={}".format(bounds))
                    # NOTE(review): this fallback dict has no "message"/"fun"
                    # keys — the opt_i["message"] access below would raise
                    # KeyError for such entries; confirm this path
                    opt_all.append({"success": False})

            opt_all = np.asarray(opt_all)
            # treat max-feval termination as a success
            for opt_i in opt_all:
                if (
                    opt_i["message"]
                    == "Maximum number of function evaluations has been exceeded."
                ):
                    opt_i["success"] = True
            opt_success = opt_all[[opt_i["success"] for opt_i in opt_all]]
            obj_success = np.array([opt_i["fun"] for opt_i in opt_success])
            success = obj_success.size != 0
            if not success:
                self.log("New start point for the internal optimization")
                n_optim += 1

        if n_optim >= n_max_optim:
            # self.log("Internal optimization failed at EGO iter = {}".format(k))
            return np.atleast_2d(0), False

        ind_min = np.argmin(obj_success)
        opt = opt_success[ind_min]
        x_et_k = np.atleast_2d(opt["x"])

        return x_et_k, True

    def _get_virtual_point(self, x, y_data):
        """
        Depending on the qEI attribute return a predicted value at given point x.

        Parameters
        ----------

        x: ndarray(1, 1) the x-coord point where to forecast the y-coord virtual point
        y_data: current y evaluation list only used when qEI is CLmin

        Returns
        -------

        ndarray(1, 1): the so-called virtual y-coord point
        """
        qEI = self.options["qEI"]

        if qEI == "CLmin":
            # constant liar: pretend the new point matches the current minimum
            return np.min(y_data)

        if qEI == "KB":
            # kriging believer: plain surrogate prediction
            return self.gpr.predict_values(x)

        # Kriging believer variants: prediction shifted by +/- 3 sigma or a
        # random normal multiple of sigma
        if qEI == "KBUB":
            conf = 3.0

        if qEI == "KBLB":
            conf = -3.0

        if qEI == "KBRand":
            conf = np.random.randn()

        pred = self.gpr.predict_values(x)
        var = self.gpr.predict_variances(x)
        return pred + conf * np.sqrt(var)
| 14,811 | 33.446512 | 102 | py |
smt | smt-master/smt/applications/tests/test_mfkpls.py | # -*- coding: utf-8 -*-
"""
Created on Mon May 07 14:20:11 2018
@author: m.meliani
Adapted to new SMT version in march 2020 by Nathalie Bartoli
"""
import matplotlib
matplotlib.use("Agg")
import unittest
import numpy as np
import unittest
import inspect
from collections import OrderedDict
from smt.problems import Sphere, TensorProduct
from smt.sampling_methods import LHS, FullFactorial
from smt.utils.sm_test_case import SMTestCase
from smt.utils.silence import Silence
from smt.utils import compute_rms_error
from smt.surrogate_models import LS, QP, KPLS, KRG, KPLSK, GEKPLS, GENN
from smt.applications.mfk import MFK, NestedLHS
from smt.applications.mfkpls import MFKPLS
from copy import deepcopy
print_output = False
class TestMFKPLS(SMTestCase):
    """Regression tests for the MFKPLS multi-fidelity surrogate."""

    def setUp(self):
        # problem sizes shared by the tests below
        self.nt = 100
        self.ne = 100
        self.ndim = 3
        self.n_comp = 2

    def test_mfkpls(self):
        """Train MFKPLS on several tensor-product problems and check RMS errors."""
        self.problems = ["exp", "tanh", "cos"]

        for fname in self.problems:
            prob = TensorProduct(ndim=self.ndim, func=fname)
            sampling = FullFactorial(xlimits=prob.xlimits, clip=True)

            np.random.seed(0)
            xt = sampling(self.nt)
            yt = prob(xt)
            # append the analytical derivatives as extra output columns
            for i in range(self.ndim):
                yt = np.concatenate((yt, prob(xt, kx=i)), axis=1)

            # low fidelity: scaled/biased version of the high-fidelity output
            y_lf = 2 * prob(xt) + 2
            x_lf = deepcopy(xt)
            np.random.seed(1)
            xe = sampling(self.ne)
            ye = prob(xe)

            sm = MFKPLS()
            if sm.options.is_declared("xlimits"):
                sm.options["xlimits"] = prob.xlimits
            sm.options["print_global"] = False
            # to test some options
            sm.options["eval_noise"] = True
            sm.options["optim_var"] = True
            sm.options["n_comp"] = self.n_comp
            sm.options["theta0"] = [1e-2] * self.n_comp

            # high-fidelity dataset without name, low-fidelity with name=0
            # (fix: the original set these twice in a row)
            sm.set_training_values(xt, yt[:, 0])
            sm.set_training_values(x_lf, y_lf[:, 0], name=0)

            with Silence():
                sm.train()

            t_error = compute_rms_error(sm)
            e_error = compute_rms_error(sm, xe, ye)

            self.assert_error(t_error, 0.0, 1.5)
            self.assert_error(e_error, 0.0, 1.5)

    def test_mfkpls_derivs(self):
        """Check MFKPLS prediction-derivative accuracy on the Sphere problem."""
        if self.ndim < 2:
            # fix: the original only printed a warning and then crashed on
            # dye[1]; skip explicitly instead
            self.skipTest("test_mfkpls_derivs requires a dimension greater than 1")

        prob = Sphere(ndim=self.ndim)
        sampling = LHS(xlimits=prob.xlimits)

        nt = 100
        np.random.seed(0)
        xt = sampling(nt)
        yt = prob(xt)
        dyt = {}
        for kx in range(prob.xlimits.shape[0]):
            dyt[kx] = prob(xt, kx=kx)

        # low fidelity: scaled/biased version of the high-fidelity output
        y_lf = 2 * prob(xt) + 2
        x_lf = deepcopy(xt)

        np.random.seed(1)
        xe = sampling(self.ne)
        ye = prob(xe)
        dye = {}
        for kx in range(prob.xlimits.shape[0]):
            dye[kx] = prob(xe, kx=kx)

        sm = MFKPLS()
        if sm.options.is_declared("xlimits"):
            sm.options["xlimits"] = prob.xlimits
        sm.options["print_global"] = False
        sm.options["n_comp"] = self.n_comp
        sm.options["theta0"] = [1e-2] * self.n_comp

        sm.set_training_values(xt, yt)
        sm.set_training_values(x_lf, y_lf, name=0)

        with Silence():
            sm.train()

        t_error = compute_rms_error(sm)
        e_error = compute_rms_error(sm, xe, ye)
        e_error0 = compute_rms_error(sm, xe, dye[0], 0)
        e_error1 = compute_rms_error(sm, xe, dye[1], 1)

        if print_output:
            # fix: the original referenced undefined names pname/sname here
            print(
                "%8s %6s %18.9e %18.9e %18.9e %18.9e"
                % (
                    type(prob).__name__[:6],
                    sm.name,
                    t_error,
                    e_error,
                    e_error0,
                    e_error1,
                )
            )

        self.assert_error(e_error0, 0.0, 1e-1)
        self.assert_error(e_error1, 0.0, 1e-1)

    @staticmethod
    def run_mfkpls_example():
        """Documentation example: 1D multi-fidelity MFKPLS with plots."""
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.applications.mfk import MFK, NestedLHS
        from smt.applications.mfkpls import MFKPLS

        # low fidelity model
        def lf_function(x):
            import numpy as np

            return (
                0.5 * ((x * 6 - 2) ** 2) * np.sin((x * 6 - 2) * 2)
                + (x - 0.5) * 10.0
                - 5
            )

        # high fidelity model
        def hf_function(x):
            import numpy as np

            return ((x * 6 - 2) ** 2) * np.sin((x * 6 - 2) * 2)

        # Problem set up
        xlimits = np.array([[0.0, 1.0]])
        xdoes = NestedLHS(nlevel=2, xlimits=xlimits, random_state=0)
        xt_c, xt_e = xdoes(7)

        # Evaluate the HF and LF functions
        yt_e = hf_function(xt_e)
        yt_c = lf_function(xt_c)

        # choice of number of PLS components
        ncomp = 1
        sm = MFKPLS(n_comp=ncomp, theta0=ncomp * [1.0])

        # low-fidelity dataset names being integers from 0 to level-1
        sm.set_training_values(xt_c, yt_c, name=0)
        # high-fidelity dataset without name
        sm.set_training_values(xt_e, yt_e)

        # train the model
        sm.train()

        x = np.linspace(0, 1, 101, endpoint=True).reshape(-1, 1)

        # query the outputs
        y = sm.predict_values(x)
        mse = sm.predict_variances(x)
        derivs = sm.predict_derivatives(x, kx=0)

        plt.figure()

        plt.plot(x, hf_function(x), label="reference")
        plt.plot(x, y, linestyle="-.", label="mean_gp")
        plt.scatter(xt_e, yt_e, marker="o", color="k", label="HF doe")
        plt.scatter(xt_c, yt_c, marker="*", color="g", label="LF doe")

        plt.legend(loc=0)
        plt.ylim(-10, 17)
        plt.xlim(-0.1, 1.1)
        plt.xlabel(r"$x$")
        plt.ylabel(r"$y$")

        plt.show()
# Allow running this test file directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 6,025 | 27.027907 | 83 | py |
smt | smt-master/smt/applications/tests/test_mfkplsk.py | # -*- coding: utf-8 -*-
"""
Created on Mon May 07 14:20:11 2018
@author: m.meliani
Adapted to new SMT version in march 2020 by Nathalie Bartoli
"""
import matplotlib
matplotlib.use("Agg")
import unittest
import numpy as np
import unittest
import inspect
from collections import OrderedDict
from smt.problems import Sphere, TensorProduct
from smt.sampling_methods import LHS, FullFactorial
from smt.utils.sm_test_case import SMTestCase
from smt.utils.silence import Silence
from smt.utils import compute_rms_error
from smt.surrogate_models import LS, QP, KPLS, KRG, KPLSK, GEKPLS, GENN
from smt.applications.mfk import MFK, NestedLHS
from smt.applications.mfkplsk import MFKPLSK
from copy import deepcopy
# Set to True to print RMS errors while the tests run.
print_output = False
class TestMFKPLSK(SMTestCase):
    """Tests for the multi-fidelity KPLSK application (MFKPLSK)."""

    def setUp(self):
        # Problem sizes shared by all tests.
        self.nt = 100
        self.ne = 100
        self.ndim = 3
        self.n_comp = 2

    def test_mfkplsk(self):
        """Train MFKPLSK on tensor-product problems and check training/test RMS errors."""
        self.problems = ["exp", "tanh", "cos"]

        for fname in self.problems:
            prob = TensorProduct(ndim=self.ndim, func=fname)
            sampling = FullFactorial(xlimits=prob.xlimits, clip=True)

            np.random.seed(0)
            xt = sampling(self.nt)
            yt = prob(xt)
            for i in range(self.ndim):
                yt = np.concatenate((yt, prob(xt, kx=i)), axis=1)

            # Artificial low-fidelity data: affine transform of the HF response.
            y_lf = 2 * prob(xt) + 2
            x_lf = deepcopy(xt)
            np.random.seed(1)
            xe = sampling(self.ne)
            ye = prob(xe)

            sm = MFKPLSK()
            if sm.options.is_declared("xlimits"):
                sm.options["xlimits"] = prob.xlimits
            sm.options["print_global"] = False
            # to test some options
            sm.options["eval_noise"] = True
            sm.options["optim_var"] = True
            sm.options["n_comp"] = self.n_comp
            sm.options["theta0"] = [1e-2] * self.n_comp

            # NOTE: these two calls were previously duplicated; each call
            # overwrites the previous dataset, so registering once suffices.
            sm.set_training_values(xt, yt[:, 0])
            sm.set_training_values(x_lf, y_lf[:, 0], name=0)

            with Silence():
                sm.train()

            t_error = compute_rms_error(sm)
            e_error = compute_rms_error(sm, xe, ye)

            self.assert_error(t_error, 0.0, 1.5)
            self.assert_error(e_error, 0.0, 1.5)

    def test_mfkplsk_derivs(self):
        """Check MFKPLSK prediction derivatives against Sphere analytic derivatives."""
        if self.ndim < 2:
            # The assertions below read dye[1], which only exists for ndim >= 2,
            # so skip instead of falling through and crashing.
            print("To try test_mfkplsk_derivs the dimension must be greater than 1")
            return
        prob = Sphere(ndim=self.ndim)
        sampling = LHS(xlimits=prob.xlimits)

        nt = 100
        np.random.seed(0)
        xt = sampling(nt)
        yt = prob(xt)

        # Artificial low-fidelity data: affine transform of the HF response.
        y_lf = 2 * prob(xt) + 2
        x_lf = deepcopy(xt)

        np.random.seed(1)
        xe = sampling(self.ne)
        ye = prob(xe)
        dye = {}
        for kx in range(prob.xlimits.shape[0]):
            dye[kx] = prob(xe, kx=kx)

        sm = MFKPLSK()
        if sm.options.is_declared("xlimits"):
            sm.options["xlimits"] = prob.xlimits
        sm.options["print_global"] = False
        # to test some options
        sm.options["eval_noise"] = False
        sm.options["n_comp"] = self.n_comp
        sm.options["theta0"] = [1e-2] * self.n_comp

        sm.set_training_values(xt, yt)
        sm.set_training_values(x_lf, y_lf, name=0)

        with Silence():
            sm.train()

        t_error = compute_rms_error(sm)
        e_error = compute_rms_error(sm, xe, ye)
        e_error0 = compute_rms_error(sm, xe, dye[0], 0)
        e_error1 = compute_rms_error(sm, xe, dye[1], 1)

        if print_output:
            # This branch used to reference undefined names pname/sname and
            # raised a NameError whenever print_output was enabled.
            print(
                "%18.9e %18.9e %18.9e %18.9e"
                % (t_error, e_error, e_error0, e_error1)
            )

        self.assert_error(e_error0, 0.0, 1e-1)
        self.assert_error(e_error1, 0.0, 1e-1)

    @staticmethod
    def run_mfkplsk_example():
        """Documentation example: two-level multi-fidelity MFKPLSK on a 1D function."""
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.applications.mfk import MFK, NestedLHS
        from smt.applications.mfkplsk import MFKPLSK

        # low fidelity model
        def lf_function(x):
            import numpy as np

            return (
                0.5 * ((x * 6 - 2) ** 2) * np.sin((x * 6 - 2) * 2)
                + (x - 0.5) * 10.0
                - 5
            )

        # high fidelity model
        def hf_function(x):
            import numpy as np

            return ((x * 6 - 2) ** 2) * np.sin((x * 6 - 2) * 2)

        # Problem set up
        xlimits = np.array([[0.0, 1.0]])
        xdoes = NestedLHS(nlevel=2, xlimits=xlimits, random_state=0)
        xt_c, xt_e = xdoes(7)
        # Evaluate the HF and LF functions
        yt_e = hf_function(xt_e)
        yt_c = lf_function(xt_c)
        # choice of number of PLS components
        ncomp = 1
        sm = MFKPLSK(n_comp=ncomp, theta0=ncomp * [1.0])
        # low-fidelity dataset names being integers from 0 to level-1
        sm.set_training_values(xt_c, yt_c, name=0)
        # high-fidelity dataset without name
        sm.set_training_values(xt_e, yt_e)
        # train the model
        sm.train()
        x = np.linspace(0, 1, 101, endpoint=True).reshape(-1, 1)
        # query the outputs (mean, variance and derivatives of the GP)
        y = sm.predict_values(x)
        mse = sm.predict_variances(x)
        derivs = sm.predict_derivatives(x, kx=0)
        plt.figure()
        plt.plot(x, hf_function(x), label="reference")
        plt.plot(x, y, linestyle="-.", label="mean_gp")
        plt.scatter(xt_e, yt_e, marker="o", color="k", label="HF doe")
        plt.scatter(xt_c, yt_c, marker="*", color="g", label="LF doe")
        plt.legend(loc=0)
        plt.ylim(-10, 17)
        plt.xlim(-0.1, 1.1)
        plt.xlabel(r"$x$")
        plt.ylabel(r"$y$")
        plt.show()
# Allow running this test file directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 6,062 | 26.811927 | 84 | py |
smt | smt-master/smt/applications/tests/test_ego.py | # coding: utf-8
"""
Author: Remi Lafage <remi.lafage@onera.fr> and Nathalie Bartoli
This package is distributed under New BSD license.
"""
import warnings
warnings.filterwarnings("ignore")
import os
import unittest
import numpy as np
from sys import argv
import matplotlib
matplotlib.use("Agg")
from smt.applications import EGO
from smt.applications.ego import Evaluator
from smt.utils.sm_test_case import SMTestCase
from smt.problems import Branin, Rosenbrock
from smt.sampling_methods import FullFactorial
from multiprocessing import Pool
from smt.sampling_methods import LHS
from smt.surrogate_models import (
KRG,
GEKPLS,
KPLS,
MixIntKernelType,
DesignSpace,
OrdinalVariable,
FloatVariable,
CategoricalVariable,
IntegerVariable,
)
from smt.applications.mixed_integer import (
MixedIntegerContext,
MixedIntegerSamplingMethod,
)
# This implementation only works with Python > 3.3
class ParallelEvaluator(Evaluator):
    """Evaluator that maps the objective over points with a 3-worker process pool."""

    def run(self, fun, x):
        """Evaluate `fun` at each row of `x` in parallel; return the stacked results."""
        inputs = [np.atleast_2d(x[i]) for i in range(len(x))]
        with Pool(3) as workers:
            outputs = workers.map(fun, inputs)
        return np.array([out[0] for out in outputs])
class TestEGO(SMTestCase):
plot = None
@staticmethod
def function_test_1d(x):
# function xsinx
x = np.reshape(x, (-1,))
y = np.zeros(x.shape)
y = (x - 3.5) * np.sin((x - 3.5) / (np.pi))
return y.reshape((-1, 1))
def test_evaluator(self):
x = [[1], [2], [3]]
expected = TestEGO.function_test_1d(x)
actual = ParallelEvaluator().run(TestEGO.function_test_1d, x)
for i in range(len(x)):
self.assertAlmostEqual(expected[i, 0], actual[i, 0])
    def test_function_test_1d(self):
        """EGO with EI on the 1D xsinx function must find x~18.9, f~-15.1."""
        n_iter = 15
        xlimits = np.array([[0.0, 25.0]])
        criterion = "EI"
        design_space = DesignSpace(xlimits)
        ego = EGO(
            n_iter=n_iter,
            criterion=criterion,
            n_doe=3,
            surrogate=KRG(design_space=design_space, print_global=False),
            random_state=42,
        )
        x_opt, y_opt, _, _, _ = ego.optimize(fun=TestEGO.function_test_1d)
        self.assertAlmostEqual(18.9, float(x_opt), delta=1)
        self.assertAlmostEqual(-15.1, float(y_opt), delta=1)
    def test_function_test_1d_parallel(self):
        """Same 1D optimization as above, but with 3 points proposed per iteration."""
        n_iter = 3
        xlimits = np.array([[0.0, 25.0]])
        design_space = DesignSpace(xlimits)
        criterion = "EI"
        n_parallel = 3
        ego = EGO(
            n_iter=n_iter,
            criterion=criterion,
            n_doe=3,
            surrogate=KRG(design_space=design_space, print_global=False),
            n_parallel=n_parallel,
            evaluator=ParallelEvaluator(),
            random_state=42,
        )
        x_opt, y_opt, _, _, _ = ego.optimize(fun=TestEGO.function_test_1d)
        self.assertAlmostEqual(18.9, float(x_opt), delta=1)
        self.assertAlmostEqual(-15.1, float(y_opt), delta=1)
    @unittest.skipIf(int(os.getenv("RUN_SLOW", 0)) < 1, "too slow")
    def test_rosenbrock_2D(self):
        """EGO with LCB on Rosenbrock 2D must approach the (1, 1) optimum."""
        n_iter = 50
        fun = Rosenbrock(ndim=2)
        xlimits = fun.xlimits
        criterion = "LCB"  #'EI' or 'SBO' or 'LCB'
        random_state = 42
        design_space = DesignSpace(xlimits, seed=random_state)
        xdoe = FullFactorial(xlimits=xlimits)(10)
        ego = EGO(
            n_start=30,
            xdoe=xdoe,
            n_iter=n_iter,
            criterion=criterion,
            surrogate=KRG(design_space=design_space, n_start=25, print_global=False),
            random_state=random_state,
        )
        x_opt, y_opt, _, _, _ = ego.optimize(fun=fun)
        self.assertTrue(np.allclose([[1, 1]], x_opt, rtol=0.55))
        self.assertAlmostEqual(0.0, float(y_opt), delta=1)
    def test_rosenbrock_2D_SBO(self):
        """EGO with the plain surrogate-based (SBO) criterion on Rosenbrock 2D."""
        n_iter = 10
        fun = Rosenbrock(ndim=2)
        xlimits = fun.xlimits
        criterion = "SBO"  #'EI' or 'SBO' or 'LCB'
        design_space = DesignSpace(xlimits)
        xdoe = FullFactorial(xlimits=xlimits)(50)
        ego = EGO(
            xdoe=xdoe,
            n_iter=n_iter,
            criterion=criterion,
            surrogate=KRG(design_space=design_space, print_global=False),
            random_state=42,
        )
        x_opt, y_opt, _, _, _ = ego.optimize(fun=fun)
        self.assertTrue(np.allclose([[1, 1]], x_opt, atol=1))
        self.assertAlmostEqual(0.0, float(y_opt), delta=1)
    @unittest.skipIf(int(os.getenv("RUN_SLOW", 0)) < 1, "too slow")
    def test_rosenbrock_2D_parallel(self):
        """Parallel EGO (qEI with Kriging Believer) on Rosenbrock 2D."""
        n_iter = 20
        n_parallel = 5
        fun = Rosenbrock(ndim=2)
        xlimits = fun.xlimits
        criterion = "LCB"  #'EI' or 'SBO' or 'LCB'
        random_state = 42
        design_space = DesignSpace(xlimits, seed=random_state)
        xdoe = FullFactorial(xlimits=xlimits)(10)
        qEI = "KB"
        ego = EGO(
            xdoe=xdoe,
            n_iter=n_iter,
            criterion=criterion,
            surrogate=KRG(design_space=design_space, print_global=False),
            n_parallel=n_parallel,
            qEI=qEI,
            evaluator=ParallelEvaluator(),
            random_state=random_state,
        )
        x_opt, y_opt, _, _, _ = ego.optimize(fun=fun)
        print("Rosenbrock: ", x_opt)
        self.assertTrue(np.allclose([[1, 1]], x_opt, rtol=0.5))
        self.assertAlmostEqual(0.0, float(y_opt), delta=1)
    def test_branin_2D(self):
        """EGO with LCB on Branin must reach one of the three known optima."""
        n_iter = 20
        fun = Branin(ndim=2)
        criterion = "LCB"  #'EI' or 'SBO' or 'LCB'
        design_space = fun.design_space
        ego = EGO(
            surrogate=KRG(design_space=design_space, print_global=False),
            n_iter=n_iter,
            criterion=criterion,
            n_doe=10,
            random_state=42,
        )
        x_opt, y_opt, _, _, _ = ego.optimize(fun=fun)
        # 3 optimal points possible: [-pi, 12.275], [pi, 2.275], [9.42478, 2.475]
        self.assertTrue(
            np.allclose([[-3.14, 12.275]], x_opt, rtol=0.25)
            or np.allclose([[3.14, 2.275]], x_opt, rtol=0.25)
            or np.allclose([[9.42, 2.475]], x_opt, rtol=0.25)
        )
        self.assertAlmostEqual(0.39, float(y_opt), delta=0.8)
    @unittest.skipIf(int(os.getenv("RUN_SLOW", 0)) < 1, "too slow")
    def test_branin_2D_parallel(self):
        """Parallel EGO (5 points per iteration) on Branin."""
        n_iter = 10
        fun = Branin(ndim=2)
        n_parallel = 5
        xlimits = fun.xlimits
        criterion = "EI"  #'EI' or 'SBO' or 'LCB'
        design_space = DesignSpace(xlimits)
        xdoe = FullFactorial(xlimits=xlimits)(10)
        ego = EGO(
            xdoe=xdoe,
            n_iter=n_iter,
            criterion=criterion,
            surrogate=KRG(design_space=design_space, print_global=False),
            n_parallel=n_parallel,
            random_state=42,
        )
        x_opt, y_opt, _, _, _ = ego.optimize(fun=fun)
        # 3 optimal points possible: [-pi, 12.275], [pi, 2.275], [9.42478, 2.475]
        self.assertTrue(
            np.allclose([[-3.14, 12.275]], x_opt, rtol=0.5)
            or np.allclose([[3.14, 2.275]], x_opt, rtol=0.5)
            or np.allclose([[9.42, 2.475]], x_opt, rtol=0.5)
        )
        print("Branin=", x_opt)
        self.assertAlmostEqual(0.39, float(y_opt), delta=1)
    @unittest.skipIf(int(os.getenv("RUN_SLOW", 0)) < 1, "too slow")
    def test_branin_2D_mixed_parallel(self):
        """Parallel mixed-integer EGO (x0 integer, x1 float) on Branin with CLmin qEI."""
        n_parallel = 5
        n_iter = 20
        fun = Branin(ndim=2)
        xlimits = fun.xlimits
        criterion = "EI"  #'EI' or 'SBO' or 'LCB'
        qEI = "CLmin"
        random_state = 42
        design_space = DesignSpace(
            [
                IntegerVariable(*xlimits[0]),
                FloatVariable(*xlimits[1]),
            ],
            seed=random_state,
        )
        sm = KRG(design_space=design_space, print_global=False, n_start=25)
        mixint = MixedIntegerContext(design_space)
        sampling = mixint.build_sampling_method(FullFactorial)
        xdoe = sampling(10)
        ego = EGO(
            xdoe=xdoe,
            n_iter=n_iter,
            criterion=criterion,
            n_parallel=n_parallel,
            qEI=qEI,
            n_start=30,
            evaluator=ParallelEvaluator(),
            surrogate=sm,
            random_state=random_state,
        )
        x_opt, y_opt, _, _, _ = ego.optimize(fun=fun)
        # 3 optimal points possible: [-pi, 12.275], [pi, 2.275], [9.42478, 2.475]
        self.assertTrue(
            np.allclose([[-3, 12.275]], x_opt, rtol=0.2)
            or np.allclose([[3, 2.275]], x_opt, rtol=0.2)
            or np.allclose([[9, 2.475]], x_opt, rtol=0.2)
        )
        self.assertAlmostEqual(0.494, float(y_opt), delta=1)
    @unittest.skipIf(int(os.getenv("RUN_SLOW", 0)) < 1, "too slow")
    def test_branin_2D_mixed(self):
        """Mixed-integer EGO (x0 integer, x1 float) on Branin, no tunneling."""
        n_iter = 20
        fun = Branin(ndim=2)
        xlimits = fun.xlimits
        random_state = 42
        design_space = DesignSpace(
            [
                IntegerVariable(*xlimits[0]),
                FloatVariable(*xlimits[1]),
            ],
            seed=random_state,
        )
        criterion = "EI"  #'EI' or 'SBO' or 'LCB'
        sm = KRG(design_space=design_space, print_global=False)
        sampling = MixedIntegerSamplingMethod(FullFactorial, design_space)
        xdoe = sampling(10)
        ego = EGO(
            xdoe=xdoe,
            n_iter=n_iter,
            criterion=criterion,
            surrogate=sm,
            enable_tunneling=False,
            random_state=random_state,
        )
        x_opt, y_opt, _, _, _ = ego.optimize(fun=fun)
        # 3 optimal points possible: [-pi, 12.275], [pi, 2.275], [9.42478, 2.475]
        self.assertTrue(
            np.allclose([[-3, 12.275]], x_opt, rtol=0.2)
            or np.allclose([[3, 2.275]], x_opt, rtol=0.2)
            or np.allclose([[9, 2.475]], x_opt, rtol=0.2)
        )
        self.assertAlmostEqual(0.494, float(y_opt), delta=1)
    @unittest.skipIf(int(os.getenv("RUN_SLOW", 0)) < 1, "too slow")
    def test_branin_2D_mixed_tunnel(self):
        """Same mixed-integer Branin setup with tunneling enabled (looser tolerances)."""
        n_iter = 20
        fun = Branin(ndim=2)
        xlimits = fun.xlimits
        random_state = 42
        design_space = DesignSpace(
            [
                IntegerVariable(*xlimits[0]),
                FloatVariable(*xlimits[1]),
            ],
            seed=random_state,
        )
        criterion = "EI"  #'EI' or 'SBO' or 'LCB'
        sm = KRG(design_space=design_space, print_global=False)
        sampling = MixedIntegerSamplingMethod(FullFactorial, design_space)
        xdoe = sampling(30)
        ego = EGO(
            xdoe=xdoe,
            n_iter=n_iter,
            criterion=criterion,
            surrogate=sm,
            enable_tunneling=True,
            random_state=random_state,
        )
        x_opt, y_opt, _, _, _ = ego.optimize(fun=fun)
        # 3 optimal points possible: [-pi, 12.275], [pi, 2.275], [9.42478, 2.475]
        self.assertTrue(
            np.allclose([[-3, 12.275]], x_opt, rtol=2)
            or np.allclose([[3, 2.275]], x_opt, rtol=2)
            or np.allclose([[9, 2.475]], x_opt, rtol=2)
        )
        self.assertAlmostEqual(0.494, float(y_opt), delta=2)
@staticmethod
def function_test_mixed_integer(X):
import numpy as np
# float
x1 = X[:, 0]
# XType.ENUM 1
c1 = X[:, 1]
x2 = c1 == 0
x3 = c1 == 1
x4 = c1 == 2
# XType.ENUM 2
c2 = X[:, 2]
x5 = c2 == 0
x6 = c2 == 1
# int
i = X[:, 3]
y = (
(x2 + 2 * x3 + 3 * x4) * x5 * x1
+ (x2 + 2 * x3 + 3 * x4) * x6 * 0.95 * x1
+ i
)
return y
    @unittest.skipIf(int(os.getenv("RUN_SLOW", 0)) < 1, "too slow")
    def test_ego_mixed_integer(self):
        """Mixed-variable EGO (float/categorical/categorical/ordinal) reaches f~-15."""
        n_iter = 15
        n_doe = 5
        random_state = 42
        design_space = DesignSpace(
            [
                FloatVariable(-5, 5),
                CategoricalVariable(["blue", "red", "green"]),
                CategoricalVariable(["large", "small"]),
                OrdinalVariable([0, 2, 3]),
            ],
            seed=random_state,
        )
        xdoe, _ = design_space.sample_valid_x(n_doe)
        criterion = "EI"  #'EI' or 'SBO' or 'LCB'
        ego = EGO(
            n_iter=n_iter,
            criterion=criterion,
            xdoe=xdoe,
            surrogate=KRG(design_space=design_space, print_global=False),
            enable_tunneling=False,
            random_state=random_state,
        )
        _, y_opt, _, _, _ = ego.optimize(fun=TestEGO.function_test_mixed_integer)
        self.assertAlmostEqual(-15, float(y_opt), delta=5)
    @unittest.skipIf(int(os.getenv("RUN_SLOW", 0)) < 1, "too slow")
    def test_ego_mixed_integer_gower_distance(self):
        """Mixed-variable EGO with a Gower-distance categorical kernel."""
        n_iter = 15
        n_doe = 5
        random_state = 42
        design_space = DesignSpace(
            [
                FloatVariable(-5, 5),
                CategoricalVariable(["blue", "red", "green"]),
                CategoricalVariable(["large", "small"]),
                IntegerVariable(0, 2),
            ],
            seed=random_state,
        )
        xdoe, _ = design_space.sample_valid_x(n_doe)
        criterion = "EI"  #'EI' or 'SBO' or 'LCB'
        ego = EGO(
            n_start=30,
            n_iter=n_iter,
            criterion=criterion,
            xdoe=xdoe,
            surrogate=KRG(
                n_start=25,
                design_space=design_space,
                categorical_kernel=MixIntKernelType.GOWER,
                print_global=False,
            ),
            enable_tunneling=False,
            random_state=random_state,
        )
        _, y_opt, _, _, _ = ego.optimize(fun=TestEGO.function_test_mixed_integer)
        self.assertAlmostEqual(-15, float(y_opt), delta=5)
@unittest.skipIf(int(os.getenv("RUN_SLOW", 0)) < 1, "too slow")
def test_ego_mixed_integer_hierarchical_NN(self):
random_state = 42
def f_neu(x1, x2, x3, x4):
if x4 == 0:
return 2 * x1 + x2 - 0.5 * x3
elif x4 == 1:
return -x1 + 2 * x2 - 0.5 * x3
elif x4 == 2:
return -x1 + x2 + 0.5 * x3
else:
raise ValueError(f"Unexpected x4: {x4}")
def f1(x1, x2, x3, x4, x5):
return f_neu(x1, x2, x3, x4) + x5**2
def f2(x1, x2, x3, x4, x5, x6):
return f_neu(x1, x2, x3, x4) + (x5**2) + 0.3 * x6
def f3(x1, x2, x3, x4, x5, x6, x7):
return f_neu(x1, x2, x3, x4) + (x5**2) + 0.3 * x6 - 0.1 * x7**3
def f_hv(X):
y = []
for x in X:
x3_decoded = design_space.decode_values(x, i_dv=3)[0]
if x[0] == 0:
y.append(f1(x[1], x[2], x3_decoded, x[4], x[5]))
elif x[0] == 1:
y.append(f2(x[1], x[2], x3_decoded, x[4], x[5], x[6]))
elif x[0] == 2:
y.append(f3(x[1], x[2], x3_decoded, x[4], x[5], x[6], x[7]))
else:
raise ValueError(f"Unexpected x0: {x[0]}")
return np.array(y)
random_state = 42
design_space = DesignSpace(
[
OrdinalVariable(values=[1, 2, 3]), # x0
FloatVariable(-5, 2),
FloatVariable(-5, 2),
OrdinalVariable(values=[8, 16, 32, 64, 128, 256]), # x3
CategoricalVariable(values=["ReLU", "SELU", "ISRLU"]), # x4
IntegerVariable(0, 5), # x5
IntegerVariable(0, 5), # x6
IntegerVariable(0, 5), # x7
],
seed=random_state,
)
# x6 is active when x0 >= 2
design_space.declare_decreed_var(decreed_var=6, meta_var=0, meta_value=[2, 3])
# x7 is active when x0 >= 3
design_space.declare_decreed_var(decreed_var=7, meta_var=0, meta_value=3)
n_doe = 4
neutral_var_ds = DesignSpace(design_space.design_variables[1:])
sampling = MixedIntegerSamplingMethod(
LHS, neutral_var_ds, criterion="ese", random_state=random_state
)
x_cont = sampling(3 * n_doe)
xdoe1 = np.zeros((n_doe, 8))
x_cont2 = x_cont[:n_doe, :5]
xdoe1[:, 0] = np.zeros(n_doe)
xdoe1[:, 1:6] = x_cont2
# ydoe1 = f_hv(xdoe1)
xdoe1 = np.zeros((n_doe, 8))
xdoe1[:, 0] = np.zeros(n_doe)
xdoe1[:, 1:6] = x_cont2
xdoe2 = np.zeros((n_doe, 8))
x_cont2 = x_cont[n_doe : 2 * n_doe, :6]
xdoe2[:, 0] = np.ones(n_doe)
xdoe2[:, 1:7] = x_cont2
# ydoe2 = f_hv(xdoe2)
xdoe2 = np.zeros((n_doe, 8))
xdoe2[:, 0] = np.ones(n_doe)
xdoe2[:, 1:7] = x_cont2
xdoe3 = np.zeros((n_doe, 8))
xdoe3[:, 0] = 2 * np.ones(n_doe)
xdoe3[:, 1:] = x_cont[2 * n_doe :, :]
# ydoe3 = f_hv(xdoe3)
Xt = np.concatenate((xdoe1, xdoe2, xdoe3), axis=0)
# Yt = np.concatenate((ydoe1, ydoe2, ydoe3), axis=0)
n_iter = 6
criterion = "EI"
ego = EGO(
n_iter=n_iter,
criterion=criterion,
xdoe=Xt,
surrogate=KRG(
design_space=design_space,
categorical_kernel=MixIntKernelType.HOMO_HSPHERE,
theta0=[1e-2],
n_start=5,
corr="abs_exp",
print_global=False,
),
enable_tunneling=False,
random_state=random_state,
)
x_opt, y_opt, dnk, x_data, y_data = ego.optimize(fun=f_hv)
self.assertAlmostEqual(
f_hv(np.atleast_2d([2, -5, -5, 5, 0, 0, 0, 5])),
float(y_opt),
delta=15,
)
    @unittest.skipIf(int(os.getenv("RUN_SLOW", 0)) < 1, "too slow")
    def test_ego_mixed_integer_hierarchical_Goldstein(self):
        """EGO on the hierarchical Goldstein problem with decreed variables.

        The meta variable (column 0) selects which of f1/f2/f3/H is evaluated;
        columns 4, 5, 7 and 8 are decreed (only active for some meta values).
        """

        def H(x1, x2, x3, x4, z3, z4, x5, cos_term):
            # Goldstein polynomial response; optional cosine perturbation on x5.
            h = (
                53.3108
                + 0.184901 * x1
                - 5.02914 * x1**3 * 10 ** (-6)
                + 7.72522 * x1**z3 * 10 ** (-8)
                - 0.0870775 * x2
                - 0.106959 * x3
                + 7.98772 * x3**z4 * 10 ** (-6)
                + 0.00242482 * x4
                + 1.32851 * x4**3 * 10 ** (-6)
                - 0.00146393 * x1 * x2
                - 0.00301588 * x1 * x3
                - 0.00272291 * x1 * x4
                + 0.0017004 * x2 * x3
                + 0.0038428 * x2 * x4
                - 0.000198969 * x3 * x4
                + 1.86025 * x1 * x2 * x3 * 10 ** (-5)
                - 1.88719 * x1 * x2 * x4 * 10 ** (-6)
                + 2.50923 * x1 * x3 * x4 * 10 ** (-5)
                - 5.62199 * x2 * x3 * x4 * 10 ** (-5)
            )
            if cos_term:
                h += 5.0 * np.cos(2.0 * np.pi * (x5 / 100.0)) - 2.0
            return h

        def f1(x1, x2, z1, z2, z3, z4, x5, cos_term):
            # Both x3 and x4 are inactive: enumerate their discretized values.
            c1 = z2 == 0
            c2 = z2 == 1
            c3 = z2 == 2
            c4 = z3 == 0
            c5 = z3 == 1
            c6 = z3 == 2
            y = (
                c4
                * (
                    c1 * H(x1, x2, 20, 20, z3, z4, x5, cos_term)
                    + c2 * H(x1, x2, 50, 20, z3, z4, x5, cos_term)
                    + c3 * H(x1, x2, 80, 20, z3, z4, x5, cos_term)
                )
                + c5
                * (
                    c1 * H(x1, x2, 20, 50, z3, z4, x5, cos_term)
                    + c2 * H(x1, x2, 50, 50, z3, z4, x5, cos_term)
                    + c3 * H(x1, x2, 80, 50, z3, z4, x5, cos_term)
                )
                + c6
                * (
                    c1 * H(x1, x2, 20, 80, z3, z4, x5, cos_term)
                    + c2 * H(x1, x2, 50, 80, z3, z4, x5, cos_term)
                    + c3 * H(x1, x2, 80, 80, z3, z4, x5, cos_term)
                )
            )
            return y

        def f2(x1, x2, x3, z2, z3, z4, x5, cos_term):
            # x4 is inactive: enumerate its discretized values.
            c1 = z2 == 0
            c2 = z2 == 1
            c3 = z2 == 2
            y = (
                c1 * H(x1, x2, x3, 20, z3, z4, x5, cos_term)
                + c2 * H(x1, x2, x3, 50, z3, z4, x5, cos_term)
                + c3 * H(x1, x2, x3, 80, z3, z4, x5, cos_term)
            )
            return y

        def f3(x1, x2, x4, z1, z3, z4, x5, cos_term):
            # x3 is inactive: enumerate its discretized values.
            c1 = z1 == 0
            c2 = z1 == 1
            c3 = z1 == 2
            y = (
                c1 * H(x1, x2, 20, x4, z3, z4, x5, cos_term)
                + c2 * H(x1, x2, 50, x4, z3, z4, x5, cos_term)
                + c3 * H(x1, x2, 80, x4, z3, z4, x5, cos_term)
            )
            return y

        def f_hv(X):
            # Dispatch each sample according to the meta variable (column 0).
            y = []
            for x in X:
                if x[0] == 0:
                    y.append(
                        f1(x[2], x[3], x[7], x[8], x[9], x[10], x[6], cos_term=x[1])
                    )
                elif x[0] == 1:
                    y.append(
                        f2(x[2], x[3], x[4], x[8], x[9], x[10], x[6], cos_term=x[1])
                    )
                elif x[0] == 2:
                    y.append(
                        f3(x[2], x[3], x[5], x[7], x[9], x[10], x[6], cos_term=x[1])
                    )
                elif x[0] == 3:
                    y.append(
                        H(x[2], x[3], x[4], x[5], x[9], x[10], x[6], cos_term=x[1])
                    )
                else:
                    raise ValueError
            return np.array(y)

        random_state = 0
        ds = DesignSpace(
            [
                CategoricalVariable(values=[0, 1, 2, 3]),  # meta
                OrdinalVariable(values=[0, 1]),  # x1
                FloatVariable(0, 100),  # x2
                FloatVariable(0, 100),
                FloatVariable(0, 100),
                FloatVariable(0, 100),
                FloatVariable(0, 100),
                IntegerVariable(0, 2),  # x7
                IntegerVariable(0, 2),
                IntegerVariable(0, 2),
                IntegerVariable(0, 2),
            ],
            seed=random_state,
        )
        # x4 is acting if meta == 1, 3
        ds.declare_decreed_var(decreed_var=4, meta_var=0, meta_value=[1, 3])
        # x5 is acting if meta == 2, 3
        ds.declare_decreed_var(decreed_var=5, meta_var=0, meta_value=[2, 3])
        # x7 is acting if meta == 0, 2
        ds.declare_decreed_var(decreed_var=7, meta_var=0, meta_value=[0, 2])
        # x8 is acting if meta == 0, 1
        ds.declare_decreed_var(decreed_var=8, meta_var=0, meta_value=[0, 1])
        n_doe = 25
        ds.seed = random_state
        Xt, x_is_active = ds.sample_valid_x(n_doe)
        n_iter = 10
        criterion = "EI"
        ego = EGO(
            n_iter=n_iter,
            criterion=criterion,
            xdoe=Xt,
            surrogate=KRG(
                design_space=ds,
                categorical_kernel=MixIntKernelType.HOMO_HSPHERE,
                theta0=[1e-2],
                n_start=10,
                corr="squar_exp",
                print_global=False,
            ),
            verbose=True,
            enable_tunneling=False,
            random_state=random_state,
            n_start=25,
        )
        x_opt, y_opt, dnk, x_data, y_data = ego.optimize(fun=f_hv)
        self.assertAlmostEqual(
            9.022,
            float(y_opt),
            delta=25,
        )
    def test_ego_mixed_integer_homo_gaussian(self):
        """Mixed-variable EGO with the EXP_HOMO_HSPHERE categorical kernel."""
        n_iter = 15
        random_state = 42
        design_space = DesignSpace(
            [
                FloatVariable(-5, 5),
                CategoricalVariable(["blue", "red", "green"]),
                CategoricalVariable(["large", "small"]),
                IntegerVariable(0, 2),
            ],
            seed=random_state,
        )
        n_doe = 5
        sampling = MixedIntegerSamplingMethod(
            LHS,
            design_space,
            criterion="ese",
            random_state=random_state,
            output_in_folded_space=True,
        )
        xdoe = sampling(n_doe)
        criterion = "EI"  #'EI' or 'SBO' or 'LCB'
        ego = EGO(
            n_iter=n_iter,
            criterion=criterion,
            xdoe=xdoe,
            surrogate=KRG(
                design_space=design_space,
                categorical_kernel=MixIntKernelType.EXP_HOMO_HSPHERE,
                print_global=False,
            ),
            enable_tunneling=False,
            random_state=random_state,
        )
        _, y_opt, _, _, _ = ego.optimize(fun=TestEGO.function_test_mixed_integer)
        self.assertAlmostEqual(-15, float(y_opt), delta=5)
    @unittest.skipIf(int(os.getenv("RUN_SLOW", 0)) < 1, "too slow")
    def test_ego_mixed_integer_homo_gaussian_pls(self):
        """Mixed-variable EGO with a KPLS surrogate (1 PLS component)."""
        n_iter = 15
        random_state = 42
        design_space = DesignSpace(
            [
                FloatVariable(-5, 5),
                CategoricalVariable(["blue", "red", "green"]),
                CategoricalVariable(["large", "small"]),
                IntegerVariable(0, 2),
            ],
            seed=random_state,
        )
        sampling = MixedIntegerSamplingMethod(
            LHS,
            design_space,
            criterion="ese",
            random_state=random_state,
            output_in_folded_space=True,
        )
        n_doe = 5
        xdoe = sampling(n_doe)
        criterion = "EI"  #'EI' or 'SBO' or 'LCB'
        sm = KPLS(
            print_global=False,
            design_space=design_space,
            categorical_kernel=MixIntKernelType.EXP_HOMO_HSPHERE,
            n_comp=1,
            cat_kernel_comps=[2, 2],
        )
        ego = EGO(
            n_iter=n_iter,
            criterion=criterion,
            xdoe=xdoe,
            surrogate=sm,
            enable_tunneling=False,
            random_state=random_state,
        )
        _, y_opt, _, _, _ = ego.optimize(fun=TestEGO.function_test_mixed_integer)
        self.assertAlmostEqual(-15, float(y_opt), delta=5)
    def test_ydoe_option(self):
        """Passing a precomputed ydoe must work and still converge on Branin."""
        n_iter = 15
        fun = Branin(ndim=2)
        xlimits = fun.xlimits
        criterion = "LCB"  #'EI' or 'SBO' or 'LCB'
        random_state = 42
        design_space = DesignSpace(xlimits, seed=random_state)
        xdoe = FullFactorial(xlimits=xlimits)(10)
        ydoe = fun(xdoe)
        ego = EGO(
            xdoe=xdoe,
            ydoe=ydoe,
            n_iter=n_iter,
            criterion=criterion,
            surrogate=KRG(design_space=design_space, print_global=False),
            random_state=random_state,
        )
        _, y_opt, _, _, _ = ego.optimize(fun=fun)
        self.assertAlmostEqual(0.39, float(y_opt), delta=1)
    def test_find_best_point(self):
        """Internal _find_best_point must locate the LCB minimizer near x=6.5."""
        fun = TestEGO.function_test_1d
        xlimits = np.array([[0.0, 25.0]])
        random_state = 42
        design_space = DesignSpace(xlimits, seed=random_state)
        xdoe = FullFactorial(xlimits=xlimits)(3)
        ydoe = fun(xdoe)
        ego = EGO(
            xdoe=xdoe,
            ydoe=ydoe,
            n_iter=1,
            criterion="LCB",
            surrogate=KRG(design_space=design_space, print_global=False),
            n_start=30,
            enable_tunneling=False,
            random_state=random_state,
        )
        _, _, _, _, _ = ego.optimize(fun=fun)
        x, _ = ego._find_best_point(xdoe, ydoe, enable_tunneling=False)
        self.assertAlmostEqual(6.5, float(x), delta=1)
@staticmethod
def initialize_ego_gek(func="exp", criterion="LCB"):
from smt.problems import TensorProduct
class TensorProductIndirect(TensorProduct):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.super = super()
def _evaluate(self, x, kx):
assert kx is None
response = self.super._evaluate(x, kx)
sens = np.hstack(
self.super._evaluate(x, ki) for ki in range(x.shape[1])
)
return np.hstack((response, sens))
fun = TensorProductIndirect(ndim=2, func=func)
random_state = 42
design_space = DesignSpace(fun.xlimits, seed=42)
# Construction of the DOE
sampling = LHS(xlimits=fun.xlimits, criterion="m", random_state=random_state)
xdoe = sampling(20)
ydoe = fun(xdoe)
# Build the GEKPLS surrogate model
n_comp = 2
sm = GEKPLS(
theta0=[1e-2] * n_comp,
design_space=design_space,
extra_points=1,
eval_comp_treshold=0.8,
print_prediction=False,
n_comp=n_comp,
)
# Build the EGO optimizer and optimize
ego = EGO(
xdoe=xdoe,
ydoe=ydoe,
n_iter=5,
criterion=criterion,
surrogate=sm,
n_start=30,
enable_tunneling=False,
random_state=random_state,
)
return ego, fun
def test_ego_gek(self):
ego, fun = self.initialize_ego_gek()
x_opt, _, _, _, _ = ego.optimize(fun=fun)
self.assertAlmostEqual(-1.0, float(x_opt[0]), delta=1e-4)
self.assertAlmostEqual(-1.0, float(x_opt[1]), delta=1e-4)
    def test_ei_gek(self):
        """Expected-improvement value of the trained GEKPLS model at a fixed point."""
        ego, fun = self.initialize_ego_gek(func="cos", criterion="EI")
        x_data, y_data = ego._setup_optimizer(fun)
        ego._train_gpr(x_data, y_data)
        # Test the EI value at the following point
        ei = ego.EI(np.array([[0.8398599985874058, -0.3240337426231973]]))
        self.assertTrue(np.allclose(ei, [6.87642e-12, 1.47804e-10, 2.76223], atol=1e-1))
    def test_qei_criterion_default(self):
        """Default qEI virtual point must equal the 3-sigma lower kriging bound."""
        fun = TestEGO.function_test_1d
        xlimits = np.array([[0.0, 25.0]])
        random_state = 42
        design_space = DesignSpace(xlimits, seed=random_state)
        xdoe = FullFactorial(xlimits=xlimits)(3)
        ydoe = fun(xdoe)
        ego = EGO(
            xdoe=xdoe,
            ydoe=ydoe,
            n_iter=1,
            n_parallel=2,
            criterion="SBO",
            surrogate=KRG(design_space=design_space, print_global=False),
            n_start=30,
            random_state=random_state,
        )
        ego._setup_optimizer(fun)
        ego.gpr.set_training_values(xdoe, ydoe)
        ego.gpr.train()
        xtest = np.array([[10.0]])
        # test that default virtual point should be equal to 3sigma lower bound kriging interval
        expected = float(
            ego.gpr.predict_values(xtest)
            - 3 * np.sqrt(ego.gpr.predict_variances(xtest))
        )
        actual = float(ego._get_virtual_point(xtest, fun(xtest))[0])
        self.assertAlmostEqual(expected, actual)
@unittest.skipIf(int(os.getenv("RUN_SLOW", 0)) < 2, "too slow")
def test_examples(self):
self.run_ego_example()
self.run_ego_parallel_example()
self.run_ego_mixed_integer_example()
@staticmethod
def run_ego_example():
import numpy as np
from smt.applications import EGO
from smt.surrogate_models import KRG
from smt.utils.design_space import DesignSpace
import matplotlib.pyplot as plt
def function_test_1d(x):
# function xsinx
import numpy as np
x = np.reshape(x, (-1,))
y = np.zeros(x.shape)
y = (x - 3.5) * np.sin((x - 3.5) / (np.pi))
return y.reshape((-1, 1))
n_iter = 6
xlimits = np.array([[0.0, 25.0]])
random_state = 42 # for reproducibility
design_space = DesignSpace(xlimits, seed=random_state)
xdoe = np.atleast_2d([0, 7, 25]).T
n_doe = xdoe.size
criterion = "EI" #'EI' or 'SBO' or 'LCB'
ego = EGO(
n_iter=n_iter,
criterion=criterion,
xdoe=xdoe,
surrogate=KRG(design_space=design_space, print_global=False),
random_state=random_state,
)
x_opt, y_opt, _, x_data, y_data = ego.optimize(fun=function_test_1d)
print("Minimum in x={:.1f} with f(x)={:.1f}".format(float(x_opt), float(y_opt)))
x_plot = np.atleast_2d(np.linspace(0, 25, 100)).T
y_plot = function_test_1d(x_plot)
fig = plt.figure(figsize=[10, 10])
for i in range(n_iter):
k = n_doe + i
x_data_k = x_data[0:k]
y_data_k = y_data[0:k]
ego.gpr.set_training_values(x_data_k, y_data_k)
ego.gpr.train()
y_gp_plot = ego.gpr.predict_values(x_plot)
y_gp_plot_var = ego.gpr.predict_variances(x_plot)
y_ei_plot = -ego.EI(x_plot)
ax = fig.add_subplot((n_iter + 1) // 2, 2, i + 1)
ax1 = ax.twinx()
(ei,) = ax1.plot(x_plot, y_ei_plot, color="red")
(true_fun,) = ax.plot(x_plot, y_plot)
(data,) = ax.plot(
x_data_k, y_data_k, linestyle="", marker="o", color="orange"
)
if i < n_iter - 1:
(opt,) = ax.plot(
x_data[k], y_data[k], linestyle="", marker="*", color="r"
)
(gp,) = ax.plot(x_plot, y_gp_plot, linestyle="--", color="g")
sig_plus = y_gp_plot + 3 * np.sqrt(y_gp_plot_var)
sig_moins = y_gp_plot - 3 * np.sqrt(y_gp_plot_var)
un_gp = ax.fill_between(
x_plot.T[0], sig_plus.T[0], sig_moins.T[0], alpha=0.3, color="g"
)
lines = [true_fun, data, gp, un_gp, opt, ei]
fig.suptitle("EGO optimization of $f(x) = x \sin{x}$")
fig.subplots_adjust(hspace=0.4, wspace=0.4, top=0.8)
ax.set_title("iteration {}".format(i + 1))
fig.legend(
lines,
[
"f(x)=xsin(x)",
"Given data points",
"Kriging prediction",
"Kriging 99% confidence interval",
"Next point to evaluate",
"Expected improvment function",
],
)
plt.show()
# Check the optimal point is x_opt=18.9, y_opt =-15.1
    @staticmethod
    def run_ego_mixed_integer_example():
        """Documentation example: parallel mixed-integer EGO with a Gower kernel."""
        import numpy as np
        from smt.applications import EGO
        from smt.applications.mixed_integer import MixedIntegerContext
        from smt.surrogate_models import MixIntKernelType
        from smt.utils.design_space import (
            DesignSpace,
            CategoricalVariable,
            FloatVariable,
            IntegerVariable,
        )
        import matplotlib.pyplot as plt
        from smt.surrogate_models import KRG
        from smt.sampling_methods import LHS

        # Regarding the interface, the function to be optimized should handle
        # categorical values as index values in the enumeration type specification.
        # For instance, here "blue" will be passed to the function as the index value 2.
        # This allows to keep the numpy ndarray X handling numerical values.
        def function_test_mixed_integer(X):
            # float
            x1 = X[:, 0]
            # enum 1
            c1 = X[:, 1]
            x2 = c1 == 0
            x3 = c1 == 1
            x4 = c1 == 2
            # enum 2
            c2 = X[:, 2]
            x5 = c2 == 0
            x6 = c2 == 1
            # int
            i = X[:, 3]
            y = (
                (x2 + 2 * x3 + 3 * x4) * x5 * x1
                + (x2 + 2 * x3 + 3 * x4) * x6 * 0.95 * x1
                + i
            )
            return y.reshape((-1, 1))

        n_iter = 15
        random_state = 42
        design_space = DesignSpace(
            [
                FloatVariable(-5, 5),
                CategoricalVariable(["blue", "red", "green"]),
                CategoricalVariable(["square", "circle"]),
                IntegerVariable(0, 2),
            ],
            seed=random_state,
        )
        criterion = "EI"  #'EI' or 'SBO' or 'LCB'
        qEI = "KBRand"
        sm = KRG(
            design_space=design_space,
            categorical_kernel=MixIntKernelType.GOWER,
            print_global=False,
        )
        mixint = MixedIntegerContext(design_space)
        n_doe = 3
        sampling = mixint.build_sampling_method(
            LHS, criterion="ese", random_state=random_state
        )
        xdoe = sampling(n_doe)
        ydoe = function_test_mixed_integer(xdoe)
        ego = EGO(
            n_iter=n_iter,
            criterion=criterion,
            xdoe=xdoe,
            ydoe=ydoe,
            surrogate=sm,
            qEI=qEI,
            n_parallel=2,
            random_state=random_state,
        )
        x_opt, y_opt, _, _, y_data = ego.optimize(fun=function_test_mixed_integer)
        print("Minimum in x={} with f(x)={:.1f}".format(x_opt, float(y_opt)))
        # print("Minimum in typed x={}".format(ego.mixint.cast_to_mixed_integer(x_opt)))
        # Convergence plot: log-distance of the running minimum to the reference.
        min_ref = -15
        mini = np.zeros(n_iter)
        for k in range(n_iter):
            mini[k] = np.log(np.abs(np.min(y_data[0 : k + n_doe - 1]) - min_ref))
        x_plot = np.linspace(1, n_iter + 0.5, n_iter)
        u = max(np.floor(max(mini)) + 1, -100)
        l = max(np.floor(min(mini)) - 0.2, -10)
        fig = plt.figure()
        axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
        axes.plot(x_plot, mini, color="r")
        axes.set_ylim([l, u])
        plt.title("minimum convergence plot", loc="center")
        plt.xlabel("number of iterations")
        plt.ylabel("log of the difference w.r.t the best")
        plt.show()
@staticmethod
def run_ego_parallel_example():
import numpy as np
from smt.applications import EGO
from smt.applications.ego import Evaluator
from smt.surrogate_models import KRG, DesignSpace
import matplotlib.pyplot as plt
def function_test_1d(x):
# function xsinx
import numpy as np
x = np.reshape(x, (-1,))
y = np.zeros(x.shape)
y = (x - 3.5) * np.sin((x - 3.5) / (np.pi))
return y.reshape((-1, 1))
n_iter = 3
n_parallel = 3
n_start = 50
xlimits = np.array([[0.0, 25.0]])
random_state = 42
design_space = DesignSpace(xlimits, seed=random_state)
xdoe = np.atleast_2d([0, 7, 25]).T
n_doe = xdoe.size
class ParallelEvaluator(Evaluator):
"""
Implement Evaluator interface using multiprocessing ThreadPool object (Python 3 only).
"""
def run(self, fun, x):
n_thread = 5
# Caveat: import are made here due to SMT documentation building process
import numpy as np
from sys import version_info
from multiprocessing.pool import ThreadPool
if version_info.major == 2:
return fun(x)
# Python 3 only
with ThreadPool(n_thread) as p:
return np.array(
[
y[0]
for y in p.map(
fun, [np.atleast_2d(x[i]) for i in range(len(x))]
)
]
)
criterion = "EI" #'EI' or 'SBO' or 'LCB'
qEI = "KBUB" # "KB", "KBLB", "KBUB", "KBRand"
ego = EGO(
n_iter=n_iter,
criterion=criterion,
xdoe=xdoe,
surrogate=KRG(design_space=design_space, print_global=False),
n_parallel=n_parallel,
qEI=qEI,
n_start=n_start,
evaluator=ParallelEvaluator(),
random_state=random_state,
)
x_opt, y_opt, _, x_data, y_data = ego.optimize(fun=function_test_1d)
print("Minimum in x={:.1f} with f(x)={:.1f}".format(float(x_opt), float(y_opt)))
x_plot = np.atleast_2d(np.linspace(0, 25, 100)).T
y_plot = function_test_1d(x_plot)
fig = plt.figure(figsize=[10, 10])
for i in range(n_iter):
k = n_doe + (i) * (n_parallel)
x_data_k = x_data[0:k]
y_data_k = y_data[0:k]
x_data_sub = x_data_k.copy()
y_data_sub = y_data_k.copy()
for p in range(n_parallel):
ego.gpr.set_training_values(x_data_sub, y_data_sub)
ego.gpr.train()
y_ei_plot = -ego.EI(x_plot)
y_gp_plot = ego.gpr.predict_values(x_plot)
y_gp_plot_var = ego.gpr.predict_variances(x_plot)
x_data_sub = np.append(x_data_sub, x_data[k + p])
y_KB = ego._get_virtual_point(np.atleast_2d(x_data[k + p]), y_data_sub)
y_data_sub = np.append(y_data_sub, y_KB)
ax = fig.add_subplot(n_iter, n_parallel, i * (n_parallel) + p + 1)
ax1 = ax.twinx()
(ei,) = ax1.plot(x_plot, y_ei_plot, color="red")
(true_fun,) = ax.plot(x_plot, y_plot)
(data,) = ax.plot(
x_data_sub[: -1 - p],
y_data_sub[: -1 - p],
linestyle="",
marker="o",
color="orange",
)
(virt_data,) = ax.plot(
x_data_sub[-p - 1 : -1],
y_data_sub[-p - 1 : -1],
linestyle="",
marker="o",
color="g",
)
(opt,) = ax.plot(
x_data_sub[-1], y_data_sub[-1], linestyle="", marker="*", color="r"
)
(gp,) = ax.plot(x_plot, y_gp_plot, linestyle="--", color="g")
sig_plus = y_gp_plot + 3.0 * np.sqrt(y_gp_plot_var)
sig_moins = y_gp_plot - 3.0 * np.sqrt(y_gp_plot_var)
un_gp = ax.fill_between(
x_plot.T[0], sig_plus.T[0], sig_moins.T[0], alpha=0.3, color="g"
)
lines = [true_fun, data, gp, un_gp, opt, ei, virt_data]
fig.suptitle("EGOp optimization of $f(x) = x \sin{x}$")
fig.subplots_adjust(hspace=0.4, wspace=0.4, top=0.8)
ax.set_title("iteration {}.{}".format(i, p))
fig.legend(
lines,
[
"f(x)=xsin(x)",
"Given data points",
"Kriging prediction",
"Kriging 99% confidence interval",
"Next point to evaluate",
"Expected improvment function",
"Virtula data points",
],
)
plt.show()
if __name__ == "__main__":
    # Consume optional CLI switches before unittest parses sys.argv itself.
    if "--plot" in argv:
        argv.remove("--plot")
        TestEGO.plot = True
    if "--example" in argv:
        # Run the standalone documentation example instead of the test suite.
        TestEGO.run_ego_mixed_integer_example()
        exit()
    unittest.main()
| 43,079 | 32.525292 | 98 | py |
smt | smt-master/smt/applications/tests/test_mfk_variance.py | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 27 15:36:13 2020
@author: Vincent Drouet and Nathalie Bartoli
in order to validate the variance formula for multifidelity on the branin 2D function
Comparisons are based on the paper Le Gratiet et Cannamela 2015 :
Le Gratiet, L., & Cannamela, C. (2015).
Cokriging-based sequential design strategies using fast cross-validation
techniques for multi-fidelity computer codes. Technometrics, 57(3), 418-427.
https://doi.org/10.1080/00401706.2014.928233
"""
import numpy as np
from smt.applications.mfk import MFK, NestedLHS
from smt.sampling_methods import LHS
import unittest
from smt.utils.sm_test_case import SMTestCase
print_output = True
# %%
# Define the low and high fidelity models
# Example on a 2D problem: Branin Function
A_corr = 0
# high fidelity model
def HF(point):
    """High-fidelity model: the Branin function plus a linear 5*x0 term.

    ``point`` is an (n, 2) array; returns an (n,) array of responses.
    """
    x0 = point[:, 0]
    x1 = point[:, 1]
    # Quadratic Branin core: x1 - 5.1/(4*pi^2)*x0^2 + 5/pi*x0 - 6
    quad = x1 - (5.1 / (4 * np.pi**2)) * x0**2 + (5 / np.pi) * x0 - 6
    return quad**2 + 10 * (1 - (1 / 8 / np.pi)) * np.cos(x0) + 10 + 5 * x0
# low fidelity model
def LF(point):
    """Low-fidelity model: HF minus a share of the quadratic Branin term.

    The share is controlled by the module-level constant ``A_corr``
    (factor 0.5*A_corr**2 + A_corr + 0.2).
    """
    x0 = point[:, 0]
    x1 = point[:, 1]
    corr_factor = 0.5 * A_corr**2 + A_corr + 0.2
    quad = x1 - 5.1 / 4 / np.pi**2 * x0**2 + 5.0 / np.pi * x0 - 6
    return HF(point) - corr_factor * quad**2
class TestMFK_variance(SMTestCase):
    """Validate MFK mean/variance predictions against the closed-form
    expressions of Le Gratiet & Cannamela (2015) on a 2-level problem."""

    @staticmethod
    def corr(x1, x2, sm, lvl):
        """Kernel correlation between points x1 and x2 at fidelity level ``lvl``."""
        # Normalization of data with X_scale and X_offset from sm
        # lvl start to 1
        X_scale = sm.X_scale
        X_offset = sm.X_offset
        X1 = (x1 - X_offset) / X_scale
        X2 = (x2 - X_offset) / X_scale
        thetas = sm.optimal_theta[lvl - 1]
        # Exponent p selects the kernel family: 1 = absolute-exponential,
        # 2 = squared-exponential (any non-"abs_exp" option maps to 2 here).
        if sm.options["corr"] == "abs_exp":
            p = 1
        else:
            p = 2
        return np.prod(np.exp(-thetas * np.abs(X1 - X2) ** p))

    @staticmethod
    def hyperparam_LG(sm):
        """Recompute (beta, sigma2, rho) per level with Le Gratiet's GLS formulas."""
        # computation of the hyperparameters based on the formula from LeGratiet 2015
        y_t_lf = sm.training_points[0][0][1]
        n_LF = y_t_lf.shape[0]
        C = sm.optimal_par[0]["C"]
        # R is rebuilt from its Cholesky factor C.
        R = C @ C.T
        H = np.ones((n_LF, 1))
        R_inv_H = np.linalg.solve(R, H)
        M = H.T @ R_inv_H
        M_inv = np.linalg.inv(M)
        R_inv_y = np.linalg.solve(R, y_t_lf)
        # Generalized least-squares estimate of the level-1 trend.
        beta_LG_1 = M_inv @ H.T @ R_inv_y
        sigma2_LG_1 = (
            (y_t_lf - H @ beta_LG_1).T
            @ np.linalg.solve(R, y_t_lf - H @ beta_LG_1)
            / (n_LF - 1)
        )[0, 0]
        y_t_hf = sm.training_points[None][0][1]
        n_HF = y_t_hf.shape[0]
        y_D_l = y_t_lf[-n_HF:, :].reshape(
            -1, 1
        )  # to get y^l-1(D^l) we need n_HF last points of y_t_lf
        # Level-2 regressors: [LF response at HF points, constant term].
        H = np.hstack((y_D_l, np.ones((n_HF, 1))))
        C = sm.optimal_par[1]["C"]
        R = C @ C.T
        R_inv_H = np.linalg.solve(R, H)
        M = H.T @ R_inv_H
        M_inv = np.linalg.inv(M)
        R_inv_y = np.linalg.solve(R, y_t_hf)
        rho_beta = M_inv @ H.T @ R_inv_y
        rho_LG = rho_beta[0, 0]
        beta_LG_2 = rho_beta[1, 0]
        sigma2_LG_2 = (
            (y_t_hf - H @ rho_beta).T
            @ np.linalg.solve(R, y_t_hf - H @ rho_beta)
            / (n_HF - 2)
        )[0, 0]
        sigma2_rho_LG = rho_LG**2 + sigma2_LG_2 * M_inv[0, 0]
        return beta_LG_1, sigma2_LG_1, beta_LG_2, sigma2_LG_2, rho_LG, sigma2_rho_LG

    @staticmethod
    def Mu_LG_LG(x, sm):
        """Posterior mean at x using Le Gratiet's formula with Le Gratiet's beta."""
        # computation of the mean based on the formula from LeGratiet 2015
        # and using beta from LeGratiet 2015
        (
            beta_LG_1,
            sigma2_LG_1,
            beta_LG_2,
            sigma2_LG_2,
            rho_LG,
            sigma2_rho_LG,
        ) = TestMFK_variance.hyperparam_LG(sm)
        C = sm.optimal_par[0]["C"]
        R = C @ C.T
        x_t_lf = sm.training_points[0][0][0]
        y_t_lf = sm.training_points[0][0][1]
        n_LF = x_t_lf.shape[0]
        # Correlation vector between x and the LF training points.
        r_x = np.empty((n_LF, 1))
        for i in range(n_LF):
            r_x[i, 0] = TestMFK_variance.corr(x, x_t_lf[i], sm, 1)
        M = np.linalg.solve(R, y_t_lf - beta_LG_1 * np.ones((n_LF, 1)))
        mu_0 = beta_LG_1 + r_x.T @ M
        x_t_hf = sm.training_points[None][0][0]
        y_t_hf = sm.training_points[None][0][1]
        n_HF = x_t_hf.shape[0]
        C = sm.optimal_par[1]["C"]
        R = C @ C.T
        r_x = np.empty((n_HF, 1))
        for i in range(n_HF):
            r_x[i, 0] = TestMFK_variance.corr(x, x_t_hf[i], sm, 2)
        M = np.linalg.solve(
            R,
            y_t_hf
            - beta_LG_2 * np.ones((n_HF, 1))
            - rho_LG * y_t_lf[-n_HF:, 0, np.newaxis],
        )
        return rho_LG * mu_0 + beta_LG_2 + r_x.T @ M

    @staticmethod
    def Mu_LG_sm(x, sm):
        """Posterior mean at x using Le Gratiet's formula with beta taken from sm."""
        # computation of the mean based on the formula from LeGratiet 2015
        # and using beta from sm
        beta = sm.optimal_par[0]["beta"]
        C = sm.optimal_par[0]["C"]
        R = C @ C.T
        x_t_lf = sm.training_points[0][0][0]
        y_t_lf = sm.training_points[0][0][1]
        n_LF = x_t_lf.shape[0]
        r_x = np.empty((n_LF, 1))
        for i in range(n_LF):
            r_x[i, 0] = TestMFK_variance.corr(x, x_t_lf[i], sm, 1)
        M = np.linalg.solve(R, y_t_lf - beta * np.ones((n_LF, 1)))
        mu_0 = beta + r_x.T @ M
        x_t_hf = sm.training_points[None][0][0]
        y_t_hf = sm.training_points[None][0][1]
        n_HF = x_t_hf.shape[0]
        # At level 2, beta packs [rho, trend coefficient].
        beta = sm.optimal_par[1]["beta"]
        rho = beta[0]
        C = sm.optimal_par[1]["C"]
        R = C @ C.T
        r_x = np.empty((n_HF, 1))
        for i in range(n_HF):
            r_x[i, 0] = TestMFK_variance.corr(x, x_t_hf[i], sm, 2)
        M = np.linalg.solve(
            R,
            y_t_hf - beta[1] * np.ones((n_HF, 1)) - rho * y_t_lf[-n_HF:, 0, np.newaxis],
        )
        return rho * mu_0 + beta[1] + r_x.T @ M

    @staticmethod
    def Cov_LG_sm(x1, x2, sm):
        """Per-level covariance k_0, k_1 via Le Gratiet's formula, sigma2 from sm."""
        # computation of the covariance based on the formula from LeGratiet 2015
        # Using the Sigma2 output from sm
        x_t_lf = sm.training_points[0][0][0]
        y_t_lf = sm.training_points[0][0][1]
        n_LF = x_t_lf.shape[0]
        sigma2 = sm.optimal_par[0]["sigma2"]
        C = sm.optimal_par[0]["C"]
        R = C @ C.T
        r_x1 = np.empty((n_LF, 1))
        r_x2 = np.empty((n_LF, 1))
        for i in range(n_LF):
            r_x1[i, 0] = TestMFK_variance.corr(x1, x_t_lf[i], sm, 1)
            r_x2[i, 0] = TestMFK_variance.corr(x2, x_t_lf[i], sm, 1)
        h_x1 = np.ones((1, 1))
        h_x2 = np.ones((1, 1))
        H = np.ones((n_LF, 1))
        hr_1 = np.vstack((h_x1, r_x1))
        hr_2 = np.vstack((h_x2, r_x2))
        # Bordered system [[0, H^T], [H, R]] of universal-kriging covariance.
        M1 = np.hstack((np.zeros((1, 1)), H.T))
        M2 = np.hstack((H, R))
        M3 = np.vstack((M1, M2))
        M4 = np.linalg.solve(M3, hr_2)  # M4 = M3^-1 @ hr_2
        k_0 = sigma2 * (
            TestMFK_variance.corr(x1, x2, sm, 1) - hr_1.T @ M4
        )  # covariance of level 0
        x_t_hf = sm.training_points[None][0][0]
        n_HF = x_t_hf.shape[0]
        sigma2 = sm.optimal_par[1]["sigma2"]
        (var_all_pred, sigma2_rho) = sm.predict_variances_all_levels(x1)
        sigma2_rho = sigma2_rho[0]
        C = sm.optimal_par[1]["C"]
        R = C @ C.T
        r_x1 = np.empty((n_HF, 1))
        r_x2 = np.empty((n_HF, 1))
        for i in range(n_HF):
            r_x1[i, 0] = TestMFK_variance.corr(x1, x_t_hf[i], sm, 2)
            r_x2[i, 0] = TestMFK_variance.corr(x2, x_t_hf[i], sm, 2)
        # Level-1 regressor at x is [predicted LF mean at x, 1].
        mu_x1 = sm._predict_intermediate_values(x1, 1).reshape(-1, 1)
        mu_x2 = sm._predict_intermediate_values(x2, 1).reshape(-1, 1)
        h_x1 = np.vstack((mu_x1, np.ones((1, 1))))
        h_x2 = np.vstack((mu_x2, np.ones((1, 1))))
        y_D_l = y_t_lf[-n_HF:, :].reshape(
            -1, 1
        )  # to get y^l-1(D^l) we need n_HF last points of y_t_lf
        H = np.hstack((y_D_l, np.ones((n_HF, 1))))
        hr_1 = np.vstack((h_x1, r_x1))
        hr_2 = np.vstack((h_x2, r_x2))
        M1 = np.hstack((np.zeros((2, 2)), H.T))
        M2 = np.hstack((H, R))
        M3 = np.vstack((M1, M2))
        M4 = np.linalg.solve(M3, hr_2)  # M4 = M3^-1 @ hr_2
        k_1 = sigma2_rho * k_0 + sigma2 * (
            TestMFK_variance.corr(x1, x2, sm, 2) - hr_1.T @ M4
        )
        return k_0, k_1

    @staticmethod
    def Cov_LG_LG(x1, x2, sm):
        """Per-level covariance k_0, k_1 with sigma2 recomputed per Le Gratiet."""
        # computation of the covariance based on the formula from LeGratiet 2015
        # Using the Sigma2 output from Le Gratiet paper
        x_t_lf = sm.training_points[0][0][0]
        y_t_lf = sm.training_points[0][0][1]
        n_LF = x_t_lf.shape[0]
        (
            beta_LG_1,
            sigma2_LG_1,
            beta_LG_2,
            sigma2_LG_2,
            rho_LG,
            sigma2_rho_LG,
        ) = TestMFK_variance.hyperparam_LG(sm)
        sigma2 = sigma2_LG_1
        C = sm.optimal_par[0]["C"]
        R = C @ C.T
        r_x1 = np.empty((n_LF, 1))
        r_x2 = np.empty((n_LF, 1))
        for i in range(n_LF):
            r_x1[i, 0] = TestMFK_variance.corr(x1, x_t_lf[i], sm, 1)
            r_x2[i, 0] = TestMFK_variance.corr(x2, x_t_lf[i], sm, 1)
        h_x1 = np.ones((1, 1))
        h_x2 = np.ones((1, 1))
        H = np.ones((n_LF, 1))
        hr_1 = np.vstack((h_x1, r_x1))
        hr_2 = np.vstack((h_x2, r_x2))
        M1 = np.hstack((np.zeros((1, 1)), H.T))
        M2 = np.hstack((H, R))
        M3 = np.vstack((M1, M2))
        M4 = np.linalg.solve(M3, hr_2)  # M4 = M3^-1 @ hr_2
        k_0 = sigma2 * (
            TestMFK_variance.corr(x1, x2, sm, 1) - hr_1.T @ M4
        )  # covariance of level 0
        x_t_hf = sm.training_points[None][0][0]
        n_HF = x_t_hf.shape[0]
        sigma2 = sigma2_LG_2
        C = sm.optimal_par[1]["C"]
        R = C @ C.T
        r_x1 = np.empty((n_HF, 1))
        r_x2 = np.empty((n_HF, 1))
        for i in range(n_HF):
            r_x1[i, 0] = TestMFK_variance.corr(x1, x_t_hf[i], sm, 2)
            r_x2[i, 0] = TestMFK_variance.corr(x2, x_t_hf[i], sm, 2)
        mu_x1 = sm._predict_intermediate_values(x1, 1).reshape(-1, 1)
        mu_x2 = sm._predict_intermediate_values(x2, 1).reshape(-1, 1)
        h_x1 = np.vstack((mu_x1, np.ones((1, 1))))
        h_x2 = np.vstack((mu_x2, np.ones((1, 1))))
        y_D_l = y_t_lf[-n_HF:, :].reshape(
            -1, 1
        )  # to get y^l-1(D^l) we need n_HF last points of y_t_lf
        H = np.hstack((y_D_l, np.ones((n_HF, 1))))
        hr_1 = np.vstack((h_x1, r_x1))
        hr_2 = np.vstack((h_x2, r_x2))
        M1 = np.hstack((np.zeros((2, 2)), H.T))
        M2 = np.hstack((H, R))
        M3 = np.vstack((M1, M2))
        M4 = np.linalg.solve(M3, hr_2)  # M4 = M3^-1 @ hr_2
        k_1 = sigma2_rho_LG * k_0 + sigma2 * (
            TestMFK_variance.corr(x1, x2, sm, 2) - hr_1.T @ M4
        )
        return k_0, k_1

    @staticmethod
    def verif_hyperparam(sm, x_test_LHS):
        """Collect sm's hyperparameters side by side with Le Gratiet's estimates."""
        # get the hyperparameters from sm
        beta_sm_1 = sm.optimal_par[0]["beta"][0, 0]
        sigma2_sm_1 = sm.optimal_par[0]["sigma2"][0]
        rho_beta_sm = sm.optimal_par[1]["beta"]
        beta_sm_2 = rho_beta_sm[1, 0]
        sigma2_sm_2 = sm.optimal_par[1]["sigma2"][0]
        rho_sm = rho_beta_sm[0, 0]
        (var_all_pred, sigma2_rho) = sm.predict_variances_all_levels(x_test_LHS)
        sigma2_rho_sm = sigma2_rho[0]
        (
            beta_LG_1,
            sigma2_LG_1,
            beta_LG_2,
            sigma2_LG_2,
            rho_LG,
            sigma2_rho_LG,
        ) = TestMFK_variance.hyperparam_LG(sm)
        return (
            beta_sm_1,
            sigma2_sm_1,
            beta_sm_2,
            sigma2_sm_2,
            rho_sm,
            sigma2_rho_sm,
            beta_LG_1,
            sigma2_LG_1,
            beta_LG_2,
            sigma2_LG_2,
            rho_LG,
            sigma2_rho_LG,
        )

    def test_mfk_variance(self):
        """Train a 2-level MFK and cross-check mean/variance against the paper."""
        # To create the doe
        # dim = 2
        nlevel = 2
        ub0 = 10.0
        ub1 = 15.0
        lb0 = -5.0
        lb1 = 0.0
        xlimits = np.array([[lb0, ub0], [lb1, ub1]])
        # Constants
        n_HF = 5  # number of high fidelity points (number of low fi is twice)
        xdoes = NestedLHS(nlevel=nlevel, xlimits=xlimits)
        x_t_lf, x_t_hf = xdoes(n_HF)
        # Evaluate the HF and LF functions
        y_t_lf = LF(x_t_lf)
        y_t_hf = HF(x_t_hf)
        sm = MFK(
            theta0=x_t_hf.shape[1] * [1e-2],
            print_global=False,
            rho_regr="constant",
        )
        # low-fidelity dataset names being integers from 0 to level-1
        sm.set_training_values(x_t_lf, y_t_lf, name=0)
        # high-fidelity dataset without name
        sm.set_training_values(x_t_hf, y_t_hf)
        # train the model
        sm.train()
        # Validation set
        # for validation with LHS
        ntest = 1
        sampling = LHS(xlimits=xlimits)
        x_test_LHS = sampling(ntest)
        # y_test_LHS = HF(x_test_LHS)
        # compare the mean value between different formula
        if print_output:
            print("Mu sm : {}".format(sm.predict_values(x_test_LHS)[0, 0]))
            print(
                "Mu LG_sm : {}".format(TestMFK_variance.Mu_LG_sm(x_test_LHS, sm)[0, 0])
            )
            print(
                "Mu LG_LG : {}".format(TestMFK_variance.Mu_LG_LG(x_test_LHS, sm)[0, 0])
            )
        # self.assertAlmostEqual(
        #     TestMFK_variance.Mu_LG_sm(x_test_LHS, sm)[0, 0],
        #     TestMFK_variance.Mu_LG_LG(x_test_LHS, sm)[0, 0],
        #     delta=1,
        # )
        self.assertAlmostEqual(
            sm.predict_values(x_test_LHS)[0, 0],
            TestMFK_variance.Mu_LG_LG(x_test_LHS, sm)[0, 0],
            delta=1,
        )
        # compare the variance value between different formula
        (k_0_LG_sm, k_1_LG_sm) = TestMFK_variance.Cov_LG_sm(x_test_LHS, x_test_LHS, sm)
        (k_0_LG_LG, k_1_LG_LG) = TestMFK_variance.Cov_LG_LG(x_test_LHS, x_test_LHS, sm)
        k_0_sm = sm.predict_variances_all_levels(x_test_LHS)[0][0, 0]
        k_1_sm = sm.predict_variances_all_levels(x_test_LHS)[0][0, 1]
        if print_output:
            print("Level 0")
            print("Var sm : {}".format(k_0_sm))
            print("Var LG_sm : {}".format(k_0_LG_sm[0, 0]))
            print("Var LG_LG : {}".format(k_0_LG_LG[0, 0]))
            print("Level 1")
            print("Var sm : {}".format(k_1_sm))
            print("Var LG_sm : {}".format(k_1_LG_sm[0, 0]))
            print("Var LG_LG : {}".format(k_1_LG_LG[0, 0]))
        # for level 0
        self.assertAlmostEqual(k_0_sm, k_0_LG_sm[0, 0], delta=1)
        self.assertAlmostEqual(k_0_LG_sm[0, 0], k_0_LG_LG[0, 0], delta=1)
        # for level 1
        self.assertAlmostEqual(k_1_sm, k_1_LG_sm[0, 0], delta=1)
        self.assertAlmostEqual(k_1_LG_sm[0, 0], k_1_LG_LG[0, 0], delta=1)
        (
            beta_sm_1,
            sigma2_sm_1,
            beta_sm_2,
            sigma2_sm_2,
            rho_sm,
            sigma2_rho_sm,
            beta_LG_1,
            sigma2_LG_1,
            beta_LG_2,
            sigma2_LG_2,
            rho_LG,
            sigma2_rho_LG,
        ) = TestMFK_variance.verif_hyperparam(sm, x_test_LHS)
        if print_output:
            print("Hyperparameters")
            print("rho_sm : {}".format(rho_sm))
            print("rho_LG : {}".format(rho_LG))
            print("sigma2_rho_sm : {}".format(sigma2_rho_sm[0]))
            print("sigma2_rho_LG : {}".format(sigma2_rho_LG))
            print("beta_sm_1 : {}".format(beta_sm_1))
            print("beta_LG_1 : {}".format(beta_LG_1[0, 0]))
            print("beta_sm_2 : {}".format(beta_sm_2))
            print("beta_LG_2 : {}".format(beta_LG_2))
            print("sigma2_sm_1 : {}".format(sigma2_sm_1))
            print("sigma2_LG_1 : {}".format(sigma2_LG_1))
            print("sigma2_sm_2 : {}".format(sigma2_sm_2))
            print("sigma2_LG_2 : {}".format(sigma2_LG_2))
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 16,153 | 33.080169 | 88 | py |
smt | smt-master/smt/applications/tests/test_vfm.py | """
Author: Mohamed Amine Bouhlel <mbouhlel@umich.edu>
This package is distributed under New BSD license.
"""
import unittest
import matplotlib
matplotlib.use("Agg")
import numpy as np
from scipy import linalg
from smt.utils.sm_test_case import SMTestCase
from smt.utils import compute_rms_error
from smt.utils.silence import Silence
from smt.problems import WaterFlowLFidelity, WaterFlow
from smt.sampling_methods import LHS
from smt.applications import VFM
from smt.utils.misc import compute_rms_error
from smt.examples.rans_crm_wing.rans_crm_wing import (
get_rans_crm_wing,
plot_rans_crm_wing,
)
def setupCRM(LF_candidate="QP", Bridge_candidate="KRG", type_bridge="Additive"):
    """Build a VFM model on the RANS CRM wing dataset.

    The same dataset serves as both the low- and high-fidelity training set;
    returns the trained VFM instance and the training inputs.
    """
    xt, yt, _ = get_rans_crm_wing()
    vfm_model = VFM(
        type_bridge=type_bridge,
        name_model_LF=LF_candidate,
        name_model_bridge=Bridge_candidate,
        X_LF=xt,
        y_LF=yt,
        X_HF=xt,
        y_HF=yt,
        options_LF={},
        options_bridge={},
    )
    return vfm_model, xt
class TestVFM(SMTestCase):
    """Tests for the Variable-Fidelity Modeling (VFM) application."""

    def test_vfm(self):
        """Multiplicative QP+KRG bridge on the WaterFlow problem (8D),
        with gradient-enhanced data; validation RMS error below 3e-1."""
        # Problem set up
        ndim = 8
        ntest = 500
        ndoeLF = int(10 * ndim)
        ndoeHF = int(3)
        funLF = WaterFlowLFidelity(ndim=ndim)
        funHF = WaterFlow(ndim=ndim)
        deriv1 = True
        deriv2 = True
        LF_candidate = "QP"
        Bridge_candidate = "KRG"
        type_bridge = "Multiplicative"
        optionsLF = {}
        optionsB = {"theta0": [1e-2] * ndim, "print_prediction": False, "deriv": False}
        # Construct low/high fidelity data and validation points
        sampling = LHS(xlimits=funLF.xlimits, criterion="m", random_state=42)
        xLF = sampling(ndoeLF)
        yLF = funLF(xLF)
        if deriv1:
            # Gradient matrix starts with a zero column, then one column per input.
            dy_LF = np.zeros((ndoeLF, 1))
            for i in range(ndim):
                yd = funLF(xLF, kx=i)
                dy_LF = np.concatenate((dy_LF, yd), axis=1)
        sampling = LHS(xlimits=funHF.xlimits, criterion="m", random_state=43)
        xHF = sampling(ndoeHF)
        yHF = funHF(xHF)
        if deriv2:
            dy_HF = np.zeros((ndoeHF, 1))
            for i in range(ndim):
                yd = funHF(xHF, kx=i)
                dy_HF = np.concatenate((dy_HF, yd), axis=1)
        xtest = sampling(ntest)
        ytest = funHF(xtest)
        dytest = np.zeros((ntest, ndim))
        for i in range(ndim):
            dytest[:, i] = funHF(xtest, kx=i).T
        # Initialize VFM
        vfm = VFM(
            type_bridge=type_bridge,
            name_model_LF=LF_candidate,
            name_model_bridge=Bridge_candidate,
            X_LF=xLF,
            y_LF=yLF,
            X_HF=xHF,
            y_HF=yHF,
            options_LF=optionsLF,
            options_bridge=optionsB,
            dy_LF=dy_LF,
            dy_HF=dy_HF,
        )
        # Prediction of the validation points
        rms_error = compute_rms_error(vfm, xtest, ytest)
        self.assert_error(rms_error, 0.0, 3e-1)

    @staticmethod
    def run_vfm_example(self):
        """Documentation example: VFM on the WaterFlow problem with a plot.

        NOTE(review): declared @staticmethod yet takes a ``self`` parameter,
        so callers must pass a dummy first argument.  Signature kept unchanged
        here; confirm intent before removing the parameter.
        """
        import matplotlib.pyplot as plt
        import numpy as np
        from scipy import linalg
        from smt.utils import compute_rms_error
        from smt.problems import WaterFlowLFidelity, WaterFlow
        from smt.sampling_methods import LHS
        from smt.applications import VFM
        # Problem set up
        ndim = 8
        ntest = 500
        ndoeLF = int(10 * ndim)
        ndoeHF = int(3)
        funLF = WaterFlowLFidelity(ndim=ndim)
        funHF = WaterFlow(ndim=ndim)
        deriv1 = True
        deriv2 = True
        LF_candidate = "QP"
        Bridge_candidate = "KRG"
        type_bridge = "Multiplicative"
        optionsLF = {}
        optionsB = {"theta0": [1e-2] * ndim, "print_prediction": False, "deriv": False}
        # Construct low/high fidelity data and validation points
        sampling = LHS(xlimits=funLF.xlimits, criterion="m")
        xLF = sampling(ndoeLF)
        yLF = funLF(xLF)
        if deriv1:
            dy_LF = np.zeros((ndoeLF, 1))
            for i in range(ndim):
                yd = funLF(xLF, kx=i)
                dy_LF = np.concatenate((dy_LF, yd), axis=1)
        sampling = LHS(xlimits=funHF.xlimits, criterion="m")
        xHF = sampling(ndoeHF)
        yHF = funHF(xHF)
        if deriv2:
            dy_HF = np.zeros((ndoeHF, 1))
            for i in range(ndim):
                yd = funHF(xHF, kx=i)
                dy_HF = np.concatenate((dy_HF, yd), axis=1)
        xtest = sampling(ntest)
        ytest = funHF(xtest)
        dytest = np.zeros((ntest, ndim))
        for i in range(ndim):
            dytest[:, i] = funHF(xtest, kx=i).T
        # Initialize the extension VFM
        M = VFM(
            type_bridge=type_bridge,
            name_model_LF=LF_candidate,
            name_model_bridge=Bridge_candidate,
            X_LF=xLF,
            y_LF=yLF,
            X_HF=xHF,
            y_HF=yHF,
            options_LF=optionsLF,
            options_bridge=optionsB,
            dy_LF=dy_LF,
            dy_HF=dy_HF,
        )
        # Prediction of the validation points
        y = M.predict_values(x=xtest)
        plt.figure()
        plt.plot(ytest, ytest, "-.")
        plt.plot(ytest, y, ".")
        plt.xlabel(r"$y$ True")
        plt.ylabel(r"$y$ prediction")
        plt.show()

    def test_KRG_KRG_additive(self):
        """Additive KRG+KRG bridge on CRM: values and derivatives match references."""
        with Silence():
            M, xt = setupCRM(
                LF_candidate="KRG", Bridge_candidate="KRG", type_bridge="Additive"
            )
        with Silence():
            yp = M.predict_values(np.atleast_2d(xt[0]))
            dyp = M.predict_derivatives(np.atleast_2d(xt[0]), kx=0)
        self.assert_error(yp, np.array([[0.015368, 0.367424]]), atol=2e-2, rtol=3e-2)
        self.assert_error(dyp, np.array([[0.07007729, 3.619421]]), atol=3e-1, rtol=1e-2)

    def test_QP_KRG_additive(self):
        """Additive QP+KRG bridge on CRM: values and derivatives match references."""
        with Silence():
            M, xt = setupCRM(
                LF_candidate="QP", Bridge_candidate="KRG", type_bridge="Additive"
            )
        with Silence():
            yp = M.predict_values(np.atleast_2d(xt[0]))
            dyp = M.predict_derivatives(np.atleast_2d(xt[0]), kx=0)
        self.assert_error(yp, np.array([[0.015368, 0.367424]]), atol=1e-2, rtol=1e-2)
        self.assert_error(
            dyp, np.array([[1.16130832e-03, 4.36712162e00]]), atol=3e-1, rtol=1e-2
        )

    def test_KRG_KRG_mult(self):
        """Multiplicative KRG+KRG bridge on CRM: values/derivatives match references."""
        with Silence():
            M, xt = setupCRM(
                LF_candidate="KRG", Bridge_candidate="KRG", type_bridge="Multiplicative"
            )
        with Silence():
            yp = M.predict_values(np.atleast_2d(xt[0]))
            dyp = M.predict_derivatives(np.atleast_2d(xt[0]), kx=0)
        self.assert_error(yp, np.array([[0.015368, 0.367424]]), atol=2e-2, rtol=3e-2)
        self.assert_error(dyp, np.array([[0.07007729, 3.619421]]), atol=3e-1, rtol=1e-2)

    def test_QP_KRG_mult(self):
        """Multiplicative QP+KRG bridge on CRM: values/derivatives match references."""
        with Silence():
            M, xt = setupCRM(
                LF_candidate="QP", Bridge_candidate="KRG", type_bridge="Multiplicative"
            )
        with Silence():
            yp = M.predict_values(np.atleast_2d(xt[0]))
            dyp = M.predict_derivatives(np.atleast_2d(xt[0]), kx=0)
        self.assert_error(
            yp, np.array([[0.01537882, 0.36681699]]), atol=3e-1, rtol=1e-2
        )
        self.assert_error(
            dyp, np.array([[0.21520949, 4.50217261]]), atol=3e-1, rtol=1e-2
        )
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 7,530 | 29.366935 | 88 | py |
smt | smt-master/smt/applications/tests/test_mfk_1fidelity.py | import matplotlib
matplotlib.use("Agg")
import unittest
import numpy as np
import unittest
from smt.problems import TensorProduct
from smt.sampling_methods import LHS
from smt.utils.sm_test_case import SMTestCase
from smt.utils.silence import Silence
from smt.utils import compute_rms_error
from smt.applications.mfk import MFK
print_output = False
class TestMFKOneFidelity(SMTestCase):
    """MFK restricted to a single fidelity level (degenerates to plain Kriging)."""

    def setUp(self):
        # Training size, validation size and input dimension shared by the test.
        self.nt = 20
        self.ne = 50
        self.ndim = 1

    def test_mfk_1fidelity(self):
        """Single-level MFK must fit tensor-product problems to ~1e-6 RMS error."""
        self.problems = ["exp", "tanh", "cos"]
        for fname in self.problems:
            prob = TensorProduct(ndim=self.ndim, func=fname)
            sampling = LHS(xlimits=prob.xlimits, random_state=0)
            np.random.seed(0)
            xt = sampling(self.nt)
            yt = prob(xt)
            # Append analytic derivatives as extra output columns
            # (only column 0 is used for training below).
            for i in range(self.ndim):
                yt = np.concatenate((yt, prob(xt, kx=i)), axis=1)
            sampling = LHS(xlimits=prob.xlimits, random_state=1)
            xv = sampling(self.ne)
            yv = prob(xv)
            sm = MFK(
                theta0=[1e-2] * self.ndim,
                print_global=False,
            )
            sm.set_training_values(xt, yt[:, 0])
            with Silence():
                sm.train()
            t_error = compute_rms_error(sm)
            e_error = compute_rms_error(sm, xv, yv)
            self.assert_error(t_error, 0.0, 1e-6)
            self.assert_error(e_error, 0.0, 1e-6)

    @staticmethod
    def run_mfk_example_1fidelity():
        """Documentation example: MFK used with a single (high) fidelity level."""
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.applications.mfk import MFK, NestedLHS
        # Consider only 1 fidelity level
        # high fidelity model
        def hf_function(x):
            import numpy as np
            return ((x * 6 - 2) ** 2) * np.sin((x * 6 - 2) * 2)
        # Problem set up
        xlimits = np.array([[0.0, 1.0]])
        xdoes = NestedLHS(nlevel=1, xlimits=xlimits, random_state=0)
        xt_e = xdoes(7)[0]
        # Evaluate the HF function
        yt_e = hf_function(xt_e)
        sm = MFK(theta0=xt_e.shape[1] * [1.0])
        # High-fidelity dataset without name
        sm.set_training_values(xt_e, yt_e)
        # Train the model
        sm.train()
        x = np.linspace(0, 1, 101, endpoint=True).reshape(-1, 1)
        # Query the outputs
        y = sm.predict_values(x)
        mse = sm.predict_variances(x)
        derivs = sm.predict_derivatives(x, kx=0)
        plt.figure()
        plt.plot(x, hf_function(x), label="reference")
        plt.plot(x, y, linestyle="-.", label="mean_gp")
        plt.scatter(xt_e, yt_e, marker="o", color="k", label="HF doe")
        plt.legend(loc=0)
        plt.ylim(-10, 17)
        plt.xlim(-0.1, 1.1)
        plt.xlabel(r"$x$")
        plt.ylabel(r"$y$")
        plt.show()
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 2,882 | 24.741071 | 70 | py |
smt | smt-master/smt/applications/tests/test_mfk.py | # -*- coding: utf-8 -*-
"""
Created on Mon May 07 14:20:11 2018
@author: m.meliani
"""
import matplotlib
matplotlib.use("Agg")
import unittest
import numpy as np
import unittest
import inspect
from collections import OrderedDict
from smt.problems import Sphere, TensorProduct
from smt.sampling_methods import LHS, FullFactorial
from smt.utils.sm_test_case import SMTestCase
from smt.utils.silence import Silence
from smt.utils import compute_rms_error
from smt.surrogate_models import LS, QP, KPLS, KRG, KPLSK, GEKPLS, GENN
from smt.applications.mfk import MFK, NestedLHS
from copy import deepcopy
print_output = False
class TestMFK(SMTestCase):
    """Tests for the multi-fidelity Kriging (MFK) application and NestedLHS."""

    def setUp(self):
        # Training size, validation size and input dimension shared by the tests.
        self.nt = 100
        self.ne = 100
        self.ndim = 3

    def test_nested_lhs(self):
        """Every point of a finer nested-LHS level must belong to the coarser level."""
        xlimits = np.array([[0.0, 1.0], [0.0, 1.0]])
        xnorm = NestedLHS(nlevel=3, xlimits=xlimits, random_state=0)
        xlow, xmedium, xhigh = xnorm(15)
        # Bug fix: the original compared `items1.all() == items0.all()`, which
        # tests the truthiness of each array (True for almost any pair of
        # points), making the check vacuous.  Compare the points themselves.
        for items1 in xmedium:
            found = any(np.allclose(items1, items0) for items0 in xlow)
            self.assertTrue(found)
        for items1 in xhigh:
            found = any(np.allclose(items1, items0) for items0 in xmedium)
            self.assertTrue(found)

    def test_mfk(self):
        """Two-level MFK on tensor-product problems: train/validation RMS error < 1."""
        self.problems = ["exp", "tanh", "cos"]
        for fname in self.problems:
            prob = TensorProduct(ndim=self.ndim, func=fname)
            sampling = FullFactorial(xlimits=prob.xlimits, clip=True)
            np.random.seed(0)
            xt = sampling(self.nt)
            yt = prob(xt)
            # Append analytic derivatives as extra output columns
            # (only column 0 is used for training below).
            for i in range(self.ndim):
                yt = np.concatenate((yt, prob(xt, kx=i)), axis=1)
            # Low-fidelity model: affine transform of the true response
            # evaluated on the same DOE.
            y_lf = 2 * prob(xt) + 2
            x_lf = deepcopy(xt)
            np.random.seed(1)
            xe = sampling(self.ne)
            ye = prob(xe)
            sm = MFK(theta0=[1e-2] * self.ndim)
            if sm.options.is_declared("xlimits"):
                sm.options["xlimits"] = prob.xlimits
            sm.options["print_global"] = False
            sm.set_training_values(xt, yt[:, 0])
            sm.set_training_values(x_lf, y_lf[:, 0], name=0)
            with Silence():
                sm.train()
            t_error = compute_rms_error(sm)
            e_error = compute_rms_error(sm, xe, ye)
            self.assert_error(t_error, 0.0, 1)
            self.assert_error(e_error, 0.0, 1)

    def test_mfk_derivs(self):
        """MFK derivative predictions on Sphere stay within 1e-1 RMS error."""
        prob = Sphere(ndim=self.ndim)
        sampling = LHS(xlimits=prob.xlimits)
        nt = 500
        np.random.seed(0)
        xt = sampling(nt)
        yt = prob(xt)
        dyt = {}
        for kx in range(prob.xlimits.shape[0]):
            dyt[kx] = prob(xt, kx=kx)
        # Low-fidelity model: affine transform of the true response.
        y_lf = 2 * prob(xt) + 2
        x_lf = deepcopy(xt)
        np.random.seed(1)
        xe = sampling(self.ne)
        ye = prob(xe)
        dye = {}
        for kx in range(prob.xlimits.shape[0]):
            dye[kx] = prob(xe, kx=kx)
        sm = MFK(theta0=[1e-2] * self.ndim)
        if sm.options.is_declared("xlimits"):
            sm.options["xlimits"] = prob.xlimits
        sm.options["print_global"] = False
        sm.set_training_values(xt, yt)
        sm.set_training_values(x_lf, y_lf, name=0)
        with Silence():
            sm.train()
        t_error = compute_rms_error(sm)
        e_error = compute_rms_error(sm, xe, ye)
        e_error0 = compute_rms_error(sm, xe, dye[0], 0)
        e_error1 = compute_rms_error(sm, xe, dye[1], 1)
        if print_output:
            # Bug fix: the original formatted the undefined names `pname` and
            # `sname`, raising NameError whenever print_output was enabled.
            print(
                "%18.9e %18.9e %18.9e %18.9e"
                % (t_error, e_error, e_error0, e_error1)
            )
        self.assert_error(e_error0, 0.0, 1e-1)
        self.assert_error(e_error1, 0.0, 1e-1)

    @staticmethod
    def run_mfk_example():
        """Documentation example: two-level MFK on a 1D analytic model pair."""
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.applications.mfk import MFK, NestedLHS

        # low fidelity model
        def lf_function(x):
            import numpy as np

            return (
                0.5 * ((x * 6 - 2) ** 2) * np.sin((x * 6 - 2) * 2)
                + (x - 0.5) * 10.0
                - 5
            )

        # high fidelity model
        def hf_function(x):
            import numpy as np

            return ((x * 6 - 2) ** 2) * np.sin((x * 6 - 2) * 2)

        # Problem set up
        xlimits = np.array([[0.0, 1.0]])
        xdoes = NestedLHS(nlevel=2, xlimits=xlimits, random_state=0)
        xt_c, xt_e = xdoes(7)
        # Evaluate the HF and LF functions
        yt_e = hf_function(xt_e)
        yt_c = lf_function(xt_c)
        sm = MFK(theta0=xt_e.shape[1] * [1.0])
        # low-fidelity dataset names being integers from 0 to level-1
        sm.set_training_values(xt_c, yt_c, name=0)
        # high-fidelity dataset without name
        sm.set_training_values(xt_e, yt_e)
        # train the model
        sm.train()
        x = np.linspace(0, 1, 101, endpoint=True).reshape(-1, 1)
        # query the outputs
        y = sm.predict_values(x)
        mse = sm.predict_variances(x)
        derivs = sm.predict_derivatives(x, kx=0)
        plt.figure()
        plt.plot(x, hf_function(x), label="reference")
        plt.plot(x, y, linestyle="-.", label="mean_gp")
        plt.scatter(xt_e, yt_e, marker="o", color="k", label="HF doe")
        plt.scatter(xt_c, yt_c, marker="*", color="g", label="LF doe")
        plt.legend(loc=0)
        plt.ylim(-10, 17)
        plt.xlim(-0.1, 1.1)
        plt.xlabel(r"$x$")
        plt.ylabel(r"$y$")
        plt.show()
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 5,714 | 26.742718 | 74 | py |
smt | smt-master/smt/applications/tests/__init__.py | 0 | 0 | 0 | py | |
smt | smt-master/smt/applications/tests/test_mixed_integer.py | """
Created on Tue Oct 12 10:48:01 2021
@author: psaves
"""
import unittest
import numpy as np
import matplotlib
import itertools
matplotlib.use("Agg")
from smt.applications.mixed_integer import (
MixedIntegerContext,
MixedIntegerSamplingMethod,
MixedIntegerKrigingModel,
)
from smt.problems import Sphere, HierarchicalGoldstein, HierarchicalNeuralNetwork
from smt.utils.design_space import (
DesignSpace,
FloatVariable,
IntegerVariable,
OrdinalVariable,
CategoricalVariable,
)
from smt.sampling_methods import LHS
from smt.surrogate_models import (
KRG,
KPLS,
QP,
MixIntKernelType,
MixHrcKernelType,
)
class TestMixedInteger(unittest.TestCase):
    def test_krg_mixed_3D(self):
        """KRG in a mixed context: the sampled categorical column stays in
        {0, 1, 2} and the integer column stays (numerically) integral."""
        design_space = DesignSpace(
            [
                FloatVariable(-10, 10),
                CategoricalVariable(["blue", "red", "green"]),
                IntegerVariable(-10, 10),
            ]
        )
        mixint = MixedIntegerContext(design_space)
        sm = mixint.build_kriging_model(KRG(print_prediction=False))
        sampling = mixint.build_sampling_method(LHS, criterion="m")
        fun = Sphere(ndim=3)
        xt = sampling(20)
        yt = fun(xt)
        sm.set_training_values(xt, yt)
        sm.train()
        eq_check = True
        for i in range(xt.shape[0]):
            # NOTE(review): the tolerance is written 10e-8 (= 1e-7);
            # confirm 1e-8 was not the intended value.
            if abs(float(xt[i, :][2]) - int(float(xt[i, :][2]))) > 10e-8:
                eq_check = False
            if not (xt[i, :][1] == 0 or xt[i, :][1] == 1 or xt[i, :][1] == 2):
                eq_check = False
        self.assertTrue(eq_check)
def test_krg_mixed_3D_bad_regr(self):
design_space = DesignSpace(
[
FloatVariable(-10, 10),
CategoricalVariable(["blue", "red", "green"]),
IntegerVariable(-10, 10),
]
)
mixint = MixedIntegerContext(design_space)
with self.assertRaises(ValueError):
sm = mixint.build_kriging_model(KRG(print_prediction=False, poly="linear"))
def test_qp_mixed_2D_INT(self):
design_space = DesignSpace(
[
FloatVariable(-10, 10),
IntegerVariable(-10, 10),
]
)
mixint = MixedIntegerContext(design_space)
sm = mixint.build_surrogate_model(QP(print_prediction=False))
sampling = mixint.build_sampling_method(LHS, criterion="m")
fun = Sphere(ndim=2)
xt = sampling(10)
yt = fun(xt)
sm.set_training_values(xt, yt)
sm.train()
eq_check = True
for i in range(xt.shape[0]):
if abs(float(xt[i, :][1]) - int(float(xt[i, :][1]))) > 10e-8:
eq_check = False
self.assertTrue(eq_check)
def test_compute_unfolded_dimension(self):
design_space = DesignSpace(
[
FloatVariable(0, 1),
CategoricalVariable(["A", "B"]),
]
)
assert design_space.get_unfolded_num_bounds().shape[0] == 3
def test_unfold_with_enum_mask(self):
x = np.array([[1.5, 1], [1.5, 0], [1.5, 1]])
expected = [[1.5, 0, 1], [1.5, 1, 0], [1.5, 0, 1]]
design_space = DesignSpace(
[
FloatVariable(1, 2),
CategoricalVariable(["A", "B"]),
]
)
x_unfolded, _ = design_space.unfold_x(x)
self.assertListEqual(expected, x_unfolded.tolist())
def test_unfold_with_enum_mask_with_enum_first(self):
x = np.array([[1, 1.5], [0, 1.5], [1, 1.5]])
expected = [[0, 1, 1.5], [1, 0, 1.5], [0, 1, 1.5]]
design_space = DesignSpace(
[
CategoricalVariable(["A", "B"]),
FloatVariable(1, 2),
]
)
x_unfolded, _ = design_space.unfold_x(x)
self.assertListEqual(expected, x_unfolded.tolist())
def test_fold_with_enum_index(self):
x = np.array([[1.5, 0, 1], [1.5, 1, 0], [1.5, 0, 1]])
expected = [[1.5, 1], [1.5, 0], [1.5, 1]]
design_space = DesignSpace(
[
FloatVariable(1, 2),
CategoricalVariable(["A", "B"]),
]
)
x_folded, _ = design_space.fold_x(x)
self.assertListEqual(expected, x_folded.tolist())
def test_fold_with_enum_index_with_list(self):
expected = [[1.5, 1]]
x = np.array([1.5, 0, 1])
design_space = DesignSpace(
[
FloatVariable(1, 2),
CategoricalVariable(["A", "B"]),
]
)
x_folded, _ = design_space.fold_x(x)
self.assertListEqual(expected, x_folded.tolist())
x = [1.5, 0, 1]
x_folded, _ = design_space.fold_x(x)
self.assertListEqual(expected, x_folded.tolist())
def test_cast_to_enum_value(self):
design_space = DesignSpace(
[
FloatVariable(0, 4),
CategoricalVariable(["blue", "red"]),
]
)
x = np.zeros((5, 2))
x[:, 1] = [1, 1, 0, 1, 0]
decoded = design_space.decode_values(x, i_dv=1)
expected = ["red", "red", "blue", "red", "blue"]
self.assertListEqual(expected, decoded)
def test_unfolded_xlimits_type(self):
design_space = DesignSpace(
[
FloatVariable(-5, 5),
CategoricalVariable(["2", "3"]),
CategoricalVariable(["4", "5"]),
IntegerVariable(0, 2),
]
)
sampling = MixedIntegerSamplingMethod(LHS, design_space, criterion="ese")
doe = sampling(10)
self.assertEqual((10, 4), doe.shape)
def test_unfold_xlimits_with_continuous_limits(self):
design_space = DesignSpace(
[
FloatVariable(-5, 5),
CategoricalVariable(["blue", "red"]),
CategoricalVariable(["short", "medium", "long"]),
IntegerVariable(0, 2),
]
)
unfolded_limits = design_space.get_unfolded_num_bounds()
self.assertEqual(
np.array_equal(
[[-5, 5], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 2]],
unfolded_limits,
),
True,
)
def test_unfold_xlimits_with_continuous_limits_and_ordinal_values(self):
design_space = DesignSpace(
[
FloatVariable(-5, 5),
CategoricalVariable(["blue", "red"]),
CategoricalVariable(["short", "medium", "long"]),
OrdinalVariable(["0", "3", "4"]),
]
)
unfolded_limits = design_space.get_unfolded_num_bounds()
self.assertEqual(
np.array_equal(
[[-5, 5], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 2]],
unfolded_limits,
),
True,
)
def test_cast_to_discrete_values(self):
design_space = DesignSpace(
[
FloatVariable(-5, 5),
CategoricalVariable(["blue", "red"]),
CategoricalVariable(["short", "medium", "long"]),
IntegerVariable(0, 4),
]
)
x = np.array([[2.6, 0.3, 0.5, 0.25, 0.45, 0.85, 3.1]])
self.assertEqual(
np.array_equal(
np.array([[2.6, 0, 1, 0, 0, 1, 3]]),
design_space.correct_get_acting(x)[0],
),
True,
)
def test_cast_to_discrete_values_with_smooth_rounding_ordinal_values(self):
x = np.array([[2.6, 0.3, 0.5, 0.25, 0.45, 0.85, 1.1]])
design_space = DesignSpace(
[
FloatVariable(-5, 5),
CategoricalVariable(["blue", "red"]),
CategoricalVariable(["short", "medium", "long"]),
OrdinalVariable(["0", "2", "4"]),
]
)
self.assertEqual(
np.array_equal(
np.array([[2.6, 0, 1, 0, 0, 1, 1]]),
design_space.correct_get_acting(x)[0],
),
True,
)
def test_cast_to_discrete_values_with_hard_rounding_ordinal_values(self):
x = np.array([[2.6, 0.3, 0.5, 0.25, 0.45, 0.85, 0.9]])
design_space = DesignSpace(
[
FloatVariable(-5, 5),
CategoricalVariable(["blue", "red"]),
CategoricalVariable(["short", "medium", "long"]),
OrdinalVariable(["0", "4"]),
]
)
self.assertEqual(
np.array_equal(
np.array([[2.6, 0, 1, 0, 0, 1, 1]]),
design_space.correct_get_acting(x)[0],
),
True,
)
def test_cast_to_discrete_values_with_non_integer_ordinal_values(self):
x = np.array([[2.6, 0.3, 0.5, 0.25, 0.45, 0.85, 0.8]])
design_space = DesignSpace(
[
FloatVariable(-5, 5),
CategoricalVariable(["blue", "red"]),
CategoricalVariable(["short", "medium", "long"]),
OrdinalVariable(["0", "3.5"]),
]
)
self.assertEqual(
np.array_equal(
np.array([[2.6, 0, 1, 0, 0, 1, 1]]),
design_space.correct_get_acting(x)[0],
),
True,
)
def test_examples(self):
self.run_mixed_integer_lhs_example()
self.run_mixed_integer_qp_example()
self.run_mixed_integer_context_example()
self.run_hierarchical_variables_Goldstein()
self.run_mixed_discrete_design_space_example()
self.run_hierarchical_design_space_example()
    def run_mixed_integer_lhs_example(self):
        """Doc example: sample a mixed float/categorical design space and plot the DOE."""
        import numpy as np
        import matplotlib.pyplot as plt
        from matplotlib import colors
        from smt.utils.design_space import (
            DesignSpace,
            FloatVariable,
            CategoricalVariable,
        )
        float_var = FloatVariable(0, 4)
        cat_var = CategoricalVariable(["blue", "red"])
        design_space = DesignSpace(
            [
                float_var,
                cat_var,
            ]
        )
        num = 40
        # sample_valid_x returns the DOE and a per-variable acting mask
        x, x_is_acting = design_space.sample_valid_x(num)
        # Color each sampled point by its categorical level (one color per value)
        cmap = colors.ListedColormap(cat_var.values)
        plt.scatter(x[:, 0], np.zeros(num), c=x[:, 1], cmap=cmap)
        plt.show()
    def run_mixed_integer_qp_example(self):
        """Doc example: QP surrogate constrained to integer inputs via MixedIntegerSurrogateModel."""
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.surrogate_models import QP
        from smt.applications.mixed_integer import MixedIntegerSurrogateModel
        from smt.utils.design_space import DesignSpace, IntegerVariable
        xt = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
        yt = np.array([0.0, 1.0, 1.5, 0.5, 1.0])
        # Specify the design space using the DesignSpace
        # class and various available variable types
        design_space = DesignSpace(
            [
                IntegerVariable(0, 4),
            ]
        )
        sm = MixedIntegerSurrogateModel(design_space=design_space, surrogate=QP())
        sm.set_training_values(xt, yt)
        sm.train()
        # Predict on a fine continuous grid and plot against the training data
        num = 100
        x = np.linspace(0.0, 4.0, num)
        y = sm.predict_values(x)
        plt.plot(xt, yt, "o")
        plt.plot(x, y)
        plt.xlabel("x")
        plt.ylabel("y")
        plt.legend(["Training data", "Prediction"])
        plt.show()
    def run_mixed_integer_context_example(self):
        """Doc example: MixedIntegerContext builds a matching sampler and Kriging model."""
        import matplotlib.pyplot as plt
        from smt.surrogate_models import KRG
        from smt.applications.mixed_integer import MixedIntegerContext
        from smt.utils.design_space import (
            DesignSpace,
            FloatVariable,
            IntegerVariable,
            CategoricalVariable,
        )
        design_space = DesignSpace(
            [
                IntegerVariable(0, 5),
                FloatVariable(0.0, 4.0),
                CategoricalVariable(["blue", "red", "green", "yellow"]),
            ]
        )
        def ftest(x):
            # Simple analytic response mixing the three design variables
            return (x[:, 0] * x[:, 0] + x[:, 1] * x[:, 1]) * (x[:, 2] + 1)
        # Helper class for creating surrogate models
        mi_context = MixedIntegerContext(design_space)
        # DOE for training
        sampler = mi_context.build_sampling_method()
        num = mi_context.get_unfolded_dimension() * 5
        print("DOE point nb = {}".format(num))
        xt = sampler(num)
        yt = ftest(xt)
        # Surrogate
        sm = mi_context.build_kriging_model(KRG())
        sm.set_training_values(xt, yt)
        sm.train()
        # DOE for validation
        xv = sampler(50)
        yv = ftest(xv)
        yp = sm.predict_values(xv)
        plt.plot(yv, yv)
        plt.plot(yv, yp, "o")
        plt.xlabel("actual")
        plt.ylabel("prediction")
        plt.show()
    def test_hierarchical_variables_Goldstein(self):
        """Hierarchical Goldstein problem: checks recorded activeness/imputation and a
        hierarchical Kriging model (ARC kernel) on the sampled DOE."""
        problem = HierarchicalGoldstein()
        ds = problem.design_space
        self.assertIsInstance(ds, DesignSpace)
        self.assertEqual(ds.n_dv, 11)
        # One evaluation point per value of the meta variable x0
        x = np.array(
            [
                [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            ]
        )
        y = problem(x)
        self.assertTrue(
            np.linalg.norm(
                y - np.array([50.75285716, 56.62074043, 50.97693309, 56.29235443])
            )
            < 1e-8
        )
        # The activeness mask recorded during evaluation depends on x0
        self.assertTrue(
            np.linalg.norm(
                problem.eval_is_acting.astype(int)
                - np.array(
                    [
                        [1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1],
                        [1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1],
                        [1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1],
                        [1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1],
                    ]
                )
            )
            < 1e-8
        )
        # Apparently non-acting continuous vars are imputed to 50 and non-acting
        # discrete vars to 0 in eval_x (see expected matrix below)
        self.assertTrue(
            np.linalg.norm(
                problem.eval_x
                - np.array(
                    [
                        [0, 1, 1, 1, 50, 50, 1, 1, 1, 1, 1],
                        [1, 1, 1, 1, 1, 50, 1, 0, 1, 1, 1],
                        [2, 1, 1, 1, 50, 1, 1, 1, 0, 1, 1],
                        [3, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1],
                    ]
                )
            )
            < 1e-8
        )
        # Re-evaluating the corrected points must reproduce the same outputs
        self.assertTrue(np.linalg.norm(y - problem(problem.eval_x)) < 1e-8)
        n_doe = 15
        ds.seed = 42
        Xt, is_acting = ds.sample_valid_x(n_doe)
        Yt = problem(Xt)
        sm = MixedIntegerKrigingModel(
            surrogate=KRG(
                design_space=ds,
                categorical_kernel=MixIntKernelType.HOMO_HSPHERE,
                hierarchical_kernel=MixHrcKernelType.ARC_KERNEL,
                theta0=[1e-2],
                corr="abs_exp",
                n_start=10,
            ),
        )
        sm.set_training_values(Xt, Yt, is_acting=is_acting)
        sm.train()
        # Exact interpolation and near-zero variance expected at the DOE points
        y_s = sm.predict_values(Xt)[:, 0]
        pred_RMSE = np.linalg.norm(y_s - Yt) / len(Yt)
        y_sv = sm.predict_variances(Xt)[:, 0]
        var_RMSE = np.linalg.norm(y_sv) / len(Yt)
        self.assertTrue(pred_RMSE < 1e-7)
        print("Pred_RMSE", pred_RMSE)
        self.assertTrue(var_RMSE < 1e-7)
        # The two batches below differ only in coordinates that should not
        # influence the output (presumably non-acting ones): predictions match
        self.assertTrue(
            np.linalg.norm(
                sm.predict_values(
                    np.array(
                        [
                            [0.0, 1.0, 64.0, 4.0, 56.0, 37.0, 35.0, 1.0, 2.0, 1.0, 1.0],
                            [1.0, 0.0, 31.0, 92.0, 24.0, 3.0, 17.0, 1.0, 2.0, 1.0, 1.0],
                            [2.0, 1.0, 28.0, 60.0, 77.0, 66.0, 9.0, 0.0, 1.0, 1.0, 1.0],
                            [
                                3.0,
                                1.0,
                                50.0,
                                40.0,
                                99.0,
                                35.0,
                                51.0,
                                2.0,
                                1.0,
                                1.0,
                                2.0,
                            ],
                        ]
                    )
                )[:, 0]
                - sm.predict_values(
                    np.array(
                        [
                            [0.0, 1.0, 64.0, 4.0, 6.0, 7.0, 35.0, 1.0, 2.0, 1.0, 1.0],
                            [
                                1.0,
                                0.0,
                                31.0,
                                92.0,
                                24.0,
                                30.0,
                                17.0,
                                0.0,
                                2.0,
                                1.0,
                                1.0,
                            ],
                            [2.0, 1.0, 28.0, 60.0, 7.0, 66.0, 9.0, 0.0, 2.0, 1.0, 1.0],
                            [
                                3.0,
                                1.0,
                                50.0,
                                40.0,
                                99.0,
                                35.0,
                                51.0,
                                0.0,
                                0.0,
                                1.0,
                                2.0,
                            ],
                        ]
                    )
                )[:, 0]
            )
            < 1e-8
        )
        # ...while changing a coordinate that does influence the output (here
        # index 8 for meta value 1) changes the prediction
        self.assertTrue(
            np.linalg.norm(
                sm.predict_values(
                    np.array(
                        [[1.0, 0.0, 31.0, 92.0, 24.0, 3.0, 17.0, 1.0, 2.0, 1.0, 1.0]]
                    )
                )
                - sm.predict_values(
                    np.array(
                        [[1.0, 0.0, 31.0, 92.0, 24.0, 3.0, 17.0, 1.0, 1.0, 1.0, 1.0]]
                    )
                )
            )
            > 1e-8
        )
    def run_mixed_discrete_design_space_example(self):
        """Doc example: declare a mixed-discrete design space, sample it, and
        correct arbitrary design vectors."""
        import numpy as np
        from smt.utils.design_space import (
            DesignSpace,
            FloatVariable,
            IntegerVariable,
            OrdinalVariable,
            CategoricalVariable,
        )
        ds = DesignSpace(
            [
                CategoricalVariable(
                    ["A", "B"]
                ),  # x0 categorical: A or B; order is not relevant
                OrdinalVariable(
                    ["C", "D", "E"]
                ),  # x1 ordinal: C, D or E; order is relevant
                IntegerVariable(
                    0, 2
                ),  # x2 integer between 0 and 2 (inclusive): 0, 1, 2
                FloatVariable(0, 1),  # c3 continuous between 0 and 1
            ]
        )
        # Sample the design space
        # Note: is_acting_sampled specifies for each design variable whether it is acting or not
        x_sampled, is_acting_sampled = ds.sample_valid_x(100)
        # Correct design vectors: round discrete variables, correct hierarchical variables
        x_corr, is_acting = ds.correct_get_acting(
            np.array(
                [
                    [0, 0, 2, 0.25],
                    [0, 2, 1, 0.75],
                ]
            )
        )
        print(is_acting)
    def run_hierarchical_design_space_example(self):
        """Doc example: a decreed (hierarchical) variable that only acts when the
        meta variable takes a given value, and its imputation on correction."""
        import numpy as np
        from smt.utils.design_space import (
            DesignSpace,
            FloatVariable,
            IntegerVariable,
            OrdinalVariable,
            CategoricalVariable,
        )
        ds = DesignSpace(
            [
                CategoricalVariable(
                    ["A", "B"]
                ),  # x0 categorical: A or B; order is not relevant
                OrdinalVariable(
                    ["C", "D", "E"]
                ),  # x1 ordinal: C, D or E; order is relevant
                IntegerVariable(
                    0, 2
                ),  # x2 integer between 0 and 2 (inclusive): 0, 1, 2
                FloatVariable(0, 1),  # c3 continuous between 0 and 1
            ]
        )
        # Declare that x1 is acting if x0 == A
        ds.declare_decreed_var(decreed_var=1, meta_var=0, meta_value="A")
        # Sample the design space
        # Note: is_acting_sampled specifies for each design variable whether it is acting or not
        x_sampled, is_acting_sampled = ds.sample_valid_x(100)
        # Correct design vectors: round discrete variables, correct hierarchical variables
        x_corr, is_acting = ds.correct_get_acting(
            np.array(
                [
                    [0, 0, 2, 0.25],
                    [1, 2, 1, 0.66],
                ]
            )
        )
        # Observe the hierarchical behavior:
        assert np.all(
            is_acting
            == np.array(
                [
                    [True, True, True, True],
                    [True, False, True, True],  # x1 is not acting if x0 != A
                ]
            )
        )
        assert np.all(
            x_corr
            == np.array(
                [
                    [0, 0, 2, 0.25],
                    # x1 is not acting, so it is corrected ("imputed") to its non-acting value (0 for discrete vars)
                    [1, 0, 1, 0.66],
                ]
            )
        )
    def run_hierarchical_variables_Goldstein(self):
        """Doc example: hierarchical Goldstein-like function whose meta variable x0
        decrees which design variables act, modeled with hierarchical Kriging."""
        import numpy as np
        from smt.utils.design_space import (
            DesignSpace,
            CategoricalVariable,
            IntegerVariable,
            FloatVariable,
        )
        from smt.applications.mixed_integer import MixedIntegerKrigingModel
        from smt.surrogate_models import MixIntKernelType, MixHrcKernelType, KRG
        def f_hv(X):
            import numpy as np
            def H(x1, x2, x3, x4, z3, z4, x5, cos_term):
                # Base polynomial response; the optional cosine term is gated by cos_term
                import numpy as np
                h = (
                    53.3108
                    + 0.184901 * x1
                    - 5.02914 * x1**3 * 10 ** (-6)
                    + 7.72522 * x1**z3 * 10 ** (-8)
                    - 0.0870775 * x2
                    - 0.106959 * x3
                    + 7.98772 * x3**z4 * 10 ** (-6)
                    + 0.00242482 * x4
                    + 1.32851 * x4**3 * 10 ** (-6)
                    - 0.00146393 * x1 * x2
                    - 0.00301588 * x1 * x3
                    - 0.00272291 * x1 * x4
                    + 0.0017004 * x2 * x3
                    + 0.0038428 * x2 * x4
                    - 0.000198969 * x3 * x4
                    + 1.86025 * x1 * x2 * x3 * 10 ** (-5)
                    - 1.88719 * x1 * x2 * x4 * 10 ** (-6)
                    + 2.50923 * x1 * x3 * x4 * 10 ** (-5)
                    - 5.62199 * x2 * x3 * x4 * 10 ** (-5)
                )
                if cos_term:
                    h += 5.0 * np.cos(2.0 * np.pi * (x5 / 100.0)) - 2.0
                return h
            def f1(x1, x2, z1, z2, z3, z4, x5, cos_term):
                # Both x3 and x4 replaced by discrete levels selected via z2/z3
                c1 = z2 == 0
                c2 = z2 == 1
                c3 = z2 == 2
                c4 = z3 == 0
                c5 = z3 == 1
                c6 = z3 == 2
                y = (
                    c4
                    * (
                        c1 * H(x1, x2, 20, 20, z3, z4, x5, cos_term)
                        + c2 * H(x1, x2, 50, 20, z3, z4, x5, cos_term)
                        + c3 * H(x1, x2, 80, 20, z3, z4, x5, cos_term)
                    )
                    + c5
                    * (
                        c1 * H(x1, x2, 20, 50, z3, z4, x5, cos_term)
                        + c2 * H(x1, x2, 50, 50, z3, z4, x5, cos_term)
                        + c3 * H(x1, x2, 80, 50, z3, z4, x5, cos_term)
                    )
                    + c6
                    * (
                        c1 * H(x1, x2, 20, 80, z3, z4, x5, cos_term)
                        + c2 * H(x1, x2, 50, 80, z3, z4, x5, cos_term)
                        + c3 * H(x1, x2, 80, 80, z3, z4, x5, cos_term)
                    )
                )
                return y
            def f2(x1, x2, x3, z2, z3, z4, x5, cos_term):
                # x4 replaced by a discrete level selected via z2
                c1 = z2 == 0
                c2 = z2 == 1
                c3 = z2 == 2
                y = (
                    c1 * H(x1, x2, x3, 20, z3, z4, x5, cos_term)
                    + c2 * H(x1, x2, x3, 50, z3, z4, x5, cos_term)
                    + c3 * H(x1, x2, x3, 80, z3, z4, x5, cos_term)
                )
                return y
            def f3(x1, x2, x4, z1, z3, z4, x5, cos_term):
                # x3 replaced by a discrete level selected via z1
                c1 = z1 == 0
                c2 = z1 == 1
                c3 = z1 == 2
                y = (
                    c1 * H(x1, x2, 20, x4, z3, z4, x5, cos_term)
                    + c2 * H(x1, x2, 50, x4, z3, z4, x5, cos_term)
                    + c3 * H(x1, x2, 80, x4, z3, z4, x5, cos_term)
                )
                return y
            # Dispatch per point on the meta variable x[0] (selects the formulation)
            y = []
            for x in X:
                if x[0] == 0:
                    y.append(
                        f1(x[2], x[3], x[7], x[8], x[9], x[10], x[6], cos_term=x[1])
                    )
                elif x[0] == 1:
                    y.append(
                        f2(x[2], x[3], x[4], x[8], x[9], x[10], x[6], cos_term=x[1])
                    )
                elif x[0] == 2:
                    y.append(
                        f3(x[2], x[3], x[5], x[7], x[9], x[10], x[6], cos_term=x[1])
                    )
                elif x[0] == 3:
                    y.append(
                        H(x[2], x[3], x[4], x[5], x[9], x[10], x[6], cos_term=x[1])
                    )
            return np.array(y)
        design_space = DesignSpace(
            [
                CategoricalVariable(values=[0, 1, 2, 3]),  # meta
                IntegerVariable(0, 1),  # x1
                FloatVariable(0, 100),  # x2
                FloatVariable(0, 100),
                FloatVariable(0, 100),
                FloatVariable(0, 100),
                FloatVariable(0, 100),
                IntegerVariable(0, 2),  # x7
                IntegerVariable(0, 2),
                IntegerVariable(0, 2),
                IntegerVariable(0, 2),
            ]
        )
        # x4 is acting if meta == 1, 3
        design_space.declare_decreed_var(decreed_var=4, meta_var=0, meta_value=[1, 3])
        # x5 is acting if meta == 2, 3
        design_space.declare_decreed_var(decreed_var=5, meta_var=0, meta_value=[2, 3])
        # x7 is acting if meta == 0, 2
        design_space.declare_decreed_var(decreed_var=7, meta_var=0, meta_value=[0, 2])
        # x8 is acting if meta == 0, 1
        design_space.declare_decreed_var(decreed_var=8, meta_var=0, meta_value=[0, 1])
        # Sample from the design spaces, correctly considering hierarchy
        n_doe = 15
        Xt, Xt_is_acting = design_space.sample_valid_x(n_doe)
        Yt = f_hv(Xt)
        sm = MixedIntegerKrigingModel(
            surrogate=KRG(
                design_space=design_space,
                categorical_kernel=MixIntKernelType.HOMO_HSPHERE,
                hierarchical_kernel=MixHrcKernelType.ALG_KERNEL,  # ALG or ARC
                theta0=[1e-2],
                corr="abs_exp",
                n_start=5,
            ),
        )
        sm.set_training_values(Xt, Yt, is_acting=Xt_is_acting)
        sm.train()
        # Training-point accuracy (example only; no assertion is made here)
        y_s = sm.predict_values(Xt)[:, 0]
        pred_RMSE = np.linalg.norm(y_s - Yt) / len(Yt)
        y_sv = sm.predict_variances(Xt)[:, 0]
        var_RMSE = np.linalg.norm(y_sv) / len(Yt)
    def test_hierarchical_variables_NN(self):
        """Hierarchical neural-network test problem: train hierarchical Kriging on
        per-meta-level DOEs and check interpolation plus prediction invariance."""
        problem = HierarchicalNeuralNetwork()
        ds = problem.design_space
        self.assertEqual(ds.n_dv, 8)
        n_doe = 100
        # Sample only the non-meta dimensions, then prepend each meta level manually
        ds_sampling = DesignSpace(ds.design_variables[1:])
        sampling = MixedIntegerSamplingMethod(
            LHS, ds_sampling, criterion="ese", random_state=42
        )
        x_cont = sampling(3 * n_doe)
        # Meta level 0: only the first 5 sampled columns are used
        xdoe1 = np.zeros((n_doe, 8))
        x_cont2 = x_cont[:n_doe, :5]
        xdoe1[:, 0] = np.zeros(n_doe)
        xdoe1[:, 1:6] = x_cont2
        ydoe1 = problem(xdoe1)
        xdoe1 = np.zeros((n_doe, 8))
        xdoe1[:, 0] = np.zeros(n_doe)
        xdoe1[:, 1:6] = x_cont2
        # Meta level 1: 6 columns are used
        xdoe2 = np.zeros((n_doe, 8))
        x_cont2 = x_cont[n_doe : 2 * n_doe, :6]
        xdoe2[:, 0] = np.ones(n_doe)
        xdoe2[:, 1:7] = x_cont2
        ydoe2 = problem(xdoe2)
        xdoe2 = np.zeros((n_doe, 8))
        xdoe2[:, 0] = np.ones(n_doe)
        xdoe2[:, 1:7] = x_cont2
        # Meta level 2: all columns are used
        xdoe3 = np.zeros((n_doe, 8))
        xdoe3[:, 0] = 2 * np.ones(n_doe)
        xdoe3[:, 1:] = x_cont[2 * n_doe :, :]
        ydoe3 = problem(xdoe3)
        Xt = np.concatenate((xdoe1, xdoe2, xdoe3), axis=0)
        Yt = np.concatenate((ydoe1, ydoe2, ydoe3), axis=0)
        sm = MixedIntegerKrigingModel(
            surrogate=KRG(
                design_space=problem.design_space,
                categorical_kernel=MixIntKernelType.HOMO_HSPHERE,
                hierarchical_kernel=MixHrcKernelType.ALG_KERNEL,
                theta0=[1e-2],
                corr="abs_exp",
                n_start=5,
            ),
        )
        sm.set_training_values(Xt, Yt)
        sm.train()
        # Exact interpolation and near-zero variance expected at the DOE points
        y_s = sm.predict_values(Xt)[:, 0]
        pred_RMSE = np.linalg.norm(y_s - Yt) / len(Yt)
        y_sv = sm.predict_variances(Xt)[:, 0]
        var_RMSE = np.linalg.norm(y_sv) / len(Yt)
        self.assertTrue(pred_RMSE < 1e-7)
        print("Pred_RMSE", pred_RMSE)
        self.assertTrue(var_RMSE < 1e-7)
        # Changing only trailing coordinates at low meta levels (presumably
        # non-acting ones) must not change the prediction
        self.assertTrue(
            np.linalg.norm(
                sm.predict_values(
                    np.array(
                        [
                            [0, -1, -2, 8, 0, 2, 0, 0],
                            [1, -1, -2, 16, 1, 2, 1, 0],
                            [2, -1, -2, 32, 2, 2, 1, -2],
                        ]
                    )
                )[:, 0]
                - sm.predict_values(
                    np.array(
                        [
                            [0, -1, -2, 8, 0, 2, 10, 10],
                            [1, -1, -2, 16, 1, 2, 1, 10],
                            [2, -1, -2, 32, 2, 2, 1, -2],
                        ]
                    )
                )[:, 0]
            )
            < 1e-8
        )
        # ...while changing other coordinates does change the prediction
        self.assertTrue(
            np.linalg.norm(
                sm.predict_values(np.array([[0, -1, -2, 8, 0, 2, 0, 0]]))
                - sm.predict_values(np.array([[0, -1, -2, 8, 0, 12, 10, 10]]))
            )
            > 1e-8
        )
def test_mixed_gower_2D(self):
xt = np.array([[0, 5], [2, -1], [4, 0.5]])
yt = np.array([[0.0], [1.0], [1.5]])
design_space = DesignSpace(
[
CategoricalVariable(["0.0", "1.0", " 2.0", "3.0", "4.0"]),
FloatVariable(-5, 5),
]
)
# Surrogate
sm = MixedIntegerKrigingModel(
surrogate=KRG(
design_space=design_space,
theta0=[1e-2],
corr="abs_exp",
categorical_kernel=MixIntKernelType.GOWER,
),
)
sm.set_training_values(xt, yt)
sm.train()
# DOE for validation
x = np.linspace(0, 4, 5)
x2 = np.linspace(-5, 5, 21)
x1 = []
for element in itertools.product(x, x2):
x1.append(np.array(element))
x_pred = np.array(x1)
y = sm.predict_values(x_pred)
yvar = sm.predict_variances(x_pred)
# prediction are correct on known points
self.assertAlmostEqual(y[20, 0], 0)
self.assertAlmostEqual(y[50, 0], 1)
self.assertAlmostEqual(y[95, 0], 1.5)
self.assertTrue(np.abs(np.sum(np.array([y[20], y[50], y[95]]) - yt)) < 1e-6)
self.assertTrue(np.abs(np.sum(np.array([yvar[20], yvar[50], yvar[95]]))) < 1e-6)
self.assertEqual(np.shape(y), (105, 1))
def test_mixed_CR_2D(self):
xt = np.array([[0, 5], [2, -1], [4, 0.5]])
yt = np.array([[0.0], [1.0], [1.5]])
design_space = DesignSpace(
[
CategoricalVariable(["0.0", "1.0", " 2.0", "3.0", "4.0"]),
FloatVariable(-5, 5),
]
)
# Surrogate
sm = MixedIntegerKrigingModel(
surrogate=KRG(
design_space=design_space,
theta0=[1e-2],
corr="abs_exp",
categorical_kernel=MixIntKernelType.GOWER,
),
)
sm.set_training_values(xt, yt)
sm.train()
# DOE for validation
x = np.linspace(0, 4, 5)
x2 = np.linspace(-5, 5, 21)
x1 = []
for element in itertools.product(x, x2):
x1.append(np.array(element))
x_pred = np.array(x1)
y = sm.predict_values(x_pred)
yvar = sm.predict_variances(x_pred)
# prediction are correct on known points
self.assertAlmostEqual(y[20, 0], 0)
self.assertAlmostEqual(y[50, 0], 1)
self.assertAlmostEqual(y[95, 0], 1.5)
self.assertTrue(np.abs(np.sum(np.array([y[20], y[50], y[95]]) - yt)) < 1e-6)
self.assertTrue(np.abs(np.sum(np.array([yvar[20], yvar[50], yvar[95]]))) < 1e-6)
self.assertEqual(np.shape(y), (105, 1))
def test_mixed_CR_PLS_noisy_2D(self):
xt = np.array([[0, 5], [2, -1], [4, 0.5]])
yt = np.array([[0.0], [1.0], [1.5]])
design_space = DesignSpace(
[
CategoricalVariable(["0.0", "1.0", " 2.0", "3.0", "4.0"]),
FloatVariable(-5, 5),
]
)
# Surrogate
sm = MixedIntegerKrigingModel(
surrogate=KPLS(
n_comp=1,
eval_noise=True,
design_space=design_space,
theta0=[1e-2],
categorical_kernel=MixIntKernelType.CONT_RELAX,
corr="abs_exp",
),
)
sm.set_training_values(xt, yt)
sm.train()
# DOE for validation
x = np.linspace(0, 4, 5)
x2 = np.linspace(-5, 5, 21)
x1 = []
for element in itertools.product(x, x2):
x1.append(np.array(element))
x_pred = np.array(x1)
y = sm.predict_values(x_pred)
yvar = sm.predict_variances(x_pred)
# prediction are correct on known points
self.assertTrue(np.abs(np.sum(np.array([y[20], y[50], y[95]]) - yt)) < 1e-6)
self.assertTrue(np.abs(np.sum(np.array([yvar[20], yvar[50], yvar[95]]))) < 1e-6)
self.assertEqual(np.shape(y), (105, 1))
def test_mixed_homo_gaussian_2D(self):
xt = np.array([[0, 5], [2, -1], [4, 0.5]])
yt = np.array([[0.0], [1.0], [1.5]])
design_space = DesignSpace(
[
CategoricalVariable(["0.0", "1.0", " 2.0", "3.0", "4.0"]),
FloatVariable(-5, 5),
]
)
# Surrogate
sm = MixedIntegerKrigingModel(
surrogate=KRG(
design_space=design_space,
theta0=[1e-2],
corr="abs_exp",
categorical_kernel=MixIntKernelType.EXP_HOMO_HSPHERE,
),
)
sm.set_training_values(xt, yt)
sm.train()
# DOE for validation
x = np.linspace(0, 4, 5)
x2 = np.linspace(-5, 5, 21)
x1 = []
for element in itertools.product(x, x2):
x1.append(np.array(element))
x_pred = np.array(x1)
y = sm.predict_values(x_pred)
yvar = sm.predict_variances(x_pred)
# prediction are correct on known points
self.assertTrue(np.abs(np.sum(np.array([y[20], y[50], y[95]]) - yt)) < 1e-6)
self.assertTrue(np.abs(np.sum(np.array([yvar[20], yvar[50], yvar[95]]))) < 1e-6)
self.assertEqual(np.shape(y), (105, 1))
def test_mixed_homo_hyp_2D(self):
xt = np.array([[0, 5], [2, -1], [4, 0.5]])
yt = np.array([[0.0], [1.0], [1.5]])
design_space = DesignSpace(
[
CategoricalVariable(["0.0", "1.0", " 2.0", "3.0", "4.0"]),
FloatVariable(-5, 5),
]
)
# Surrogate
sm = MixedIntegerKrigingModel(
surrogate=KRG(
design_space=design_space,
theta0=[1e-2],
categorical_kernel=MixIntKernelType.HOMO_HSPHERE,
corr="abs_exp",
),
)
sm.set_training_values(xt, yt)
sm.train()
# DOE for validation
x = np.linspace(0, 4, 5)
x2 = np.linspace(-5, 5, 21)
x1 = []
for element in itertools.product(x, x2):
x1.append(np.array(element))
x_pred = np.array(x1)
y = sm.predict_values(x_pred)
yvar = sm.predict_variances(x_pred)
# prediction are correct on known points
self.assertTrue(np.abs(np.sum(np.array([y[20], y[50], y[95]]) - yt)) < 1e-6)
self.assertTrue(np.abs(np.sum(np.array([yvar[20], yvar[50], yvar[95]]))) < 1e-6)
self.assertEqual(np.shape(y), (105, 1))
def test_mixed_homo_gaussian_3D_PLS(self):
xt = np.array([[0.5, 0, 5], [2, 3, 4], [5, 2, -1], [-2, 4, 0.5]])
yt = np.array([[0.0], [3], [1.0], [1.5]])
design_space = DesignSpace(
[
FloatVariable(-5, 5),
CategoricalVariable(["0.0", "1.0", " 2.0", "3.0", "4.0"]),
FloatVariable(-5, 5),
]
)
# Surrogate
sm = surrogate = KPLS(
design_space=design_space,
theta0=[1e-2],
n_comp=1,
categorical_kernel=MixIntKernelType.EXP_HOMO_HSPHERE,
cat_kernel_comps=[3],
corr="squar_exp",
)
sm.set_training_values(xt, yt)
sm.train()
# DOE for validation
x = np.linspace(0, 4, 5)
x2 = np.linspace(-5, 5, 21)
x1 = []
for element in itertools.product(x2, x, x2):
x1.append(np.array(element))
x_pred = np.array(x1)
i = 0
i += 1
y = sm.predict_values(x_pred)
yvar = sm.predict_variances(x_pred)
self.assertTrue((np.abs(np.sum(np.array(sm.predict_values(xt) - yt)))) < 1e-6)
self.assertTrue((np.abs(np.sum(np.array(sm.predict_variances(xt) - 0)))) < 1e-6)
def test_mixed_homo_gaussian_3D_PLS_cate(self):
xt = np.array([[0.5, 0, 5], [2, 3, 4], [5, 2, -1], [-2, 4, 0.5]])
yt = np.array([[0.0], [3], [1.0], [1.5]])
design_space = DesignSpace(
[
FloatVariable(-5, 5),
CategoricalVariable(["0.0", "1.0", " 2.0", "3.0", "4.0"]),
FloatVariable(-5, 5),
]
)
# Surrogate
sm = KPLS(
design_space=design_space,
theta0=[1e-2],
n_comp=2,
corr="abs_exp",
cat_kernel_comps=[3],
categorical_kernel=MixIntKernelType.EXP_HOMO_HSPHERE,
)
sm.set_training_values(xt, yt)
sm.train()
# DOE for validation
x = np.linspace(0, 4, 5)
x2 = np.linspace(-5, 5, 21)
x1 = []
for element in itertools.product(x2, x, x2):
x1.append(np.array(element))
x_pred = np.array(x1)
i = 0
i += 1
y = sm.predict_values(x_pred)
yvar = sm.predict_variances(x_pred)
self.assertTrue((np.abs(np.sum(np.array(sm.predict_values(xt) - yt)))) < 1e-6)
self.assertTrue((np.abs(np.sum(np.array(sm.predict_variances(xt) - 0)))) < 1e-6)
def test_mixed_homo_hyp_3D_PLS_cate(self):
xt = np.array([[0.5, 0, 5], [2, 3, 4], [5, 2, -1], [-2, 4, 0.5]])
yt = np.array([[0.0], [3], [1.0], [1.5]])
design_space = DesignSpace(
[
FloatVariable(-5, 5),
CategoricalVariable(["0.0", "1.0", " 2.0", "3.0", "4.0"]),
FloatVariable(-5, 5),
]
)
# Surrogate
sm = MixedIntegerKrigingModel(
surrogate=KPLS(
design_space=design_space,
theta0=[1e-2],
n_comp=1,
categorical_kernel=MixIntKernelType.HOMO_HSPHERE,
cat_kernel_comps=[3],
corr="squar_exp",
),
)
sm.set_training_values(xt, yt)
sm.train()
# DOE for validation
x = np.linspace(0, 4, 5)
x2 = np.linspace(-5, 5, 21)
x1 = []
for element in itertools.product(x2, x, x2):
x1.append(np.array(element))
x_pred = np.array(x1)
i = 0
i += 1
y = sm.predict_values(x_pred)
yvar = sm.predict_variances(x_pred)
self.assertTrue((np.abs(np.sum(np.array(sm.predict_values(xt) - yt)))) < 1e-6)
self.assertTrue((np.abs(np.sum(np.array(sm.predict_variances(xt) - 0)))) < 1e-6)
def test_mixed_homo_gaussian_3D_ord_cate(self):
xt = np.array([[0.5, 0, 5], [2, 3, 4], [5, 2, -1], [-2, 4, 0.5]])
yt = np.array([[0.0], [3], [1.0], [1.5]])
design_space = DesignSpace(
[
CategoricalVariable(["0.0", "1.0", " 2.0", "3.0", "4.0"]),
FloatVariable(-5, 5),
CategoricalVariable(["0.0", "1.0", " 2.0", "3.0"]),
]
)
# Surrogate
sm = MixedIntegerKrigingModel(
surrogate=KPLS(
design_space=design_space,
theta0=[1e-2],
n_comp=1,
categorical_kernel=MixIntKernelType.EXP_HOMO_HSPHERE,
cat_kernel_comps=[3, 2],
corr="squar_exp",
),
)
sm.set_training_values(xt, yt)
sm.train()
# DOE for validation
x = np.linspace(0, 4, 5)
x2 = np.linspace(-5, 5, 21)
x3 = np.linspace(0, 3, 4)
x1 = []
for element in itertools.product(x, x2, x3):
x1.append(np.array(element))
x_pred = np.array(x1)
y = sm.predict_values(x_pred)
yvar = sm.predict_variances(x_pred)
# prediction are correct on known points
self.assertTrue((np.abs(np.sum(np.array(sm.predict_values(xt) - yt)) < 1e-6)))
self.assertTrue((np.abs(np.sum(np.array(sm.predict_variances(xt) - 0)) < 1e-6)))
def test_mixed_gower_3D(self):
design_space = DesignSpace(
[
FloatVariable(-10, 10),
IntegerVariable(-10, 10),
IntegerVariable(-10, 10),
]
)
mixint = MixedIntegerContext(design_space)
sm = mixint.build_kriging_model(
KRG(categorical_kernel=MixIntKernelType.GOWER, print_prediction=False)
)
sampling = mixint.build_sampling_method(LHS, criterion="m")
fun = Sphere(ndim=3)
xt = sampling(10)
yt = fun(xt)
sm.set_training_values(xt, yt)
sm.train()
eq_check = True
for i in range(xt.shape[0]):
if abs(float(xt[i, :][1]) - int(float(xt[i, :][1]))) > 10e-8:
eq_check = False
self.assertTrue(eq_check)
    def run_mixed_gower_example(self):
        """Doc example: Gower-distance Kriging over one categorical and one
        continuous variable, plotting predictions per category."""
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.surrogate_models import KRG, MixIntKernelType
        from smt.applications.mixed_integer import MixedIntegerKrigingModel
        from smt.utils.design_space import (
            DesignSpace,
            CategoricalVariable,
            FloatVariable,
        )
        # Three training points per categorical level (0, 1, 2)
        xt1 = np.array([[0, 0.0], [0, 2.0], [0, 4.0]])
        xt2 = np.array([[1, 0.0], [1, 2.0], [1, 3.0]])
        xt3 = np.array([[2, 1.0], [2, 2.0], [2, 4.0]])
        xt = np.concatenate((xt1, xt2, xt3), axis=0)
        xt[:, 1] = xt[:, 1].astype(np.float64)
        yt1 = np.array([0.0, 9.0, 16.0])
        yt2 = np.array([0.0, -4, -13.0])
        yt3 = np.array([-10, 3, 11.0])
        yt = np.concatenate((yt1, yt2, yt3), axis=0)
        design_space = DesignSpace(
            [
                CategoricalVariable(["Blue", "Red", "Green"]),
                FloatVariable(0, 4),
            ]
        )
        # Surrogate
        sm = MixedIntegerKrigingModel(
            surrogate=KRG(
                design_space=design_space,
                categorical_kernel=MixIntKernelType.GOWER,
                theta0=[1e-1],
                corr="squar_exp",
                n_start=20,
            ),
        )
        sm.set_training_values(xt, yt)
        sm.train()
        # DOE for validation
        n = 100
        x_cat1 = []
        x_cat2 = []
        x_cat3 = []
        for i in range(n):
            x_cat1.append(0)
            x_cat2.append(1)
            x_cat3.append(2)
        x_cont = np.linspace(0.0, 4.0, n)
        x1 = np.concatenate(
            (np.asarray(x_cat1).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1
        )
        x2 = np.concatenate(
            (np.asarray(x_cat2).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1
        )
        x3 = np.concatenate(
            (np.asarray(x_cat3).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1
        )
        y1 = sm.predict_values(x1)
        y2 = sm.predict_values(x2)
        y3 = sm.predict_values(x3)
        # estimated variance
        s2_1 = sm.predict_variances(x1)
        s2_2 = sm.predict_variances(x2)
        s2_3 = sm.predict_variances(x3)
        # One subplot per categorical level, with a 3-sigma confidence band
        fig, axs = plt.subplots(3, figsize=(8, 6))
        axs[0].plot(xt1[:, 1].astype(np.float64), yt1, "o", linestyle="None")
        axs[0].plot(x_cont, y1, color="Blue")
        axs[0].fill_between(
            np.ravel(x_cont),
            np.ravel(y1 - 3 * np.sqrt(s2_1)),
            np.ravel(y1 + 3 * np.sqrt(s2_1)),
            color="lightgrey",
        )
        axs[0].set_xlabel("x")
        axs[0].set_ylabel("y")
        axs[0].legend(
            ["Training data", "Prediction", "Confidence Interval 99%"],
            loc="upper left",
            bbox_to_anchor=[0, 1],
        )
        axs[1].plot(
            xt2[:, 1].astype(np.float64), yt2, marker="o", color="r", linestyle="None"
        )
        axs[1].plot(x_cont, y2, color="Red")
        axs[1].fill_between(
            np.ravel(x_cont),
            np.ravel(y2 - 3 * np.sqrt(s2_2)),
            np.ravel(y2 + 3 * np.sqrt(s2_2)),
            color="lightgrey",
        )
        axs[1].set_xlabel("x")
        axs[1].set_ylabel("y")
        axs[1].legend(
            ["Training data", "Prediction", "Confidence Interval 99%"],
            loc="upper left",
            bbox_to_anchor=[0, 1],
        )
        axs[2].plot(
            xt3[:, 1].astype(np.float64), yt3, marker="o", color="r", linestyle="None"
        )
        axs[2].plot(x_cont, y3, color="Green")
        axs[2].fill_between(
            np.ravel(x_cont),
            np.ravel(y3 - 3 * np.sqrt(s2_3)),
            np.ravel(y3 + 3 * np.sqrt(s2_3)),
            color="lightgrey",
        )
        axs[2].set_xlabel("x")
        axs[2].set_ylabel("y")
        axs[2].legend(
            ["Training data", "Prediction", "Confidence Interval 99%"],
            loc="upper left",
            bbox_to_anchor=[0, 1],
        )
        plt.tight_layout()
        plt.show()
def run_mixed_homo_gaussian_example(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models import KRG, MixIntKernelType
from smt.applications.mixed_integer import MixedIntegerKrigingModel
from smt.utils.design_space import (
DesignSpace,
CategoricalVariable,
FloatVariable,
)
xt1 = np.array([[0, 0.0], [0, 2.0], [0, 4.0]])
xt2 = np.array([[1, 0.0], [1, 2.0], [1, 3.0]])
xt3 = np.array([[2, 1.0], [2, 2.0], [2, 4.0]])
xt = np.concatenate((xt1, xt2, xt3), axis=0)
xt[:, 1] = xt[:, 1].astype(np.float64)
yt1 = np.array([0.0, 9.0, 16.0])
yt2 = np.array([0.0, -4, -13.0])
yt3 = np.array([-10, 3, 11.0])
yt = np.concatenate((yt1, yt2, yt3), axis=0)
design_space = DesignSpace(
[
CategoricalVariable(["Blue", "Red", "Green"]),
FloatVariable(0, 4),
]
)
# Surrogate
sm = MixedIntegerKrigingModel(
surrogate=KRG(
design_space=design_space,
theta0=[1e-1],
corr="squar_exp",
n_start=20,
categorical_kernel=MixIntKernelType.EXP_HOMO_HSPHERE,
),
)
sm.set_training_values(xt, yt)
sm.train()
# DOE for validation
n = 100
x_cat1 = []
x_cat2 = []
x_cat3 = []
for i in range(n):
x_cat1.append(0)
x_cat2.append(1)
x_cat3.append(2)
x_cont = np.linspace(0.0, 4.0, n)
x1 = np.concatenate(
(np.asarray(x_cat1).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1
)
x2 = np.concatenate(
(np.asarray(x_cat2).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1
)
x3 = np.concatenate(
(np.asarray(x_cat3).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1
)
y1 = sm.predict_values(x1)
y2 = sm.predict_values(x2)
y3 = sm.predict_values(x3)
# estimated variance
s2_1 = sm.predict_variances(x1)
s2_2 = sm.predict_variances(x2)
s2_3 = sm.predict_variances(x3)
fig, axs = plt.subplots(3, figsize=(8, 6))
axs[0].plot(xt1[:, 1].astype(np.float64), yt1, "o", linestyle="None")
axs[0].plot(x_cont, y1, color="Blue")
axs[0].fill_between(
np.ravel(x_cont),
np.ravel(y1 - 3 * np.sqrt(s2_1)),
np.ravel(y1 + 3 * np.sqrt(s2_1)),
color="lightgrey",
)
axs[0].set_xlabel("x")
axs[0].set_ylabel("y")
axs[0].legend(
["Training data", "Prediction", "Confidence Interval 99%"],
loc="upper left",
bbox_to_anchor=[0, 1],
)
axs[1].plot(
xt2[:, 1].astype(np.float64), yt2, marker="o", color="r", linestyle="None"
)
axs[1].plot(x_cont, y2, color="Red")
axs[1].fill_between(
np.ravel(x_cont),
np.ravel(y2 - 3 * np.sqrt(s2_2)),
np.ravel(y2 + 3 * np.sqrt(s2_2)),
color="lightgrey",
)
axs[1].set_xlabel("x")
axs[1].set_ylabel("y")
axs[1].legend(
["Training data", "Prediction", "Confidence Interval 99%"],
loc="upper left",
bbox_to_anchor=[0, 1],
)
axs[2].plot(
xt3[:, 1].astype(np.float64), yt3, marker="o", color="r", linestyle="None"
)
axs[2].plot(x_cont, y3, color="Green")
axs[2].fill_between(
np.ravel(x_cont),
np.ravel(y3 - 3 * np.sqrt(s2_3)),
np.ravel(y3 + 3 * np.sqrt(s2_3)),
color="lightgrey",
)
axs[2].set_xlabel("x")
axs[2].set_ylabel("y")
axs[2].legend(
["Training data", "Prediction", "Confidence Interval 99%"],
loc="upper left",
bbox_to_anchor=[0, 1],
)
plt.tight_layout()
plt.show()
def run_mixed_homo_hyp_example(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.surrogate_models import KRG, MixIntKernelType
from smt.applications.mixed_integer import MixedIntegerKrigingModel
from smt.utils.design_space import (
DesignSpace,
CategoricalVariable,
FloatVariable,
)
xt1 = np.array([[0, 0.0], [0, 2.0], [0, 4.0]])
xt2 = np.array([[1, 0.0], [1, 2.0], [1, 3.0]])
xt3 = np.array([[2, 1.0], [2, 2.0], [2, 4.0]])
xt = np.concatenate((xt1, xt2, xt3), axis=0)
xt[:, 1] = xt[:, 1].astype(np.float64)
yt1 = np.array([0.0, 9.0, 16.0])
yt2 = np.array([0.0, -4, -13.0])
yt3 = np.array([-10, 3, 11.0])
yt = np.concatenate((yt1, yt2, yt3), axis=0)
design_space = DesignSpace(
[
CategoricalVariable(["Blue", "Red", "Green"]),
FloatVariable(0, 4),
]
)
# Surrogate
sm = MixedIntegerKrigingModel(
surrogate=KRG(
design_space=design_space,
categorical_kernel=MixIntKernelType.HOMO_HSPHERE,
theta0=[1e-1],
corr="squar_exp",
n_start=20,
),
)
sm.set_training_values(xt, yt)
sm.train()
# DOE for validation
n = 100
x_cat1 = []
x_cat2 = []
x_cat3 = []
for i in range(n):
x_cat1.append(0)
x_cat2.append(1)
x_cat3.append(2)
x_cont = np.linspace(0.0, 4.0, n)
x1 = np.concatenate(
(np.asarray(x_cat1).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1
)
x2 = np.concatenate(
(np.asarray(x_cat2).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1
)
x3 = np.concatenate(
(np.asarray(x_cat3).reshape(-1, 1), x_cont.reshape(-1, 1)), axis=1
)
y1 = sm.predict_values(x1)
y2 = sm.predict_values(x2)
y3 = sm.predict_values(x3)
# estimated variance
s2_1 = sm.predict_variances(x1)
s2_2 = sm.predict_variances(x2)
s2_3 = sm.predict_variances(x3)
fig, axs = plt.subplots(3, figsize=(8, 6))
axs[0].plot(xt1[:, 1].astype(np.float64), yt1, "o", linestyle="None")
axs[0].plot(x_cont, y1, color="Blue")
axs[0].fill_between(
np.ravel(x_cont),
np.ravel(y1 - 3 * np.sqrt(s2_1)),
np.ravel(y1 + 3 * np.sqrt(s2_1)),
color="lightgrey",
)
axs[0].set_xlabel("x")
axs[0].set_ylabel("y")
axs[0].legend(
["Training data", "Prediction", "Confidence Interval 99%"],
loc="upper left",
bbox_to_anchor=[0, 1],
)
axs[1].plot(
xt2[:, 1].astype(np.float64), yt2, marker="o", color="r", linestyle="None"
)
axs[1].plot(x_cont, y2, color="Red")
axs[1].fill_between(
np.ravel(x_cont),
np.ravel(y2 - 3 * np.sqrt(s2_2)),
np.ravel(y2 + 3 * np.sqrt(s2_2)),
color="lightgrey",
)
axs[1].set_xlabel("x")
axs[1].set_ylabel("y")
axs[1].legend(
["Training data", "Prediction", "Confidence Interval 99%"],
loc="upper left",
bbox_to_anchor=[0, 1],
)
axs[2].plot(
xt3[:, 1].astype(np.float64), yt3, marker="o", color="r", linestyle="None"
)
axs[2].plot(x_cont, y3, color="Green")
axs[2].fill_between(
np.ravel(x_cont),
np.ravel(y3 - 3 * np.sqrt(s2_3)),
np.ravel(y3 + 3 * np.sqrt(s2_3)),
color="lightgrey",
)
axs[2].set_xlabel("x")
axs[2].set_ylabel("y")
axs[2].legend(
["Training data", "Prediction", "Confidence Interval 99%"],
loc="upper left",
bbox_to_anchor=[0, 1],
)
plt.tight_layout()
plt.show()
# Manual entry point: run the mixed-integer context example, then hand over
# to unittest discovery for the rest of the module's tests.
if __name__ == "__main__":
    TestMixedInteger().run_mixed_integer_context_example()
    unittest.main()
| 55,817 | 31.584939 | 116 | py |
smt | smt-master/smt/applications/tests/test_moe.py | """
Author: Remi Lafage <remi.lafage@onera.fr>
This package is distributed under New BSD license.
"""
import matplotlib
matplotlib.use("Agg")
import unittest
import numpy as np
from sys import argv
from smt.applications import MOE, MOESurrogateModel
from smt.utils.sm_test_case import SMTestCase
from smt.problems import Branin, LpNorm
from smt.sampling_methods import FullFactorial, LHS
from smt.utils.misc import compute_rms_error
from smt.surrogate_models import RMTB, RMTC
class TestMOE(SMTestCase):
    """
    Tests for the MOE (mixture of experts) application.
    """
    # Set truthy (via the --plot command-line flag in __main__) to display
    # matplotlib figures in addition to running the assertions.
    plot = None
@staticmethod
def function_test_1d(x):
x = np.reshape(x, (-1,))
y = np.zeros(x.shape)
y[x < 0.4] = x[x < 0.4] ** 2
y[(x >= 0.4) & (x < 0.8)] = 3 * x[(x >= 0.4) & (x < 0.8)] + 1
y[x >= 0.8] = np.sin(10 * x[x >= 0.8])
return y.reshape((-1, 1))
    # @unittest.skip('disabled')
    def test_1d_50(self):
        """Smooth 3-cluster MOE on the piecewise 1-D function, 50 samples.

        Also checks that predict_variances raises when the MOE was built
        without variance support.
        """
        self.ndim = 1
        self.nt = 50
        self.ne = 50
        # Training data: 50 uniform random points in [0, 1).
        np.random.seed(0)
        xt = np.random.sample(self.nt).reshape((-1, 1))
        yt = self.function_test_1d(xt)
        # Smooth recombination with heaviside-factor optimization.
        moe = MOE(
            smooth_recombination=True,
            heaviside_optimization=True,
            n_clusters=3,
            xt=xt,
            yt=yt,
        )
        moe.train()
        # validation data (different seed, disjoint from training)
        np.random.seed(1)
        xe = np.random.sample(self.ne)
        ye = self.function_test_1d(xe)
        rms_error = compute_rms_error(moe, xe, ye)
        self.assert_error(rms_error, 0.0, 3e-1)
        # Without variances_support=True, variance prediction must raise.
        self.assertRaises(RuntimeError, lambda: moe.predict_variances(xe))
        if TestMOE.plot:
            import matplotlib.pyplot as plt
            from mpl_toolkits.mplot3d import Axes3D  # presumably kept for its 3d-projection side effect — TODO confirm
            y = moe.predict_values(xe)
            plt.figure(1)
            plt.plot(ye, ye, "-.")
            plt.plot(ye, y, ".")
            plt.xlabel(r"$y$ actual")
            plt.ylabel(r"$y$ prediction")
            plt.figure(2)
            xv = np.linspace(0, 1, 100)
            yv = self.function_test_1d(xv)
            plt.plot(xv, yv, "-.")
            plt.plot(xe, y, "o")
            plt.show()
# @unittest.skip('disabled')
def test_1d_50_var(self):
self.ndim = 1
self.nt = 50
self.ne = 50
np.random.seed(0)
xt = np.random.sample(self.nt).reshape((-1, 1))
yt = self.function_test_1d(xt)
moe = MOE(
smooth_recombination=True,
heaviside_optimization=True,
n_clusters=3,
xt=xt,
yt=yt,
variances_support=True,
)
moe.train()
# validation data
np.random.seed(1)
xe = np.random.sample(self.ne)
ye = self.function_test_1d(xe)
rms_error = compute_rms_error(moe, xe, ye)
self.assert_error(rms_error, 0.0, 3e-1)
moe.predict_variances(xe)
moe_hard = MOE(
smooth_recombination=False,
heaviside_optimization=True,
n_clusters=3,
xt=xt,
yt=yt,
variances_support=True,
)
moe_hard.train()
moe_hard.predict_variances(xe)
if TestMOE.plot:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
y = moe.predict_values(xe)
plt.figure(1)
plt.plot(ye, ye, "-.")
plt.plot(ye, y, ".")
plt.xlabel(r"$y$ actual")
plt.ylabel(r"$y$ prediction")
plt.figure(2)
xv = np.linspace(0, 1, 100)
yv = self.function_test_1d(xv)
y = moe.predict_values(xv)
y_std = np.sqrt(moe.predict_variances(xv))
plt.plot(xv, yv, "--k", linewidth=1)
plt.plot(xv, y, "-b", linewidth=1)
plt.plot(xv, y + y_std, "--b", linewidth=1)
plt.plot(xv, y - y_std, "--b", linewidth=1)
plt.show()
# @unittest.skip('disabled')
def test_1d_50_surrogate_model(self):
self.ndim = 1
self.nt = 50
self.ne = 50
np.random.seed(0)
xt = np.random.sample(self.nt).reshape((-1, 1))
yt = self.function_test_1d(xt)
moe = MOESurrogateModel(
smooth_recombination=True,
heaviside_optimization=True,
n_clusters=3,
xt=xt,
yt=yt,
)
self.assertIsInstance(moe.moe, MOE)
moe.train()
self.assertFalse(moe.supports["variances"])
# validation data
np.random.seed(1)
xe = np.random.sample(self.ne)
ye = self.function_test_1d(xe)
rms_error = compute_rms_error(moe, xe, ye)
self.assert_error(rms_error, 0.0, 3e-1)
self.assertRaises(RuntimeError, lambda: moe.predict_variances(xe))
moe_var = MOESurrogateModel(
smooth_recombination=True,
heaviside_optimization=True,
n_clusters=3,
xt=xt,
yt=yt,
variances_support=True,
)
moe_var.train()
self.assertTrue(moe_var.supports["variances"])
moe_var.predict_variances(xe)
if TestMOE.plot:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
y = moe.predict_values(xe)
plt.figure(1)
plt.plot(ye, ye, "-.")
plt.plot(ye, y, ".")
plt.xlabel(r"$y$ actual")
plt.ylabel(r"$y$ prediction")
plt.figure(2)
xv = np.linspace(0, 1, 100)
yv = self.function_test_1d(xv)
plt.plot(xv, yv, "-.")
plt.plot(xe, y, "o")
plt.show()
# @unittest.skip('disabled')
def test_norm1_2d_200(self):
self.ndim = 2
self.nt = 200
self.ne = 200
prob = LpNorm(ndim=self.ndim)
# training data
sampling = FullFactorial(xlimits=prob.xlimits, clip=True)
np.random.seed(0)
xt = sampling(self.nt)
yt = prob(xt)
# mixture of experts
moe = MOE(smooth_recombination=False, n_clusters=5)
moe.set_training_values(xt, yt)
moe.train()
# validation data
np.random.seed(1)
xe = sampling(self.ne)
ye = prob(xe)
rms_error = compute_rms_error(moe, xe, ye)
self.assert_error(rms_error, 0.0, 1e-1)
if TestMOE.plot:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
y = moe.predict_values(xe)
plt.figure(1)
plt.plot(ye, ye, "-.")
plt.plot(ye, y, ".")
plt.xlabel(r"$y$ actual")
plt.ylabel(r"$y$ prediction")
fig = plt.figure(2)
ax = fig.add_subplot(111, projection="3d")
ax.scatter(xt[:, 0], xt[:, 1], yt)
plt.title("L1 Norm")
plt.show()
# @unittest.skip('disabled for now as it blocks unexpectedly on travis linux')
def test_branin_2d_200(self):
self.ndim = 2
self.nt = 200
self.ne = 200
prob = Branin(ndim=self.ndim)
# training data
sampling = FullFactorial(xlimits=prob.xlimits, clip=True)
np.random.seed(0)
xt = sampling(self.nt)
yt = prob(xt)
# mixture of experts
moe = MOE(n_clusters=5)
moe.set_training_values(xt, yt)
moe.options["heaviside_optimization"] = True
moe.train()
# validation data
np.random.seed(1)
xe = sampling(self.ne)
ye = prob(xe)
rms_error = compute_rms_error(moe, xe, ye)
self.assert_error(rms_error, 0.0, 1e-1)
if TestMOE.plot:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
y = moe.analyse_results(x=xe, operation="predict_values")
plt.figure(1)
plt.plot(ye, ye, "-.")
plt.plot(ye, y, ".")
plt.xlabel(r"$y$ actual")
plt.ylabel(r"$y$ prediction")
fig = plt.figure(2)
ax = fig.add_subplot(111, projection="3d")
ax.scatter(xt[:, 0], xt[:, 1], yt)
plt.title("Branin function")
plt.show()
    def test_enabled_expert_types(self):
        """Check expert filtering by support flags and allow/deny lists."""
        # Requiring variance support keeps only the kriging-family experts.
        moe = MOE(variances_support=True)
        expected = ["KPLS", "KPLSK", "KRG"]
        self.assertEqual(expected, sorted(moe.enabled_experts))
        # Derivative support is broader.
        moe = MOE(derivatives_support=True)
        expected = ["IDW", "KPLS", "KPLSK", "KRG", "LS", "QP", "RBF", "RMTB", "RMTC"]
        self.assertEqual(expected, sorted(moe.enabled_experts))
        # Deny removes the listed experts from the full catalog.
        moe = MOE(deny=["KRG", "RMTB"])
        expected = ["IDW", "KPLS", "KPLSK", "LS", "QP", "RBF", "RMTC"]
        self.assertEqual(expected, sorted(moe.enabled_experts))
        # Allow restricts to exactly the listed experts.
        moe = MOE(allow=["KRG", "RMTB"])
        expected = ["KRG", "RMTB"]
        self.assertEqual(expected, sorted(moe.enabled_experts))
        # Support flags and allow/deny lists combine (intersection).
        moe = MOE(variances_support=True, allow=["KRG", "RMTB"])
        expected = ["KRG"]
        self.assertEqual(expected, sorted(moe.enabled_experts))
        moe = MOE(derivatives_support=True, deny=["RBF", "IDW", "KPLSK"])
        expected = ["KPLS", "KRG", "LS", "QP", "RMTB", "RMTC"]
        self.assertEqual(expected, sorted(moe.enabled_experts))
    def test_fix_moe_rmts_bug(self):
        """Regression test: MOE must train cleanly with only RMTS experts.

        Restricts the expert catalog to RMTB/RMTC (via the private
        _surrogate_type mapping) and checks that train() does not raise.
        """
        def myfunc(x):
            # Oscillatory 1-D function with a sharp feature near x = 0.85.
            return -0.5 * (
                np.sin(40 * (x - 0.85) ** 4) * np.cos(2.5 * (x - 0.95))
                + 0.5 * (x - 0.9)
                + 1
            )
        nt1 = 11
        nt2 = 15
        ne = 101  # NOTE(review): unused in this test
        # Training data: two clusters of points with a gap around x = 0.35-0.4.
        X1 = np.linspace(0.001, 0.3, nt1).reshape(nt1, 1)
        X1 = np.concatenate((X1, np.array([[0.35]])), axis=0)
        X2 = np.linspace(0.4, 1.0, nt2).reshape(nt2, 1)
        xt = np.concatenate((X1, X2), axis=0)
        yt = myfunc(xt)
        moe = MOE(smooth_recombination=True, n_clusters=2, heaviside_optimization=True)
        # Force the expert catalog to the two RMTS variants only.
        moe._surrogate_type = {"RMTB": RMTB, "RMTC": RMTC}
        moe.set_training_values(xt, yt)
        moe.train()
    def test_fix_test_data_bug(self):
        """Regression test: MOE must interpolate every training point."""
        # Historical bug: MOE did not interpolate the first training point.
        def myfunc(x):
            return ((x * 6 - 2) ** 2) * np.sin((x * 6 - 2) * 2)
        # limits of the design space
        xlimits = np.array([[0.0, 1.0]])
        # LHS DOE with ndoe points
        ndoe = 6
        # Construction of the DOE
        sampling = LHS(xlimits=xlimits, criterion="m", random_state=0)
        x1D = sampling(ndoe)
        x1D = np.sort(x1D, axis=0)
        # Compute the output
        y1D = myfunc(x1D)
        # test data
        num = 50
        xv1D = sampling(num)
        xv1D = np.sort(xv1D, axis=0)
        yv1D = myfunc(xv1D)
        # Single cluster with a kriging expert (interpolating surrogate).
        moe1D = MOE(n_clusters=1, xtest=xv1D, ytest=yv1D, allow=["KRG"])
        moe1D.set_training_values(x1D, y1D)
        moe1D.train()
        # Check that moe1D is interpolating all training values
        ypred = moe1D.predict_values(x1D)
        self.assertTrue(np.allclose(y1D, ypred))
    def test_bad_allow_value(self):
        """An unknown expert name in `allow` must make train() fail loudly."""
        nt = 35
        sampling = FullFactorial(xlimits=np.array([[0, 1]]), clip=True)
        np.random.seed(0)
        xt = sampling(nt)
        yt = self.function_test_1d(xt)
        # "TOTO" matches no known expert, so the enabled-expert list is empty.
        moe = MOE(n_clusters=1, allow=["TOTO"])
        moe.set_training_values(xt, yt)
        with self.assertRaises(ValueError) as context:
            moe.train()
        # The error message enumerates the experts that could have been used.
        self.assertEqual(
            "List of experts is empty: check support, allow and deny options wrt "
            "possible experts: ['KRG', 'KPLS', 'KPLSK', 'LS', 'QP', 'RBF', 'IDW', 'RMTB', 'RMTC']",
            str(context.exception),
        )
@staticmethod
def run_moe_example_1d():
import numpy as np
from smt.applications import MOE
from smt.sampling_methods import FullFactorial
import matplotlib.pyplot as plt
nt = 35
def function_test_1d(x):
import numpy as np # Note: only required by SMT doc testing toolchain
x = np.reshape(x, (-1,))
y = np.zeros(x.shape)
y[x < 0.4] = x[x < 0.4] ** 2
y[(x >= 0.4) & (x < 0.8)] = 3 * x[(x >= 0.4) & (x < 0.8)] + 1
y[x >= 0.8] = np.sin(10 * x[x >= 0.8])
return y.reshape((-1, 1))
x = np.linspace(0, 1, 100)
ytrue = function_test_1d(x)
# Training data
sampling = FullFactorial(xlimits=np.array([[0, 1]]), clip=True)
np.random.seed(0)
xt = sampling(nt)
yt = function_test_1d(xt)
# Mixture of experts
print("MOE Experts: ", MOE.AVAILABLE_EXPERTS)
# MOE1: Find the best surrogate model on the whole domain
moe1 = MOE(n_clusters=1)
print("MOE1 enabled experts: ", moe1.enabled_experts)
moe1.set_training_values(xt, yt)
moe1.train()
y_moe1 = moe1.predict_values(x)
# MOE2: Set nb of cluster with just KRG, LS and IDW surrogate models
moe2 = MOE(smooth_recombination=False, n_clusters=3, allow=["KRG", "LS", "IDW"])
print("MOE2 enabled experts: ", moe2.enabled_experts)
moe2.set_training_values(xt, yt)
moe2.train()
y_moe2 = moe2.predict_values(x)
fig, axs = plt.subplots(1)
axs.plot(x, ytrue, ".", color="black")
axs.plot(x, y_moe1)
axs.plot(x, y_moe2)
axs.set_xlabel("x")
axs.set_ylabel("y")
axs.legend(["Training data", "MOE 1 Prediction", "MOE 2 Prediction"])
plt.show()
@staticmethod
def run_moe_example_2d():
import numpy as np
from smt.applications import MOE
from smt.problems import LpNorm
from smt.sampling_methods import FullFactorial
import sklearn
import matplotlib.pyplot as plt
from matplotlib import colors
from mpl_toolkits.mplot3d import Axes3D
ndim = 2
nt = 200
ne = 200
# Problem: L1 norm (dimension 2)
prob = LpNorm(ndim=ndim)
# Training data
sampling = FullFactorial(xlimits=prob.xlimits, clip=True)
np.random.seed(0)
xt = sampling(nt)
yt = prob(xt)
# Mixture of experts
print("MOE Experts: ", MOE.AVAILABLE_EXPERTS)
moe = MOE(smooth_recombination=True, n_clusters=5, deny=["RMTB", "KPLSK"])
print("Enabled Experts: ", moe.enabled_experts)
moe.set_training_values(xt, yt)
moe.train()
# Validation data
np.random.seed(1)
xe = sampling(ne)
ye = prob(xe)
# Prediction
y = moe.predict_values(xe)
fig = plt.figure(1)
fig.set_size_inches(12, 11)
# Cluster display
colors_ = list(colors.cnames.items())
GMM = moe.cluster
weight = GMM.weights_
mean = GMM.means_
if sklearn.__version__ < "0.20.0":
cov = GMM.covars_
else:
cov = GMM.covariances_
prob_ = moe._proba_cluster(xt)
sort = np.apply_along_axis(np.argmax, 1, prob_)
xlim = prob.xlimits
x0 = np.linspace(xlim[0, 0], xlim[0, 1], 20)
x1 = np.linspace(xlim[1, 0], xlim[1, 1], 20)
xv, yv = np.meshgrid(x0, x1)
x = np.array(list(zip(xv.reshape((-1,)), yv.reshape((-1,)))))
prob = moe._proba_cluster(x)
plt.subplot(221, projection="3d")
ax = plt.gca()
for i in range(len(sort)):
color = colors_[int(((len(colors_) - 1) / sort.max()) * sort[i])][0]
ax.scatter(xt[i][0], xt[i][1], yt[i], c=color)
plt.title("Clustered Samples")
plt.subplot(222, projection="3d")
ax = plt.gca()
for i in range(len(weight)):
color = colors_[int(((len(colors_) - 1) / len(weight)) * i)][0]
ax.plot_trisurf(
x[:, 0], x[:, 1], prob[:, i], alpha=0.4, linewidth=0, color=color
)
plt.title("Membership Probabilities")
plt.subplot(223)
for i in range(len(weight)):
color = colors_[int(((len(colors_) - 1) / len(weight)) * i)][0]
plt.tricontour(x[:, 0], x[:, 1], prob[:, i], 1, colors=color, linewidths=3)
plt.title("Cluster Map")
plt.subplot(224)
plt.plot(ye, ye, "-.")
plt.plot(ye, y, ".")
plt.xlabel("actual")
plt.ylabel("prediction")
plt.title("Predicted vs Actual")
plt.show()
# Manual entry point.
#   --plot     display matplotlib figures while the tests run
#   --example  run the 1-D MOE example instead of the test suite
if __name__ == "__main__":
    if "--plot" in argv:
        TestMOE.plot = True
        argv.remove("--plot")
    if "--example" in argv:
        TestMOE.run_moe_example_1d()
        exit()
    unittest.main()
| 16,658 | 29.344262 | 99 | py |
smt | smt-master/doc/preprocess_test.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import os
import sys
import inspect
import importlib
import contextlib

# Python 2 fallback: StringIO moved to the io module in Python 3.  The
# original bare "except:" also swallowed SystemExit/KeyboardInterrupt;
# only an ImportError should trigger the fallback.
try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO

import matplotlib

# Select the non-interactive Agg backend before pyplot is imported so the
# doc build can run without a display.
matplotlib.use("Agg")
import matplotlib.pyplot as plt
@contextlib.contextmanager
def stdoutIO(stdout=None):
    """Temporarily redirect sys.stdout to *stdout* (a fresh StringIO by default).

    Yields the capture buffer.  The previous stdout is restored even when
    the body of the ``with`` block raises — the original code skipped the
    restore on exceptions, permanently hijacking sys.stdout.
    """
    old = sys.stdout
    if stdout is None:
        stdout = StringIO()
    sys.stdout = stdout
    try:
        yield stdout
    finally:
        # Always restore the real stdout, exception or not.
        sys.stdout = old
def process_test(root, file_name, iline, line):
    """Expand one ``.. embed-test[-print][-plot]`` directive into RST lines.

    The directive has the form
    ``.. embed-test :: path/to/file.py , ClassName , method_name`` and is
    replaced by a code-block holding the method body, optionally followed
    by the captured stdout (``-print``) and/or a saved matplotlib figure
    (``-plot``).  Returns *line* unchanged when the directive is not purely
    indented, otherwise a list of replacement lines.
    """
    file_path = root + "/" + file_name
    embed_num_indent = line.find(".. embed-test")
    # Only expand directives preceded exclusively by spaces.
    if line[:embed_num_indent] != " " * embed_num_indent:
        return line
    # NOTE(review): the 2nd and 3rd alternatives below are identical; one of
    # them was probably meant to be "embed-test-plot-print" — confirm.
    include_print_output = (
        "embed-test-print" in line
        or "embed-test-print-plot" in line
        or "embed-test-print-plot" in line
    )
    include_plot_output = (
        "embed-test-plot" in line
        or "embed-test-print-plot" in line
        or "embed-test-print-plot" in line
    )
    # Directive arguments: "<directive>::<py_path>,<class>,<method>\n".
    split_line = line.replace(" ", "").split(",")
    if len(split_line) != 3 or len(split_line[0].split("::")) != 2:
        raise Exception(
            "Invalid format for embed-test in file {} line {}".format(
                file_path, iline + 1
            )
        )
    py_file_path = split_line[0].split("::")[1]
    class_name = split_line[1]
    method_name = split_line[2][:-1]  # drop the trailing newline
    # Split the python path into its directory part and file name.
    index = len(py_file_path.split("/")[-1])
    py_root = py_file_path[:-index]
    py_file_name = py_file_path[-index:]
    # Make the module importable relative to this script's own directory.
    sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/" + py_root)
    py_module = importlib.import_module(py_file_name[:-3])
    obj = getattr(py_module, class_name)
    method = getattr(obj, method_name)
    method_lines = inspect.getsource(method).split("\n")
    # Skip everything up to and including the "def <method_name>" line.
    for imethod_line, method_line in enumerate(method_lines):
        if "def" in method_line and method_name in method_line:
            imethod_line += 1
            break
    method_lines = method_lines[imethod_line:]
    # Dedent the body by the indentation of its first line.
    first_line = method_lines[0]
    py_num_indent = first_line.find(first_line.strip())
    for imethod_line, method_line in enumerate(method_lines):
        method_lines[imethod_line] = method_line[py_num_indent:]
    # Emit the method body as an indented RST code-block.
    replacement_lines = []
    replacement_lines.append(" " * embed_num_indent + ".. code-block:: python\n")
    replacement_lines.append("\n")
    replacement_lines.extend(
        [
            " " * embed_num_indent + " " * 2 + method_line + "\n"
            for method_line in method_lines
        ]
    )
    if include_print_output:
        # Execute the body and capture anything it prints.
        joined_method_lines = "\n".join(method_lines)
        with stdoutIO() as s:
            exec(joined_method_lines)
        output_lines = s.getvalue().split("\n")
        if len(output_lines) > 1:
            replacement_lines.append(" " * embed_num_indent + "::\n")
            replacement_lines.append("\n")
            replacement_lines.extend(
                [
                    " " * embed_num_indent + " " * 2 + output_line + "\n"
                    for output_line in output_lines
                ]
            )
    if include_plot_output:
        # Re-run the body and save the figure it draws next to the template.
        joined_method_lines = "\n".join(method_lines)
        plt.clf()
        with stdoutIO() as s:
            exec(joined_method_lines)
        abs_plot_name = file_path[:-5] + ".png"
        plt.savefig(abs_plot_name)
        rel_plot_name = file_name[:-5] + ".png"
        replacement_lines.append(
            " " * embed_num_indent + ".. figure:: {}\n".format(rel_plot_name)
        )
        replacement_lines.append(" " * embed_num_indent + " " * 2 + ":scale: 80 %\n")
        replacement_lines.append(" " * embed_num_indent + " " * 2 + ":align: center\n")
    return replacement_lines
| 3,771 | 27.793893 | 81 | py |
smt | smt-master/doc/preprocess_options.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
def process_options(root, file_name, iline, line):
    """Expand one ``.. embed-options-table-<kind> :: <ClassName>`` directive.

    Imports ``ClassName`` from ``smt.<kind>`` (surrogate_models, problems or
    sampling_methods), instantiates it, and renders its declared options as
    an RST list-table (option, default, acceptable values, acceptable types,
    description).  Returns *line* unchanged when the directive is not purely
    indented, otherwise a list of replacement lines.
    """
    import importlib

    file_path = root + "/" + file_name
    embed_num_indent = line.find(".. embed-options-table")
    # The directive suffix selects the smt subpackage to import from.
    if "embed-options-table-surrogate_models" in line:
        type_ = "surrogate_models"
    elif "embed-options-table-problems" in line:
        type_ = "problems"
    elif "embed-options-table-sampling_methods" in line:
        type_ = "sampling_methods"
    else:
        raise Exception("embed-options-table is an invalid name")
    # Only expand directives preceded exclusively by spaces.
    if line[:embed_num_indent] != " " * embed_num_indent:
        return line
    split_line = line.replace(" ", "").split("::")
    if len(split_line) != 2:
        raise Exception(
            "Invalid format for embed-options-table in file {} line {}".format(
                file_path, iline + 1
            )
        )
    class_name = split_line[1]
    # Resolve the class with importlib/getattr instead of building exec()
    # statements out of the directive text.
    sm_class = getattr(importlib.import_module("smt." + type_), class_name)
    sm = sm_class()
    options = sm.options
    # One row per declared option: [name, default, values, types, desc].
    outputs = []
    for option_name, option_data in options._declared_entries.items():
        name = option_name
        default = option_data["default"]
        values = option_data["values"]
        types = option_data["types"]
        desc = option_data["desc"]
        if types is not None:
            # Normalize to a list of type names; the loop variable is named
            # `klass` so it no longer shadows `type_` above.
            if not isinstance(types, (tuple, list)):
                types = (types,)
            types = [klass.__name__ for klass in types]
        if values is not None:
            # Normalize to a plain list of values.
            if not isinstance(values, (tuple, list)):
                values = (values,)
            values = list(values)
        outputs.append([name, default, values, types, desc])
    replacement_lines = []
    replacement_lines.append(
        " " * embed_num_indent + ".. list-table:: List of options\n"
    )
    replacement_lines.append(" " * embed_num_indent + " " * 2 + ":header-rows: 1\n")
    replacement_lines.append(
        " " * embed_num_indent + " " * 2 + ":widths: 15, 10, 20, 20, 30\n"
    )
    replacement_lines.append(" " * embed_num_indent + " " * 2 + ":stub-columns: 0\n")
    replacement_lines.append("\n")
    replacement_lines.append(" " * embed_num_indent + " " * 2 + "* - Option\n")
    replacement_lines.append(" " * embed_num_indent + " " * 2 + "  - Default\n")
    replacement_lines.append(
        " " * embed_num_indent + " " * 2 + "  - Acceptable values\n"
    )
    replacement_lines.append(
        " " * embed_num_indent + " " * 2 + "  - Acceptable types\n"
    )
    replacement_lines.append(" " * embed_num_indent + " " * 2 + "  - Description\n")
    for output in outputs:
        # The first column opens a row ("* - "); the rest continue it ("  - ").
        replacement_lines.append(
            " " * embed_num_indent + " " * 2 + "* - %s\n" % output[0]
        )
        for entry in output[1:]:
            replacement_lines.append(
                " " * embed_num_indent + " " * 2 + "  - %s\n" % entry
            )
    return replacement_lines
| 3,088 | 31.861702 | 86 | py |
smt | smt-master/doc/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SMT documentation build configuration file, created by
# sphinx-quickstart on Sun Aug 6 19:36:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from smt import __version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"matplotlib.sphinxext.plot_directive",
"numpydoc",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "SMT"
copyright = "2017, John Hwang"
author = "John Hwang, Mohamed Amine Bouhlel, Remi Lafage"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Take the full version when no need to distinguish version and release
version = __version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "bizstyle"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"rightsidebar": False,
"sidebarwidth": 250,
"body_min_width": 1100,
"body_max_width": 1100,
}
html_logo = "smt_logo.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "SMTdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"SMT.tex",
"SMT Documentation",
"John Hwang, Mohamed Amine Bouhlel, Remi Lafage",
"manual",
)
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "smt", "SMT Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"SMT",
"SMT Documentation",
author,
"SMT",
"One line description of project.",
"Miscellaneous",
)
]
| 5,197 | 28.873563 | 79 | py |
smt | smt-master/doc/preprocess.py | """
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
"""
import os
from preprocess_test import process_test
from preprocess_options import process_options
def get_file_paths(base_dir="."):
    """Recursively collect files under *base_dir* (default: current directory).

    Returns three lists of ``(root, file_name)`` tuples: every file found,
    the ``.rstx`` template files, and the ``.py`` source files.  The
    *base_dir* parameter generalizes the original hard-coded walk of "."
    while keeping the zero-argument call backward compatible.
    """
    file_paths_list = []
    file_paths_list_rstx = []
    file_paths_list_py = []
    for root, _dirs, files in os.walk(base_dir):
        for file_name in files:
            file_paths_list.append((root, file_name))
            # endswith() is clearer and safer than comparing fixed-width slices.
            if file_name.endswith(".rstx"):
                file_paths_list_rstx.append((root, file_name))
            if file_name.endswith(".py"):
                file_paths_list_py.append((root, file_name))
    return file_paths_list, file_paths_list_rstx, file_paths_list_py
for root, file_name in file_paths_list_rstx:
file_path = root + "/" + file_name
with open(file_path, "r") as f:
lines = f.readlines()
for iline, line in enumerate(lines):
if ".. embed-test" in line:
lines[iline] = process_test(root, file_name, iline, line)
elif ".. embed-options-table" in line:
lines[iline] = process_options(root, file_name, iline, line)
new_lines = []
for line in lines:
if isinstance(line, list):
new_lines.extend(line)
else:
new_lines.append(line)
new_file_path = file_path[:-5] + ".rst"
with open(new_file_path, "w") as f:
f.writelines(new_lines)
| 1,505 | 27.415094 | 76 | py |
smt | smt-master/doc/embed_directives/directive_embed_options.py | from sphinx_auto_embed.directive import Directive
class DirectiveEmbedOptions(Directive):
    """
    Directive for embedding a table from an OptionsDictionary instance.
    The 3 arguments are the module name, class name, and attribute name.
    """
    NAME = "embed-options-table"  # directive name used in the RST sources
    NUM_ARGS = 3  # module path, class name, attribute name
    def run(self, file_dir, file_name, embed_num_indent, args):
        """Return RST list-table lines for the options held by
        ``<module_path>.<class_name>().<attribute_name>``."""
        module_path, class_name, attribute_name = args
        # NOTE(review): exec() on directive arguments runs arbitrary code at
        # doc-build time; importlib.import_module/getattr would be safer.
        exec("from {} import {}".format(module_path, class_name), globals())
        exec("obj = {}()".format(class_name), globals())
        # `obj` was injected into globals() by the exec above.
        options = getattr(obj, attribute_name)
        # One row per declared option: [name, default, values, types, desc].
        outputs = []
        for option_name, option_data in options._declared_entries.items():
            name = option_name
            default = option_data["default"]
            values = option_data["values"]
            types = option_data["types"]
            desc = option_data["desc"]
            if types is not None:
                # Normalize to a list of type names.
                if not isinstance(types, (tuple, list)):
                    types = (types,)
                types = [type_.__name__ for type_ in types]
            if values is not None:
                # Normalize to a plain list of values.
                if not isinstance(values, (tuple, list)):
                    values = (values,)
                values = [value for value in values]
            outputs.append([name, default, values, types, desc])
        lines = []
        lines.append(" " * embed_num_indent + ".. list-table:: List of options\n")
        lines.append(" " * embed_num_indent + " " * 2 + ":header-rows: 1\n")
        lines.append(" " * embed_num_indent + " " * 2 + ":widths: 15, 10, 20, 20, 30\n")
        lines.append(" " * embed_num_indent + " " * 2 + ":stub-columns: 0\n")
        lines.append("\n")
        lines.append(" " * embed_num_indent + " " * 2 + "* - Option\n")
        lines.append(" " * embed_num_indent + " " * 2 + "  - Default\n")
        lines.append(" " * embed_num_indent + " " * 2 + "  - Acceptable values\n")
        lines.append(" " * embed_num_indent + " " * 2 + "  - Acceptable types\n")
        lines.append(" " * embed_num_indent + " " * 2 + "  - Description\n")
        for output in outputs:
            # The first column opens a row ("* - "); the rest continue it.
            for entry in [output[0]]:
                lines.append(" " * embed_num_indent + " " * 2 + "* - %s\n" % entry)
            for entry in output[1:]:
                lines.append(" " * embed_num_indent + " " * 2 + "  - %s\n" % entry)
        return lines
| 2,419 | 37.412698 | 88 | py |
pyzor | pyzor-master/setup.py | import sys
import setuptools
import distutils.core
import pyzor
# Long description shown on PyPI.
long_description = """
Pyzor is spam-blocking networked system that uses spam signatures
to identify them.
"""
# Trove classifiers describing the supported platforms, Python versions
# and project status.
classifiers = ["Operating System :: POSIX",
               "Operating System :: Microsoft :: Windows",
               "Environment :: Console",
               "Environment :: No Input/Output (Daemon)",
               "Programming Language :: Python",
               "Programming Language :: Python :: 2.6",
               "Programming Language :: Python :: 3",
               "Intended Audience :: System Administrators",
               "Topic :: Communications :: Email",
               "Topic :: Communications :: Email :: Filters",
               "Development Status :: 5 - Production/Stable",
               "License :: OSI Approved :: GNU General Public License v2 ("
               "GPLv2)",
               ]
# NOTE: distutils.core.setup is used directly, but "import setuptools" above
# is still required for its side effects (it enables keywords such as
# test_suite that plain distutils does not understand).
distutils.core.setup(
    name='pyzor',
    version=pyzor.__version__,  # single-sourced from the package itself
    description='networked spam-signature detection',
    long_description=long_description,
    author='Frank J. Tobin',
    author_email='ftobin@neverending.org',
    license='GPL',
    platforms='POSIX',
    keywords='spam',
    url='http://www.pyzor.org/',
    scripts=['scripts/pyzor', 'scripts/pyzord',
             'scripts/pyzor-migrate'],
    packages=['pyzor',
              'pyzor.engines',
              'pyzor.hacks'],
    classifiers=classifiers,
    test_suite="tests.suite",
)
| 1,532 | 28.480769 | 75 | py |
pyzor | pyzor-master/web/application.py | #! /usr/bin/env python
import os
import email
import socket
import logging
import smtplib
import datetime
import email.utils
import email.mime.base
import email.mime.text
import email.mime.multipart
try:
import configparser as ConfigParser
except ImportError:
import ConfigParser
import flask
from flask_wtf.form import Form
from flask.views import MethodView
from wtforms.fields.simple import TextField, SubmitField, TextAreaField
from flask_wtf.html5 import EmailField
from flask_wtf.file import FileField
from flask_wtf.recaptcha.fields import RecaptchaField
from wtforms.validators import required, length
try:
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
except ImportError:
pass
import pyzor
import pyzor.digest
import pyzor.client
# Plain-text body for whitelist-request notification mails.  The template is
# filled in two passes: the outer %-formatting applied immediately below bakes
# in the import-time date and the Pyzor version, while the escaped "%%(...)s"
# placeholders survive as "%(...)s" to be filled later with per-request data
# (see WhitelistMessage.build_notification).
MSG_TEMPLATE_TXT = """
Whitelist request:
- Date: %s
- Name: %%(name)s
- Email: %%(email)s
- Digest: %%(digest)s
- Request IP: %%(ip)s
===============
%%(comment)s
===============
Pyzor Version: %s
""" % (datetime.datetime.utcnow(), pyzor.__version__)
def load_configuration():
    """Load server-specific configuration settings.

    Returns a ConfigParser pre-populated with defaults for the ``captcha``,
    ``email`` and ``logging`` sections; values from /etc/pyzor/web.conf (if
    present) override the defaults.
    """
    defaults = {
        "captcha": {
            "ssl": "False",
            "public_key": "",
            "private_key": "",
        },
        "email": {
            "host": "localhost",
            "port": "25",
            "username": "",
            "password": "",
            "recipients": "",
            "sender": "no-reply@%s" % socket.gethostname(),
        },
        "logging": {
            "file": "/var/log/pyzor/web.log",
            "level": "INFO",
            "sentry": "",
            "sentry_level": "WARNING",
        }
    }
    conf = ConfigParser.ConfigParser()
    # Seed every section with its default values first ...
    for section_name in defaults:
        conf.add_section(section_name)
        for option_name, default_value in defaults[section_name].items():
            conf.set(section_name, option_name, default_value)
    # ... then let the on-disk configuration (if any) override them.
    if os.path.exists("/etc/pyzor/web.conf"):
        conf.read("/etc/pyzor/web.conf")
    return conf
def setup_logging():
    """Attach a file handler to the Flask app logger.

    The log file path and level come from the [logging] section of the
    module-level CONF object.
    """
    log = app.logger
    handler = logging.FileHandler(CONF.get("logging", "file"))
    handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s %(message)s'))
    log.setLevel(getattr(logging, CONF.get("logging", "level")))
    log.addHandler(handler)
# Module initialisation: load configuration, optionally wire up Sentry,
# create the Flask application and then attach logging.
CONF = load_configuration()
SENTRY_DSN = CONF.get("logging", "sentry")
if SENTRY_DSN:
    # Use the DSN from the configuration file.  Previously a hard-coded DSN
    # was passed here, silently ignoring the configured value.
    sentry_sdk.init(
        dsn=SENTRY_DSN,
        integrations=[FlaskIntegration()]
    )
app = flask.Flask(__name__)
app.config.update({
    "RECAPTCHA_USE_SSL": CONF.get("captcha", "ssl").lower() == "true",
    "RECAPTCHA_PUBLIC_KEY": CONF.get("captcha", "public_key"),
    "RECAPTCHA_PRIVATE_KEY": CONF.get("captcha", "private_key"),
})
# setup_logging() reads the module-global ``app``, so it must run after the
# Flask app object exists (calling it earlier raised NameError at import).
setup_logging()
class MessageForm(Form):
    """Whitelist-request form: a message digest, the raw message, optional
    contact details and a CAPTCHA.

    ``validate`` additionally verifies that the digest matches the uploaded
    message and queries a Pyzor server to confirm the message is currently
    reported as spam and not already whitelisted.
    """

    digest = TextField("Pyzor digest*", validators=[length(40, 40,
                                                           "Invalid Digest"),
                                                    required()])
    message = FileField('Raw message*')
    name = TextField('Name')
    email = EmailField('Email')
    comment = TextAreaField('Other details')
    recaptcha = RecaptchaField()
    submit = SubmitField()

    # BUG FIX: this was previously spelled ``__init___`` (three trailing
    # underscores), so the override never ran and self.msg / self.raw_message
    # / self.logger were never initialised; validate() would then raise
    # AttributeError on self.logger when a server error occurred.
    def __init__(self, *args, **kwargs):
        super(MessageForm, self).__init__(*args, **kwargs)
        self.msg = None
        self.raw_message = None
        self.logger = app.logger

    def validate(self):
        """Return True when the form fields AND the Pyzor checks pass."""
        if not Form.validate(self):
            return False
        # NOTE(review): stream.read() returns bytes on Python 3; passing it
        # to email.message_from_string assumes a str - confirm against the
        # deployed Python version.
        self.raw_message = flask.request.files["message"].stream.read()
        try:
            digest = pyzor.digest.DataDigester(
                email.message_from_string(self.raw_message)).value
            if digest != self.digest.data:
                self.add_error("digest", "Digest does not match message.")
                return False
            client = pyzor.client.Client(timeout=20)
            try:
                response = client.check(digest)
            except pyzor.TimeoutError as e:
                self.add_error("message", "Temporary error please try again.")
                self.logger.warn("Timeout: %s", e)
                return False
            except pyzor.CommError as e:
                self.add_error("message", "Temporary error please try again.")
                self.logger.warn("Error: %s", e)
                return False
            if not response.is_ok():
                self.add_error("message", "Temporary error please try again.")
                self.logger.warn("Invalid response from server: %s", response)
                return False
            # Only messages that are reported as spam but not yet
            # whitelisted qualify for a whitelist request.
            if int(response["Count"]) == 0:
                self.add_error("message", "Message not reported as spam.")
                return False
            if int(response["WL-Count"]) != 0:
                self.add_error("message", "Message is already whitelisted.")
                return False
        except AssertionError:
            # DataDigester asserts on malformed input.
            self.add_error("message", "Invalid message.")
            return False
        return True

    def add_error(self, field, message):
        """Append an error message to the given field's error list."""
        try:
            self.errors[field].append(message)
        except (KeyError, TypeError):
            self.errors[field] = [message]
class WhitelistMessage(MethodView):
    """Flask view for the whitelist-request page.

    GET renders the empty form; POST validates it, emails the configured
    recipients a notification with the original message attached, and
    re-renders the page with a success/failure flag.
    """
    def __init__(self):
        # CSRF is covered by the reCAPTCHA field, so the form-level CSRF
        # token is disabled.
        self.form = MessageForm(flask.request.form, csrf_enabled=False)
        self.logger = app.logger
    def get(self):
        """Render the empty whitelist form."""
        return flask.render_template('whitelist.html', form=self.form,
                                     error=None)
    def post(self):
        """Validate the submission and send the notification email."""
        success = False
        if self.form.validate():
            msg = self.build_notification()
            self.send_email(msg)
            success = True
        return flask.render_template('whitelist.html', form=self.form,
                                     success=success)
    def build_notification(self):
        """Build the multipart notification: a text summary from
        MSG_TEMPLATE_TXT plus the original raw message as an
        message/rfc822 attachment."""
        data = {"name": self.form.name.data,
                "email": self.form.email.data,
                "digest": self.form.digest.data,
                "comment": self.form.comment.data,
                "ip": flask.request.remote_addr}
        msg = email.mime.multipart.MIMEMultipart()
        msg["Date"] = email.utils.formatdate(localtime=True)
        msg["Subject"] = "[Pyzor] Whitelist request"
        msg["From"] = CONF.get("email", "sender")
        msg["To"] = CONF.get("email", "recipients")
        msg.preamble = "This is a multi-part message in MIME format."
        msg.epilogue = ""
        msg.attach(email.mime.text.MIMEText(MSG_TEMPLATE_TXT % data))
        original_attachment = email.mime.base.MIMEBase("message", "rfc822")
        original_attachment.add_header("Content-Disposition", "attachment")
        original_attachment.set_payload(self.form.raw_message)
        msg.attach(original_attachment)
        return msg
    def send_email(self, msg):
        """Deliver ``msg`` over SMTP using the manual MAIL/RCPT/DATA
        sequence so per-recipient failures can be reported precisely.

        Raises the corresponding smtplib exception on any non-success
        reply code; always attempts a clean QUIT.
        """
        smtp = smtplib.SMTP(host=CONF.get("email", "host"),
                            port=CONF.get("email", "port"))
        smtp.ehlo()
        try:
            code, err = smtp.mail(CONF.get("email", "sender"))
            if code != 250:
                raise smtplib.SMTPSenderRefused(code, err,
                                                CONF.get("email", "sender"))
            rcpterrs = {}
            for rcpt in CONF.get("email", "recipients").split(","):
                code, err = smtp.rcpt(rcpt)
                # 251 means "user not local; will forward" - still accepted.
                if code not in (250, 251):
                    rcpterrs[rcpt] = (code, err)
            if rcpterrs:
                raise smtplib.SMTPRecipientsRefused(rcpterrs)
            code, err = smtp.data(msg.as_string())
            if code != 250:
                raise smtplib.SMTPDataError(code, err)
        finally:
            try:
                smtp.quit()
            except smtplib.SMTPServerDisconnected:
                pass
# Register the whitelist view for both GET and POST on /whitelist/.
app.add_url_rule("/whitelist/", view_func=WhitelistMessage.as_view("whitelist"))
@app.errorhandler(500)
def unhandled_exception(error):
    """Generic error message."""
    # Re-attach logging handlers in case the failure happened before/around
    # logger setup, then log the full traceback.
    setup_logging()
    app.logger.error("Unhandled Exception: %s", error, exc_info=True)
    return flask.render_template('error.html', error=error)
if __name__ == '__main__':
    # Development entry point only; production should use a WSGI server.
    app.debug = True
    app.run()
| 8,333 | 31.940711 | 80 | py |
pyzor | pyzor-master/pyzor/message.py | """This modules contains the various messages used in the pyzor client server
communication.
"""
import random
import email.message
import pyzor
class Message(email.message.Message):
    """Base class for all Pyzor protocol messages.

    Messages are RFC5321-style header-only messages; subclasses override
    setup()/ensure_complete() to add and validate their fields.
    """
    def __init__(self):
        email.message.Message.__init__(self)
        self.setup()
    def setup(self):
        """Hook for subclasses to add their initial headers."""
        pass
    def init_for_sending(self):
        """Finalize the message before it goes on the wire."""
        self.ensure_complete()
    def __str__(self):
        # The parent class adds the unix From header.
        return self.as_string()
    def ensure_complete(self):
        """Hook for subclasses to verify all required headers are present."""
        pass
class ThreadedMessage(Message):
    """A message carrying a "Thread" id (to pair UDP requests/responses)
    and a "PV" protocol-version header."""
    def init_for_sending(self):
        # Generate a thread id lazily, only if the caller did not set one.
        if 'Thread' not in self:
            self.set_thread(ThreadId.generate())
        assert 'Thread' in self
        self["PV"] = str(pyzor.proto_version)
        Message.init_for_sending(self)
    def ensure_complete(self):
        if 'PV' not in self or 'Thread' not in self:
            raise pyzor.IncompleteMessageError("Doesn't have fields for a "
                                               "ThreadedMessage.")
        Message.ensure_complete(self)
    def get_protocol_version(self):
        """Return the protocol version as a float (e.g. 2.1)."""
        return float(self['PV'])
    def get_thread(self):
        """Return the thread id as a ThreadId instance."""
        return ThreadId(self['Thread'])
    def set_thread(self, i):
        self['Thread'] = str(i)
class Response(ThreadedMessage):
    """A server response: carries a numeric "Code" and a human-readable
    "Diag" header."""
    # HTTP-style success code.
    ok_code = 200
    def ensure_complete(self):
        if 'Code' not in self or 'Diag' not in self:
            raise pyzor.IncompleteMessageError("doesn't have fields for a "
                                               "Response")
        ThreadedMessage.ensure_complete(self)
    def is_ok(self):
        """True when the response code equals ok_code (200)."""
        return self.get_code() == self.ok_code
    def get_code(self):
        return int(self['Code'])
    def get_diag(self):
        return self['Diag']
    def head_tuple(self):
        """Return (code, diagnostic) as a tuple."""
        return self.get_code(), self.get_diag()
class Request(ThreadedMessage):
    """This is the class that should be used to read in Requests of any type.

    Subclasses are responsible for setting 'Op' if they are generating a
    message,"""
    def get_op(self):
        """Return the requested operation name (e.g. "check")."""
        return self['Op']
    def ensure_complete(self):
        if 'Op' not in self:
            raise pyzor.IncompleteMessageError("doesn't have fields for a "
                                               "Request")
        ThreadedMessage.ensure_complete(self)
class ClientSideRequest(Request):
    """A request generated by the client; subclasses set the ``op`` class
    attribute and the "Op" header is filled in automatically."""
    op = None
    def setup(self):
        Request.setup(self)
        self["Op"] = self.op
class SimpleDigestBasedRequest(ClientSideRequest):
    """A client request that carries one or more "Op-Digest" headers."""
    def __init__(self, digest=None):
        ClientSideRequest.__init__(self)
        # Number of digests attached so far.
        self.digest_count = 0
        if digest:
            self.add_digest(digest)
    def add_digest(self, digest):
        """Attach one more digest to the request."""
        self.add_header("Op-Digest", digest)
        self.digest_count += 1
class SimpleDigestSpecBasedRequest(SimpleDigestBasedRequest):
    """A digest request that also carries the digest spec (the list of
    (offset, length) pairs) flattened into a comma-separated "Op-Spec"."""
    def __init__(self, digest=None, spec=None):
        SimpleDigestBasedRequest.__init__(self, digest)
        if spec:
            # Flatten [(a, b), (c, d)] into "a,b,c,d".
            flat_spec = [item for sublist in spec for item in sublist]
            self["Op-Spec"] = ",".join(str(part) for part in flat_spec)
# Concrete request types: each only fixes the "Op" header value.
class PingRequest(ClientSideRequest):
    op = "ping"
class PongRequest(SimpleDigestBasedRequest):
    op = "pong"
class CheckRequest(SimpleDigestBasedRequest):
    op = "check"
class InfoRequest(SimpleDigestBasedRequest):
    op = "info"
class ReportRequest(SimpleDigestSpecBasedRequest):
    op = "report"
class WhitelistRequest(SimpleDigestSpecBasedRequest):
    op = "whitelist"
class ThreadId(int):
    """A 16-bit request/response pairing id.

    Values must lie in [0, 65536); the sub-range [0, 1024) is reserved, so
    generated ids always fall in [1024, 65536).
    """

    # (0, 1024) is reserved
    full_range = (0, 2 ** 16)
    ok_range = (1024, full_range[1])
    error_value = 0

    def __new__(cls, i):
        instance = int.__new__(cls, i)
        lower, upper = cls.full_range
        if instance < lower or instance >= upper:
            raise ValueError("value outside of range")
        return instance

    @classmethod
    def generate(cls):
        """Return a random ThreadId from the non-reserved range."""
        return cls(random.randrange(*cls.ok_range))

    def in_ok_range(self):
        """True when this id is outside the reserved [0, 1024) range."""
        lower, upper = self.ok_range
        return lower <= self < upper
| 4,023 | 23.993789 | 77 | py |
pyzor | pyzor-master/pyzor/digest.py | """Handle digesting the messages."""
from __future__ import print_function
import re
import sys
import codecs
import hashlib
try:
import HTMLParser
except ImportError:
import html.parser as HTMLParser
# Hard-coded for the moment.
# Each (offset, length) pair means: starting at offset% into the message,
# digest `length` lines.
digest_spec = ([(20, 3), (60, 3)])
# The hash used for digests; HASH_SIZE is the hex-digest length (40 for SHA1).
HASH = hashlib.sha1
HASH_SIZE = len(HASH(b"").hexdigest())
if sys.version_info[0] == 2:
    # On Python 2, wrap stdout so printing unicode digest lines works.
    sys.stdout = codecs.getwriter('utf8')(sys.stdout)
class HTMLStripper(HTMLParser.HTMLParser):
    """Collect the text content of an HTML document, discarding all tags.

    Stripped, non-empty text chunks are appended to the supplied collector
    list; anything inside <script> or <style> elements is skipped.
    """

    # Elements whose text content must never reach the collector.
    _IGNORED_ELEMENTS = ("script", "style")

    def __init__(self, collector):
        HTMLParser.HTMLParser.__init__(self)
        self.reset()
        self.collector = collector
        self.collect = True

    def handle_data(self, data):
        """Append each non-empty chunk of visible text to the collector."""
        chunk = data.strip()
        if self.collect and chunk:
            self.collector.append(chunk)

    def handle_starttag(self, tag, attrs):
        HTMLParser.HTMLParser.handle_starttag(self, tag, attrs)
        if tag.lower() in self._IGNORED_ELEMENTS:
            self.collect = False

    def handle_endtag(self, tag):
        HTMLParser.HTMLParser.handle_endtag(self, tag)
        if tag.lower() in self._IGNORED_ELEMENTS:
            self.collect = True
class DataDigester(object):
    """The major workhouse class.

    Normalizes the text payloads of an email message, selects lines
    according to the digest spec, and exposes the resulting hex digest as
    ``self.value``.
    """
    __slots__ = ['value', 'digest']
    # Minimum line length for it to be included as part of the digest.
    min_line_length = 8
    # If a message is this many lines or less, then we digest the whole
    # message.
    atomic_num_lines = 4
    # We're not going to try to match email addresses as per the spec
    # because it's too difficult. Plus, regular expressions don't work well
    # for them. (BNF is better at balanced parens and such).
    email_ptrn = re.compile(r'\S+@\S+')
    # Same goes for URLs.
    url_ptrn = re.compile(r'[a-z]+:\S+', re.IGNORECASE)
    # We also want to remove anything that is so long it looks like possibly
    # a unique identifier.
    longstr_ptrn = re.compile(r'\S{10,}')
    ws_ptrn = re.compile(r'\s')
    # String that the above patterns will be replaced with.
    # Note that an empty string will always be used to remove whitespace.
    unwanted_txt_repl = ''
    def __init__(self, msg, spec=None):
        """Digest ``msg`` (an email.message.Message) using ``spec``
        (defaults to the module-level digest_spec)."""
        if spec is None:
            spec = digest_spec
        self.value = None
        self.digest = HASH()
        # Need to know the total number of lines in the content.
        lines = []
        for payload in self.digest_payloads(msg):
            for line in payload.splitlines():
                norm = self.normalize(line)
                if self.should_handle_line(norm):
                    try:
                        lines.append(norm.encode("utf8", "ignore"))
                    except UnicodeError:
                        # Skip lines that cannot be encoded at all.
                        continue
        # Short messages are digested whole; longer ones only at the
        # spec-selected offsets.
        if len(lines) <= self.atomic_num_lines:
            self.handle_atomic(lines)
        else:
            self.handle_pieced(lines, spec)
        self.value = self.digest.hexdigest()
        assert len(self.value) == HASH_SIZE
    def handle_atomic(self, lines):
        """We digest everything."""
        for line in lines:
            self.handle_line(line)
    def handle_pieced(self, lines, spec):
        """Digest stuff according to the spec."""
        for offset, length in spec:
            for i in range(length):
                try:
                    # offset is a percentage into the message.
                    line = lines[int(offset * len(lines) // 100) + i]
                except IndexError:
                    pass
                else:
                    self.handle_line(line)
    def handle_line(self, line):
        # Feed the (already utf8-encoded) line into the running hash.
        self.digest.update(line.rstrip())
    @classmethod
    def normalize(cls, s):
        """Strip NULs, long tokens, emails, URLs and all whitespace."""
        s = s.replace("\x00", "")
        repl = cls.unwanted_txt_repl
        s = cls.longstr_ptrn.sub(repl, s)
        s = cls.email_ptrn.sub(repl, s)
        s = cls.url_ptrn.sub(repl, s)
        # Make sure we do the whitespace last because some of the previous
        # patterns rely on whitespace.
        return cls.ws_ptrn.sub('', s).strip()
    @staticmethod
    def normalize_html_part(s):
        """Return only the visible text of an HTML payload."""
        data = []
        stripper = HTMLStripper(data)
        try:
            stripper.feed(s)
        except (UnicodeDecodeError, HTMLParser.HTMLParseError):
            # We can't parse the HTML, so just strip it. This is still
            # better than including generic HTML/CSS text.
            pass
        return " ".join(data)
    @classmethod
    def should_handle_line(cls, s):
        # Only non-empty lines of at least min_line_length characters count.
        return len(s) and cls.min_line_length <= len(s)
    @classmethod
    def digest_payloads(cls, msg):
        """Yield each relevant (decoded) payload of the message.

        text/* parts are decoded with their declared charset (falling back
        to ascii), text/html parts are stripped to visible text, multipart
        containers are skipped, and other parts pass through unchanged.
        """
        for part in msg.walk():
            if part.get_content_maintype() == "text":
                payload = part.get_payload(decode=True)
                charset = part.get_content_charset()
                if charset:
                    charset = charset.replace("\x00", "")
                errors = "ignore"
                if not charset:
                    charset = "ascii"
                elif (charset.lower().replace("_", "-") in ("quopri-codec",
                        "quopri", "quoted-printable", "quotedprintable")):
                    # Decoding as quoted-printable must be strict, otherwise
                    # the codec can loop/misbehave on bad input.
                    errors = "strict"
                try:
                    payload = payload.decode(charset, errors)
                except (LookupError, UnicodeError, AssertionError):
                    try:
                        payload = payload.decode("ascii", "ignore")
                    except UnicodeError:
                        continue
                if part.get_content_subtype() == "html":
                    yield cls.normalize_html_part(payload)
                else:
                    yield payload
            elif part.is_multipart():
                # Skip, because walk() will give us the payload next.
                pass
            else:
                # Non-text parts are passed through as-is.
                yield part.get_payload()
class PrintingDataDigester(DataDigester):
    """Extends DataDigester: prints out what we're digesting."""
    def handle_line(self, line):
        # Echo each digested line (for debugging), then digest as usual.
        print(line.decode("utf8"))
        super(PrintingDataDigester, self).handle_line(line)
| 6,107 | 30.979058 | 76 | py |
pyzor | pyzor-master/pyzor/server.py | """Networked spam-signature detection server.
The server receives the request in the form of a RFC5321 message, and
responds with another RFC5321 message. Neither of these messages has a
body - all of the data is encapsulated in the headers.
The response headers will always include a "Code" header, which is a
HTTP-style response code, and a "Diag" header, which is a human-readable
message explaining the response code (typically this will be "OK").
Both the request and response headers always include a "PV" header, which
indicates the protocol version that is being used (in a major.minor format).
Both the requestion and response headers also always include a "Thread",
which uniquely identifies the request (this is a requirement of using UDP).
Responses to requests may arrive in any order, but the "Thread" header of
a response will always match the "Thread" header of the appropriate request.
Authenticated requests must also have "User", "Time" (timestamp), and "Sig"
(signature) headers.
"""
import os
import sys
import time
import errno
import socket
import signal
import logging
import threading
import traceback
import email.message
try:
import SocketServer
except ImportError:
import socketserver as SocketServer
import pyzor.config
import pyzor.account
import pyzor.engines.common
import pyzor.hacks.py26
pyzor.hacks.py26.hack_all()
def _eintr_retry(func, *args):
"""restart a system call interrupted by EINTR"""
while True:
try:
return func(*args)
except OSError as e:
if e.args[0] != errno.EINTR:
raise
class Server(SocketServer.UDPServer):
    """The pyzord server. Handles incoming UDP connections in a single
    thread and single process."""
    # Largest request datagram we will read.
    max_packet_size = 8192
    # Maximum allowed clock skew (seconds) for signed requests.
    time_diff_allowance = 180
    def __init__(self, address, database, passwd_fn, access_fn,
                 forwarder=None):
        # Pick the address family from the bind address (":" implies IPv6).
        # NOTE: this sets a *class* attribute, affecting all Server
        # instances created afterwards.
        if ":" in address[0]:
            Server.address_family = socket.AF_INET6
        else:
            Server.address_family = socket.AF_INET
        self.log = logging.getLogger("pyzord")
        self.usage_log = logging.getLogger("pyzord-usage")
        self.database = database
        # Some engines can report/whitelist a batch of digests in one call.
        self.one_step = getattr(self.database, "handles_one_step", False)
        # Handle configuration files
        self.passwd_fn = passwd_fn
        self.access_fn = access_fn
        self.accounts = {}
        self.acl = {}
        self.load_config()
        self.forwarder = forwarder
        self.log.debug("Listening on %s", address)
        # Delay bind/activate so we can tweak socket options first.
        SocketServer.UDPServer.__init__(self, address, RequestHandler,
                                        bind_and_activate=False)
        try:
            # Allow an IPv6 socket to also accept IPv4 traffic.
            self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
        except (AttributeError, socket.error) as e:
            self.log.debug("Unable to set IPV6_V6ONLY to false %s", e)
        self.server_bind()
        self.server_activate()
        # Finally, set signals
        signal.signal(signal.SIGUSR1, self.reload_handler)
        signal.signal(signal.SIGTERM, self.shutdown_handler)
    def load_config(self):
        """Reads the configuration files and loads the accounts and ACLs."""
        self.accounts = pyzor.config.load_passwd_file(self.passwd_fn)
        self.acl = pyzor.config.load_access_file(self.access_fn, self.accounts)
    def shutdown_handler(self, *args, **kwargs):
        """Handler for the SIGTERM signal. This should be used to kill the
        daemon and ensure proper clean-up.
        """
        self.log.info("SIGTERM received. Shutting down.")
        # shutdown() blocks until serve_forever() exits, so it must run in
        # a separate thread when called from a signal handler.
        t = threading.Thread(target=self.shutdown)
        t.start()
    def reload_handler(self, *args, **kwargs):
        """Handler for the SIGUSR1 signal. This should be used to reload
        the configuration files.
        """
        self.log.info("SIGUSR1 received. Reloading configuration.")
        t = threading.Thread(target=self.load_config)
        t.start()
    def handle_error(self, request, client_address):
        # Log instead of printing the traceback to stderr (the default).
        self.log.error("Error while processing request from: %s",
                       client_address, exc_info=True)
class PreForkServer(Server):
    """The same as Server, but prefork itself when starting the self, by
    forking a number of child-processes.

    The parent process will then wait for all his child process to complete.
    """
    def __init__(self, address, database, passwd_fn, access_fn, prefork=4):
        """The same as Server.__init__ but requires a list of databases
        instead of a single database connection.
        """
        # None means "this is the parent and no children exist yet"; the
        # parent later stores the list of child pids here.
        self.pids = None
        Server.__init__(self, address, database, passwd_fn, access_fn)
        self._prefork = prefork
    def serve_forever(self, poll_interval=0.5):
        """Fork the current process and wait for all children to finish."""
        pids = []
        for dummy in range(self._prefork):
            # NOTE: self.database is an iterator of database *factories*
            # here; each child creates its own connection after the fork.
            database = next(self.database)
            pid = os.fork()
            if not pid:
                # Create the database in the child process, to prevent issues
                self.database = database()
                Server.serve_forever(self, poll_interval=poll_interval)
                # os._exit avoids running parent cleanup in the child.
                os._exit(0)
            else:
                pids.append(pid)
        self.pids = pids
        for pid in self.pids:
            _eintr_retry(os.waitpid, pid, 0)
    def shutdown(self):
        """If this is the parent process send the TERM signal to all children,
        else call the super method.
        """
        for pid in self.pids or ():
            os.kill(pid, signal.SIGTERM)
        if self.pids is None:
            Server.shutdown(self)
    def load_config(self):
        """If this is the parent process send the USR1 signal to all children,
        else call the super method.
        """
        for pid in self.pids or ():
            os.kill(pid, signal.SIGUSR1)
        if self.pids is None:
            Server.load_config(self)
class ThreadingServer(SocketServer.ThreadingMixIn, Server):
    """A threaded version of the pyzord server.  Each connection is served
    in a new thread.  This may not be suitable for all database types."""
    pass
class BoundedThreadingServer(ThreadingServer):
    """Same as ThreadingServer but this also accepts a limited number of
    concurrent threads.

    A counting semaphore is acquired before each request is dispatched and
    released when the worker thread finishes, capping the number of
    in-flight requests at ``max_threads``.
    """
    def __init__(self, address, database, passwd_fn, access_fn, max_threads,
                 forwarding_server=None):
        ThreadingServer.__init__(self, address, database, passwd_fn, access_fn,
                                 forwarder=forwarding_server)
        self.semaphore = threading.Semaphore(max_threads)
    def process_request(self, request, client_address):
        # Block here until a worker slot is free.
        self.semaphore.acquire()
        ThreadingServer.process_request(self, request, client_address)
    def process_request_thread(self, request, client_address):
        # Release in a finally block so an unexpected exception in the
        # worker cannot leak a semaphore slot and slowly starve the server.
        try:
            ThreadingServer.process_request_thread(self, request,
                                                   client_address)
        finally:
            self.semaphore.release()
class ProcessServer(SocketServer.ForkingMixIn, Server):
    """A multi-processing version of the pyzord server.  Each connection is
    served in a new process. This may not be suitable for all database types.
    """
    def __init__(self, address, database, passwd_fn, access_fn,
                 max_children=40, forwarding_server=None):
        # NOTE: max_children is a ForkingMixIn *class* attribute, so this
        # setting is shared by all ProcessServer instances.
        ProcessServer.max_children = max_children
        Server.__init__(self, address, database, passwd_fn, access_fn,
                        forwarder=forwarding_server)
class RequestHandler(SocketServer.DatagramRequestHandler):
    """Handle a single pyzord request.

    Parses the RFC5321-style request headers, authenticates/authorizes the
    user, dispatches to the matching handle_* method, and writes back a
    header-only response with "Code"/"Diag"/"PV"/"Thread" fields.
    """
    def __init__(self, *args, **kwargs):
        # The response must exist before the base constructor runs, because
        # it calls handle() immediately.
        self.response = email.message.Message()
        SocketServer.DatagramRequestHandler.__init__(self, *args, **kwargs)
    def handle(self):
        """Handle a pyzord operation, cleanly handling any errors."""
        # Assume success; error handlers overwrite Code/Diag as needed.
        self.response["Code"] = "200"
        self.response["Diag"] = "OK"
        self.response["PV"] = "%s" % pyzor.proto_version
        try:
            self._really_handle()
        except NotImplementedError as e:
            self.handle_error(501, "Not implemented: %s" % e)
        except pyzor.UnsupportedVersionError as e:
            self.handle_error(505, "Version Not Supported: %s" % e)
        except pyzor.ProtocolError as e:
            self.handle_error(400, "Bad request: %s" % e)
        except pyzor.SignatureError as e:
            self.handle_error(401, "Unauthorized: Signature Error: %s" % e)
        except pyzor.AuthorizationError as e:
            self.handle_error(403, "Forbidden: %s" % e)
        except Exception as e:
            self.handle_error(500, "Internal Server Error: %s" % e)
            self.server.log.error(traceback.format_exc())
        self.server.log.debug("Sending: %r", self.response.as_string())
        self.wfile.write(self.response.as_string().encode("utf8"))
    def _really_handle(self):
        """handle() without the exception handling."""
        self.server.log.debug("Received: %r", self.packet)
        # Read the request.
        # Old versions of the client sent a double \n after the signature,
        # which screws up the RFC5321 format.  Specifically handle that
        # here - this could be removed in time.
        request = email.message_from_bytes(
            self.rfile.read().replace(b"\n\n", b"\n") + b"\n")
        # Ensure that the response can be paired with the request.
        self.response["Thread"] = request["Thread"]
        # If this is an authenticated request, then check the authentication
        # details.
        user = request["User"] or pyzor.anonymous_user
        if user != pyzor.anonymous_user:
            try:
                pyzor.account.verify_signature(request,
                                               self.server.accounts[user])
            except KeyError:
                raise pyzor.SignatureError("Unknown user.")
        if "PV" not in request:
            raise pyzor.ProtocolError("Protocol Version not specified in "
                                      "request")
        # The protocol version is compatible if the major number is
        # identical (changes in the minor number are unimportant).
        try:
            if int(float(request["PV"])) != int(pyzor.proto_version):
                raise pyzor.UnsupportedVersionError()
        except ValueError:
            self.server.log.warn("Invalid PV: %s", request["PV"])
            raise pyzor.ProtocolError("Invalid Protocol Version")
        # Check that the user has permission to execute the requested
        # operation.
        opcode = request["Op"]
        if opcode not in self.server.acl[user]:
            raise pyzor.AuthorizationError(
                "User is not authorized to request the operation.")
        self.server.log.debug("Got a %s command from %s", opcode,
                              self.client_address[0])
        # Get a handle to the appropriate method to execute this operation.
        try:
            dispatch = self.dispatches[opcode]
        except KeyError:
            raise NotImplementedError("Requested operation is not "
                                      "implemented.")
        # Get the existing record from the database (or a blank one if
        # there is no matching record).
        digests = request.get_all("Op-Digest")
        # Do the requested operation, log what we have done, and return.
        # ("ping" has a None dispatch and no digests - a bare 200 reply.)
        if dispatch and digests:
            dispatch(self, digests)
        self.server.usage_log.info("%s,%s,%s,%r,%s", user,
                                   self.client_address[0], opcode, digests,
                                   self.response["Code"])
    def handle_error(self, code, message):
        """Create an appropriate response for an error."""
        self.server.usage_log.error("%s: %s", code, message)
        self.response.replace_header("Code", "%d" % code)
        self.response.replace_header("Diag", message)
    def handle_pong(self, digests):
        """Handle the 'pong' command.

        This command returns maxint for report counts and 0 whitelist.
        """
        self.server.log.debug("Request pong for %s", digests[0])
        self.response["Count"] = "%d" % sys.maxsize
        self.response["WL-Count"] = "%d" % 0
    def handle_check(self, digests):
        """Handle the 'check' command.

        This command returns the spam/ham counts for the specified digest.
        """
        digest = digests[0]
        try:
            record = self.server.database[digest]
        except KeyError:
            # Unknown digest: report zero counts rather than an error.
            record = pyzor.engines.common.Record()
        self.server.log.debug("Request to check digest %s", digest)
        self.response["Count"] = "%d" % record.r_count
        self.response["WL-Count"] = "%d" % record.wl_count
    def handle_report(self, digests):
        """Handle the 'report' command in a single step.

        This command increases the spam count for the specified digests."""
        self.server.log.debug("Request to report digests %s", digests)
        if self.server.one_step:
            # The engine can update all digests in one call.
            self.server.database.report(digests)
        else:
            for digest in digests:
                try:
                    record = self.server.database[digest]
                except KeyError:
                    record = pyzor.engines.common.Record()
                record.r_increment()
                self.server.database[digest] = record
        if self.server.forwarder:
            for digest in digests:
                self.server.forwarder.queue_forward_request(digest)
    def handle_whitelist(self, digests):
        """Handle the 'whitelist' command in a single step.

        This command increases the ham count for the specified digests."""
        self.server.log.debug("Request to whitelist digests %s", digests)
        if self.server.one_step:
            self.server.database.whitelist(digests)
        else:
            for digest in digests:
                try:
                    record = self.server.database[digest]
                except KeyError:
                    record = pyzor.engines.common.Record()
                record.wl_increment()
                self.server.database[digest] = record
        if self.server.forwarder:
            for digest in digests:
                self.server.forwarder.queue_forward_request(digest, True)
    def handle_info(self, digests):
        """Handle the 'info' command.

        This command returns diagnostic data about a digest (timestamps for
        when the digest was first/last seen as spam/ham, and spam/ham
        counts).
        """
        digest = digests[0]
        try:
            record = self.server.database[digest]
        except KeyError:
            record = pyzor.engines.common.Record()
        self.server.log.debug("Request for information about digest %s",
                              digest)
        def time_output(time_obj):
            """Convert a datetime object to a POSIX timestamp.

            If the object is None, then return 0.
            """
            if not time_obj:
                return 0
            return time.mktime(time_obj.timetuple())
        self.response["Entered"] = "%d" % time_output(record.r_entered)
        self.response["Updated"] = "%d" % time_output(record.r_updated)
        self.response["WL-Entered"] = "%d" % time_output(record.wl_entered)
        self.response["WL-Updated"] = "%d" % time_output(record.wl_updated)
        self.response["Count"] = "%d" % record.r_count
        self.response["WL-Count"] = "%d" % record.wl_count
    # Opcode -> handler mapping; "ping" needs no handler (bare 200 reply).
    dispatches = {
        'ping': None,
        'pong': handle_pong,
        'info': handle_info,
        'check': handle_check,
        'report': handle_report,
        'whitelist': handle_whitelist,
    }
| 15,776 | 37.574572 | 79 | py |
pyzor | pyzor-master/pyzor/account.py | """A collection of utilities that facilitate working with Pyzor accounts.
Note that accounts are not necessary (on the client or server), as an
"anonymous" account always exists."""
import time
import hashlib
import pyzor
def sign_msg(hashed_key, timestamp, msg, hash_=hashlib.sha1):
    """Converts the key, timestamp (epoch seconds), and msg into a digest.

    lower(H(H(M) + ':' T + ':' + K))

    M is message
    T is integer epoch timestamp
    K is hashed_key
    H is the hash function (currently SHA1)
    """
    serialized = msg.as_string().strip().encode("utf8")
    signer = hash_()
    # Hash the message body first, then mix in the timestamp and key.
    signer.update(hash_(serialized).digest())
    trailer = ":%d:%s" % (timestamp, hashed_key)
    signer.update(trailer.encode("utf8"))
    return signer.hexdigest().lower()
def hash_key(key, user, hash_=hashlib.sha1):
    """Returns the hash key for this username and password.

    lower(H(U + ':' + lower(K)))

    K is key (hex string)
    U is username
    H is the hash function (currently SHA1)
    """
    combined = "%s:%s" % (user, key.lower())
    return hash_(combined.encode("utf8")).hexdigest().lower()
def verify_signature(msg, user_key):
    """Verify that the provided message is correctly signed.

    The message must have "User", "Time", and "Sig" headers.

    If the signature is valid, then the function returns normally.
    If the signature is not valid, then a pyzor.SignatureError() exception
    is raised.

    NOTE: this removes the "Sig" header from ``msg`` as a side effect (the
    signature is computed over the message without it).
    """
    timestamp = int(msg["Time"])
    user = msg["User"]
    provided_signature = msg["Sig"]
    # Check that this signature is not too old.
    if abs(time.time() - timestamp) > pyzor.MAX_TIMESTAMP_DIFFERENCE:
        raise pyzor.SignatureError("Timestamp not within allowed range.")
    # Calculate what the correct signature is.
    hashed_user_key = hash_key(user_key, user)
    # The signature is not part of the message that is signed.
    del msg["Sig"]
    correct_signature = sign_msg(hashed_user_key, timestamp, msg)
    if correct_signature != provided_signature:
        raise pyzor.SignatureError("Invalid signature.")
class Account(object):
    """A simple credential holder: username plus the salt/key pair used to
    sign requests."""
    def __init__(self, username, salt, key):
        self.username = username
        self.salt = salt
        self.key = key
def key_from_hexstr(s):
    """Split a "salt,key" hex string into its (salt, key) parts.

    Raises ValueError when the string does not contain exactly one comma.
    """
    parts = s.split(",")
    if len(parts) != 2:
        raise ValueError("Invalid number of parts for key; perhaps you "
                         "forgot the comma at the beginning for the "
                         "salt divider?")
    return parts[0], parts[1]
AnonymousAccount = Account(pyzor.anonymous_user, None, "")
| 2,546 | 30.8375 | 74 | py |
pyzor | pyzor-master/pyzor/config.py | """Functions that handle parsing pyzor configuration files."""
import os
import re
import logging
import collections
try:
import sentry_sdk
_has_sentry = True
except ImportError:
_has_sentry = False
import pyzor.account
_COMMENT_P = re.compile(r"((?<=[^\\])#.*)")
# Configuration files for the Pyzor Server
def load_access_file(access_fn, accounts):
    """Load the ACL from the specified file, if it exists, and return an
    ACL dictionary, where each key is a username and each value is a set
    of allowed permissions (if the permission is not in the set, then it
    is not allowed).

    'accounts' is a dictionary of accounts that exist on the server - only
    the keys are used, which must be the usernames (these are the users
    that are granted permission when the 'all' keyword is used, as
    described below).

    Each line of the file should be in the following format:
        operation : user : allow|deny
    where 'operation' is a space-separated list of pyzor commands or the
    keyword 'all' (meaning all commands), 'username' is a space-separated
    list of usernames or the keyword 'all' (meaning all users) - the
    anonymous user is called "anonymous", and "allow|deny" indicates whether
    or not the specified user(s) may execute the specified operations.

    The file is processed from top to bottom, with the final match for
    user/operation being the value taken.  Every file has the following
    implicit final rule:
        all : all : deny

    If the file does not exist, then the following default is used:
        check report ping info : anonymous : allow
    """
    log = logging.getLogger("pyzord")
    # A defaultdict is safe, because if we get a non-existant user, we get
    # the empty set, which is the same as a deny, which is the final
    # implicit rule.
    acl = collections.defaultdict(set)
    if not os.path.exists(access_fn):
        log.info("Using default ACL: the anonymous user may use the check, "
                 "report, ping and info commands.")
        acl[pyzor.anonymous_user] = set(("check", "report", "ping", "pong",
                                         "info"))
        return acl
    # 'with' guarantees the file is closed even if parsing raises.
    with open(access_fn) as accessf:
        for line in accessf:
            if not line.strip() or line[0] == "#":
                continue
            try:
                operations, users, allowed = [part.lower().strip()
                                              for part in line.split(":")]
            except ValueError:
                # Logger.warn() is a deprecated alias; use warning().
                log.warning("Invalid ACL line: %r", line)
                continue
            try:
                allowed = {"allow": True, "deny": False}[allowed]
            except KeyError:
                log.warning("Invalid ACL line: %r", line)
                continue
            if operations == "all":
                operations = ("check", "report", "ping", "pong", "info",
                              "whitelist")
            else:
                operations = [operation.strip()
                              for operation in operations.split()]
            if users == "all":
                # Grant/revoke for every known account (dict of usernames).
                users = accounts
            else:
                users = [user.strip() for user in users.split()]
            for user in users:
                if allowed:
                    log.debug("Granting %s to %s.", ",".join(operations),
                              user)
                    # If these operations are already allowed, this will have
                    # no effect.
                    acl[user].update(operations)
                else:
                    log.debug("Revoking %s from %s.", ",".join(operations),
                              user)
                    # If these operations are not allowed yet, this will have
                    # no effect.
                    acl[user].difference_update(operations)
    log.info("ACL: %r", acl)
    return acl
def load_passwd_file(passwd_fn):
    """Load the accounts from the specified file.

    Each line of the file should be in the format:
        username : key

    If the file does not exist, then an empty dictionary is returned;
    otherwise, a dictionary of (username, key) items is returned.
    """
    log = logging.getLogger("pyzord")
    accounts = {}
    if not os.path.exists(passwd_fn):
        log.info("Accounts file does not exist - only the anonymous user "
                 "will be available.")
        return accounts
    # 'with' guarantees the file is closed even if parsing raises.
    with open(passwd_fn) as passwdf:
        for line in passwdf:
            if not line.strip() or line[0] == "#":
                continue
            try:
                user, key = line.split(":")
            except ValueError:
                # Logger.warn() is a deprecated alias; use warning().
                log.warning("Invalid accounts line: %r", line)
                continue
            user = user.strip()
            key = key.strip()
            log.debug("Creating an account for %s with key %s.", user, key)
            accounts[user] = key
    # Don't log the keys at 'info' level, just the usernames.
    log.info("Accounts: %s", ",".join(accounts))
    return accounts
# Configuration files for the Pyzor Client
def load_accounts(filepath):
    """Load client accounts from *filepath*.

    Layout of file is: host : port : username : salt,key

    Returns a dictionary mapping (host, port) tuples to
    pyzor.account.Account objects; invalid lines are skipped with a
    warning.
    """
    accounts = {}
    log = logging.getLogger("pyzor")
    if os.path.exists(filepath):
        # 'with' guarantees the file is closed even if parsing raises.
        with open(filepath) as accountsf:
            for lineno, orig_line in enumerate(accountsf):
                line = orig_line.strip()
                if not line or line.startswith('#'):
                    continue
                try:
                    host, port, username, key = [x.strip()
                                                 for x in line.split(":")]
                except ValueError:
                    # Logger.warn() is a deprecated alias; use warning().
                    log.warning("account file: invalid line %d: wrong "
                                "number of parts", lineno)
                    continue
                try:
                    port = int(port)
                except ValueError as ex:
                    log.warning("account file: invalid line %d: %s",
                                lineno, ex)
                    continue
                address = (host, port)
                try:
                    salt, key = pyzor.account.key_from_hexstr(key)
                except ValueError as ex:
                    log.warning("account file: invalid line %d: %s",
                                lineno, ex)
                    continue
                if not salt and not key:
                    log.warning("account file: invalid line %d: keystuff "
                                "can't be all None's", lineno)
                    continue
                accounts[address] = pyzor.account.Account(username, salt,
                                                          key)
    else:
        log.warning("No accounts are setup. All commands will be executed "
                    "by the anonymous user.")
    return accounts
def load_servers(filepath):
    """Load the servers file.

    Each valid line has the form "host:port".  Lines starting with "#" or
    not matching the pattern are ignored.  If the file is missing or yields
    no servers, the public pyzor server is used as a default.
    """
    logger = logging.getLogger("pyzor")
    # The original code duplicated ``servers = []`` in both branches of an
    # os.path.exists() check; a single initialisation is equivalent.
    servers = []
    if os.path.exists(filepath):
        with open(filepath) as serverf:
            for line in serverf:
                line = line.strip()
                if re.match("[^#][a-zA-Z0-9.-]+:[0-9]+", line):
                    address, port = line.rsplit(":", 1)
                    servers.append((address, int(port)))
    if not servers:
        logger.info("No servers specified, defaulting to public.pyzor.org.")
        servers = [("public.pyzor.org", 24441)]
    return servers
def load_local_whitelist(filepath):
    """Return the set of digests listed in the local digest skip file.

    Missing file yields an empty set; trailing "#" comments are stripped.
    """
    digests = set()
    if not os.path.exists(filepath):
        return digests
    with open(filepath) as whitelistf:
        for raw_line in whitelistf:
            # Drop trailing comments ("#" preceded by a non-backslash).
            # NOTE(review): a line *starting* with "#" is not matched by the
            # look-behind and would be kept as an entry - confirm intended.
            entry = _COMMENT_P.sub("", raw_line).strip()
            if entry:
                digests.add(entry)
    return digests
# Common configurations
def setup_logging(log_name, filepath, debug, sentry_dsn=None,
                  sentry_lvl="WARN"):
    """Setup logging according to the specified options. Return the Logger
    object.

    Console output is CRITICAL-only unless *debug*; *filepath*, when given,
    also receives INFO (or DEBUG) records.  *sentry_lvl* is currently
    accepted for interface compatibility but not used.
    """
    fmt = logging.Formatter('%(asctime)s (%(process)d) %(levelname)s '
                            '%(message)s')
    stream_handler = logging.StreamHandler()
    if debug:
        stream_log_level = logging.DEBUG
        file_log_level = logging.DEBUG
    else:
        stream_log_level = logging.CRITICAL
        file_log_level = logging.INFO
    logger = logging.getLogger(log_name)
    logger.setLevel(file_log_level)
    stream_handler.setLevel(stream_log_level)
    stream_handler.setFormatter(fmt)
    logger.addHandler(stream_handler)
    if filepath:
        file_handler = logging.FileHandler(filepath)
        file_handler.setLevel(file_log_level)
        file_handler.setFormatter(fmt)
        logger.addHandler(file_handler)
    if sentry_dsn and _has_sentry:
        # BUG FIX: a hard-coded DSN was previously passed here, ignoring the
        # caller-supplied ``sentry_dsn`` argument entirely.
        sentry_sdk.init(sentry_dsn)
    return logger
def expand_homefiles(homefiles, category, homedir, config):
    """Rewrite each named config file option as an absolute path.

    For every option name in *homefiles* under section *category*, expand
    "~" and make relative paths relative to *homedir*, storing the result
    back into *config*.  Empty values are left untouched.
    """
    for name in homefiles:
        value = config.get(category, name)
        if not value:
            continue
        value = os.path.expanduser(value)
        if not os.path.isabs(value):
            value = os.path.join(homedir, value)
        config.set(category, name, value)
| 9,034 | 34.155642 | 85 | py |
pyzor | pyzor-master/pyzor/client.py | """Networked spam-signature detection client.
>>> import pyzor
>>> import pyzor.client
>>> import pyzor.digest
>>> import pyzor.config
To load the accounts file:
>>> accounts = pyzor.config.load_accounts(filename)
To create a client (to then issue commands):
>>> client = pyzor.client.Client(accounts)
To create a client, using the anonymous user:
>>> client = pyzor.client.Client()
To get a digest (of an email.message.Message object, or similar):
>>> digest = pyzor.digest.DataDigester(msg).value
To query a server (where address is a (host, port) pair):
>>> client.ping(address)
>>> client.info(digest, address)
>>> client.report(digest, address)
>>> client.whitelist(digest, address)
>>> client.check(digest, address)
To query the default server (public.pyzor.org):
>>> client.ping()
>>> client.info(digest)
>>> client.report(digest)
>>> client.whitelist(digest)
>>> client.check(digest)
Response will contain, depending on the type of request, some
of the following keys (e.g. client.ping()['Code']):
All responses will have:
- 'Diag' 'OK' or error message
- 'Code' '200' if OK
- 'PV' Protocol Version
- 'Thread'
`info` and `check` responses will also contain:
- '[WL-]Count' Whitelist/Blacklist count
`info` responses will also have:
- '[WL-]Entered' timestamp when message was first whitelisted/blacklisted
- '[WL-]Updated' timestamp when message was last whitelisted/blacklisted
"""
import time
import email
import socket
import logging
import functools
import collections
import pyzor.digest
import pyzor.account
import pyzor.message
import pyzor.hacks.py26
pyzor.hacks.py26.hack_email()
class Client(object):
    """UDP client for the Pyzor protocol.

    Each public method builds the corresponding request, signs and sends
    it to ``address`` (a ``(host, port)`` tuple) and returns the parsed
    response message.
    """
    # Seconds to wait for a reply before raising pyzor.TimeoutError.
    timeout = 5
    # Largest UDP datagram accepted as a response.
    max_packet_size = 8192
    def __init__(self, accounts=None, timeout=None, spec=None):
        if accounts is None:
            accounts = {}
        # Normalise account keys to (host, int(port)) so the lookup in
        # send() matches however the port was originally given.
        self.accounts = dict(((host, int(port)), account)
                             for (host, port), account in accounts.items())
        if spec is None:
            spec = pyzor.digest.digest_spec
        self.spec = spec
        if timeout is not None:
            self.timeout = timeout
        self.log = logging.getLogger("pyzor")
    def ping(self, address=("public.pyzor.org", 24441)):
        """Check that the server is responding."""
        msg = pyzor.message.PingRequest()
        sock = self.send(msg, address)
        return self.read_response(sock, msg.get_thread())
    def pong(self, digest, address=("public.pyzor.org", 24441)):
        """Send a pong request for *digest*."""
        msg = pyzor.message.PongRequest(digest)
        sock = self.send(msg, address)
        return self.read_response(sock, msg.get_thread())
    def info(self, digest, address=("public.pyzor.org", 24441)):
        """Request count/timestamp details for *digest*."""
        msg = pyzor.message.InfoRequest(digest)
        sock = self.send(msg, address)
        return self.read_response(sock, msg.get_thread())
    def report(self, digest, address=("public.pyzor.org", 24441)):
        """Report *digest* as spam."""
        msg = pyzor.message.ReportRequest(digest, self.spec)
        sock = self.send(msg, address)
        return self.read_response(sock, msg.get_thread())
    def whitelist(self, digest, address=("public.pyzor.org", 24441)):
        """Report *digest* as not-spam (whitelist it)."""
        msg = pyzor.message.WhitelistRequest(digest, self.spec)
        sock = self.send(msg, address)
        return self.read_response(sock, msg.get_thread())
    def check(self, digest, address=("public.pyzor.org", 24441)):
        """Query the blacklist/whitelist counts for *digest*."""
        msg = pyzor.message.CheckRequest(digest)
        sock = self.send(msg, address)
        return self.read_response(sock, msg.get_thread())
    def _mock_check(self, digests, address=None):
        # Build a canned all-zero "OK" response without any network I/O;
        # useful for testing. ``digests`` is accepted but unused.
        msg = (u"Code: %s\nDiag: OK\nPV: %s\nThread: 1024\nCount: 0\n"
               u"WL-Count: 0" % (pyzor.message.Response.ok_code,
                                 pyzor.proto_version)).encode('ascii')
        return email.message_from_bytes(msg, _class=pyzor.message.Response)
    def send(self, msg, address=("public.pyzor.org", 24441)):
        """Sign *msg* for the account configured for *address* (falling
        back to the anonymous account) and transmit it over UDP."""
        address = (address[0], int(address[1]))
        msg.init_for_sending()
        try:
            account = self.accounts[address]
        except KeyError:
            account = pyzor.account.AnonymousAccount
        timestamp = int(time.time())
        msg["User"] = account.username
        msg["Time"] = str(timestamp)
        msg["Sig"] = pyzor.account.sign_msg(pyzor.account.hash_key(
            account.key, account.username), timestamp, msg)
        self.log.debug("sending: %r", msg.as_string())
        return self._send(msg, address)
    @staticmethod
    def _send(msg, addr):
        """Try every resolved address family until one send succeeds;
        return the socket used (caller reads the reply from it)."""
        sock = None
        for res in socket.getaddrinfo(addr[0], addr[1], 0, socket.SOCK_DGRAM,
                                      socket.IPPROTO_UDP):
            af, socktype, proto, _, sa = res
            try:
                sock = socket.socket(af, socktype, proto)
            except socket.error:
                sock = None
                continue
            try:
                sock.sendto(msg.as_string().encode("utf8"), 0, sa)
            except socket.timeout:
                sock.close()
                raise pyzor.TimeoutError("Sending to %s time-outed" % sa)
            except socket.error:
                sock.close()
                sock = None
                continue
            # First successful send wins.
            break
        if sock is None:
            raise pyzor.CommError("Unable to send to %s:%s" % addr)
        return sock
    def read_response(self, sock, expected_id):
        """Read one datagram from *sock*, parse it as a Response and check
        its thread id against *expected_id*."""
        sock.settimeout(self.timeout)
        try:
            packet, address = sock.recvfrom(self.max_packet_size)
        except socket.timeout as ex:
            sock.close()
            raise pyzor.TimeoutError("Reading response timed-out.")
        except socket.error as ex:
            sock.close()
            raise pyzor.CommError("Socket error while reading response: %s" %
                                  ex)
        self.log.debug("received: %r/%r", packet, address)
        msg = email.message_from_bytes(packet, _class=pyzor.message.Response)
        msg.ensure_complete()
        try:
            thread_id = msg.get_thread()
            if thread_id != expected_id:
                # An id in the "ok" range that doesn't match is a protocol
                # violation; ids outside that range only merit a warning.
                if thread_id.in_ok_range():
                    raise pyzor.ProtocolError(
                        "received unexpected thread id %d (expected %d)" %
                        (thread_id, expected_id))
                # NOTE(review): Logger.warn() is a deprecated alias for
                # warning().
                self.log.warn("received error thread id %d (expected %d)",
                              thread_id, expected_id)
        except KeyError:
            self.log.warn("no thread id received")
        return msg
class BatchClient(Client):
    """Like the normal Client but with support for batching reports.

    report()/whitelist() accumulate digests per server address and only
    transmit once ``batch_size`` digests are queued for that address.
    """

    def __init__(self, accounts=None, timeout=None, spec=None, batch_size=10):
        Client.__init__(self, accounts=accounts, timeout=timeout, spec=spec)
        self.batch_size = batch_size
        self.r_requests = {}
        self.w_requests = {}
        self.flush()

    def report(self, digest, address=("public.pyzor.org", 24441)):
        """Queue a spam report; sent once batch_size digests accumulate."""
        self._add_digest(digest, address, self.r_requests)

    def whitelist(self, digest, address=("public.pyzor.org", 24441)):
        """Queue a whitelist request; sent once batch_size accumulate."""
        self._add_digest(digest, address, self.w_requests)

    def _add_digest(self, digest, address, requests):
        # Normalise the address and add the digest to that server's pending
        # batch; transmit and drop the batch once it is full.
        address = (address[0], int(address[1]))
        msg = requests[address]
        msg.add_digest(digest)
        if msg.digest_count >= self.batch_size:
            try:
                return self.send(msg, address)
            finally:
                # Drop the batch even if sending failed so a persistent
                # error cannot grow the pending request forever.
                del requests[address]

    def flush(self):
        """Deleting any saved digest reports."""
        self.r_requests = collections.defaultdict(
            functools.partial(pyzor.message.ReportRequest, spec=self.spec))
        self.w_requests = collections.defaultdict(
            functools.partial(pyzor.message.WhitelistRequest, spec=self.spec))

    def force(self):
        """Force send any remaining reports."""
        for address, msg in self.r_requests.items():
            try:
                self.send(msg, address)
            # BUG FIX: a bare ``except:`` would also swallow SystemExit and
            # KeyboardInterrupt (force() runs from __del__); only ignore
            # ordinary errors.
            except Exception:
                continue
        for address, msg in self.w_requests.items():
            try:
                self.send(msg, address)
            except Exception:
                continue

    def __del__(self):
        # Best-effort delivery of any unsent batches at garbage collection.
        self.force()
class ClientRunner(object):
    """Drive a client routine against one or more servers, collecting a
    textual result per server and an overall success flag."""

    def __init__(self, routine):
        self.log = logging.getLogger("pyzor")
        self.routine = routine
        self.all_ok = True
        self.results = []

    def run(self, server, args, kwargs=None):
        """Invoke the routine with *args*/*kwargs*, recording the outcome
        for *server* (a (host, port) tuple)."""
        if kwargs is None:
            kwargs = {}
        message = "%s:%s\t" % server
        response = None
        try:
            response = self.routine(*args, **kwargs)
            self.handle_response(response, message)
        except (pyzor.CommError, KeyError, ValueError) as e:
            # BUG FIX: KeyError and ValueError have no ``code`` attribute,
            # so ``e.code`` raised AttributeError here; fall back to None.
            code = getattr(e, "code", None)
            self.results.append("%s%s\n" % (message, (code, str(e))))
            self.log.error("%s\t%s: %s", server, e.__class__.__name__, e)
            self.all_ok = False

    def handle_response(self, response, message):
        """message is a string we've built up so far"""
        if not response.is_ok():
            self.all_ok = False
        self.results.append("%s%s\n" % (message, response.head_tuple()))
class CheckClientRunner(ClientRunner):
    """ClientRunner specialised for ``check`` requests: tracks whether the
    digest was reported or whitelisted beyond the configured thresholds."""

    def __init__(self, routine, r_count=0, wl_count=0):
        ClientRunner.__init__(self, routine)
        self.found_hit = False
        self.whitelisted = False
        self.hit_count = 0
        self.whitelist_count = 0
        # Thresholds: counts must exceed these to set the flags above.
        self.r_count_found = r_count
        self.wl_count_clears = wl_count

    def handle_response(self, response, message):
        message += "%s\t" % str(response.head_tuple())
        if not response.is_ok():
            self.all_ok = False
        else:
            self.hit_count = int(response['Count'])
            self.whitelist_count = int(response['WL-Count'])
            # A whitelisting beyond the threshold trumps any report count.
            if self.whitelist_count > self.wl_count_clears:
                self.whitelisted = True
            elif self.hit_count > self.r_count_found:
                self.found_hit = True
            message += "%d\t%d" % (self.hit_count, self.whitelist_count)
        self.results.append(message + "\n")
class InfoClientRunner(ClientRunner):
    """ClientRunner specialised for ``info`` requests: renders counters and
    timestamps from the response into the per-server result text."""

    def handle_response(self, response, message):
        message += "%s\n" % str(response.head_tuple())
        if not response.is_ok():
            self.all_ok = False
        else:
            for key in ('Count', 'Entered', 'Updated', 'WL-Count',
                        'WL-Entered', 'WL-Updated'):
                if key not in response:
                    continue
                val = int(response[key])
                if 'Count' in key:
                    rendered = str(val)
                elif val == -1:
                    # -1 marks "never seen".
                    rendered = 'Never'
                else:
                    rendered = time.ctime(val)
                message += ("\t%s: %s\n" % (key, rendered))
        self.results.append(message + "\n")
| 10,826 | 33.262658 | 78 | py |
pyzor | pyzor-master/pyzor/__init__.py | """Networked spam-signature detection."""
__author__ = "Frank J. Tobin, ftobin@neverending.org"
__credits__ = "Tony Meyer, Dreas von Donselaar, all the Pyzor contributors."
__version__ = "1.1.1"
import hashlib
# Protocol name and version advertised in every request/response.
proto_name = 'pyzor'
proto_version = 2.1
# Username used when no account is configured for a server.
anonymous_user = 'anonymous'
# We would like to use sha512, but that would mean that all the digests
# changed, so for now, we stick with sha1 (which is the same as the old
# sha module).
sha = hashlib.sha1
# This is the maximum time between a client signing a Pyzor request and the
# server checking the signature.
MAX_TIMESTAMP_DIFFERENCE = 300  # seconds
class CommError(Exception):
    """Something in general went wrong with the transaction."""
    # HTTP-like status code reported back to the caller.
    code = 400
class ProtocolError(CommError):
    """Something is wrong with talking the protocol."""
    code = 400
class TimeoutError(CommError):
    """The connection timed out."""
    # NOTE(review): this shadows the builtin TimeoutError; callers must
    # reference it as pyzor.TimeoutError.
    code = 504
class IncompleteMessageError(ProtocolError):
    """A complete requested was not received."""
    pass
class UnsupportedVersionError(ProtocolError):
    """Client is using an unsupported protocol version."""
    pass
class SignatureError(CommError):
    """Unknown user, signature on msg invalid, or not within allowed time
    range."""
    pass
class AuthorizationError(CommError):
    """The signature was valid, but the user is not permitted to do the
    requested action."""
    pass
| 1,410 | 23.327586 | 76 | py |
pyzor | pyzor-master/pyzor/forwarder.py | """Manage the forwarder process."""
import logging
import threading
try:
import Queue
except ImportError:
import queue as Queue
class Forwarder(object):
    """Forwards digest to remote pyzor servers.

    Digests are queued by queue_forward_request() and drained by a
    background thread started via start_forwarding().
    """

    def __init__(self, forwarding_client, remote_servers,
                 max_queue_size=10000):
        """
        forward_client: a pyzor.client.Client instance to use as
            forwarding client
        remote_servers: a list of (hostname, port) tuples where digests
            should be forwarded to
        max_queue_size: max amount of queued digests
        """
        self.log = logging.getLogger("pyzord")
        self.forwarding_client = forwarding_client
        self.forward_queue = Queue.Queue(max_queue_size)
        self.remote_servers = remote_servers

    def _forward_loop(self):
        """read forwarding requests from the queue"""
        while True:
            try:
                digest, whitelist = self.forward_queue.get(block=True,
                                                           timeout=2)
            except Queue.Empty:
                # If the forwarding client has been deleted we should
                # end the thread
                if self.forwarding_client is None:
                    return
                else:
                    continue
            for server in self.remote_servers:
                try:
                    if whitelist:
                        self.forwarding_client.whitelist(digest, server)
                    else:
                        self.forwarding_client.report(digest, server)
                except Exception as ex:
                    # Logger.warn() is a deprecated alias; use warning().
                    self.log.warning('Forwarding digest %s to %s failed: %s',
                                     digest, server, ex)

    def queue_forward_request(self, digest, whitelist=False):
        """If forwarding is enabled, insert a digest into the forwarding
        queue.

        If whitelist is True, the digest will be forwarded as a whitelist
        request.  If the queue is full, the digest is silently dropped.
        """
        if self.forwarding_client is None:  # forwarding has been disabled
            return
        try:
            self.forward_queue.put_nowait((digest, whitelist),)
        except Queue.Full:
            pass

    def start_forwarding(self):
        """start the forwarding thread"""
        threading.Thread(target=self._forward_loop).start()

    def stop_forwarding(self):
        """disable forwarding and tell the forwarding thread to end
        itself (it exits after its next queue-poll timeout)."""
        self.forwarding_client = None
| 2,568 | 34.191781 | 79 | py |
pyzor | pyzor-master/pyzor/engines/redis_.py | """Redis database engine."""
import time
import logging
import datetime
import functools
try:
import redis
_has_redis = True
except ImportError:
redis = None
_has_redis = False
from pyzor.engines.common import *
VERSION = "1"
NAMESPACE = "pyzord.digest_v%s" % VERSION
def encode_date(date):
    """Convert a datetime (or None) to an integer Unix timestamp.

    None encodes to 0, matching decode_date's treatment of 0 as None.
    """
    return 0 if date is None else int(time.mktime(date.timetuple()))
def decode_date(stamp):
    """Return a datetime object from a Unix Timestamp.

    A (possibly string) value of 0 decodes to None, mirroring encode_date.
    """
    seconds = int(stamp)
    return datetime.datetime.fromtimestamp(seconds) if seconds else None
def safe_call(f):
    """Decorator that wraps a method for handling database operations.

    Redis errors are logged and re-raised as DatabaseError so callers see
    a consistent exception type.
    """
    # functools.wraps preserves the wrapped function's name/docstring,
    # which the error log below and introspection rely on.
    @functools.wraps(f)
    def wrapped_f(self, *args, **kwargs):
        # This only logs the error and raise the usual Error for consistency,
        # the redis library takes care of reconnecting and everything else.
        try:
            return f(self, *args, **kwargs)
        except redis.exceptions.RedisError as e:
            self.log.error("Redis error while calling %s: %s",
                           f.__name__, e)
            raise DatabaseError("Database temporarily unavailable.")
    return wrapped_f
class RedisDBHandle(BaseEngine):
    """Pyzor digest store backed by a redis server.

    Each digest is stored as one redis hash under ``NAMESPACE.<digest>``,
    with report/whitelist counters and timestamps as hash fields; records
    optionally expire after ``max_age`` seconds.
    """
    absolute_source = False
    handles_one_step = True
    log = logging.getLogger("pyzord")
    def __init__(self, fn, mode, max_age=None):
        self.max_age = max_age
        # The 'fn' is host,port,password,db. We ignore mode.
        # We store the authentication details so that we can reconnect if
        # necessary.
        self._dsn = fn
        fn = fn.split(",")
        self.host = fn[0] or "localhost"
        self.port = fn[1] or "6379"
        self.passwd = fn[2] or None
        self.db_name = fn[3] or "0"
        self.db = self._get_new_connection()
        self._check_version()
    @staticmethod
    def _encode_record(r):
        # Flatten a Record into the hash-field mapping stored in redis.
        return {"r_count": r.r_count,
                "r_entered": encode_date(r.r_entered),
                "r_updated": encode_date(r.r_updated),
                "wl_count": r.wl_count,
                "wl_entered": encode_date(r.wl_entered),
                "wl_updated": encode_date(r.wl_updated)
                }
    @staticmethod
    def _decode_record(r):
        # An empty mapping (missing key) decodes to a fresh, zeroed Record.
        if not r:
            return Record()
        return Record(r_count=int(r.get(b"r_count", 0)),
                      r_entered=decode_date(r.get(b"r_entered", 0)),
                      r_updated=decode_date(r.get(b"r_updated", 0)),
                      wl_count=int(r.get(b"wl_count", 0)),
                      wl_entered=decode_date(r.get(b"wl_entered", 0)),
                      wl_updated=decode_date(r.get(b"wl_updated", 0)))
    def __iter__(self):
        # Yield the bare digests by stripping the namespace prefix.
        # NOTE(review): redis-py returns keys as bytes under Python 3, in
        # which case ``rsplit(".", 1)`` with a str separator would raise
        # TypeError - confirm which key type is in use here.
        for key in self.db.keys(self._real_key("*")):
            yield key.rsplit(".", 1)[-1]
    def _iteritems(self):
        # Yield (digest, Record) pairs, skipping undecodable records.
        for key in self:
            try:
                yield key, self[key]
            except Exception as ex:
                self.log.warning("Invalid record %s: %s", key, ex)
    def iteritems(self):
        return self._iteritems()
    def items(self):
        return list(self._iteritems())
    @staticmethod
    def _real_key(key):
        # Map a digest to its namespaced redis key.
        return "%s.%s" % (NAMESPACE, key)
    @safe_call
    def _get_new_connection(self):
        # A host containing "/" is treated as a unix socket path.
        if "/" in self.host:
            return redis.StrictRedis(unix_socket_path=self.host,
                                     db=int(self.db_name),
                                     password=self.passwd)
        return redis.StrictRedis(host=self.host, port=int(self.port),
                                 db=int(self.db_name), password=self.passwd)
    @safe_call
    def __getitem__(self, key):
        return self._decode_record(self.db.hgetall(self._real_key(key)))
    @safe_call
    def __setitem__(self, key, value):
        real_key = self._real_key(key)
        self.db.hmset(real_key, self._encode_record(value))
        if self.max_age is not None:
            self.db.expire(real_key, self.max_age)
    @safe_call
    def __delitem__(self, key):
        self.db.delete(self._real_key(key))
    @safe_call
    def report(self, keys):
        """Increment the report counter for each digest, creating the
        record and refreshing timestamps/TTL as needed."""
        now = int(time.time())
        for key in keys:
            real_key = self._real_key(key)
            self.db.hincrby(real_key, "r_count")
            # hsetnx only sets "entered" the first time the digest is seen.
            self.db.hsetnx(real_key, "r_entered", now)
            self.db.hset(real_key, "r_updated", now)
            if self.max_age:
                self.db.expire(real_key, self.max_age)
    @safe_call
    def whitelist(self, keys):
        """Increment the whitelist counter for each digest, creating the
        record and refreshing timestamps/TTL as needed."""
        now = int(time.time())
        for key in keys:
            real_key = self._real_key(key)
            self.db.hincrby(real_key, "wl_count")
            self.db.hsetnx(real_key, "wl_entered", now)
            self.db.hset(real_key, "wl_updated", now)
            if self.max_age:
                self.db.expire(real_key, self.max_age)
    @classmethod
    def get_prefork_connections(cls, fn, mode, max_age=None):
        """Yields a number of database connections suitable for a Pyzor
        pre-fork server.
        """
        while True:
            yield functools.partial(cls, fn, mode, max_age=max_age)
    def _check_version(self):
        """Check if there are deprecated records and warn the user."""
        old_keys = len(self.db.keys("pyzord.digest.*"))
        if old_keys:
            cmd = ("pyzor-migrate --delete --se=redis_v0 --sd=%s "
                   "--de=redis --dd=%s" % (self._dsn, self._dsn))
            self.log.critical("You have %s records in the deprecated version "
                              "of the redis engine.", old_keys)
            self.log.critical("Please migrate the records with: %r", cmd)
class ThreadedRedisDBHandle(RedisDBHandle):
    # Multi-threaded variant: no extra locking is added here, presumably
    # because the underlying redis client copes with concurrent use -
    # confirm before relying on it.  ``bound`` is accepted for interface
    # compatibility with other engines and ignored.
    def __init__(self, fn, mode, max_age=None, bound=None):
        RedisDBHandle.__init__(self, fn, mode, max_age=max_age)
# Engine registration consumed by the server: advertise which concurrency
# models this backend supports (all None when the redis library is absent;
# no multiprocessing handler is provided in any case).
if not _has_redis:
    handle = DBHandle(single_threaded=None,
                      multi_threaded=None,
                      multi_processing=None,
                      prefork=None)
else:
    handle = DBHandle(single_threaded=RedisDBHandle,
                      multi_threaded=ThreadedRedisDBHandle,
                      multi_processing=None,
                      prefork=RedisDBHandle)
| 6,235 | 31.310881 | 80 | py |
pyzor | pyzor-master/pyzor/engines/gdbm_.py | """Gdbm database engine."""
try:
import gdbm as gdbm
_has_gdbm = True
except ImportError:
try:
import dbm.gnu as gdbm
_has_gdbm = True
except ImportError:
_has_gdbm = False
import time
import logging
import datetime
import threading
from pyzor.engines.common import Record, DBHandle, BaseEngine
def _dt_decode(datetime_str):
"""Decode a string into a datetime object."""
if datetime_str == 'None':
return None
try:
return datetime.datetime.strptime(datetime_str, "%Y-%m-%d %H:%M:%S.%f")
except ValueError:
return datetime.datetime.strptime(datetime_str, "%Y-%m-%d %H:%M:%S")
class GdbmDBHandle(BaseEngine):
    """Pyzor digest store backed by a gdbm file.

    Records are serialised as comma-separated strings (see encode_record);
    background timers periodically sync the file to disk and, when
    ``max_age`` is set, purge stale records and reorganize the database.
    """
    absolute_source = True
    handles_one_step = False
    sync_period = 60
    reorganize_period = 3600 * 24  # 1 day
    fields = ('r_count', 'r_entered', 'r_updated',
              'wl_count', 'wl_entered', 'wl_updated')
    _fields = [('r_count', int),
               ('r_entered', _dt_decode),
               ('r_updated', _dt_decode),
               ('wl_count', int),
               ('wl_entered', _dt_decode),
               ('wl_updated', _dt_decode)]
    this_version = '1'
    log = logging.getLogger("pyzord")

    def __init__(self, fn, mode, max_age=None):
        self.max_age = max_age
        self.db = gdbm.open(fn, mode)
        self.reorganize_timer = None
        self.sync_timer = None
        self.start_reorganizing()
        self.start_syncing()

    def __iter__(self):
        # Walk the gdbm key chain.
        k = self.db.firstkey()
        while k is not None:
            yield k
            k = self.db.nextkey(k)

    def _iteritems(self):
        # Yield (key, Record) pairs, skipping undecodable records.
        for k in self:
            try:
                yield k, self._really_getitem(k)
            except Exception as e:
                self.log.warning("Invalid record %s: %s", k, e)

    def iteritems(self):
        return self._iteritems()

    def items(self):
        return list(self._iteritems())

    def apply_method(self, method, varargs=(), kwargs=None):
        # Indirection point so ThreadedGdbmDBHandle can wrap calls in a lock.
        if kwargs is None:
            kwargs = {}
        return method(*varargs, **kwargs)

    def __getitem__(self, key):
        return self.apply_method(self._really_getitem, (key,))

    def _really_getitem(self, key):
        return GdbmDBHandle.decode_record(self.db[key])

    def __setitem__(self, key, value):
        self.apply_method(self._really_setitem, (key, value))

    def _really_setitem(self, key, value):
        self.db[key] = GdbmDBHandle.encode_record(value)

    def __delitem__(self, key):
        self.apply_method(self._really_delitem, (key,))

    def _really_delitem(self, key):
        del self.db[key]

    def start_syncing(self):
        """Flush to disk now and re-arm the periodic sync timer."""
        if self.db:
            self.apply_method(self._really_sync)
        self.sync_timer = threading.Timer(self.sync_period,
                                          self.start_syncing)
        # Thread.setDaemon() is deprecated; assign the attribute instead.
        self.sync_timer.daemon = True
        self.sync_timer.start()

    def _really_sync(self):
        self.db.sync()

    def start_reorganizing(self):
        """Purge stale records now and re-arm the periodic reorganize
        timer (no-op unless max_age is configured)."""
        if not self.max_age:
            return
        if self.db:
            self.apply_method(self._really_reorganize)
        self.reorganize_timer = threading.Timer(self.reorganize_period,
                                                self.start_reorganizing)
        self.reorganize_timer.daemon = True
        self.reorganize_timer.start()

    def _really_reorganize(self):
        # Delete every record whose last report update is older than
        # max_age, then compact the file.
        self.log.debug("reorganizing the database")
        key = self.db.firstkey()
        # Renamed from "breakpoint", which shadows the Python 3.7+ builtin.
        cutoff = time.time() - self.max_age
        while key is not None:
            rec = self._really_getitem(key)
            delkey = None
            if int(time.mktime(rec.r_updated.timetuple())) < cutoff:
                self.log.debug("deleting key %s", key)
                delkey = key
            # Advance before deleting so the key chain stays valid.
            key = self.db.nextkey(key)
            if delkey:
                self._really_delitem(delkey)
        self.db.reorganize()

    @classmethod
    def encode_record(cls, value):
        """Serialise a Record as "version,r_count,...,wl_updated"."""
        values = [cls.this_version]
        values.extend(["%s" % getattr(value, x) for x in cls.fields])
        return ",".join(values)

    @classmethod
    def decode_record(cls, s):
        """Deserialise a stored value into a Record, dispatching on the
        record format version.

        Raises ValueError for values that cannot be decoded.
        """
        try:
            s = s.decode("utf8")
        except UnicodeError:
            # BUG FIX: StandardError no longer exists under Python 3 (this
            # raise was a NameError); use ValueError instead.
            raise ValueError("don't know how to handle db value %s" %
                             repr(s))
        parts = s.split(',')
        version = parts[0]
        if len(parts) == 3:
            # Legacy unversioned format: count,entered,updated.
            dispatch = cls.decode_record_0
        elif version == '1':
            dispatch = cls.decode_record_1
        else:
            raise ValueError("don't know how to handle db value %s" %
                             repr(s))
        return dispatch(s)

    @staticmethod
    def decode_record_0(s):
        """Decode the legacy 3-field format (timestamps stored as ints)."""
        r = Record()
        parts = s.split(',')
        fields = ('r_count', 'r_entered', 'r_updated')
        assert len(parts) == len(fields)
        for field, part in zip(fields, parts):
            setattr(r, field, int(part))
        return r

    @classmethod
    def decode_record_1(cls, s):
        """Decode the current versioned format using the per-field
        decoders in cls._fields."""
        r = Record()
        parts = s.split(',')[1:]
        assert len(parts) == len(cls.fields)
        for part, field in zip(parts, cls._fields):
            f, decode = field
            setattr(r, f, decode(part))
        return r
class ThreadedGdbmDBHandle(GdbmDBHandle):
    """Like GdbmDBHandle, but handles multi-threaded access."""
    def __init__(self, fn, mode, max_age=None, bound=None):
        # The lock must exist before the base __init__ runs, because the
        # base constructor triggers apply_method() via start_syncing()/
        # start_reorganizing().  ``bound`` is accepted for interface
        # compatibility with other engines and ignored.
        self.db_lock = threading.Lock()
        GdbmDBHandle.__init__(self, fn, mode, max_age=max_age)
    def apply_method(self, method, varargs=(), kwargs=None):
        # Serialise every database operation behind a single lock.
        if kwargs is None:
            kwargs = {}
        with self.db_lock:
            return GdbmDBHandle.apply_method(self, method, varargs=varargs,
                                             kwargs=kwargs)
# This won't work because the gdbm object needs to be in shared memory of the
# spawned processes.
# class ProcessGdbmDBHandle(ThreadedGdbmDBHandle):
# def __init__(self, fn, mode, max_age=None, bound=None):
# ThreadedGdbmDBHandle.__init__(self, fn, mode, max_age=max_age,
# bound=bound)
# self.db_lock = multiprocessing.Lock()
# Engine registration consumed by the server: advertise which concurrency
# models this backend supports (all None when no gdbm module is available;
# gdbm offers no multiprocessing or prefork handler).
if not _has_gdbm:
    handle = DBHandle(single_threaded=None,
                      multi_threaded=None,
                      multi_processing=None,
                      prefork=None)
else:
    handle = DBHandle(single_threaded=GdbmDBHandle,
                      multi_threaded=ThreadedGdbmDBHandle,
                      multi_processing=None,
                      prefork=None)
| 6,609 | 30.327014 | 79 | py |
pyzor | pyzor-master/pyzor/engines/mysql.py | """MySQLdb database engine."""
import time
import logging
import datetime
import itertools
import functools
import threading
try:
import Queue
except ImportError:
import queue as Queue
try:
import MySQLdb
import MySQLdb.cursors
_has_mysql = True
except ImportError:
_has_mysql = False
from pyzor.engines.common import *
class MySQLDBHandle(BaseEngine):
absolute_source = False
handles_one_step = True
# The table must already exist, and have this schema:
# CREATE TABLE `public` (
# `digest` char(40) default NULL,
# `r_count` int(11) default NULL,
# `wl_count` int(11) default NULL,
# `r_entered` datetime default NULL,
# `wl_entered` datetime default NULL,
# `r_updated` datetime default NULL,
# `wl_updated` datetime default NULL,
# PRIMARY KEY (`digest`)
# )
# XXX Re-organising might be faster with a r_updated index. However,
# XXX the re-organisation time isn't that important, and that would
# XXX (slightly) slow down all inserts, so we leave it for now.
reorganize_period = 3600 * 24 # 1 day
reconnect_period = 60 # seconds
log = logging.getLogger("pyzord")
    def __init__(self, fn, mode, max_age=None):
        """Parse the DSN, connect, and start periodic re-organization."""
        self.max_age = max_age
        self.db = None
        # The 'fn' is host,user,password,db,table. We ignore mode.
        # We store the authentication details so that we can reconnect if
        # necessary.
        self.host, self.user, self.passwd, self.db_name, \
            self.table_name = fn.split(",")
        self.last_connect_attempt = 0  # We have never connected.
        self.reorganize_timer = None
        self.reconnect()
        self.start_reorganizing()
    def _get_new_connection(self):
        """Returns a new db connection."""
        db = MySQLdb.connect(host=self.host, user=self.user,
                             db=self.db_name, passwd=self.passwd)
        # Autocommit so each statement is visible immediately without
        # explicit transaction management.
        db.autocommit(True)
        return db
    def _check_reconnect_time(self):
        """Return True when enough time has passed since the last
        connection attempt to try again (rate-limits reconnections)."""
        if time.time() - self.last_connect_attempt < self.reconnect_period:
            # Too soon to reconnect.
            self.log.debug("Can't reconnect until %s",
                           (time.ctime(self.last_connect_attempt +
                                       self.reconnect_period)))
            return False
        return True
    def reconnect(self):
        """Drop any current connection and try to establish a new one,
        honouring the reconnect rate limit; on failure self.db is None."""
        if not self._check_reconnect_time():
            return
        if self.db:
            try:
                self.db.close()
            except MySQLdb.Error:
                # Closing a broken connection may itself fail; ignore.
                pass
        try:
            self.db = self._get_new_connection()
        except MySQLdb.Error as e:
            self.log.error("Unable to connect to database: %s", e)
            self.db = None
        # Keep track of when we connected, so that we don't retry too often.
        self.last_connect_attempt = time.time()
    def _iter(self, db):
        """Yield every digest in the table, using a server-side cursor so
        the whole table is not loaded into memory."""
        c = db.cursor(cursorclass=MySQLdb.cursors.SSCursor)
        c.execute("SELECT digest FROM %s" % self.table_name)
        while True:
            row = c.fetchone()
            if not row:
                break
            yield row[0]
        c.close()
    def __iter__(self):
        # Route through _safe_call so connection errors are handled
        # uniformly (logged, reconnect scheduled, DatabaseError raised).
        return self._safe_call("iter", self._iter, ())
    def _iteritems(self, db):
        """Yield (digest, Record) pairs via a server-side cursor."""
        c = db.cursor(cursorclass=MySQLdb.cursors.SSCursor)
        # Column order must match the order of the Record constructor args.
        c.execute("SELECT digest, r_count, wl_count, r_entered, r_updated, "
                  "wl_entered, wl_updated FROM %s" % self.table_name)
        while True:
            row = c.fetchone()
            if not row:
                break
            yield row[0], Record(*row[1:])
        c.close()
    def iteritems(self):
        # Lazy iteration over (digest, Record) pairs.
        return self._safe_call("iteritems", self._iteritems, ())
    def items(self):
        # Materialise all (digest, Record) pairs as a list.
        return list(self._safe_call("iteritems", self._iteritems, ()))
    def __del__(self):
        """Close the database when the object is no longer needed."""
        try:
            if self.db:
                self.db.close()
        except MySQLdb.Error:
            # Best effort only -- we are being torn down anyway.
            pass
    def _safe_call(self, name, method, args):
        """Run method(*args, db=self.db), converting driver failures into
        DatabaseError after scheduling a reconnect.

        :param name: operation label, used only for logging.
        """
        try:
            return method(*args, db=self.db)
        except (MySQLdb.Error, AttributeError) as ex:
            # AttributeError covers self.db being None (not connected).
            self.log.error("%s failed: %s", name, ex)
            self.reconnect()
            # Retrying just complicates the logic - we don't really care if
            # a single query fails (and it's possible that it would fail)
            # on the second attempt anyway. Any exceptions are caught by
            # the server, and a 'nice' message provided to the caller.
            raise DatabaseError("Database temporarily unavailable.")
    def report(self, keys):
        """Increment the spam (report) count for each digest in keys."""
        return self._safe_call("report", self._report, (keys,))

    def whitelist(self, keys):
        """Increment the whitelist count for each digest in keys."""
        return self._safe_call("whitelist", self._whitelist, (keys,))

    def __getitem__(self, key):
        # Raises KeyError (via _really__getitem__) for unknown digests.
        return self._safe_call("getitem", self._really__getitem__, (key,))

    def __setitem__(self, key, value):
        return self._safe_call("setitem", self._really__setitem__,
                               (key, value))

    def __delitem__(self, key):
        return self._safe_call("delitem", self._really__delitem__, (key,))
def _report(self, keys, db=None):
c = db.cursor()
try:
c.executemany("INSERT INTO %s (digest, r_count, wl_count, "
"r_entered, r_updated, wl_entered, wl_updated) "
"VALUES (%%s, 1, 0, NOW(), NOW(), NOW(), NOW()) ON "
"DUPLICATE KEY UPDATE r_count=r_count+1, "
"r_updated=NOW()" % self.table_name,
map(lambda key: (key,), keys))
finally:
c.close()
def _whitelist(self, keys, db=None):
c = db.cursor()
try:
c.executemany("INSERT INTO %s (digest, r_count, wl_count, "
"r_entered, r_updated, wl_entered, wl_updated) "
"VALUES (%%s, 0, 1, NOW(), NOW(), NOW(), NOW()) ON "
"DUPLICATE KEY UPDATE wl_count=wl_count+1, "
"wl_updated=NOW()" % self.table_name,
map(lambda key: (key,), keys))
finally:
c.close()
    def _really__getitem__(self, key, db=None):
        """__getitem__ without the exception handling.

        :raises KeyError: when the digest is not in the table.
        """
        c = db.cursor()
        # The order here must match the order of the arguments to the
        # Record constructor.
        c.execute("SELECT r_count, wl_count, r_entered, r_updated, "
                  "wl_entered, wl_updated FROM %s WHERE digest=%%s" %
                  self.table_name, (key,))
        try:
            try:
                return Record(*c.fetchone())
            except TypeError:
                # fetchone() returned None, i.e. there is no such record
                raise KeyError()
        finally:
            c.close()
    def _really__setitem__(self, key, value, db=None):
        """__setitem__ without the exception handling.

        Performs an upsert: inserts a full record, or overwrites every
        column when the digest already exists (hence the duplicated values
        in the parameter tuple).
        """
        c = db.cursor()
        try:
            c.execute("INSERT INTO %s (digest, r_count, wl_count, "
                      "r_entered, r_updated, wl_entered, wl_updated) "
                      "VALUES (%%s, %%s, %%s, %%s, %%s, %%s, %%s) ON "
                      "DUPLICATE KEY UPDATE r_count=%%s, wl_count=%%s, "
                      "r_entered=%%s, r_updated=%%s, wl_entered=%%s, "
                      "wl_updated=%%s" % self.table_name,
                      (key, value.r_count, value.wl_count, value.r_entered,
                       value.r_updated, value.wl_entered, value.wl_updated,
                       value.r_count, value.wl_count, value.r_entered,
                       value.r_updated, value.wl_entered, value.wl_updated))
        finally:
            c.close()
def _really__delitem__(self, key, db=None):
"""__delitem__ without the exception handling."""
c = db.cursor()
try:
c.execute("DELETE FROM %s WHERE digest=%%s" % self.table_name,
(key,))
finally:
c.close()
def start_reorganizing(self):
if not self.max_age:
return
self.log.debug("reorganizing the database")
breakpoint = (datetime.datetime.now() -
datetime.timedelta(seconds=self.max_age))
db = self._get_new_connection()
c = db.cursor()
try:
c.execute("DELETE FROM %s WHERE r_updated<%%s" %
self.table_name, (breakpoint,))
except (MySQLdb.Error, AttributeError) as e:
self.log.warn("Unable to reorganise: %s", e)
finally:
c.close()
db.close()
self.reorganize_timer = threading.Timer(self.reorganize_period,
self.start_reorganizing)
self.reorganize_timer.setDaemon(True)
self.reorganize_timer.start()
    @classmethod
    def get_prefork_connections(cls, fn, mode, max_age=None):
        """Yields a number of database connections suitable for a Pyzor
        pre-fork server.
        """
        # Only run the reorganize timer in the first child process.
        yield functools.partial(cls, fn, mode, max_age=max_age)
        # All subsequent children get max_age=None, which disables their
        # reorganize timer (see start_reorganizing).
        while True:
            yield functools.partial(cls, fn, mode, max_age=None)
class ThreadedMySQLDBHandle(MySQLDBHandle):
    """MySQL handle for threaded servers.

    When ``bound`` is set, a fixed-size pool of that many connections is
    shared through a queue; otherwise every operation opens and closes its
    own connection.
    """

    def __init__(self, fn, mode, max_age=None, bound=None):
        self.bound = bound
        if self.bound:
            self.db_queue = Queue.Queue()
        MySQLDBHandle.__init__(self, fn, mode, max_age=max_age)

    def _get_connection(self):
        """Fetch a connection: from the pool (blocking) in bound mode, or a
        fresh one otherwise."""
        if self.bound:
            return self.db_queue.get()
        else:
            return self._get_new_connection()

    def _release_connection(self, db):
        """Return a connection to the pool, or close it in unbound mode."""
        if self.bound:
            self.db_queue.put(db)
        else:
            db.close()

    def _safe_call(self, name, method, args):
        """Run method with a pooled/fresh connection, retrying once after a
        ping in bound mode; driver errors become DatabaseError."""
        db = self._get_connection()
        try:
            return method(*args, db=db)
        except (MySQLdb.Error, AttributeError) as ex:
            self.log.error("%s failed: %s", name, ex)
            if not self.bound:
                raise DatabaseError("Database temporarily unavailable.")
            try:
                # Connection might be timeout, ping and retry
                db.ping(True)
                return method(*args, db=db)
            except (MySQLdb.Error, AttributeError) as ex:
                # attempt a new connection, if we can retry
                db = self._reconnect(db)
                raise DatabaseError("Database temporarily unavailable.")
        finally:
            # Always hand the (possibly replaced) connection back.
            self._release_connection(db)

    def reconnect(self):
        """Fill the pool with fresh connections (bound mode only)."""
        if not self.bound:
            return
        for _ in range(self.bound):
            self.db_queue.put(self._get_new_connection())

    def _reconnect(self, db):
        """Replace db with a new connection if the back-off window allows,
        otherwise return the existing one unchanged."""
        if not self._check_reconnect_time():
            return db
        else:
            self.last_connect_attempt = time.time()
            return self._get_new_connection()

    def __del__(self):
        if not self.bound:
            return
        # Bug fix: the original used iter(self.db_queue.get_nowait), but
        # single-argument iter() requires an iterable, so it raised
        # TypeError before closing anything.  Drain the pool explicitly.
        while True:
            try:
                db = self.db_queue.get_nowait()
            except Queue.Empty:
                break
            try:
                db.close()
            except MySQLdb.Error:
                continue
class ProcessMySQLDBHandle(MySQLDBHandle):
    """MySQL handle for multi-process servers: every operation uses its own
    short-lived connection, so nothing is shared between processes."""

    def __init__(self, fn, mode, max_age=None):
        MySQLDBHandle.__init__(self, fn, mode, max_age=max_age)

    def reconnect(self):
        # No persistent connection is kept, so there is nothing to do.
        pass

    def __del__(self):
        # No persistent connection is kept, so there is nothing to do.
        pass

    def _safe_call(self, name, method, args):
        """Run method with a throw-away connection; driver errors are logged
        and re-raised as DatabaseError."""
        db = None
        try:
            db = self._get_new_connection()
            return method(*args, db=db)
        except (MySQLdb.Error, AttributeError) as ex:
            self.log.error("%s failed: %s", name, ex)
            raise DatabaseError("Database temporarily unavailable.")
        finally:
            if db is not None:
                db.close()
# Export the engine entry points; every slot is disabled when the MySQLdb
# driver could not be imported.
if _has_mysql:
    handle = DBHandle(single_threaded=MySQLDBHandle,
                      multi_threaded=ThreadedMySQLDBHandle,
                      multi_processing=ProcessMySQLDBHandle,
                      prefork=MySQLDBHandle)
else:
    handle = DBHandle(single_threaded=None,
                      multi_threaded=None,
                      multi_processing=None,
                      prefork=None)
| 12,438 | 34.438746 | 78 | py |
pyzor | pyzor-master/pyzor/engines/redis_v0.py | """Redis database engine.
XXX Deprecated version.
"""
import logging
import datetime
import functools
try:
import redis
_has_redis = True
except ImportError:
redis = None
_has_redis = False
from pyzor.engines.common import *
NAMESPACE = "pyzord.digest"
def encode_date(d):
    """Serialize a datetime as 'YYYY-MM-DD HH:MM:SS' (None becomes "")."""
    # Plain functions instead of assigned lambdas (PEP 8 E731), with the
    # exact same behaviour and names.
    if d is None:
        return ""
    return d.strftime("%Y-%m-%d %H:%M:%S")


def decode_date(x):
    """Inverse of encode_date: parse the stored string ("" becomes None)."""
    if x == "":
        return None
    return datetime.datetime.strptime(x, "%Y-%m-%d %H:%M:%S")
def safe_call(f):
    """Decorator that wraps a method for handling database operations.

    Redis errors are logged and re-raised as DatabaseError so callers see a
    consistent exception type.
    """
    # Fix: preserve the wrapped function's metadata (__name__, __doc__).
    @functools.wraps(f)
    def wrapped_f(self, *args, **kwargs):
        # This only logs the error and raise the usual Error for consistency,
        # the redis library takes care of reconnecting and everything else.
        try:
            return f(self, *args, **kwargs)
        except redis.exceptions.RedisError as e:
            self.log.error("Redis error while calling %s: %s",
                           f.__name__, e)
            raise DatabaseError("Database temporarily unavailable.")
    return wrapped_f
class RedisDBHandle(BaseEngine):
    """Digest store backed by Redis (deprecated v0 layout).

    Each digest lives under a "pyzord.digest.<digest>" key whose value is a
    comma-separated record string (see _encode_record).
    """

    absolute_source = False
    handles_one_step = False

    log = logging.getLogger("pyzord")

    def __init__(self, fn, mode, max_age=None):
        """fn is "host,port,password,db"; mode is ignored.

        :param max_age: when set, records expire after this many seconds
            (enforced via SETEX on every write).
        """
        self.max_age = max_age
        # The 'fn' is host,port,password,db. We ignore mode.
        # We store the authentication details so that we can reconnect if
        # necessary.
        fn = fn.split(",")
        self.host = fn[0] or "localhost"
        self.port = fn[1] or "6379"
        self.passwd = fn[2] or None
        self.db_name = fn[3] or "0"
        self.db = self._get_new_connection()

    @staticmethod
    def _encode_record(r):
        # Serialize a Record to the comma-separated wire format (as bytes).
        return ("%s,%s,%s,%s,%s,%s" %
                (r.r_count,
                 encode_date(r.r_entered),
                 encode_date(r.r_updated),
                 r.wl_count,
                 encode_date(r.wl_entered),
                 encode_date(r.wl_updated))).encode()

    @staticmethod
    def _decode_record(r):
        # Inverse of _encode_record; a missing value becomes an empty Record.
        if r is None:
            return Record()
        fields = r.decode().split(",")
        return Record(r_count=int(fields[0]),
                      r_entered=decode_date(fields[1]),
                      r_updated=decode_date(fields[2]),
                      wl_count=int(fields[3]),
                      wl_entered=decode_date(fields[4]),
                      wl_updated=decode_date(fields[5]))

    def __iter__(self):
        # NOTE(review): under Python 3 redis returns bytes keys, so rsplit
        # with a str separator would raise TypeError -- this deprecated
        # module appears to target Python 2; confirm before reuse.
        for key in self.db.keys(self._real_key("*")):
            yield key.rsplit(".", 1)[-1]

    def _iteritems(self):
        for key in self:
            try:
                yield key, self[key]
            except Exception as ex:
                # Skip (but log) records that fail to load or decode.
                self.log.warning("Invalid record %s: %s", key, ex)

    def iteritems(self):
        return self._iteritems()

    def items(self):
        return list(self._iteritems())

    @staticmethod
    def _real_key(key):
        # Prefix the digest with the namespace to form the real Redis key.
        return "%s.%s" % (NAMESPACE, key)

    @safe_call
    def _get_new_connection(self):
        # A host containing "/" is treated as a unix socket path.
        if "/" in self.host:
            return redis.StrictRedis(unix_socket_path=self.host,
                                     db=int(self.db_name), password=self.passwd)
        return redis.StrictRedis(host=self.host, port=int(self.port),
                                 db=int(self.db_name), password=self.passwd)

    @safe_call
    def __getitem__(self, key):
        return self._decode_record(self.db.get(self._real_key(key)))

    @safe_call
    def __setitem__(self, key, value):
        if self.max_age is None:
            self.db.set(self._real_key(key), self._encode_record(value))
        else:
            # SETEX so the record expires automatically after max_age.
            self.db.setex(self._real_key(key), self.max_age,
                          self._encode_record(value))

    @safe_call
    def __delitem__(self, key):
        self.db.delete(self._real_key(key))

    @classmethod
    def get_prefork_connections(cls, fn, mode, max_age=None):
        """Yields a number of database connections suitable for a Pyzor
        pre-fork server.
        """
        while True:
            yield functools.partial(cls, fn, mode, max_age=max_age)
class ThreadedRedisDBHandle(RedisDBHandle):
    """Thread-capable variant of RedisDBHandle.

    The extra 'bound' argument is accepted for interface compatibility with
    the other threaded engines but is not used here.
    """

    def __init__(self, fn, mode, max_age=None, bound=None):
        RedisDBHandle.__init__(self, fn, mode, max_age=max_age)
# Export the engine entry points; every slot is disabled when the redis
# client library could not be imported.
if _has_redis:
    handle = DBHandle(single_threaded=RedisDBHandle,
                      multi_threaded=ThreadedRedisDBHandle,
                      multi_processing=None,
                      prefork=RedisDBHandle)
else:
    handle = DBHandle(single_threaded=None,
                      multi_threaded=None,
                      multi_processing=None,
                      prefork=None)
| 4,666 | 29.703947 | 80 | py |
pyzor | pyzor-master/pyzor/engines/common.py | """Common library shared by different engines."""
import sys
import datetime
from collections import namedtuple
__all__ = ["DBHandle", "DatabaseError", "Record", "BaseEngine"]
# Bundle of engine classes keyed by server concurrency model; a slot is
# None when that model is not supported by the engine.
DBHandle = namedtuple("DBHandle", ["single_threaded", "multi_threaded",
                                   "multi_processing", "prefork"])
class DatabaseError(Exception):
    """Raised when a storage engine cannot service a request."""
class Record(object):
    """Per-digest report counters and timestamps.

    Prefix conventions used in this class:
    r = report (spam)
    wl = whitelist
    """

    def __init__(self, r_count=0, wl_count=0, r_entered=None,
                 r_updated=None, wl_entered=None, wl_updated=None):
        self.r_count = r_count
        self.wl_count = wl_count
        self.r_entered = r_entered
        self.r_updated = r_updated
        self.wl_entered = wl_entered
        self.wl_updated = wl_updated

    def _bump(self, count_attr, entered_attr):
        # Shared increment logic: saturate the counter at sys.maxsize
        # (overflow prevention) and stamp the "entered" time on first use.
        current = getattr(self, count_attr)
        if current < sys.maxsize:
            setattr(self, count_attr, current + 1)
        if getattr(self, entered_attr) is None:
            setattr(self, entered_attr, datetime.datetime.now())

    def r_increment(self):
        """Count one spam report and refresh its timestamps."""
        self._bump("r_count", "r_entered")
        self.r_update()

    def wl_increment(self):
        """Count one whitelist report and refresh its timestamps."""
        self._bump("wl_count", "wl_entered")
        self.wl_update()

    def r_update(self):
        """Mark the spam side as modified now."""
        self.r_updated = datetime.datetime.now()

    def wl_update(self):
        """Mark the whitelist side as modified now."""
        self.wl_updated = datetime.datetime.now()
class BaseEngine(object):
    """Abstract interface that every Pyzor storage engine implements."""

    # Default flag values; concrete engines override them as needed.
    absolute_source = True
    handles_one_step = False

    def __iter__(self):
        """Iterate over all keys"""
        raise NotImplementedError

    def iteritems(self):
        """Iterate over pairs of (key, record)."""
        raise NotImplementedError

    def items(self):
        """Return a list of (key, record)."""
        raise NotImplementedError

    def __getitem__(self, key):
        """Get the record for this corresponding key."""
        raise NotImplementedError

    def __setitem__(self, key, value):
        """Set the record for this corresponding key. 'value' should be a
        instance of the ``Record`` class.
        """
        raise NotImplementedError

    def __delitem__(self, key):
        """Remove the corresponding record from the database."""
        raise NotImplementedError

    def report(self, keys):
        """Report the corresponding key as spam, incrementing the report count.

        Engines that implement don't implement this method should have
        handles_one_step set to False.
        """
        raise NotImplementedError

    def whitelist(self, keys):
        """Report the corresponding key as ham, incrementing the whitelist
        count.

        Engines that implement don't implement this method should have
        handles_one_step set to False.
        """
        raise NotImplementedError

    @classmethod
    def get_prefork_connections(cls, fn, mode, max_age=None):
        """Yields an unlimited number of partial functions that return a new
        engine instance, suitable for using toghether with the Pre-Fork server.
        """
        raise NotImplementedError
| 3,190 | 28.009091 | 79 | py |
pyzor | pyzor-master/pyzor/engines/__init__.py | """Database backends for pyzord.
The database class must expose a dictionary-like interface, allowing access
via __getitem__, __setitem__, and __delitem__. The key will be a forty
character string, and the value should be an instance of the Record class.
If the database backend cannot store the Record objects natively, then it
must transparently take care of translating to/from Record objects in
__setitem__ and __getitem__.
The database class should take care of expiring old values at the
appropriate interval.
"""
from pyzor.engines import gdbm_
from pyzor.engines import mysql
from pyzor.engines import redis_
from pyzor.engines import redis_v0
__all__ = ["database_classes"]
# Map of engine name (as used in the server configuration) to that engine's
# DBHandle bundle of per-concurrency-model classes.
database_classes = {"gdbm": gdbm_.handle,
                    "mysql": mysql.handle,
                    "redis_v0": redis_v0.handle,
                    "redis": redis_.handle,
                    }
| 891 | 30.857143 | 75 | py |
pyzor | pyzor-master/pyzor/hacks/py3.py | """Hacks for python2-3 compatibility."""
import sys
def reload(module):
    """Reload the module.

    This is handled differently according to the python version.  This even
    varies across Python 3 versions.

    :param module: the already-imported module object to reload.
    :returns: the reloaded module object.
    """
    if sys.version_info[0] == 2:
        # Bug fix: the bare name `reload` here resolves to this very
        # function (it shadows the builtin at module scope), which recursed
        # forever.  Go through __builtin__ explicitly.
        import __builtin__
        return __builtin__.reload(module)
    elif sys.version_info[0] == 3 and sys.version_info[1] <= 3:
        import imp
        return imp.reload(module)
    else:
        import importlib
        return importlib.reload(module)
| 506 | 22.045455 | 63 | py |
pyzor | pyzor-master/pyzor/hacks/__init__.py | """Various hack to make pyzor compatible with different Python versions."""
| 76 | 37.5 | 75 | py |
pyzor | pyzor-master/pyzor/hacks/py26.py | """Hacks for Python 2.6"""
__all__ = ["hack_all", "hack_email", "hack_select"]
def hack_all(email=True, select=True):
    """Apply every enabled Python 2.6 compatibility patch."""
    patches = ((email, hack_email), (select, hack_select))
    for enabled, apply_patch in patches:
        if enabled:
            apply_patch()
def hack_email():
    """Make sure email.message_from_bytes is always available.

    Python 2.6's email.message_from_string cannot handle unicode input, and
    under Python 3 message_from_string only accepts decoded text, so callers
    use message_from_bytes everywhere; on Python 2 it is aliased to
    message_from_string here.
    """
    import email

    if getattr(email, "message_from_bytes", None) is None:
        email.message_from_bytes = email.message_from_string
def hack_select():
    """The python2.6 version of SocketServer does not handle interrupt calls
    from signals. Patch the select call if necessary.
    """
    import sys
    # Only Python 2.6 needs the patch; everywhere else this is a no-op.
    if sys.version_info[0] == 2 and sys.version_info[1] == 6:
        import select
        import errno
        real_select = select.select

        def _eintr_retry(*args):
            """restart a system call interrupted by EINTR"""
            while True:
                try:
                    return real_select(*args)
                except (OSError, select.error) as ex:
                    if ex.args[0] != errno.EINTR:
                        raise
        # Replace select.select module-wide with the retrying wrapper.
        select.select = _eintr_retry
| 1,258 | 26.977778 | 76 | py |
pyzor | pyzor-master/scripts/summarise.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Summarise Pyzor database.
Generate a summary of the current state of a Pyzor database.
This currently only works with a MySQL (or compatible) database.
This can currently only output to a Slack channel.
There are extra requirements for this script:
* click
* requests
"""
import os
import datetime
import ConfigParser
import MySQLdb
import requests
import click
@click.command()
@click.option("--config", default=None)
@click.argument("hook")
def summarise(config, hook):
    """Generate a summary of a Pyzor database."""
    if config is None:
        config = os.path.expanduser("~/.pyzor/config")
    conf = ConfigParser.ConfigParser()
    conf.read(config)
    # DigestDB is "host,user,password,db,table" -- the MySQL engine format.
    (host, user, password, db_name,
     table) = conf.get("server", "DigestDB").split(",")
    db = MySQLdb.connect(
        host=host,
        user=user,
        db=db_name,
        passwd=password,
    )
    c = db.cursor()
    # TODO: With a newer Python, this could use f-strings.
    data = {}
    c.execute(
        "SELECT COUNT(*) FROM `%s`" % table
    )
    data["total"] = c.fetchone()[0]
    c.execute(
        "SELECT MIN(wl_entered), MIN(wl_updated), "
        "MIN(r_entered), MIN(r_updated), MAX(wl_entered), MAX(wl_updated), "
        "MAX(r_entered), MAX(r_updated) from `%s`" % table
    )
    (data["oldest_white"], data["oldest_white_update"],
     data["oldest_spam"], data["oldest_spam_update"],
     data["newest_white"], data["newest_white_update"],
     data["newest_spam"], data["newest_spam_update"]
     ) = c.fetchone()
    c.execute(
        "SELECT MAX(r_count), MAX(wl_count) FROM `%s`" % table
    )
    data["max_spam"], data["max_white"] = c.fetchone()
    # Frequency table for counts.
    # Ten buckets of 100 reports each (0-100, 100-200, ...).
    for column in ("r_count", "wl_count"):
        buckets = []
        for bucket in range(10):
            low = bucket * 100
            high = (bucket + 1) * 100
            c.execute(
                "SELECT COUNT(*) FROM `%s` WHERE %s BETWEEN %%s AND %%s" %
                (table, column), (low, high)
            )
            buckets.append(c.fetchone()[0])
        data[column] = buckets
    # Frequency table for age.
    # Ten buckets of one week each, counting back from now.
    for column in ("r_updated", "wl_updated"):
        buckets = []
        for bucket in range(10):
            now = datetime.datetime.now()
            low = now - datetime.timedelta(days=(bucket + 1) * 7)
            high = now - datetime.timedelta(days=bucket * 7)
            c.execute(
                "SELECT COUNT(*) FROM `%s` WHERE %s BETWEEN %%s AND %%s" %
                (table, column), (low, high)
            )
            buckets.append(c.fetchone()[0])
        data[column] = buckets
    data["table"] = table
    notify_slack(hook, data)
    c.close()
    db.close()
# Borrowed from https://raw.githubusercontent.com/kennethreitz/spark.py/master/spark.py
def spark_string(ints, fit_min=False):
    """Returns a spark string from given iterable of ints.

    Keyword Arguments:
    fit_min: Matches the range of the sparkline to the input integers
             rather than the default of zero. Useful for large numbers with
             relatively small differences between the positions
    """
    ticks = u" ▁▂▃▄▅▆▇█"
    # Materialise first so generators work, and so an empty input can be
    # detected instead of crashing in min()/max().
    ints = list(ints)
    if not ints:
        return u""
    min_range = min(ints) if fit_min else 0
    step_range = max(ints) - min_range
    # `or 1` guards against a zero step when all values are equal.
    step = (step_range / float(len(ticks) - 1)) or 1
    return u''.join(ticks[int(round((i - min_range) / step))] for i in ints)
def notify_slack(hook, data):
    """Send a notification containing a summary of a Pyzor database to a
    Slack channel.

    :param hook: the Slack incoming-webhook URL to post to.
    :param data: summary mapping produced by summarise().
    """
    text = "Pyzor summary for _%(table)s_ (%(total)s digests)" % data
    # Renamed from `format` to avoid shadowing the builtin.
    date_fmt = "%d %b %Y"
    # Attachments are red ("danger") when the most common counts look too
    # low or the latest updates are older than two days, green otherwise.
    if data["max_spam"] < 100:
        spam_colour = "danger"
    else:
        spam_colour = "good"
    if data["max_white"] < 100:
        white_colour = "danger"
    else:
        white_colour = "good"
    if (datetime.datetime.now() - data["newest_spam_update"]).days > 2:
        spam_age_colour = "danger"
    else:
        spam_age_colour = "good"
    if (datetime.datetime.now() - data["newest_white_update"]).days > 2:
        white_age_colour = "danger"
    else:
        white_age_colour = "good"
    attachments = [
        {
            "title": "Spam Reports",
            "text": spark_string(data["r_count"], fit_min=True),
            "fields": [
                {
                    "title": "Most common count",
                    "value": data["max_spam"],
                    "short": True,
                },
            ],
            "color": spam_colour,
        },
        {
            "title": "Whitelist Reports",
            "text": spark_string(data["wl_count"], fit_min=True),
            "fields": [
                {
                    "title": "Most common count",
                    "value": data["max_white"],
                    "short": True,
                },
            ],
            "color": white_colour,
        },
        {
            "title": "Spam Age",
            "text": spark_string(data["r_updated"], fit_min=True),
            "fields": [
                {
                    "title": "Oldest",
                    "value": data["oldest_spam"].strftime(date_fmt),
                    "short": True,
                },
                {
                    "title": "Oldest Update",
                    "value": data["oldest_spam_update"].strftime(date_fmt),
                    "short": True,
                },
                {
                    "title": "Latest",
                    "value": data["newest_spam"].strftime(date_fmt),
                    "short": True,
                },
                {
                    "title": "Latest Update",
                    "value": data["newest_spam_update"].strftime(date_fmt),
                    "short": True,
                },
            ],
            "color": spam_age_colour,
        },
        {
            "title": "Whitelist Age",
            "text": spark_string(data["wl_updated"], fit_min=True),
            "fields": [
                {
                    "title": "Oldest",
                    "value": data["oldest_white"].strftime(date_fmt),
                    "short": True,
                },
                {
                    "title": "Oldest Update",
                    "value": data["oldest_white_update"].strftime(date_fmt),
                    "short": True,
                },
                {
                    "title": "Latest",
                    "value": data["newest_white"].strftime(date_fmt),
                    "short": True,
                },
                {
                    "title": "Latest Update",
                    "value": data["newest_white_update"].strftime(date_fmt),
                    "short": True,
                },
            ],
            "color": white_age_colour,
        },
    ]
    # Fire-and-forget: the response was never checked, so don't bind it.
    requests.post(
        hook,
        json={"text": text, "attachments": attachments}
    )
if __name__ == "__main__":
summarise()
| 6,995 | 29.819383 | 87 | py |
pyzor | pyzor-master/tests/__init__.py | """Package reserved for tests and test utilities."""
import unittest
def suite():
    """Gather all the tests from this package in a test suite."""
    import tests.unit as unit
    import tests.functional as functional

    combined = unittest.TestSuite()
    combined.addTests([unit.suite(), functional.suite()])
    return combined
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 433 | 20.7 | 65 | py |
pyzor | pyzor-master/tests/unit/test_forwarder.py | """Test the pyzor.forwarder module
"""
import time
import unittest
import threading
try:
from unittest.mock import call, Mock
except ImportError:
from mock import call, Mock
import pyzor.forwarder
class ForwarderTest(unittest.TestCase):
    """Tests for pyzor.forwarder.Forwarder queueing and forwarding."""

    def setUp(self):
        unittest.TestCase.setUp(self)

    def tearDown(self):
        unittest.TestCase.tearDown(self)

    def test_queue(self):
        # Queue inserts are capped at max_queue_size and the forward loop
        # drains the queue.
        client = Mock()
        servlist = []
        max_qsize = 10
        forwarder = pyzor.forwarder.Forwarder(client, servlist,
                                              max_queue_size=max_qsize)
        # Push twice the capacity; the excess must be dropped, not queued.
        for _ in range(max_qsize * 2):
            forwarder.queue_forward_request('975422c090e7a43ab7c9bf0065d5b661259e6d74')
        self.assertGreater(forwarder.forward_queue.qsize(), 0, 'queue insert failed')
        self.assertLessEqual(forwarder.forward_queue.qsize(), max_qsize, 'queue overload')
        self.assertEqual(forwarder.forward_queue.qsize(), max_qsize, 'queue should be full at this point')
        t = threading.Thread(target=forwarder._forward_loop)
        t.start()
        # Give the loop a moment to drain the queue.
        time.sleep(1)
        self.assertEqual(forwarder.forward_queue.qsize(), 0, 'queue should be empty')
        forwarder.stop_forwarding()
        t.join(5)
        self.assertFalse(t.is_alive(), 'forward thread did not end')

    def test_remote_servers(self):
        # Each queued digest must be forwarded (report and whitelist) to
        # every configured remote server.
        client = Mock()
        digest = '975422c090e7a43ab7c9bf0065d5b661259e6d74'
        servlist = [("test1.example.com", 24441),
                    ("test2.example.com", 24442)]
        forwarder = pyzor.forwarder.Forwarder(client, servlist)
        forwarder.queue_forward_request(digest)
        forwarder.queue_forward_request(digest, whitelist=True)
        forwarder.start_forwarding()
        time.sleep(2)
        forwarder.stop_forwarding()
        client.report.assert_has_calls([call(digest, servlist[0]),
                                        call(digest, servlist[1])])
        client.whitelist.assert_has_calls([call(digest, servlist[0]),
                                           call(digest, servlist[1])])
def suite():
    """Gather all the tests from this module in a test suite."""
    tests = unittest.TestSuite()
    tests.addTests([unittest.makeSuite(ForwarderTest)])
    return tests
unittest.main()
| 2,352 | 32.614286 | 106 | py |
pyzor | pyzor-master/tests/unit/test_account.py | """Test the pyzor.account module"""
import io
import os
import time
import email
import hashlib
import unittest
import pyzor
import pyzor.config
import pyzor.account
class AccountTest(unittest.TestCase):
    """Tests for the pyzor.account signing helpers."""

    def setUp(self):
        unittest.TestCase.setUp(self)
        # Fixed timestamp so the expected signatures are reproducible.
        self.timestamp = 1381219396
        self.msg = email.message_from_string("")
        self.msg["Op"] = "ping"
        self.msg["Thread"] = "14941"
        self.msg["PV"] = "2.1"
        self.msg["User"] = "anonymous"
        self.msg["Time"] = str(self.timestamp)

    def tearDown(self):
        unittest.TestCase.tearDown(self)

    def test_sign_msg(self):
        """Test the sign message function"""
        hashed_key = hashlib.sha1(b"test_key").hexdigest()
        expected = "2ab1bad2aae6fd80c656a896c82eef0ec1ec38a0"
        result = pyzor.account.sign_msg(hashed_key, self.timestamp, self.msg)
        self.assertEqual(result, expected)

    def test_hash_key(self):
        """Test the hash key function"""
        user = "testuser"
        key = "testkey"
        expected = "0957bd79b58263657127a39762879098286d8477"
        result = pyzor.account.hash_key(key, user)
        self.assertEqual(result, expected)

    def test_verify_signature(self):
        """Test the verify signature function"""
        def mock_sm(h, t, m):
            return "testsig"
        # Monkey-patch sign_msg; restored in the finally block below.
        real_sm = pyzor.account.sign_msg
        pyzor.account.sign_msg = mock_sm
        try:
            self.msg["Sig"] = "testsig"
            # Refresh Time so the signature is not rejected as stale.
            del self.msg["Time"]
            self.msg["Time"] = str(int(time.time()))
            pyzor.account.verify_signature(self.msg, "testkey")
        finally:
            pyzor.account.sign_msg = real_sm

    def test_verify_signature_old_timestamp(self):
        """Test the verify signature with old timestamp"""
        def mock_sm(h, t, m):
            return "testsig"
        real_sm = pyzor.account.sign_msg
        pyzor.account.sign_msg = mock_sm
        try:
            # The message keeps the stale setUp timestamp, so verification
            # must reject it.
            self.msg["Sig"] = "testsig"
            self.assertRaises(pyzor.SignatureError, pyzor.account.verify_signature, self.msg, "testkey")
        finally:
            pyzor.account.sign_msg = real_sm

    def test_verify_signature_bad_signature(self):
        """Test the verify signature with invalid signature"""
        def mock_sm(h, t, m):
            return "testsig"
        real_sm = pyzor.account.sign_msg
        pyzor.account.sign_msg = mock_sm
        try:
            # Signature does not match what sign_msg produces.
            self.msg["Sig"] = "testsig-bad"
            del self.msg["Time"]
            self.msg["Time"] = str(int(time.time()))
            self.assertRaises(pyzor.SignatureError, pyzor.account.verify_signature, self.msg, "testkey")
        finally:
            pyzor.account.sign_msg = real_sm
class LoadAccountTest(unittest.TestCase):
    """Tests for the load_accounts function"""
    # Fake path recognised by the patched os.path.exists / open below.
    filepath = "test_file"

    def setUp(self):
        unittest.TestCase.setUp(self)
        # Make the fake path appear to exist; everything else behaves
        # normally.  Restored in tearDown.
        self.real_exists = os.path.exists
        os.path.exists = lambda p: True if p == self.filepath else \
            self.real_exists(p)
        self.mock_file = io.StringIO()
        # __builtins__ is a module when pyzor.account is imported normally,
        # but a dict when run as a script -- handle both layouts.
        try:
            self.real_open = pyzor.account.__builtins__.open
        except AttributeError:
            self.real_open = pyzor.account.__builtins__["open"]

        def mock_open(path, mode="r", buffering=-1):
            # Serve the in-memory buffer for the fake path, rewound so each
            # call reads from the start; defer to the real open otherwise.
            if path == self.filepath:
                self.mock_file.seek(0)
                return self.mock_file
            else:
                return self.real_open(path, mode, buffering)
        try:
            pyzor.account.__builtins__.open = mock_open
        except AttributeError:
            pyzor.account.__builtins__["open"] = mock_open

    def tearDown(self):
        unittest.TestCase.tearDown(self)
        os.path.exists = self.real_exists
        try:
            pyzor.account.__builtins__.open = self.real_open
        except AttributeError:
            pyzor.account.__builtins__["open"] = self.real_open

    def test_load_accounts_nothing(self):
        result = pyzor.config.load_accounts("foobar")
        self.assertEqual(result, {})

    def test_load_accounts(self):
        """Test loading the account file"""
        self.mock_file.write(u"public.pyzor.org : 24441 : test : 123abc,cba321\n"
                             u"public2.pyzor.org : 24441 : test2 : 123abc,cba321")
        result = pyzor.config.load_accounts(self.filepath)
        self.assertIn(("public.pyzor.org", 24441), result)
        self.assertIn(("public2.pyzor.org", 24441), result)
        account = result[("public.pyzor.org", 24441)]
        self.assertEqual((account.username, account.salt, account.key),
                         ("test", "123abc", "cba321"))
        account = result[("public2.pyzor.org", 24441)]
        self.assertEqual((account.username, account.salt, account.key),
                         ("test2", "123abc", "cba321"))

    def test_load_accounts_invalid_line(self):
        """Test loading the account file"""
        # First line uses ';' instead of ':' and must be skipped.
        self.mock_file.write(u"public.pyzor.org : 24441 ; test : 123abc,cba321\n"
                             u"public2.pyzor.org : 24441 : test2 : 123abc,cba321")
        result = pyzor.config.load_accounts(self.filepath)
        self.assertNotIn(("public.pyzor.org", 24441), result)
        self.assertEqual(len(result), 1)
        self.assertIn(("public2.pyzor.org", 24441), result)
        account = result[("public2.pyzor.org", 24441)]
        self.assertEqual((account.username, account.salt, account.key),
                         ("test2", "123abc", "cba321"))

    def test_load_accounts_invalid_port(self):
        """Test loading the account file"""
        # Non-numeric port must cause the line to be skipped.
        self.mock_file.write(u"public.pyzor.org : a4441 : test : 123abc,cba321\n"
                             u"public2.pyzor.org : 24441 : test2 : 123abc,cba321")
        result = pyzor.config.load_accounts(self.filepath)
        self.assertNotIn(("public.pyzor.org", 24441), result)
        self.assertEqual(len(result), 1)
        self.assertIn(("public2.pyzor.org", 24441), result)
        account = result[("public2.pyzor.org", 24441)]
        self.assertEqual((account.username, account.salt, account.key),
                         ("test2", "123abc", "cba321"))

    def test_load_accounts_invalid_key(self):
        """Test loading the account file"""
        # Empty salt and key must cause the line to be skipped.
        self.mock_file.write(u"public.pyzor.org : 24441 : test : ,\n"
                             u"public2.pyzor.org : 24441 : test2 : 123abc,cba321")
        result = pyzor.config.load_accounts(self.filepath)
        self.assertNotIn(("public.pyzor.org", 24441), result)
        self.assertEqual(len(result), 1)
        self.assertIn(("public2.pyzor.org", 24441), result)
        account = result[("public2.pyzor.org", 24441)]
        self.assertEqual((account.username, account.salt, account.key),
                         ("test2", "123abc", "cba321"))

    def test_load_accounts_invalid_missing_comma(self):
        """Test loading the account file"""
        # Missing the salt,key separator must cause the line to be skipped.
        self.mock_file.write(u"public.pyzor.org : 24441 : test : 123abccba321\n"
                             u"public2.pyzor.org : 24441 : test2 : 123abc,cba321")
        result = pyzor.config.load_accounts(self.filepath)
        self.assertNotIn(("public.pyzor.org", 24441), result)
        self.assertEqual(len(result), 1)
        self.assertIn(("public2.pyzor.org", 24441), result)
        account = result[("public2.pyzor.org", 24441)]
        self.assertEqual((account.username, account.salt, account.key),
                         ("test2", "123abc", "cba321"))

    def test_load_accounts_comment(self):
        """Test skipping commented lines"""
        self.mock_file.write(u"#public1.pyzor.org : 24441 : test : 123abc,cba321")
        result = pyzor.config.load_accounts(self.filepath)
        self.assertNotIn(("public.pyzor.org", 24441), result)
        self.assertFalse(result)
def suite():
    """Gather all the tests from this module in a test suite."""
    tests = unittest.TestSuite()
    tests.addTests([unittest.makeSuite(AccountTest),
                    unittest.makeSuite(LoadAccountTest)])
    return tests
if __name__ == '__main__':
unittest.main()
| 8,254 | 39.665025 | 104 | py |
pyzor | pyzor-master/tests/unit/test_config.py | import os
import logging
import unittest
try:
import configparser as ConfigParser
except ImportError:
import ConfigParser
try:
from unittest.mock import patch, Mock
except ImportError:
from mock import patch, Mock
import pyzor.config
from tests.util import mock_open
class MockData(list):
    """A list of lines that can also stand in for an open file: it is
    iterable line-by-line and exposes a no-op close()."""

    def close(self):
        """Do nothing; present only so the mock matches the file API."""
class TestPasswdLoad(unittest.TestCase):
    """Tests for pyzor.config.load_passwd_file using an in-memory file."""
    fp = "pyzord.passwd"
    alice_key = "alice_key"
    bob_key = "bob_key"

    def setUp(self):
        super(TestPasswdLoad, self).setUp()
        self.data = MockData()
        self.exists = True
        real_exists = os.path.exists
        # Patch open() to hand back the in-memory data and make the fake
        # path appear to exist; undone by patch.stopall() in tearDown.
        patch("pyzor.config.open", return_value=self.data,
              create=True).start()
        _exists = lambda fp: True if fp == self.fp else real_exists(fp)
        patch("pyzor.config.os.path.exists", side_effect=_exists).start()

    def get_passwd(self, fp=None):
        # Convenience wrapper defaulting to the fake path.
        if not fp:
            fp = self.fp
        return pyzor.config.load_passwd_file(fp)

    def tearDown(self):
        super(TestPasswdLoad, self).tearDown()
        patch.stopall()

    def test_nothing(self):
        result = self.get_passwd()
        self.assertEqual(result, {})

    def test_default(self):
        # A path that does not exist yields an empty mapping.
        result = self.get_passwd("foobar")
        self.assertEqual(result, {})

    def test_passwd(self):
        self.data.append("alice : %s\n" % self.alice_key)
        self.data.append("bob : %s\n" % self.bob_key)
        result = self.get_passwd()
        self.assertEqual(result, {"alice": self.alice_key,
                                  "bob": self.bob_key})

    def test_invalid_line(self):
        # ';' instead of ':' -- the malformed line must be skipped.
        self.data.append("alice ; %s\n" % self.alice_key)
        self.data.append("bob : %s\n" % self.bob_key)
        result = self.get_passwd()
        self.assertEqual(result, {"bob": self.bob_key})

    def test_ignore_comment(self):
        self.data.append("alice : %s\n" % self.alice_key)
        self.data.append("# bob : %s\n" % self.bob_key)
        result = self.get_passwd()
        self.assertEqual(result, {"alice": self.alice_key})
class TestAccessLoad(unittest.TestCase):
    """Tests for pyzor.config.load_access_file using an in-memory file."""
    fp = "pyzord.access"
    accounts = ["alice", "bob"]
    # NOTE: `all` intentionally mirrors the access-file keyword and shadows
    # the builtin within this class body.
    all = {'report', 'info', 'pong', 'ping', 'check', 'whitelist'}
    anonymous_privileges = {'report', 'info', 'pong', 'ping', 'check'}

    def setUp(self):
        super(TestAccessLoad, self).setUp()
        self.data = MockData()
        self.exists = True
        real_exists = os.path.exists
        # Patch open() to hand back the in-memory data and make the fake
        # path appear to exist; undone by patch.stopall() in tearDown.
        patch("pyzor.config.open", return_value=self.data,
              create=True).start()
        _exists = lambda fp: True if fp == self.fp else real_exists(fp)
        patch("pyzor.config.os.path.exists", side_effect=_exists).start()

    def get_access(self, fp=None, accounts=None):
        # Convenience wrapper defaulting to the fake path and account list.
        if not fp:
            fp = self.fp
        if not accounts:
            accounts = self.accounts
        return pyzor.config.load_access_file(fp, accounts)

    def tearDown(self):
        super(TestAccessLoad, self).tearDown()
        patch.stopall()

    def test_nothing(self):
        result = self.get_access()
        self.assertEqual(result, {})

    def test_default(self):
        # A missing file falls back to the default anonymous privileges.
        result = self.get_access(fp="foobar")
        self.assertEqual(result, {'anonymous': self.anonymous_privileges})

    def test_invalid_line(self):
        # ';' instead of ':' -- the malformed line must be skipped.
        self.data.append("all : allice ; allow\n")
        self.data.append("ping : bob : allow\n")
        result = self.get_access()
        self.assertEqual(result, {'bob': {'ping'}})

    def test_invalid_action(self):
        # Unknown action keyword -- the line must be skipped.
        self.data.append("all : allice : don't allow\n")
        self.data.append("ping : bob : allow\n")
        result = self.get_access()
        self.assertEqual(result, {'bob': {'ping'}})

    def test_all_privilege(self):
        self.data.append("all : bob : allow\n")
        result = self.get_access()
        self.assertEqual(result, {'bob': self.all})

    def test_all_accounts(self):
        self.data.append("all : all : allow\n")
        result = self.get_access()
        self.assertEqual(result, {'alice': self.all,
                                  'bob': self.all})

    def test_deny_action(self):
        # A later deny must remove a previously granted privilege.
        self.data.append("all : all : allow\n")
        self.data.append("ping : bob : deny\n")
        result = self.get_access()
        self.assertEqual(result, {'alice': self.all,
                                  'bob': self.all - {'ping'}})

    def test_multiple_users(self):
        self.data.append("all : alice bob: allow\n")
        result = self.get_access()
        self.assertEqual(result, {'alice': self.all,
                                  'bob': self.all})

    def test_multiple_privileges(self):
        self.data.append("ping pong : alice: allow\n")
        result = self.get_access()
        self.assertEqual(result, {'alice': {'ping', 'pong'}})

    def test_ignore_comments(self):
        self.data.append("all: alice: allow\n")
        self.data.append("# all: bob : allow\n")
        result = self.get_access()
        self.assertEqual(result, {'alice': self.all})
class TestServersLoad(unittest.TestCase):
    """Tests for pyzor.config.load_servers."""
    # Fake servers-file path; os.path.exists is patched to report it present.
    fp = "servers"
    # Fallback server returned when no servers file is available.
    public_server = ("public.pyzor.org", 24441)
    random_server1 = ("random.pyzor.org", 33544)
    random_server2 = ("127.1.2.45", 13587)
    def setUp(self):
        """Make self.fp look like an existing file; content comes per-test."""
        super(TestServersLoad, self).setUp()
        self.data = []
        self.exists = True
        real_exists = os.path.exists
        _exists = lambda fp: True if fp == self.fp else real_exists(fp)
        patch("pyzor.config.os.path.exists", side_effect=_exists).start()
    def get_servers(self, fp=None):
        """Load the server list, feeding self.data as the file content."""
        if not fp:
            fp = self.fp
        name = "pyzor.config.open"
        with patch(name, mock_open(read_data=''.join(self.data)),
                   create=True) as m:
            return pyzor.config.load_servers(fp)
    def tearDown(self):
        """Stop every patch started in setUp."""
        super(TestServersLoad, self).tearDown()
        patch.stopall()
    def test_nothing(self):
        """An empty servers file falls back to the public server."""
        result = self.get_servers()
        self.assertEqual(result, [self.public_server])
    def test_default(self):
        """A missing servers file falls back to the public server."""
        result = self.get_servers("foobar")
        self.assertEqual(result, [self.public_server])
    def test_servers(self):
        """host:port lines are parsed into (host, port) tuples, in order."""
        self.data.append("%s:%s\n" % self.random_server1)
        self.data.append("%s:%s\n" % self.random_server2)
        result = self.get_servers()
        self.assertEqual(result, [self.random_server1,
                                  self.random_server2])
    def test_ignore_comment(self):
        """Lines starting with '#' are ignored."""
        self.data.append("#%s:%s\n" % self.random_server1)
        self.data.append("%s:%s\n" % self.random_server2)
        result = self.get_servers()
        self.assertEqual(result, [self.random_server2])
class TestLogSetup(unittest.TestCase):
    """Tests for pyzor.config.setup_logging logger/handler levels."""
    log_file = "this_is_a_test_log_file"
    def setUp(self):
        super(TestLogSetup, self).setUp()
    def tearDown(self):
        """Remove the on-disk log file created by the file-logging tests."""
        super(TestLogSetup, self).tearDown()
        try:
            os.remove(self.log_file)
        except OSError:
            # File was never created (console-only tests) -- nothing to do.
            pass
    def test_logging(self):
        """Console-only, non-debug: INFO logger, CRITICAL console handler."""
        pyzor.config.setup_logging("pyzor.test1", None, False)
        log = logging.getLogger("pyzor.test1")
        self.assertEqual(log.getEffectiveLevel(), logging.INFO)
        self.assertEqual(log.handlers[0].level, logging.CRITICAL)
    def test_logging_debug(self):
        """Console-only, debug: everything at DEBUG."""
        pyzor.config.setup_logging("pyzor.test2", None, True)
        log = logging.getLogger("pyzor.test2")
        self.assertEqual(log.getEffectiveLevel(), logging.DEBUG)
        self.assertEqual(log.handlers[0].level, logging.DEBUG)
    def test_logging_file(self):
        """With a log file, non-debug: file handler gets INFO."""
        pyzor.config.setup_logging("pyzor.test3", self.log_file, False)
        log = logging.getLogger("pyzor.test3")
        self.assertEqual(log.getEffectiveLevel(), logging.INFO)
        self.assertEqual(log.handlers[0].level, logging.CRITICAL)
        self.assertEqual(log.handlers[1].level, logging.INFO)
    def test_logging_file_debug(self):
        """With a log file, debug: both handlers at DEBUG."""
        pyzor.config.setup_logging("pyzor.test4", self.log_file, True)
        log = logging.getLogger("pyzor.test4")
        self.assertEqual(log.getEffectiveLevel(), logging.DEBUG)
        self.assertEqual(log.handlers[0].level, logging.DEBUG)
        self.assertEqual(log.handlers[1].level, logging.DEBUG)
class TestExpandHomeFiles(unittest.TestCase):
    """Tests for pyzor.config.expand_homefiles path resolution."""
    home = "/home/user/pyzor"
    def setUp(self):
        super(TestExpandHomeFiles, self).setUp()
    def tearDown(self):
        super(TestExpandHomeFiles, self).tearDown()
    def check_expand(self, homefiles, homedir, config, expected):
        """Run expand_homefiles over a fresh config section and compare
        the resulting section items against `expected`.
        """
        section = "test"
        conf = ConfigParser.ConfigParser()
        conf.add_section(section)
        for key, value in config.items():
            conf.set(section, key, value)
        pyzor.config.expand_homefiles(homefiles, section, homedir, conf)
        result = dict(conf.items(section))
        self.assertEqual(result, expected)
    def test_homedir(self):
        """Relative filenames are joined onto the home directory."""
        self.check_expand(
            ["testfile"],
            self.home,
            {"testfile": "my.file"},
            {"testfile": "%s/my.file" % self.home},
        )
    def test_homedir_none(self):
        """Empty values are left untouched."""
        self.check_expand(
            ["testfile"],
            self.home,
            {"testfile": ""},
            {"testfile": ""},
        )
    def test_homedir_abs(self):
        """Absolute paths are left untouched."""
        self.check_expand(
            ["testfile"],
            self.home,
            {"testfile": "/home/user2/pyzor"},
            {"testfile": "/home/user2/pyzor"},
        )
def suite():
    """Gather all the tests from this module in a test suite."""
    test_suite = unittest.TestSuite()
    test_suite.addTest(unittest.makeSuite(TestLogSetup))
    test_suite.addTest(unittest.makeSuite(TestAccessLoad))
    test_suite.addTest(unittest.makeSuite(TestPasswdLoad))
    test_suite.addTest(unittest.makeSuite(TestServersLoad))
    # Bug fix: TestExpandHomeFiles was defined in this module but never
    # added here, so its tests were silently skipped when run via suite().
    test_suite.addTest(unittest.makeSuite(TestExpandHomeFiles))
    return test_suite
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 9,903 | 30.948387 | 74 | py |
pyzor | pyzor-master/tests/unit/test_server.py | """Test the pyzor.server module
"""
import io
import sys
import time
import logging
import unittest
try:
import socketserver as SocketServer
except ImportError:
import SocketServer
from datetime import datetime, timedelta
try:
from unittest.mock import patch
except ImportError:
from mock import patch
import pyzor.server
import pyzor.engines.common
class MockServer():
    """Stand-in for pyzor.server.Server used by the handler tests.
    Provides the loggers and attributes RequestHandler reads, with
    NullHandlers so test runs stay silent.
    """
    def __init__(self):
        # Attach a no-op handler to both server loggers.
        for attr, logger_name in (("log", "pyzord"),
                                  ("usage_log", "pyzord-usage")):
            logger = logging.getLogger(logger_name)
            logger.addHandler(logging.NullHandler())
            setattr(self, attr, logger)
        self.forwarder = None
        self.one_step = False
class MockDatagramRequestHandler():
    """Mock of SocketServer.DatagramRequestHandler.
    Swapped in as the base class of pyzor.server.RequestHandler so a
    request can be driven from an in-memory buffer instead of a socket.
    """
    def __init__(self, headers, database=None, acl=None, accounts=None):
        """Initiates a request handler and sets the data in `headers` as
        the request.  Also sets the database, acl and accounts on the
        MockServer.
        This will be set as base class for RequestHandler.
        """
        # rfile carries the incoming request; wfile collects the response.
        self.rfile = io.BytesIO()
        self.wfile = io.BytesIO()
        for i, j in headers.items():
            self.rfile.write(("%s: %s\n" % (i, j)).encode("utf8"))
        self.rfile.seek(0)
        self.packet = None
        self.client_address = ["127.0.0.1"]
        # Setup MockServer data
        self.server = MockServer()
        self.server.database = database
        if acl:
            self.server.acl = acl
        else:
            # Default: anonymous may do everything.
            self.server.acl = {pyzor.anonymous_user: ("check", "report",
                                                      "ping", "pong", "info",
                                                      "whitelist",)}
        self.server.accounts = accounts
        self.handle()
    def handle(self):
        # Overridden by the real RequestHandler; no-op here so __init__
        # can call it unconditionally.
        pass
class RequestHandlerTest(unittest.TestCase):
    """Tests for pyzor.server.RequestHandler command dispatch.
    The handler's DatagramRequestHandler base class is swapped for
    MockDatagramRequestHandler so requests/responses flow through
    in-memory buffers.
    """
    def setUp(self):
        """Rebase RequestHandler onto the mock and build default
        request/response header dicts shared by the tests."""
        unittest.TestCase.setUp(self)
        self.real_drh = SocketServer.DatagramRequestHandler
        SocketServer.DatagramRequestHandler = MockDatagramRequestHandler
        pyzor.server.RequestHandler.__bases__ = (MockDatagramRequestHandler,)
        # setup the basic values for request and response
        self.request = {"User": pyzor.anonymous_user,
                        "Time": str(int(time.time())),
                        "PV": str(pyzor.proto_version),
                        "Thread": "3597"}
        self.expected_response = {"Code": "200",
                                  "Diag": "OK",
                                  "PV": str(pyzor.proto_version),
                                  "Thread": "3597"}
    def tearDown(self):
        """Restore the real base class and stop all patches."""
        unittest.TestCase.tearDown(self)
        SocketServer.DatagramRequestHandler = self.real_drh
        pyzor.server.RequestHandler.__bases__ = (self.real_drh,)
        patch.stopall()
    def check_response(self, handler):
        """Checks if the response from the handler is equal to
        the expected response.
        """
        handler.wfile.seek(0)
        response = handler.wfile.read()
        response = response.decode("utf8").replace("\n\n", "\n")
        result = {}
        try:
            for line in response.splitlines():
                key = line.split(":", 1)[0].strip()
                value = line.split(":")[1].strip()
                result[key] = value
        except (IndexError, TypeError) as e:
            self.fail("Error parsing %r: %s" % (response, e))
        self.assertEqual(result, self.expected_response)
    def timestamp(self, time_obj):
        """Return the POSIX timestamp (as str) for a datetime, or 0 for
        a falsy value (record fields that were never set)."""
        if not time_obj:
            return 0
        else:
            return str(int(time.mktime(time_obj.timetuple())))
    def test_ping(self):
        """Tests the ping command handler"""
        self.request["Op"] = "ping"
        handler = pyzor.server.RequestHandler(self.request)
        self.check_response(handler)
    def test_pong(self):
        """Tests the pong command handler"""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        database = {digest: pyzor.engines.common.Record(24, 42)}
        self.request["Op"] = "pong"
        self.request["Op-Digest"] = digest
        handler = pyzor.server.RequestHandler(self.request, database)
        # pong always reports the maximum spam count and zero whitelist.
        self.expected_response["Count"] = str(sys.maxsize)
        self.expected_response["WL-Count"] = "0"
        self.check_response(handler)
    def test_check(self):
        """Tests the check command handler"""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        database = {digest: pyzor.engines.common.Record(24, 42)}
        self.request["Op"] = "check"
        self.request["Op-Digest"] = digest
        handler = pyzor.server.RequestHandler(self.request, database)
        self.expected_response["Count"] = "24"
        self.expected_response["WL-Count"] = "42"
        self.check_response(handler)
    def test_check_new(self):
        """Tests the check command handler with a new record"""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        database = {}
        self.request["Op"] = "check"
        self.request["Op-Digest"] = digest
        handler = pyzor.server.RequestHandler(self.request, database)
        self.expected_response["Count"] = "0"
        self.expected_response["WL-Count"] = "0"
        self.check_response(handler)
    def test_info(self):
        """Tests the info command handler"""
        entered = datetime.now() - timedelta(days=10)
        updated = datetime.now()
        wl_entered = datetime.now() - timedelta(days=20)
        wl_updated = datetime.now() - timedelta(days=2)
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        database = {digest: pyzor.engines.common.Record(24, 42, entered, updated,
                                                        wl_entered, wl_updated)}
        self.request["Op"] = "info"
        self.request["Op-Digest"] = digest
        handler = pyzor.server.RequestHandler(self.request, database)
        self.expected_response["Count"] = "24"
        self.expected_response["WL-Count"] = "42"
        self.expected_response["Entered"] = self.timestamp(entered)
        self.expected_response["Updated"] = self.timestamp(updated)
        self.expected_response["WL-Entered"] = self.timestamp(wl_entered)
        self.expected_response["WL-Updated"] = self.timestamp(wl_updated)
        self.check_response(handler)
    def test_info_new(self):
        """Tests the info command handler with a new record"""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        database = {}
        self.request["Op"] = "info"
        self.request["Op-Digest"] = digest
        handler = pyzor.server.RequestHandler(self.request, database)
        self.expected_response["Count"] = "0"
        self.expected_response["WL-Count"] = "0"
        self.expected_response["Entered"] = "0"
        self.expected_response["Updated"] = "0"
        self.expected_response["WL-Entered"] = "0"
        self.expected_response["WL-Updated"] = "0"
        self.check_response(handler)
    def test_report(self):
        """Tests the report command handler"""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        database = {digest: pyzor.engines.common.Record(24, 42)}
        self.request["Op"] = "report"
        self.request["Op-Digest"] = digest
        handler = pyzor.server.RequestHandler(self.request, database)
        self.check_response(handler)
        # report increments the spam count for an existing record.
        self.assertEqual(database[digest].r_count, 25)
    def test_report_new(self):
        """Tests the report command handler with a new record"""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        database = {}
        self.request["Op"] = "report"
        self.request["Op-Digest"] = digest
        handler = pyzor.server.RequestHandler(self.request, database)
        self.check_response(handler)
        self.assertEqual(database[digest].r_count, 1)
    def test_whitelist(self):
        """Tests the whitelist command handler"""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        database = {digest: pyzor.engines.common.Record(24, 42)}
        self.request["Op"] = "whitelist"
        self.request["Op-Digest"] = digest
        handler = pyzor.server.RequestHandler(self.request, database)
        self.check_response(handler)
        # whitelist increments the whitelist count for an existing record.
        self.assertEqual(database[digest].wl_count, 43)
    def test_whitelist_new(self):
        """Tests the whitelist command handler with a new record"""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        database = {}
        self.request["Op"] = "whitelist"
        self.request["Op-Digest"] = digest
        handler = pyzor.server.RequestHandler(self.request, database)
        self.check_response(handler)
        self.assertEqual(database[digest].wl_count, 1)
    def test_handle_no_version(self):
        """Tests handling an request with no version specified"""
        self.request["Op"] = "ping"
        del self.request["PV"]
        handler = pyzor.server.RequestHandler(self.request)
        self.expected_response["Code"] = "400"
        self.expected_response["Diag"] = "Bad request"
        self.check_response(handler)
    def test_handle_unsupported_version(self):
        """Tests handling an request with an unsupported version specified"""
        self.request["Op"] = "ping"
        self.request["PV"] = str(pyzor.proto_version + 2)
        handler = pyzor.server.RequestHandler(self.request)
        self.expected_response["Code"] = "505"
        self.expected_response["Diag"] = "Version Not Supported"
        self.check_response(handler)
    def test_handle_not_implemented(self):
        """Tests handling an request with an unimplemented command"""
        self.request["Op"] = "notimplemented"
        acl = {pyzor.anonymous_user: "notimplemented"}
        handler = pyzor.server.RequestHandler(self.request, acl=acl)
        self.expected_response["Code"] = "501"
        self.expected_response["Diag"] = "Not implemented"
        self.check_response(handler)
    def test_handle_unauthorized(self):
        """Tests handling an request with an unauthorized command"""
        self.request["Op"] = "report"
        acl = {pyzor.anonymous_user: ("ping", "check")}
        handler = pyzor.server.RequestHandler(self.request, acl=acl)
        self.expected_response["Code"] = "403"
        self.expected_response["Diag"] = "Forbidden"
        self.check_response(handler)
    def test_handle_account(self):
        """Tests handling an request where user is not anonymous"""
        self.request["Op"] = "ping"
        self.request["User"] = "testuser"
        acl = {"testuser": ("ping", "check")}
        accounts = {"testuser": "testkey"}
        # Bypass real signature verification for this test.
        mock_vs = lambda x, y: None
        real_vs = pyzor.account.verify_signature
        pyzor.account.verify_signature = mock_vs
        try:
            handler = pyzor.server.RequestHandler(self.request, acl=acl,
                                                  accounts=accounts)
            self.check_response(handler)
        finally:
            pyzor.account.verify_signature = real_vs
    def test_handle_unknown_account(self):
        """Tests handling an request where user is unkwown"""
        self.request["Op"] = "ping"
        self.request["User"] = "testuser"
        acl = {"testuser": ("ping", "check")}
        accounts = {}
        self.expected_response["Code"] = "401"
        self.expected_response["Diag"] = "Unauthorized"
        def mock_vs(x, y):
            pass
        real_vs = pyzor.account.verify_signature
        pyzor.account.verify_signature = mock_vs
        try:
            handler = pyzor.server.RequestHandler(self.request, acl=acl,
                                                  accounts=accounts)
            self.check_response(handler)
        finally:
            pyzor.account.verify_signature = real_vs
    def test_handle_invalid_signature(self):
        """Tests handling an request where user key is invalid"""
        self.request["Op"] = "ping"
        self.request["User"] = "testuser"
        acl = {"testuser": ("ping", "check")}
        accounts = {"testuser": ("ping", "check")}
        self.expected_response["Code"] = "401"
        self.expected_response["Diag"] = "Unauthorized"
        def mock_vs(x, y):
            raise pyzor.SignatureError("Invalid signature.")
        real_vs = pyzor.account.verify_signature
        pyzor.account.verify_signature = mock_vs
        try:
            handler = pyzor.server.RequestHandler(self.request, acl=acl,
                                                  accounts=accounts)
            self.check_response(handler)
        finally:
            pyzor.account.verify_signature = real_vs
    def test_invalid_pv(self):
        """A non-numeric protocol version yields a 400 Bad request."""
        self.request["Op"] = "ping"
        self.request["PV"] = "ab2.13"
        handler = pyzor.server.RequestHandler(self.request)
        self.expected_response["Code"] = "400"
        self.expected_response["Diag"] = "Bad request"
        self.check_response(handler)
    def test_uncaught_exception(self):
        """An unexpected error in handling yields a 500 response."""
        patch("pyzor.server.RequestHandler._really_handle",
              side_effect=Exception("test")).start()
        self.request["Op"] = "ping"
        handler = pyzor.server.RequestHandler(self.request)
        self.expected_response["Code"] = "500"
        self.expected_response["Diag"] = "Internal Server Error"
        # The failure happens before the Thread header is echoed back.
        del self.expected_response["Thread"]
        self.check_response(handler)
class ServerTest(unittest.TestCase):
    """Smoke test: a pyzor Server can be constructed with mocked config."""
    def setUp(self):
        unittest.TestCase.setUp(self)
        # Patch the whole config module so no real files are touched.
        self.mock_config = patch("pyzor.config").start()
    def tearDown(self):
        unittest.TestCase.tearDown(self)
        patch.stopall()
    def test_server(self):
        """Constructing Server with dummy paths must not raise."""
        address = ("127.0.0.1", 24441)
        pyzor.server.Server(address, {}, "passwd_fn", "access_fn", None)
def suite():
    """Gather all the tests from this module in a test suite."""
    tests = unittest.TestSuite()
    for case in (RequestHandlerTest, ServerTest):
        tests.addTest(unittest.makeSuite(case))
    return tests
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 14,225 | 34.654135 | 81 | py |
pyzor | pyzor-master/tests/unit/test_client.py | import time
import email
import unittest
try:
from unittest.mock import Mock, patch, call
except ImportError:
from mock import Mock, patch, call
import pyzor.client
import pyzor.account
class TestBase(unittest.TestCase):
    """Shared fixture for pyzor.client tests.
    Patches the client's socket module so requests land in a mock, and
    provides helpers to compare the sent request / received response
    against expectations.
    """
    def setUp(self):
        """Build default request/response dicts and patch signing."""
        unittest.TestCase.setUp(self)
        self.thread = 33715
        self.timeout = None
        self.time = str(int(time.time()))
        patch("pyzor.account.sign_msg", return_value="TestSig").start()
        patch("pyzor.account.hash_key").start()
        # the response the mock socket will send
        self.response = {"Code": "200",
                         "Diag": "OK",
                         "PV": "2.1",
                         "Thread": "33715",
                         "Time": self.time
                         }
        self.mresponse = None
        self.mock_socket = None
        # the expected request that the client should send
        self.expected = {"Thread": str(self.thread),
                         "PV": str(pyzor.proto_version),
                         "User": "anonymous",
                         "Time": self.time,
                         "Sig": "TestSig"
                         }
    def tearDown(self):
        """Stop every patch started in setUp/patch_all."""
        unittest.TestCase.tearDown(self)
        patch.stopall()
    def get_requests(self):
        """Yield (args, kwargs) of every sendto() call on the mock socket."""
        for mock_call in self.mock_socket.mock_calls:
            name, args, kwargs = mock_call
            if name == "socket().sendto":
                yield args, kwargs
    def check_request(self):
        """Check if the request sent by the client is equal
        to the expected one.
        """
        req = {}
        for args, _ in self.get_requests():
            self.assertEqual(args[2], ('127.0.0.1', 24441))
            req = dict(email.message_from_string(args[0].decode()))
        self.assertEqual(req, self.expected)
    def patch_all(self, conf=None):
        """Patch the socket module so recvfrom returns self.response,
        optionally overriding mock attributes via `conf`."""
        if conf is None:
            conf = {}
        patch("pyzor.message.ThreadId.generate",
              return_value=self.thread).start()
        if self.response:
            # Serialize the response headers the way the server would.
            response = "\n".join("%s: %s" % (key, value)
                                 for key, value in self.response.items()) + "\n\n"
            self.mresponse = response.encode(), ("127.0.0.1", 24441)
        else:
            self.mresponse = None
        addrinfo = [(2, 2, 17, '', ('127.0.0.1', 24441))]
        config = {"socket.return_value": Mock(),
                  "socket.return_value.recvfrom.return_value": self.mresponse,
                  "getaddrinfo.return_value": addrinfo}
        config.update(conf)
        self.mock_socket = patch("pyzor.client.socket", **config).start()
    def check_client(self, accounts, method, *args, **kwargs):
        """Tests if the request and response are sent
        and read correctly by the client.
        """
        client = pyzor.client.Client(accounts=accounts,
                                     timeout=self.timeout)
        got_response = getattr(client, method)(*args, **kwargs)
        self.assertEqual(str(got_response), self.mresponse[0].decode())
        if self.expected is not None:
            self.check_request()
        return client
class ClientTest(TestBase):
    """Tests for pyzor.client.Client request/response handling."""
    def test_ping(self):
        """Test the client ping request"""
        self.expected["Op"] = "ping"
        self.patch_all()
        self.check_client(None, "ping")
    def test_pong(self):
        """Test the client pong request"""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.expected["Op"] = "pong"
        self.expected["Op-Digest"] = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.patch_all()
        self.check_client(None, "pong", digest)
    def test_check(self):
        """Test the client check request"""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.expected["Op"] = "check"
        self.expected["Op-Digest"] = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.patch_all()
        self.check_client(None, "check", digest)
    def test_info(self):
        """Test the client info request"""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.expected["Op"] = "info"
        self.expected["Op-Digest"] = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.patch_all()
        self.check_client(None, "info", digest)
    def test_report(self):
        """Test the client report request"""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.expected["Op"] = "report"
        self.expected["Op-Digest"] = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.expected["Op-Spec"] = "20,3,60,3"
        self.patch_all()
        self.check_client(None, "report", digest)
    def test_whitelist(self):
        """Test the client whitelist request"""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.expected["Op"] = "whitelist"
        self.expected["Op-Digest"] = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.expected["Op-Spec"] = "20,3,60,3"
        self.patch_all()
        self.check_client(None, "whitelist", digest)
    def test_handle_account(self):
        """Test client handling accounts"""
        test_account = pyzor.account.Account("TestUser", "TestKey", "TestSalt")
        self.expected["Op"] = "ping"
        self.expected["User"] = "TestUser"
        self.patch_all()
        self.check_client({("public.pyzor.org", 24441): test_account}, "ping")
    def test_handle_invalid_thread(self):
        """Test invalid thread id"""
        # Shift the expected thread so the response thread no longer matches.
        self.thread += 20
        self.expected["Op"] = "ping"
        self.patch_all()
        self.assertRaises(pyzor.ProtocolError, self.check_client, None, "ping")
    def test_set_timeout(self):
        """The client applies its timeout to the underlying socket."""
        self.expected = None
        self.patch_all()
        self.timeout = 10
        self.check_client(None, "ping")
        calls = [call.socket().settimeout(10), ]
        self.mock_socket.assert_has_calls(calls)
class BatchClientTest(TestBase):
    """Tests for pyzor.client.BatchClient digest batching.
    The batch client only sends after 10 queued digests, unless forced;
    flush() discards anything queued.
    """
    def test_report(self):
        """Ten queued reports are sent as one multi-digest request."""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.patch_all()
        client = pyzor.client.BatchClient()
        for i in range(10):
            client.report(digest)
        args, kwargs = list(self.get_requests())[0]
        msg = email.message_from_string(args[0].decode())
        self.assertEqual(len(msg.get_all("Op-Digest")), 10)
    def test_report_to_few(self):
        """Fewer than ten reports stay queued -- nothing is sent."""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.patch_all()
        client = pyzor.client.BatchClient()
        for i in range(9):
            client.report(digest)
        self.assertEqual(list(self.get_requests()), [])
    def test_whitelist(self):
        """Ten queued whitelists are sent as one multi-digest request."""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.patch_all()
        client = pyzor.client.BatchClient()
        for i in range(10):
            client.whitelist(digest)
        args, kwargs = list(self.get_requests())[0]
        msg = email.message_from_string(args[0].decode())
        self.assertEqual(len(msg.get_all("Op-Digest")), 10)
    def test_whitelist_to_few(self):
        """Fewer than ten whitelists stay queued -- nothing is sent."""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.patch_all()
        client = pyzor.client.BatchClient()
        for i in range(9):
            client.whitelist(digest)
        self.assertEqual(list(self.get_requests()), [])
    def test_force_report(self):
        """force() sends whatever reports are queued."""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.patch_all()
        client = pyzor.client.BatchClient()
        for i in range(9):
            client.report(digest)
        client.force()
        args, kwargs = list(self.get_requests())[0]
        msg = email.message_from_string(args[0].decode())
        self.assertEqual(len(msg.get_all("Op-Digest")), 9)
    def test_force_whitelist(self):
        """force() sends whatever whitelists are queued."""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.patch_all()
        client = pyzor.client.BatchClient()
        for i in range(9):
            client.whitelist(digest)
        client.force()
        args, kwargs = list(self.get_requests())[0]
        msg = email.message_from_string(args[0].decode())
        self.assertEqual(len(msg.get_all("Op-Digest")), 9)
    def test_flush_report(self):
        """flush() discards queued reports; nothing is ever sent."""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.patch_all()
        client = pyzor.client.BatchClient()
        for i in range(9):
            client.report(digest)
        client.flush()
        client.report(digest)
        self.assertEqual(list(self.get_requests()), [])
    def test_flush_whitelist(self):
        """flush() discards queued whitelists; nothing is ever sent."""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.patch_all()
        client = pyzor.client.BatchClient()
        for i in range(9):
            client.whitelist(digest)
        client.flush()
        client.whitelist(digest)
        self.assertEqual(list(self.get_requests()), [])
class ClientRunnerTest(unittest.TestCase):
    """Tests for the ClientRunner result-formatting classes."""
    def setUp(self):
        unittest.TestCase.setUp(self)
        self.server = "test.example.com", 24441
    def tearDown(self):
        unittest.TestCase.tearDown(self)
    def check_runner(self, test_class, response, results, kwargs=None):
        """Run `test_class` with a mocked routine returning `response`
        and compare the formatted runner.results against `results`."""
        if kwargs is None:
            kwargs = {}
        kwargs["return_value"] = response
        mock_routine = Mock(**kwargs)
        runner = test_class(mock_routine)
        runner.run(self.server, ())
        self.assertEqual(runner.results, results)
        return runner
    def test_normal(self):
        """ClientRunner formats 'server<tab>(code, diag)' lines."""
        response = pyzor.message.Response()
        response["Diag"] = "OK"
        response["Code"] = "200"
        server = "%s:%s\t" % self.server
        results = ["%s%s\n" % (server, response.head_tuple()), ]
        self.check_runner(pyzor.client.ClientRunner, response, results)
    def test_check(self):
        """CheckClientRunner appends the report and whitelist counts."""
        response = pyzor.message.Response()
        response["Diag"] = "OK"
        response["Code"] = "200"
        response["Count"] = "2"
        response["WL-Count"] = "1"
        server = "%s:%s\t" % self.server
        results = ["%s%s\t%s\t%s\n" % (server, response.head_tuple(), "2",
                                       "1")]
        self.check_runner(pyzor.client.CheckClientRunner, response, results)
    def test_info(self):
        """InfoClientRunner formats record timestamps as ctime strings."""
        # Raw server response; header lines must start at column 0.
        response = """Code: 200
Diag: OK
PV: 2.1
Thread: 8521
Entered: 1400221786
Updated: 1400221794
WL-Entered: 0
WL-Updated: 0
Count: 4
WL-Count: 0
"""
        response = email.message_from_string(response,
                                             _class=pyzor.message.Response)
        server = "%s:%s" % self.server
        result = ("%s\t(200, 'OK')\n"
                  "\tCount: 4\n"
                  "\tEntered: %s\n"
                  "\tUpdated: %s\n"
                  "\tWL-Count: 0\n"
                  "\tWL-Entered: %s\n"
                  "\tWL-Updated: %s\n\n" %
                  (server, time.ctime(1400221786), time.ctime(1400221794),
                   time.ctime(0), time.ctime(0)))
        self.maxDiff = None
        self.check_runner(pyzor.client.InfoClientRunner, response, [result])
    def test_info_never(self):
        """A -1 timestamp is rendered as 'Never'."""
        response = """Code: 200
Diag: OK
PV: 2.1
Thread: 8521
Entered: 1400221786
Updated: 1400221794
WL-Entered: -1
WL-Updated: -1
Count: 4
WL-Count: 0
"""
        response = email.message_from_string(response,
                                             _class=pyzor.message.Response)
        server = "%s:%s" % self.server
        result = ("%s\t(200, 'OK')\n"
                  "\tCount: 4\n"
                  "\tEntered: %s\n"
                  "\tUpdated: %s\n"
                  "\tWL-Count: 0\n"
                  "\tWL-Entered: Never\n"
                  "\tWL-Updated: Never\n\n" %
                  (server, time.ctime(1400221786), time.ctime(1400221794)))
        self.maxDiff = None
        self.check_runner(pyzor.client.InfoClientRunner, response, [result])
def suite():
    """Gather all the tests from this module in a test suite."""
    tests = unittest.TestSuite()
    for case in (ClientTest, BatchClientTest, ClientRunnerTest):
        tests.addTest(unittest.makeSuite(case))
    return tests
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 12,149 | 30.889764 | 82 | py |
pyzor | pyzor-master/tests/unit/__init__.py | """A suite of unit tests that verifies the correct behaviour of various
functions/methods in the pyzord code.
Note these tests the source of pyzor, not the version currently installed.
"""
import unittest
def suite():
    """Gather all the tests from this package in a test suite."""
    # NOTE(review): these are implicit relative imports (Python 2 style);
    # under Python 3 they resolve only if tests/unit is on sys.path --
    # confirm how the suite is invoked before changing them.
    import test_client
    import test_config
    import test_digest
    import test_server
    import test_account
    import test_forwarder
    import test_engines
    test_suite = unittest.TestSuite()
    test_suite.addTest(test_engines.suite())
    test_suite.addTest(test_client.suite())
    test_suite.addTest(test_config.suite())
    test_suite.addTest(test_digest.suite())
    test_suite.addTest(test_server.suite())
    test_suite.addTest(test_account.suite())
    test_suite.addTest(test_forwarder.suite())
    return test_suite
# Run the whole unit-test suite when executed directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| 897 | 26.212121 | 74 | py |
pyzor | pyzor-master/tests/unit/test_digest.py | """The the pyzor.digest module
"""
import unittest
import pyzor.digest
from pyzor.digest import *
try:
from unittest.mock import patch, Mock, call
except ImportError:
from mock import patch, Mock, call
# Fixture HTML documents and their expected stripped-text output, used by
# HTMLStripperTests below.
HTML_TEXT = """<html><head><title>Email spam</title></head><body>
<p><b>Email spam</b>, also known as <b>junk email</b>
or <b>unsolicited bulk email</b> (<i>UBE</i>), is a subset of
<a href="/wiki/Spam_(electronic)" title="Spam (electronic)">electronic spam</a>
involving nearly identical messages sent to numerous recipients by <a href="/wiki/Email" title="Email">
email</a>. Clicking on <a href="/wiki/Html_email#Security_vulnerabilities" title="Html email" class="mw-redirect">
links in spam email</a> may send users to <a href="/wiki/Phishing" title="Phishing">phishing</a>
web sites or sites that are hosting <a href="/wiki/Malware" title="Malware">malware</a>.</body></html>"""
HTML_TEXT_STRIPED = 'Email spam Email spam , also known as junk email or unsolicited bulk email ( UBE ),' \
' is a subset of electronic spam involving nearly identical messages sent to numerous recipients by email' \
' . Clicking on links in spam email may send users to phishing web sites or sites that are hosting malware .'
HTML_STYLE = """<html><head></head><sTyle>Some random style</stylE>
<body>This is a test.</body></html>
"""
HTML_SCRIPT = """<html><head></head><SCRIPT>Some random script</SCRIPT>
<body>This is a test.</body></html>
"""
class HTMLStripperTests(unittest.TestCase):
    """Tests for pyzor.digest.HTMLStripper tag/style/script removal."""
    def setUp(self):
        unittest.TestCase.setUp(self)
        self.data = []
    def tearDown(self):
        unittest.TestCase.tearDown(self)
    def _stripped(self, html):
        """Feed `html` through HTMLStripper, return the joined text."""
        stripper = HTMLStripper(self.data)
        stripper.feed(html)
        return " ".join(self.data)
    def test_HTMLStripper(self):
        """Tags are removed; text content is preserved."""
        self.assertEqual(self._stripped(HTML_TEXT), HTML_TEXT_STRIPED)
    def test_strip_style(self):
        """<style> elements (any case) are dropped entirely."""
        self.assertEqual(self._stripped(HTML_STYLE), "This is a test.")
    def test_strip_script(self):
        """<script> elements (any case) are dropped entirely."""
        self.assertEqual(self._stripped(HTML_SCRIPT), "This is a test.")
class PreDigestTests(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.lines = []
def mock_digest_paylods(c, message):
yield message.decode("utf8")
def mock_handle_line(s, line):
self.lines.append(line.decode("utf8"))
self.real_digest_payloads = DataDigester.digest_payloads
self.real_handle_line = DataDigester.handle_line
DataDigester.digest_payloads = mock_digest_paylods
DataDigester.handle_line = mock_handle_line
def tearDown(self):
unittest.TestCase.tearDown(self)
DataDigester.digest_payloads = self.real_digest_payloads
DataDigester.handle_line = self.real_handle_line
def test_predigest_emails(self):
"""Test email removal in the predigest process"""
real_longstr = DataDigester.longstr_ptrn
DataDigester.longstr_ptrn = re.compile(r'\S{100,}')
emails = ["test@example.com",
"test123@example.com",
"test+abc@example.com",
"test.test2@example.com",
"test.test2+abc@example.com", ]
message = "Test %s Test2"
expected = "TestTest2"
try:
for email in emails:
self.lines = []
DataDigester((message % email).encode("utf8"))
self.assertEqual(self.lines[0], expected)
finally:
DataDigester.longstr_ptrn = real_longstr
def test_predigest_urls(self):
"""Test url removal in the predigest process"""
real_longstr = DataDigester.longstr_ptrn
DataDigester.longstr_ptrn = re.compile(r'\S{100,}')
urls = ["http://www.example.com",
# "www.example.com", # XXX This also fail
"http://example.com",
# "example.com", # XXX This also fails
"http://www.example.com/test/"
"http://www.example.com/test/test2", ]
message = "Test %s Test2"
expected = "TestTest2"
try:
for url in urls:
self.lines = []
DataDigester((message % url).encode("utf8"))
self.assertEqual(self.lines[0], expected)
finally:
DataDigester.longstr_ptrn = real_longstr
def test_predigest_long(self):
"""Test long "words" removal in the predigest process"""
strings = ["0A2D3f%a#S",
"3sddkf9jdkd9",
"@@#@@@@@@@@@"]
message = "Test %s Test2"
expected = "TestTest2"
for string in strings:
self.lines = []
DataDigester((message % string).encode("utf8"))
self.assertEqual(self.lines[0], expected)
def test_predigest_min_line_lenght(self):
"""Test small lines removal in the predigest process"""
message = "This line is included\n" \
"not this\n" \
"This also"
expected = ["Thislineisincluded", "Thisalso"]
DataDigester(message.encode("utf8"))
self.assertEqual(self.lines, expected)
    def test_predigest_atomic(self):
        """Test atomic messages (lines <= 4) in the predigest process"""
        # Short messages are digested whole: every line contributes.
        message = "All this message\nShould be included\nIn the predigest"
        expected = ["Allthismessage", "Shouldbeincluded", "Inthepredigest"]
        DataDigester(message.encode("utf8"))
        self.assertEqual(self.lines, expected)
    def test_predigest_pieced(self):
        """Test pieced messages (lines > 4) in the predigest process"""
        # Long messages are digested from two slices only; for a
        # 100-line body those are lines 20-22 and 60-62.
        message = ""
        for i in range(100):
            message += "Line%d test test test\n" % i
        expected = []
        for i in [20, 21, 22, 60, 61, 62]:
            expected.append("Line%dtesttesttest" % i)
        DataDigester(message.encode("utf8"))
        self.assertEqual(self.lines, expected)
class DigestTests(unittest.TestCase):
    """Check that the final value is the SHA1 hex digest of the
    predigested (whitespace-stripped) text."""
    def setUp(self):
        unittest.TestCase.setUp(self)
        self.lines = []
        # Bypass MIME handling: feed the raw message straight through.
        def mock_digest_paylods(c, message):
            yield message.decode("utf8")
        self.real_digest_payloads = DataDigester.digest_payloads
        DataDigester.digest_payloads = mock_digest_paylods
    def tearDown(self):
        unittest.TestCase.tearDown(self)
        DataDigester.digest_payloads = self.real_digest_payloads
    def test_digest(self):
        message = b"That's some good ham right there"
        predigested = b"That'ssomegoodhamrightthere"
        digest = hashlib.sha1()
        digest.update(predigested)
        expected = digest.hexdigest()
        result = DataDigester(message).value
        self.assertEqual(result, expected)
    def test_digest_w_null(self):
        # Embedded NUL bytes must not change the resulting digest.
        message = b"That's some good ham rig\x00ht there"
        predigested = b"That'ssomegoodhamrightthere"
        digest = hashlib.sha1()
        digest.update(predigested)
        expected = digest.hexdigest()
        result = DataDigester(message).value
        self.assertEqual(result, expected)
class MessageDigestTest(unittest.TestCase):
    """Exercise DataDigester.digest_payloads charset/MIME handling with
    mocked message parts (HTML normalization is stubbed out)."""
    def setUp(self):
        unittest.TestCase.setUp(self)
        patch("pyzor.digest.DataDigester.normalize_html_part",
              return_value="normalized").start()
        # Default mock part: a text part that declares a utf8 charset and
        # decodes to the sentinel string "decoded".
        self.config = {
            "get_content_maintype.return_value": "text",
            "get_content_charset.return_value": "utf8",
            "get_payload.return_value": Mock(),
            "get_payload.return_value.decode.return_value": "decoded"
        }
    def tearDown(self):
        unittest.TestCase.tearDown(self)
        patch.stopall()
    def check_msg(self):
        # Build a single-part message from self.config, run it through
        # digest_payloads, and return (part, message, yielded payloads).
        mock_part = Mock(**self.config)
        conf = {"walk.return_value": [mock_part]}
        mock_msg = Mock(**conf)
        return mock_part, mock_msg, list(DataDigester.digest_payloads(mock_msg))
    def test_text(self):
        mock_part, mock_msg, result = self.check_msg()
        self.assertEqual(result, ["decoded"])
        expected = [call.decode('utf8', 'ignore')]
        payload = mock_part.get_payload.return_value
        payload.assert_has_calls(expected, True)
    def test_text_no_charset(self):
        # With no declared charset, fall back to ascii.
        self.config["get_content_charset.return_value"] = None
        mock_part, mock_msg, result = self.check_msg()
        self.assertEqual(result, ["decoded"])
        expected = [call.decode('ascii', 'ignore')]
        payload = mock_part.get_payload.return_value
        payload.assert_has_calls(expected)
    def test_text_quopri(self):
        # quopri must be decoded with errors='strict', not 'ignore'.
        self.config["get_content_charset.return_value"] = "quopri"
        mock_part, mock_msg, result = self.check_msg()
        self.assertEqual(result, ["decoded"])
        expected = [call.decode('quopri', 'strict')]
        payload = mock_part.get_payload.return_value
        payload.assert_has_calls(expected)
    def test_text_lookuperror(self):
        # An unknown charset should trigger the ascii fallback.
        def _decode(encoding, errors):
            if encoding not in ("ascii",):
                raise LookupError()
            return "decoded"
        self.config["get_payload.return_value.decode.side_effect"] = _decode
        mock_part, mock_msg, result = self.check_msg()
        self.assertEqual(result, ["decoded"])
        expected = [call.decode('utf8', 'ignore'),
                    call.decode('ascii', 'ignore')]
        payload = mock_part.get_payload.return_value
        payload.assert_has_calls(expected)
    def test_text_unicodeerror(self):
        # If every decode attempt fails the part yields nothing.
        self.config["get_payload.return_value.decode.side_effect"] = UnicodeError
        mock_part, mock_msg, result = self.check_msg()
        self.assertEqual(result, [])
        expected = [call.decode('utf8', 'ignore'),
                    call.decode('ascii', 'ignore')]
        payload = mock_part.get_payload.return_value
        payload.assert_has_calls(expected)
    def test_html(self):
        # HTML parts go through the (stubbed) normalizer.
        self.config["get_content_subtype.return_value"] = "html"
        mock_part, mock_msg, result = self.check_msg()
        self.assertEqual(result, ["normalized"])
    def test_multipart(self):
        # Multipart containers themselves contribute no payload.
        self.config["get_content_maintype.return_value"] = "nottext"
        self.config["is_multipart.return_value"] = True
        mock_part, mock_msg, result = self.check_msg()
        self.assertEqual(result, [])
    def test_nontext(self):
        # Non-text leaf parts are yielded undecoded.
        self.config["get_content_maintype.return_value"] = "nottext"
        self.config["is_multipart.return_value"] = False
        mock_part, mock_msg, result = self.check_msg()
        self.assertEqual(result, [mock_part.get_payload.return_value])
def suite():
    """Gather all the tests from this module in a test suite."""
    combined = unittest.TestSuite()
    for case in (HTMLStripperTests, PreDigestTests,
                 DigestTests, MessageDigestTest):
        combined.addTest(unittest.makeSuite(case))
    return combined


if __name__ == '__main__':
    unittest.main()
| 11,210 | 35.399351 | 129 | py |
pyzor | pyzor-master/tests/unit/test_engines/test_redis.py | """Test the pyzor.engines.gdbm_ module."""
import time
import logging
import unittest
from datetime import datetime
try:
from unittest.mock import Mock, patch, call
except ImportError:
from mock import Mock, patch, call
import pyzor.engines.redis_
import pyzor.engines.common
class EncodingRedisTest(unittest.TestCase):
    """Test RedisDBHandle record encoding/decoding (hash-of-ints form)."""
    r_count = 24
    wl_count = 42
    entered = datetime(2014, 4, 23, 15, 41, 30)
    updated = datetime(2014, 4, 25, 17, 22, 25)
    wl_entered = datetime(2014, 2, 12, 11, 10, 55)
    wl_updated = datetime(2014, 3, 25, 5, 1, 50)
    def setUp(self):
        unittest.TestCase.setUp(self)
        self.record = pyzor.engines.common.Record(self.r_count, self.wl_count,
                                                  self.entered, self.updated,
                                                  self.wl_entered,
                                                  self.wl_updated)
        # Unix-timestamp equivalents of the datetimes above, as stored.
        self.entered_st = int(time.mktime(self.entered.timetuple()))
        self.updated_st = int(time.mktime(self.updated.timetuple()))
        self.wl_entered_st = int(time.mktime(self.wl_entered.timetuple()))
        self.wl_updated_st = int(time.mktime(self.wl_updated.timetuple()))
    def compare_records(self, r1, r2):
        # Field-by-field Record comparison (Record has no __eq__ here).
        attrs = ("r_count", "r_entered", "r_updated",
                 "wl_count", "wl_entered", "wl_updated")
        self.assertTrue(all(getattr(r1, attr) == getattr(r2, attr)
                            for attr in attrs))
    def tearDown(self):
        unittest.TestCase.tearDown(self)
    def test_encode_record(self):
        expected = {
            "r_count": 24,
            "r_entered": self.entered_st,
            "r_updated": self.updated_st,
            "wl_count": 42,
            "wl_entered": self.wl_entered_st,
            "wl_updated": self.wl_updated_st
        }
        result = pyzor.engines.redis_.RedisDBHandle._encode_record(self.record)
        self.assertEqual(result, expected)
    def test_encode_record_no_date(self):
        # Missing timestamps encode as 0.
        expected = {
            "r_count": 24,
            "r_entered": self.entered_st,
            "r_updated": 0,
            "wl_count": 42,
            "wl_entered": self.wl_entered_st,
            "wl_updated": self.wl_updated_st
        }
        self.record.r_updated = None
        result = pyzor.engines.redis_.RedisDBHandle._encode_record(self.record)
        self.assertEqual(result, expected)
    def test_encode_record_no_white(self):
        # A record with no whitelist votes encodes all wl_* fields as 0.
        expected = {
            "r_count": 24,
            "r_entered": self.entered_st,
            "r_updated": self.updated_st,
            "wl_count": 0,
            "wl_entered": 0,
            "wl_updated": 0
        }
        self.record.wl_count = 0
        self.record.wl_entered = None
        self.record.wl_updated = None
        result = pyzor.engines.redis_.RedisDBHandle._encode_record(self.record)
        self.assertEqual(result, expected)
    def test_decode_record(self):
        # Redis returns byte-string keys; decoding must handle them.
        encoded = {
            b"r_count": 24,
            b"r_entered": self.entered_st,
            b"r_updated": self.updated_st,
            b"wl_count": 42,
            b"wl_entered": self.wl_entered_st,
            b"wl_updated": self.wl_updated_st
        }
        result = pyzor.engines.redis_.RedisDBHandle._decode_record(encoded)
        self.compare_records(result, self.record)
    def test_decode_record_no_date(self):
        # A 0 timestamp decodes back to None.
        encoded = {
            b"r_count": 24,
            b"r_entered": self.entered_st,
            b"r_updated": 0,
            b"wl_count": 42,
            b"wl_entered": self.wl_entered_st,
            b"wl_updated": self.wl_updated_st
        }
        result = pyzor.engines.redis_.RedisDBHandle._decode_record(encoded)
        self.record.r_updated = None
        self.compare_records(result, self.record)
    def test_decode_record_no_white(self):
        encoded = {
            b"r_count": 24,
            b"r_entered": self.entered_st,
            b"r_updated": self.updated_st,
            b"wl_count": 0,
            b"wl_entered": 0,
            b"wl_updated": 0
        }
        result = pyzor.engines.redis_.RedisDBHandle._decode_record(encoded)
        self.record.wl_count = 0
        self.record.wl_entered = None
        self.record.wl_updated = None
        self.compare_records(result, self.record)
class RedisTest(unittest.TestCase):
    """Verify RedisDBHandle issues the expected redis commands; the
    redis client library and record (en|de)coding are mocked out."""
    max_age = 60 * 60
    def setUp(self):
        unittest.TestCase.setUp(self)
        logger = logging.getLogger("pyzord")
        logger.addHandler(logging.NullHandler())
        # Replace the redis module and make encode/decode identities so
        # the tests can assert on raw call arguments.
        self.mredis = patch("pyzor.engines.redis_.redis", create=True).start()
        patch("pyzor.engines.redis_.RedisDBHandle._encode_record",
              side_effect=lambda x: x).start()
        patch("pyzor.engines.redis_.RedisDBHandle._decode_record",
              side_effect=lambda x: x).start()
    def tearDown(self):
        unittest.TestCase.tearDown(self)
        patch.stopall()
    def test_init(self):
        # The DSN string "host,port,password,db" maps onto StrictRedis kwargs.
        expected = {"host": "example.com",
                    "port": 6387,
                    "password": "passwd",
                    "db": 5,
                    }
        db = pyzor.engines.redis_.RedisDBHandle("example.com,6387,passwd,5",
                                                None)
        self.mredis.StrictRedis.assert_called_with(**expected)
    def test_init_defaults(self):
        expected = {"host": "localhost",
                    "port": 6379,
                    "password": None,
                    "db": 0,
                    }
        db = pyzor.engines.redis_.RedisDBHandle(",,,", None)
        self.mredis.StrictRedis.assert_called_with(**expected)
    def test_set(self):
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        value = "record test"
        db = pyzor.engines.redis_.RedisDBHandle(",,,", None)
        db[digest] = value
        expected = ("pyzord.digest_v1.%s" % digest, value)
        self.mredis.StrictRedis.return_value.hmset.assert_called_with(*expected)
    def test_set_max_age(self):
        # With max_age set, a TTL is attached via EXPIRE after the HMSET.
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        value = "record test"
        db = pyzor.engines.redis_.RedisDBHandle(",,,", None,
                                                max_age=self.max_age)
        db[digest] = value
        expected1 = ("pyzord.digest_v1.%s" % digest, value)
        expected2 = ("pyzord.digest_v1.%s" % digest, self.max_age)
        self.mredis.StrictRedis.return_value.hmset.assert_called_with(*expected1)
        self.mredis.StrictRedis.return_value.expire.assert_called_with(*expected2)
    def test_get(self):
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        db = pyzor.engines.redis_.RedisDBHandle(",,,", None)
        result = db[digest]
        expected = ("pyzord.digest_v1.%s" % digest,)
        self.mredis.StrictRedis.return_value.hgetall.assert_called_with(*expected)
    def test_items(self):
        # items() should scan the v1 key namespace then fetch each hash.
        patch("pyzor.engines.redis_.redis.StrictRedis.return_value.keys",
              return_value=["2aedaac999d71421c9ee49b9d81f627a7bc570aa"]).start()
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        db = pyzor.engines.redis_.RedisDBHandle(",,,", None)
        db.items()[0]
        expected = ("pyzord.digest_v1.%s" % digest,)
        self.mredis.StrictRedis.return_value.keys.assert_called_with("pyzord.digest_v1.*")
        self.mredis.StrictRedis.return_value.hgetall.assert_called_with(*expected)
    def test_delete(self):
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        db = pyzor.engines.redis_.RedisDBHandle(",,,", None)
        del db[digest]
        expected = ("pyzord.digest_v1.%s" % digest,)
        self.mredis.StrictRedis.return_value.delete.assert_called_with(*expected)
def suite():
    """Gather all the tests from this module in a test suite."""
    combined = unittest.TestSuite()
    for case in (EncodingRedisTest, RedisTest):
        combined.addTest(unittest.makeSuite(case))
    return combined


if __name__ == '__main__':
    unittest.main()
| 8,064 | 34.528634 | 90 | py |
pyzor | pyzor-master/tests/unit/test_engines/test_mysql.py | """Test the pyzor.engines.mysql module."""
import unittest
import threading
from datetime import datetime, timedelta
import pyzor.engines
import pyzor.engines.mysql
import pyzor.engines.common
class MockTimer():
    """Inert stand-in for threading.Timer: construction, start() and
    setDaemon() all do nothing, keeping the tests single-threaded."""

    def __init__(self, *args, **kwargs):
        return None

    def start(self):
        return None

    def setDaemon(self, daemon):
        return None
def make_MockMySQL(result, queries):
    """Build a stand-in for the MySQLdb module.

    Every SQL statement executed through it is appended to *queries* as
    a ``(query, args)`` tuple, and *result* is served as the single row
    returned by any cursor.
    """
    class MockCursor():
        def __init__(self):
            self.done = False

        def execute(self, query, args=None):
            # Record the statement instead of talking to a server.
            queries.append((query, args))

        def fetchone(self):
            # Serve *result* exactly once, then behave as exhausted.
            if self.done:
                return None
            self.done = True
            return result

        def fetchall(self):
            return [result]

        def close(self):
            return None

    class MockDB():
        def cursor(self, *args, **kwargs):
            return MockCursor()

        def commit(self):
            return None

        def autocommit(self, value):
            return None

        def close(self):
            return None

    class MockMysql():
        @staticmethod
        def connect(*args, **kwargs):
            return MockDB()

        class Error(Exception):
            pass

        class cursors:
            class SSCursor:
                pass

    return MockMysql
class MySQLTest(unittest.TestCase):
    """Test the MySQLDBHandle class (SQL issued, not a real server)."""
    max_age = 60 * 60 * 24 * 30 * 4
    r_count = 24
    wl_count = 42
    entered = datetime.now() - timedelta(days=10)
    updated = datetime.now() - timedelta(days=2)
    wl_entered = datetime.now() - timedelta(days=20)
    wl_updated = datetime.now() - timedelta(days=3)
    # Subclasses override this to exercise the threaded/process variants.
    handler = pyzor.engines.mysql.MySQLDBHandle
    def setUp(self):
        unittest.TestCase.setUp(self)
        # Disable the periodic cleanup thread and swap in a fake MySQLdb
        # that records every statement into self.queries.
        self.real_timer = threading.Timer
        threading.Timer = MockTimer
        self.record = pyzor.engines.common.Record(self.r_count, self.wl_count,
                                                  self.entered, self.updated,
                                                  self.wl_entered, self.wl_updated)
        self.response = self.record_unpack()
        self.queries = []
        mock_MySQL = make_MockMySQL(self.response, self.queries)
        try:
            self.real_mysql = pyzor.engines.mysql.MySQLdb
        except AttributeError:
            self.real_mysql = None
        setattr(pyzor.engines.mysql, "MySQLdb", mock_MySQL)
    def tearDown(self):
        unittest.TestCase.tearDown(self)
        threading.Timer = self.real_timer
        pyzor.engines.mysql.MySQLdb = self.real_mysql
    def record_unpack(self, record=None):
        # Flatten a Record into the column order used by the handler.
        if not record:
            record = self.record
        return (record.r_count, record.wl_count,
                record.r_entered, record.r_updated,
                record.wl_entered, record.wl_updated)
    def test_reconnect(self):
        """Test MySQLDBHandle.__init__"""
        # Constructing with a max_age should immediately purge old rows.
        expected = "DELETE FROM testtable WHERE r_updated<%s"
        self.handler("testhost,testuser,testpass,testdb,testtable", None,
                     max_age=self.max_age)
        self.assertEqual(self.queries[0][0], expected)
    def test_no_reorganize(self):
        # With no max_age no cleanup statement should be issued.
        self.handler("testhost,testuser,testpass,testdb,testtable", None,
                     max_age=None)
        self.assertFalse(self.queries)
    def test_set_item(self):
        """Test MySQLDBHandle.__setitem__"""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        expected = ("INSERT INTO testtable (digest, r_count, wl_count, "
                    "r_entered, r_updated, wl_entered, wl_updated) "
                    "VALUES (%s, %s, %s, %s, %s, %s, %s) ON "
                    "DUPLICATE KEY UPDATE r_count=%s, wl_count=%s, "
                    "r_entered=%s, r_updated=%s, wl_entered=%s, "
                    "wl_updated=%s",
                    (digest, self.r_count, self.wl_count, self.entered,
                     self.updated, self.wl_entered, self.wl_updated,
                     self.r_count, self.wl_count, self.entered,
                     self.updated, self.wl_entered, self.wl_updated))
        handle = self.handler("testhost,testuser,testpass,testdb,testtable",
                              None, max_age=self.max_age)
        handle[digest] = self.record
        self.assertEqual(self.queries[1], expected)
    def test_get_item(self):
        """Test MySQLDBHandle.__getitem__"""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        expected = ("SELECT r_count, wl_count, r_entered, r_updated, "
                    "wl_entered, wl_updated FROM testtable WHERE digest=%s",
                    (digest,))
        handle = self.handler("testhost,testuser,testpass,testdb,testtable",
                              None, max_age=self.max_age)
        result = handle[digest]
        self.assertEqual(self.queries[1], expected)
        self.assertEqual(self.record_unpack(result), self.record_unpack())
    def test_del_item(self):
        """Test MySQLDBHandle.__delitem__"""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        expected = ("DELETE FROM testtable WHERE digest=%s", (digest,))
        handle = self.handler("testhost,testuser,testpass,testdb,testtable",
                              None, max_age=self.max_age)
        del handle[digest]
        self.assertEqual(self.queries[1], expected)
    def test_items(self):
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        expected = ("SELECT digest, r_count, wl_count, r_entered, r_updated, "
                    "wl_entered, wl_updated FROM testtable", None)
        self.response = (digest, self.response)
        handle = self.handler("testhost,testuser,testpass,testdb,testtable",
                              None, max_age=self.max_age)
        handle.items()
        self.assertEqual(self.queries[1], expected)
    def test_iter(self):
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        expected = ("SELECT digest FROM testtable", None)
        self.response = (digest,)
        handle = self.handler("testhost,testuser,testpass,testdb,testtable",
                              None, max_age=self.max_age)
        for d in handle:
            pass
        self.assertEqual(self.queries[1], expected)
class ThreadedMySQLTest(MySQLTest):
    """Run the MySQLTest suite against the ThreadedMySQLDBHandle variant."""
    handler = pyzor.engines.mysql.ThreadedMySQLDBHandle
class ProcessesMySQLTest(MySQLTest):
    """Run the MySQLTest suite against the ProcessMySQLDBHandle variant."""
    handler = pyzor.engines.mysql.ProcessMySQLDBHandle
def suite():
    """Gather all the tests from this module in a test suite."""
    combined = unittest.TestSuite()
    for case in (MySQLTest, ThreadedMySQLTest, ProcessesMySQLTest):
        combined.addTest(unittest.makeSuite(case))
    return combined


if __name__ == '__main__':
    unittest.main()
| 6,907 | 33.368159 | 83 | py |
pyzor | pyzor-master/tests/unit/test_engines/test_gdbm.py | """Test the pyzor.engines.gdbm_ module."""
import sys
import time
import unittest
import threading
from datetime import datetime, timedelta
import pyzor.engines.gdbm_
import pyzor.engines.common
class MockTimer():
    """Inert stand-in for threading.Timer: construction, start() and
    setDaemon() all do nothing, keeping the tests single-threaded."""

    def __init__(self, *args, **kwargs):
        return None

    def start(self):
        return None

    def setDaemon(self, daemon):
        return None
class MockGdbmDB(dict):
    """Mock a gdbm database.

    Emulates gdbm's key-traversal protocol: ``firstkey()`` returns the
    first key (or None when empty) and ``nextkey(key)`` walks forward,
    returning None once every key has been visited.
    """
    def firstkey(self):
        if not self.keys():
            return None
        # Position the cursor on the second key for the next nextkey().
        self.key_index = 1
        return list(self.keys())[0]
    def nextkey(self, key):
        # BUG FIX: the original did ``self.keys()[...]`` (dict views are
        # not subscriptable on Python 3) and incremented *before*
        # indexing, which skipped the second key and could raise
        # IndexError on the last one.
        keys = list(self.keys())
        if self.key_index >= len(keys):
            return None
        result = keys[self.key_index]
        self.key_index += 1
        return result
    def sync(self):
        pass
    def reorganize(self):
        pass
class GdbmTest(unittest.TestCase):
    """Test the GdbmDBHandle class against a dict-backed fake gdbm."""
    # Subclasses override this to exercise the threaded variant.
    handler = pyzor.engines.gdbm_.GdbmDBHandle
    max_age = 60 * 60 * 24 * 30 * 4
    r_count = 24
    wl_count = 42
    entered = datetime.now() - timedelta(days=10)
    updated = datetime.now() - timedelta(days=2)
    wl_entered = datetime.now() - timedelta(days=20)
    wl_updated = datetime.now() - timedelta(days=3)
    def setUp(self):
        unittest.TestCase.setUp(self)
        # Disable the periodic cleanup thread and route gdbm.open to the
        # in-memory MockGdbmDB instance.
        self.real_timer = threading.Timer
        threading.Timer = MockTimer
        self.db = MockGdbmDB()
        class MockGdbm():
            @staticmethod
            def open(fn, mode):
                return self.db
        try:
            self.real_gdbm = pyzor.engines.gdbm_.gdbm
        except AttributeError:
            self.real_gdbm = None
        setattr(pyzor.engines.gdbm_, "gdbm", MockGdbm())
        self.record = pyzor.engines.common.Record(self.r_count, self.wl_count,
                                                  self.entered, self.updated,
                                                  self.wl_entered, self.wl_updated)
    def tearDown(self):
        unittest.TestCase.tearDown(self)
        threading.Timer = self.real_timer
        pyzor.engines.gdbm_.gdbm = self.real_gdbm
    def record_as_str(self, record=None):
        # Serialize a Record the way GdbmDBHandle stores it (version 1).
        if not record:
            record = self.record
        return ("1,%s,%s,%s,%s,%s,%s" % (record.r_count, record.r_entered,
                                         record.r_updated, record.wl_count,
                                         record.wl_entered, record.wl_updated)).encode("utf8")
    def test_set_item(self):
        """Test GdbmDBHandle.__setitem__"""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        handle = self.handler(None, None, max_age=self.max_age)
        handle[digest] = self.record
        self.assertEqual(self.db[digest], self.record_as_str().decode("utf8"))
    def test_get_item(self):
        """Test GdbmDBHandle.__getitem__"""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        handle = self.handler(None, None, max_age=self.max_age)
        self.db[digest] = self.record_as_str()
        result = handle[digest]
        self.assertEqual(self.record_as_str(result), self.record_as_str())
    def test_items(self):
        """Test GdbmDBHandle.items()"""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        handle = self.handler(None, None, max_age=self.max_age)
        self.db[digest] = self.record_as_str()
        key, result = handle.items()[0]
        self.assertEqual(key, digest)
        self.assertEqual(self.record_as_str(result), self.record_as_str())
    def test_del_item(self):
        """Test GdbmDBHandle.__delitem__"""
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        handle = self.handler(None, None, max_age=self.max_age)
        self.db[digest] = self.record_as_str()
        del handle[digest]
        self.assertFalse(self.db.get(digest))
    def test_reorganize_older(self):
        """Test GdbmDBHandle.start_reorganizing with older records"""
        # The record was updated 2 days ago; a 1-day max_age removes it.
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.db[digest] = self.record_as_str()
        handle = self.handler(None, None, max_age=3600 * 24)
        self.assertFalse(self.db.get(digest))
    def test_reorganize_older_no_max_age(self):
        """Test GdbmDBHandle.start_reorganizing with older records, but no
        max_age set.
        """
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.db[digest] = self.record_as_str()
        handle = self.handler(None, None, max_age=None)
        self.assertEqual(self.db[digest], self.record_as_str())
    def test_reorganize_fresh(self):
        """Test GdbmDBHandle.start_reorganizing with newer records"""
        # A 3-day max_age keeps the 2-day-old record.
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        self.db[digest] = self.record_as_str()
        handle = self.handler(None, None, max_age=3600 * 24 * 3)
        self.assertEqual(self.db[digest], self.record_as_str())
class ThreadingGdbmTest(GdbmTest):
    """Run the GdbmTest suite against the ThreadedGdbmDBHandle variant."""
    handler = pyzor.engines.gdbm_.ThreadedGdbmDBHandle
def suite():
    """Gather all the tests from this module in a test suite."""
    combined = unittest.TestSuite()
    for case in (GdbmTest, ThreadingGdbmTest):
        combined.addTest(unittest.makeSuite(case))
    return combined


if __name__ == '__main__':
    unittest.main()
| 5,304 | 29.843023 | 94 | py |
pyzor | pyzor-master/tests/unit/test_engines/test_redis_v0.py | """Test the pyzor.engines.gdbm_ module."""
import unittest
from datetime import datetime
import pyzor.engines.redis_v0
import pyzor.engines.common
class EncodingRedisTest(unittest.TestCase):
    """Test RedisDBHandle (v0) record encoding/decoding, which uses a
    single comma-separated string rather than a redis hash."""
    r_count = 24
    wl_count = 42
    entered = datetime(2014, 4, 23, 15, 41, 30)
    updated = datetime(2014, 4, 25, 17, 22, 25)
    wl_entered = datetime(2014, 2, 12, 11, 10, 55)
    wl_updated = datetime(2014, 3, 25, 5, 1, 50)
    def setUp(self):
        unittest.TestCase.setUp(self)
        self.record = pyzor.engines.common.Record(self.r_count, self.wl_count,
                                                  self.entered, self.updated,
                                                  self.wl_entered, self.wl_updated)
    def compare_records(self, r1, r2):
        # Field-by-field Record comparison (Record has no __eq__ here).
        attrs = ("r_count", "r_entered", "r_updated",
                 "wl_count", "wl_entered", "wl_updated")
        self.assertTrue(all(getattr(r1, attr) == getattr(r2, attr)
                            for attr in attrs))
    def tearDown(self):
        unittest.TestCase.tearDown(self)
    def test_encode_record(self):
        expected = ("24,2014-04-23 15:41:30,2014-04-25 17:22:25,"
                    "42,2014-02-12 11:10:55,2014-03-25 05:01:50").encode()
        result = pyzor.engines.redis_v0.RedisDBHandle._encode_record(self.record)
        self.assertEqual(result, expected)
    def test_encode_record_no_date(self):
        # Missing timestamps encode as empty fields.
        expected = ("24,2014-04-23 15:41:30,,"
                    "42,2014-02-12 11:10:55,2014-03-25 05:01:50").encode()
        self.record.r_updated = None
        result = pyzor.engines.redis_v0.RedisDBHandle._encode_record(self.record)
        self.assertEqual(result, expected)
    def test_encode_record_no_white(self):
        expected = ("24,2014-04-23 15:41:30,2014-04-25 17:22:25,"
                    "0,,").encode()
        self.record.wl_count = 0
        self.record.wl_entered = None
        self.record.wl_updated = None
        result = pyzor.engines.redis_v0.RedisDBHandle._encode_record(self.record)
        self.assertEqual(result, expected)
    def test_decode_record(self):
        encoded = ("24,2014-04-23 15:41:30,2014-04-25 17:22:25,"
                   "42,2014-02-12 11:10:55,2014-03-25 05:01:50").encode()
        result = pyzor.engines.redis_v0.RedisDBHandle._decode_record(encoded)
        self.compare_records(result, self.record)
    def test_decode_record_no_date(self):
        # Empty timestamp fields decode back to None.
        encoded = ("24,2014-04-23 15:41:30,,"
                   "42,2014-02-12 11:10:55,2014-03-25 05:01:50").encode()
        result = pyzor.engines.redis_v0.RedisDBHandle._decode_record(encoded)
        self.record.r_updated = None
        self.compare_records(result, self.record)
    def test_decode_record_no_white(self):
        encoded = ("24,2014-04-23 15:41:30,2014-04-25 17:22:25,"
                   "0,,").encode()
        result = pyzor.engines.redis_v0.RedisDBHandle._decode_record(encoded)
        self.record.wl_count = 0
        self.record.wl_entered = None
        self.record.wl_updated = None
        self.compare_records(result, self.record)
def make_MockRedis(commands):
    """Build a fake ``redis`` module object.

    Every client operation appends a ``(name, args, kwargs)`` tuple to
    *commands*; ``keys`` is a generator that records its call lazily and
    yields one canned digest key.
    """
    class MockStrictRedis():
        def _record(self, name, args, kwargs):
            commands.append((name, args, kwargs))

        def __init__(self, *args, **kwargs):
            self._record("init", args, kwargs)

        def set(self, *args, **kwargs):
            self._record("set", args, kwargs)

        def setex(self, *args, **kwargs):
            self._record("setex", args, kwargs)

        def get(self, *args, **kwargs):
            self._record("get", args, kwargs)

        def delete(self, *args, **kwargs):
            self._record("delete", args, kwargs)

        def keys(self, *args, **kwargs):
            self._record("keys", args, kwargs)
            yield "pyzord.digest.2aedaac999d71421c9ee49b9d81f627a7bc570aa"

    class MockError(Exception):
        pass

    class exceptions():
        RedisError = MockError

    class MockRedis():
        def __init__(self):
            self.StrictRedis = MockStrictRedis
            self.exceptions = exceptions()

    return MockRedis()
def mock_encode_record(self, record):
    """Identity stand-in for RedisDBHandle._encode_record.

    A proper def (instead of a lambda assignment, PEP 8 / E731) so the
    function has a useful name in tracebacks.
    """
    return record


def mock_decode_record(self, record):
    """Identity stand-in for RedisDBHandle._decode_record."""
    return record
class RedisTest(unittest.TestCase):
    """Verify RedisDBHandle (v0) issues the expected redis commands; the
    redis client library and record (en|de)coding are mocked out."""
    max_age = 60 * 60
    def setUp(self):
        unittest.TestCase.setUp(self)
        self.commands = []
        # Swap the redis module for the recording fake and make
        # encode/decode identities so raw call arguments can be asserted.
        try:
            self.real_redis = pyzor.engines.redis_v0.redis
        except AttributeError:
            self.real_redis = None
        self.real_encode = pyzor.engines.redis_v0.RedisDBHandle._encode_record
        self.real_decode = pyzor.engines.redis_v0.RedisDBHandle._decode_record
        setattr(pyzor.engines.redis_v0, "redis", make_MockRedis(self.commands))
        pyzor.engines.redis_v0.RedisDBHandle._encode_record = mock_encode_record
        pyzor.engines.redis_v0.RedisDBHandle._decode_record = mock_decode_record
    def tearDown(self):
        unittest.TestCase.tearDown(self)
        # Undo all the module/class patching done in setUp.
        pyzor.engines.redis_v0.redis = self.real_redis
        pyzor.engines.redis_v0.RedisDBHandle._encode_record = self.real_encode
        pyzor.engines.redis_v0.RedisDBHandle._decode_record = self.real_decode
    def test_init(self):
        # The DSN string "host,port,password,db" maps onto client kwargs.
        expected = {"host": "example.com",
                    "port": 6387,
                    "password": "passwd",
                    "db": 5,
                    }
        db = pyzor.engines.redis_v0.RedisDBHandle("example.com,6387,passwd,5",
                                                  None)
        self.assertEqual(self.commands[0], ("init", (), expected))
    def test_init_defaults(self):
        expected = {"host": "localhost",
                    "port": 6379,
                    "password": None,
                    "db": 0,
                    }
        db = pyzor.engines.redis_v0.RedisDBHandle(",,,", None)
        self.assertEqual(self.commands[0], ("init", (), expected))
    def test_set(self):
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        value = "record test"
        db = pyzor.engines.redis_v0.RedisDBHandle(",,,", None)
        db[digest] = value
        expected = ("pyzord.digest.%s" % digest, value)
        self.assertEqual(self.commands[1], ("set", expected, {}))
    def test_set_max_age(self):
        # With max_age set, SETEX must be used instead of SET.
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        value = "record test"
        db = pyzor.engines.redis_v0.RedisDBHandle(",,,", None,
                                                  max_age=self.max_age)
        db[digest] = value
        expected = ("pyzord.digest.%s" % digest, self.max_age, value)
        self.assertEqual(self.commands[1], ("setex", expected, {}))
    def test_get(self):
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        db = pyzor.engines.redis_v0.RedisDBHandle(",,,", None)
        result = db[digest]
        expected = ("pyzord.digest.%s" % digest,)
        self.assertEqual(self.commands[1], ("get", expected, {}))
    def test_items(self):
        # items() scans the key namespace, then fetches each key.
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        db = pyzor.engines.redis_v0.RedisDBHandle(",,,", None)
        db.items()[0]
        expected = ("pyzord.digest.%s" % digest,)
        self.assertEqual(self.commands[1], ("keys", ("pyzord.digest.*",), {}))
        self.assertEqual(self.commands[2], ("get", expected, {}))
    def test_delete(self):
        digest = "2aedaac999d71421c9ee49b9d81f627a7bc570aa"
        db = pyzor.engines.redis_v0.RedisDBHandle(",,,", None)
        del db[digest]
        expected = ("pyzord.digest.%s" % digest,)
        self.assertEqual(self.commands[1], ("delete", expected, {}))
def suite():
    """Gather all the tests from this module in a test suite."""
    combined = unittest.TestSuite()
    for case in (EncodingRedisTest, RedisTest):
        combined.addTest(unittest.makeSuite(case))
    return combined


if __name__ == '__main__':
    unittest.main()
| 7,898 | 36.084507 | 83 | py |
pyzor | pyzor-master/tests/unit/test_engines/__init__.py | """A suite of unit tests that verifies the correct behaviour of various
functions/methods in the pyzord code.
Note these tests the source of pyzor, not the version currently installed.
"""
import unittest
def suite():
    """Gather all the tests from this package in a test suite."""
    # BUG FIX: Python 2 implicit relative imports ("import test_gdbm")
    # fail on Python 3; explicit relative imports work on both.
    from . import test_gdbm
    from . import test_mysql
    from . import test_redis
    from . import test_redis_v0

    test_suite = unittest.TestSuite()
    test_suite.addTest(test_gdbm.suite())
    test_suite.addTest(test_mysql.suite())
    test_suite.addTest(test_redis.suite())
    test_suite.addTest(test_redis_v0.suite())
    return test_suite


if __name__ == '__main__':
    # Note: relative imports require running this as a package module
    # (python -m ...), not as a standalone script.
    unittest.main(defaultTest='suite')
| 682 | 23.392857 | 74 | py |
pyzor | pyzor-master/tests/util/__init__.py | """This package contains various utilities use in the pyzor tests."""
import os
import sys
import time
import redis
import shutil
import unittest
import subprocess
from datetime import datetime
try:
from unittest.mock import mock_open as _mock_open
except ImportError:
from mock import mock_open as _mock_open
import pyzor.client
def mock_open(mock=None, read_data=""):
    """Like unittest.mock.mock_open, but the returned handle is also
    iterable, yielding read_data split into lines (without newlines)."""
    patched = _mock_open(mock, read_data)

    def _iter_lines(handle):
        return iter(read_data.splitlines())

    patched.return_value.__iter__ = _iter_lines
    return patched
msg = """Newsgroups:
Date: Wed, 10 Apr 2002 22:23:51 -0400 (EDT)
From: Frank Tobin <ftobin@neverending.org>
Fcc: sent-mail
Message-ID: <20020410222350.E16178@palanthas.neverending.org>
X-Our-Headers: X-Bogus,Anon-To
X-Bogus: aaron7@neverending.org
MIME-Version: 1.0
Content-Type: TEXT/PLAIN; charset=US-ASCII
Test Email
"""
digest = "7421216f915a87e02da034cc483f5c876e1a1338"
_dt_decode = lambda x: None if x == 'None' else datetime.strptime(x, "%a %b %d %H:%M:%S %Y")
class PyzorTestBase(unittest.TestCase):
    """Test base that starts the pyzord daemon in setUpClass with specified
    arguments. The daemon is killed in tearDownClass. This also create the
    necessary files and the homedir.
    """
    # Popen handles of the running pyzord daemons (set in setUpClass).
    pyzord = None
    # Maps class attribute names to the pyzord command-line option that
    # carries their value when the daemon is launched.
    _args = {"homedir": "--homedir",
             "engine": "-e",
             "dsn": "--dsn",
             "address": "-a",
             "port": "-p",
             "threads": "--threads",
             "max_threads": "--max-threads",
             "processes": "--processes",
             "max_processes": "--max-processes",
             "db_connections": "--db-connections",
             "password_file": "--password-file",
             "access_file": "--access-file",
             "cleanup_age": "--cleanup-age",
             "log_file": "--log-file",
             "detach": "--detach",
             "prefork": "--pre-fork",
             }
    # Default daemon configuration: redis engine on database 10.
    homedir = "./pyzor-test/"
    threads = "False"
    access_file = "pyzord.access"
    password_file = "pyzord.passwd"
    log_file = "pyzord-test.log"
    dsn = "localhost,,,10"
    engine = "redis"
    # Contents written into the homedir config files by setUpClass.
    access = """check report ping pong info whitelist : alice : deny
check report ping pong info whitelist : bob : allow
ALL : dan : allow
pong info whitelist : dan : deny
"""
    passwd = """alice : fc7f1cad729b5f3862b2ef192e2d9e0d0d4bd515
bob : cf88277c5d4abdc0a3f56f416011966d04a3f462
dan : c1a50281fc43e860fe78c16c73b9618ada59f959
"""
    servers = """127.0.0.1:9999
"""
    accounts_alice = """127.0.0.1 : 9999 : alice : d28f86151e80a9accba4a4eba81c460532384cd6,fc7f1cad729b5f3862b2ef192e2d9e0d0d4bd515
"""
    accounts_bob = """127.0.0.1 : 9999 : bob : de6ef568787256bf5f55909dc0c398e49b5c9808,cf88277c5d4abdc0a3f56f416011966d04a3f462
"""
    accounts_chuck = """127.0.0.1 : 9999 : bob : de6ef568787256bf5f55909dc0c398e49b5c9808,af88277c5d4abdc0a3f56f416011966d04a3f462
"""
    accounts_dan = """127.0.0.1 : 9999 : dan : 1cc2efa77d8833d83556e0cc4fa617c64eebc7fb,c1a50281fc43e860fe78c16c73b9618ada59f959
"""
    @classmethod
    def write_homedir_file(cls, name, content):
        # Write a config file into the test home directory; silently
        # skip when either the name or the content is empty.
        if not name or not content:
            return
        with open(os.path.join(cls.homedir, name), "w") as f:
            f.write(content)
@classmethod
def setUpClass(cls):
super(PyzorTestBase, cls).setUpClass()
try:
os.mkdir(cls.homedir)
except OSError:
pass
cls.write_homedir_file(cls.access_file, cls.access)
cls.write_homedir_file(cls.password_file, cls.passwd)
cls.write_homedir_file(cls.password_file, cls.passwd)
cls.write_homedir_file("servers", cls.servers)
cls.write_homedir_file("alice", cls.accounts_alice)
cls.write_homedir_file("bob", cls.accounts_bob)
cls.write_homedir_file("chuck", cls.accounts_chuck)
cls.write_homedir_file("dan", cls.accounts_dan)
args = ["pyzord"]
for key, value in cls._args.items():
option = getattr(cls, key, None)
if option:
args.append(value)
args.append(option)
cls.pyzord = []
for line in cls.servers.splitlines():
line = line.strip()
if not line:
continue
addr, port = line.rsplit(":", 1)
cls.pyzord.append(subprocess.Popen(args + ["-a", addr, "-p", port]))
time.sleep(1) # allow time to initialize server
    def setUp(self):
        """Reset the default pyzor client command-line options.

        Keys map directly to pyzor script flags; ``None`` values are
        skipped when the command line is assembled in check_pyzor().
        """
        unittest.TestCase.setUp(self)
        self.client_args = {"--homedir": self.homedir,
                            "--servers-file": "servers",
                            "-t": None,  # timeout
                            "-r": None,  # report threshold
                            "-w": None,  # whitelist threshold
                            "-s": None,  # style
                            }
    def tearDown(self):
        """Per-test teardown; no extra cleanup beyond the base class."""
        unittest.TestCase.tearDown(self)
    @classmethod
    def tearDownClass(cls):
        """Stop all started pyzord processes and wipe test state."""
        super(PyzorTestBase, cls).tearDownClass()
        for pyzord in cls.pyzord:
            pyzord.terminate()
            pyzord.wait()
        # Remove the test home directory (ignore errors) and clear the
        # redis test database used by the servers.
        shutil.rmtree(cls.homedir, True)
        redis.StrictRedis(db=10).flushdb()
def check_pyzor(self, cmd, user, input=None,
code=None, exit_code=None, counts=()):
"""Call the pyzor client with the specified args from self.client_args
and verifies the response.
"""
args = ["pyzor"]
if user:
args.append("--accounts-file")
args.append(user)
for key, value in self.client_args.items():
if value:
args.append(key)
args.append(value)
args.append(cmd)
pyzor = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if input:
stdout, stderr = pyzor.communicate(input.encode("utf8"))
else:
stdout, stderr = pyzor.communicate()
if stderr:
self.fail(stderr)
if code is not None:
try:
stdout = stdout.decode("utf8")
results = stdout.strip().split("\t")
status = eval(results[1])
except Exception as e:
self.fail("Parsing error: %s of %r" % (e, stdout))
self.assertEqual(status[0], code, status)
if counts:
self.assertEqual(counts, (int(results[2]), int(results[3])))
if exit_code is not None:
self.assertEqual(exit_code, pyzor.returncode)
return stdout
def check_pyzor_multiple(self, cmd, user, input=None,
code=None, exit_code=None, counts=()):
"""Call the pyzor client with the specified args from self.client_args
and verifies the response.
"""
args = ["pyzor"]
if user:
args.append("--accounts-file")
args.append(user)
for key, value in self.client_args.items():
if value:
args.append(key)
args.append(value)
args.append(cmd)
pyzor = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if input:
stdout, stderr = pyzor.communicate(input.encode("utf8"))
else:
stdout, stderr = pyzor.communicate()
if stderr:
self.fail(stderr)
stdout = stdout.decode("utf8")
for i, line in enumerate(stdout.splitlines()):
try:
line = line.strip()
if not line:
continue
results = line.strip().split("\t")
except Exception as e:
self.fail("Parsing error: %s of %r" % (e, stdout))
if code is not None:
try:
status = eval(results[1])
except Exception as e:
self.fail("Parsing error: %s of %r" % (e, stdout))
self.assertEqual(status[0], code[i], status)
if counts:
self.assertEqual((int(results[2]), int(results[3])),
counts[i])
if exit_code is not None:
self.assertEqual(exit_code, pyzor.returncode)
return stdout
    def check_digest(self, digest, address, counts=(0, 0)):
        """Query *address* for *digest* via the pyzor client library and
        assert the (report count, whitelist count) pair equals *counts*.

        Uses ``self.client``, which subclasses set up (e.g. a BatchClient).
        """
        result = self.client.check(digest, address)
        self.assertEqual((int(result["Count"]), int(result["WL-Count"])),
                         counts)
        return result
def get_record(self, input, user="bob"):
"""Uses `pyzor info` to get the record data."""
stdout = self.check_pyzor("info", user, input, code=200, exit_code=0)
info = stdout.splitlines()[1:]
record = {}
try:
for line in info:
line = line.strip()
if not line:
continue
key, value = line.split(":", 1)
record[key.strip()] = value.strip()
except Exception as e:
self.fail("Error parsing %r: %s" % (info, e))
return record
    def check_fuzzy_date(self, date1, date2=None, seconds=10):
        """Check if the given date is almost equal to now.

        ``date1`` is a server-formatted date string; ``_dt_decode`` (defined
        earlier in this module) parses it into a datetime.  Fails if the two
        dates differ by more than *seconds*.
        """
        date1 = _dt_decode(date1)
        if not date2:
            date2 = datetime.now()
        delta = abs((date2 - date1).total_seconds())
        if delta > seconds:
            self.fail("Delta %s is too big: %s, %s" % (delta , date1, date2))
class PyzorTest(object):
    """MixIn class for PyzorTestBase that performs a series of basic tests.

    Each test drives the real pyzor client against the test servers and
    checks the protocol code, exit code and (report, whitelist) counts.
    """

    def test_ping(self):
        self.check_pyzor("ping", "bob")

    def test_pong(self):
        # pong always answers with the maximum possible report count.
        input = "Test1 pong1 Test2"
        self.check_pyzor("pong", "bob", input=input, code=200, exit_code=0,
                         counts=(sys.maxsize, 0))

    def test_check(self):
        # An unseen digest has a zero report count (exit code 1 = not spam).
        input = "Test1 check1 Test2"
        self.check_pyzor("check", "bob", input=input, code=200, exit_code=1,
                         counts=(0, 0))
        r = self.get_record(input)
        self.assertEqual(r["Count"], "0")

    def test_report(self):
        input = "Test1 report1 Test2"
        self.check_pyzor("report", "bob", input=input, code=200, exit_code=0)
        self.check_pyzor("check", "bob", input=input, code=200, exit_code=0,
                         counts=(1, 0))
        r = self.get_record(input)
        self.assertEqual(r["Count"], "1")
        self.check_fuzzy_date(r["Entered"])

    def test_report_update(self):
        # A second report bumps the count and moves the Updated timestamp.
        input = "Test1 report update1 Test2"
        self.check_pyzor("report", "bob", input=input, code=200, exit_code=0)
        self.check_pyzor("check", "bob", input=input, code=200, exit_code=0,
                         counts=(1, 0))
        time.sleep(1)  # ensure Entered and Updated differ
        self.check_pyzor("report", "bob", input=input, code=200, exit_code=0)
        self.check_pyzor("check", "bob", input=input, code=200, exit_code=0,
                         counts=(2, 0))
        r = self.get_record(input)
        self.assertEqual(r["Count"], "2")
        self.assertNotEqual(r["Entered"], r["Updated"])
        self.check_fuzzy_date(r["Updated"])

    def test_whitelist(self):
        input = "Test1 white list1 Test2"
        self.check_pyzor("whitelist", "bob", input=input, code=200, exit_code=0)
        self.check_pyzor("check", "bob", input=input, code=200, exit_code=1,
                         counts=(0, 1))
        r = self.get_record(input)
        self.assertEqual(r["WL-Count"], "1")
        self.check_fuzzy_date(r["WL-Entered"])

    def test_whitelist_update(self):
        input = "Test1 white list update1 Test2"
        self.check_pyzor("whitelist", "bob", input=input, code=200, exit_code=0)
        self.check_pyzor("check", "bob", input=input, code=200, exit_code=1,
                         counts=(0, 1))
        time.sleep(1)  # ensure WL-Entered and WL-Updated differ
        self.check_pyzor("whitelist", "bob", input=input, code=200, exit_code=0)
        self.check_pyzor("check", "bob", input=input, code=200, exit_code=1,
                         counts=(0, 2))
        r = self.get_record(input)
        self.assertEqual(r["WL-Count"], "2")
        self.assertNotEqual(r["WL-Entered"], r["WL-Updated"])
        self.check_fuzzy_date(r["WL-Updated"])

    def test_report_whitelist(self):
        # Report and whitelist counts are tracked independently.
        input = "Test1 white list report1 Test2"
        self.check_pyzor("whitelist", "bob", input=input, code=200, exit_code=0)
        self.check_pyzor("report", "bob", input=input, code=200, exit_code=0)
        self.check_pyzor("check", "bob", input=input, code=200, exit_code=1,
                         counts=(1, 1))
        r = self.get_record(input)
        self.assertEqual(r["Count"], "1")
        self.check_fuzzy_date(r["Entered"])
        self.assertEqual(r["WL-Count"], "1")
        self.check_fuzzy_date(r["WL-Entered"])

    def test_report_whitelist_update(self):
        input = "Test1 white list report update1 Test2"
        self.check_pyzor("whitelist", "bob", input=input, code=200, exit_code=0)
        self.check_pyzor("report", "bob", input=input, code=200, exit_code=0)
        self.check_pyzor("check", "bob", input=input, code=200, exit_code=1,
                         counts=(1, 1))
        time.sleep(1)  # ensure the *Entered and *Updated timestamps differ
        self.check_pyzor("whitelist", "bob", input=input, code=200, exit_code=0)
        self.check_pyzor("report", "bob", input=input, code=200, exit_code=0)
        self.check_pyzor("check", "bob", input=input, code=200, exit_code=1,
                         counts=(2, 2))
        r = self.get_record(input)
        self.assertEqual(r["Count"], "2")
        self.assertNotEqual(r["Entered"], r["Updated"])
        self.check_fuzzy_date(r["Updated"])
        self.assertEqual(r["WL-Count"], "2")
        self.assertNotEqual(r["WL-Entered"], r["WL-Updated"])
        self.check_fuzzy_date(r["WL-Updated"])
| 14,111 | 36.73262 | 132 | py |
pyzor | pyzor-master/tests/benchmark/__init__.py | 0 | 0 | 0 | py | |
pyzor | pyzor-master/tests/benchmark/measure_server_response.py | from __future__ import division
import json
import Queue
import timeit
import optparse
import threading
import collections
DIGEST = "da39a3ee5e6b4b0d3255bfef95601890afd80709"
SETUP = """
import pyzor
import string
import random
import hashlib
import pyzor.client
digest = "".join(random.choice(string.letters) for _ in range(50))
digest = hashlib.sha1(digest).hexdigest()
client = pyzor.client.Client(timeout=%f)
"""
CMD = """
try:
client.%s(digest, address=(%r, %s))
except pyzor.TimeoutError:
pass
"""
ALL_METHODS = ("pong", "check", "report", "info", "whitelist")
def measure_method(method, repeats, timeout, server, queue):
    """Time *repeats* single calls of *method* against *server*.

    Puts a (method, per-call timings, timeout count) tuple on *queue*.
    A run that took at least *timeout* seconds is counted as a timeout.
    """
    setup = SETUP % timeout
    cmd = CMD % ((method,) + server)
    results = timeit.repeat(stmt=cmd, setup=setup, repeat=repeats, number=1)
    timeouts = sum(1 for result in results if result >= timeout)
    queue.put((method, results, timeouts))
def measure_methods(methods, repeats, timeout, server, queue):
    """Benchmark every requested method concurrently, one thread each.

    *methods* is either the literal "all" or a comma-separated list.
    """
    selected = ALL_METHODS if methods == "all" else methods.split(",")
    workers = [threading.Thread(target=measure_method,
                                args=(name, repeats, timeout, server, queue))
               for name in selected]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def json_handler(res):
    """Print the aggregated benchmark results as indented JSON.

    *res* maps method name -> {"results": [[timings...], ...],
    "timeouts": int}.  For each run the per-call timings are sorted in
    place; the run's average and 3 best times are reported, plus overall
    totals per method.
    """
    fres = {}
    for method, result in res.items():
        fres[method] = {"runs": [],
                        "timeouts": result["timeouts"],
                        "totals": {}}
        total = 0
        best_all = []
        for i, results in enumerate(result["results"]):
            results.sort()
            average = sum(results) / len(results)
            best = results[:3]
            total += average
            best_all.extend(best)
            fres[method]["runs"].append({"average": average,
                                         "best": best})
        fres[method]["totals"]["average"] = total / len(result["results"])
        # NOTE(review): this takes the first three collected "best" values,
        # not the three smallest overall — presumably intentional; confirm.
        fres[method]["totals"]["best"] = best_all[:3]
    # print() with a single argument behaves identically on Python 2 and 3;
    # the original "print expr" statement was Python-2 only.
    print(json.dumps(fres, indent=4))
def print_handler(res):
    """Print the aggregated benchmark results in a human-readable layout.

    Same aggregation as json_handler: per-run average and 3 best times,
    then per-method totals.  Uses single-argument print() calls, which
    behave identically on Python 2 and 3 (the original used Python-2-only
    print statements).
    """
    for method, result in res.items():
        print("=" * 80)
        print("Method: %s" % method)
        print("Timeouts: %s" % result["timeouts"])
        print("=" * 80)
        total = 0
        best_all = []
        for i, results in enumerate(result["results"]):
            results.sort()
            average = sum(results) / len(results)
            best = results[:3]
            total += average
            best_all.extend(best)
            print("\t(%s) %s %s" % (i, average, best))
        print("=" * 80)
        print("Total: %s %s" % (total / len(result["results"]), best_all[:3]))
        print("\n")
def main():
    """Parse command-line options, run the benchmark threads and report.

    NOTE: this module is Python 2 only (it uses the ``Queue`` module and
    ``optparse``).  The ``-n/--nice`` option is parsed but not used here.
    """
    opt = optparse.OptionParser()
    opt.add_option("-n", "--nice", dest="nice", type="int",
                   help="'nice' level", default=0)
    opt.add_option("-s", "--server", dest="server", default="127.0.0.1:24441")
    opt.add_option("-m", "--method", dest="method", default="all")
    opt.add_option("-f", "--format", dest="format", default="print")
    opt.add_option("-t", "--threads", dest="threads", type="int", default=1)
    opt.add_option("--timeout", dest="timeout", type="float", default=5.0)
    opt.add_option("-r", "--repeats", dest="repeats", type="int", default=1000)
    options, args = opt.parse_args()
    server = tuple(options.server.rsplit(":", 1))
    queue = Queue.Queue()
    # Each thread runs the full method set; results funnel into one queue.
    threads = []
    for dummy in range(options.threads):
        thread = threading.Thread(target=measure_methods,
                                  args=(options.method, options.repeats,
                                        options.timeout, server, queue))
        threads.append(thread)
        thread.start()
    for thread in threads:
        thread.join()

    def get_new_info():
        # defaultdict factory: empty aggregate for a newly-seen method.
        return {"results": [],
                "timeouts": 0}
    res = collections.defaultdict(get_new_info)
    # Drain the queue, merging per-thread results by method name.
    while True:
        try:
            method, results, timeouts = queue.get_nowait()
            res[method]["results"].append(results)
            res[method]["timeouts"] += timeouts
        except Queue.Empty:
            break
    # Dispatch to print_handler or json_handler based on --format.
    globals()["%s_handler" % options.format](res)


if __name__ == '__main__':
    main()
| 4,339 | 27.552632 | 79 | py |
pyzor | pyzor-master/tests/functional/test_forwarder.py | import os
import time
import shutil
import unittest
import subprocess
import redis
class ForwardSetup(object):
    """Setup forwarding client and 'remote' pyzord.

    Holds a home directory (created on construction) and offers a small
    helper for writing configuration files into it.
    """

    def __init__(self, homedir):
        self.homedir = homedir
        try:
            os.mkdir(homedir)
        except OSError:
            # Directory already exists, e.g. left over from a prior run.
            pass

    def write_homedir_file(self, name, content):
        """Write *content* to *name* under the home directory.

        A falsy name or content makes this a no-op.
        """
        if name and content:
            target = os.path.join(self.homedir, name)
            with open(target, "w") as handle:
                handle.write(content)
@unittest.skip("This fails randomly on PyPy.")
class ForwarderTest(unittest.TestCase):
    """End-to-end test of pyzord's report forwarding.

    Starts a local pyzord (redis DB 10) configured to forward to a
    'remote' pyzord (redis DB 9) and checks that reports made locally
    appear on the remote server.
    """

    def setUp(self):
        self.localserver = ForwardSetup('./pyzor-test-forwardserver')  # we also use this dir for the local client
        self.localserver.write_homedir_file('servers', '127.0.0.1:9999\n')
        self.fwdclient = ForwardSetup('./pyzor-test-forwardingclient')
        self.fwdclient.write_homedir_file('servers', '127.0.0.1:9998\n')
        # Local server on :9999, forwarding via the fwdclient config.
        args = ["pyzord", "--homedir", self.localserver.homedir, '-e', 'redis', '--dsn', 'localhost,,,10', '-a', '127.0.0.1', '-p', '9999', '--forward-client-homedir', self.fwdclient.homedir]
        self.local_pyzord_proc = subprocess.Popen(args)
        # "Remote" server on :9998 with its own redis database.
        self.remoteserver = ForwardSetup('./pyzor-test-remoteserver')
        args = ["pyzord", "--homedir", self.remoteserver.homedir, '-e', 'redis', '--dsn', 'localhost,,,9', '-a', '127.0.0.1', '-p', '9998']
        self.remote_pyzord_proc = subprocess.Popen(args)
        time.sleep(0.3)

    def test_forward_report(self):
        # submit hash to local server
        for i in range(10):
            self.check_pyzor("report", self.localserver.homedir)
        # make sure the local submission worked
        self.check_pyzor("check", self.localserver.homedir, counts=(10, 0))
        # now use the forwarding client's config to check forwarded submission
        time.sleep(1)
        self.check_pyzor("check", self.fwdclient.homedir, counts=(10, 0))
        # submit the hash to the remote system, the count should go up
        self.check_pyzor("report", self.fwdclient.homedir)
        self.check_pyzor("check", self.fwdclient.homedir, counts=(11, 0))
        # switch back to our local server, the count should still be the old value
        self.check_pyzor("check", self.localserver.homedir, counts=(10, 0))

    def tearDown(self):
        # Kill both daemons and wipe all on-disk and redis test state.
        if self.remote_pyzord_proc is not None:
            self.remote_pyzord_proc.kill()
        if self.local_pyzord_proc is not None:
            self.local_pyzord_proc.kill()
        shutil.rmtree(self.localserver.homedir, True)
        shutil.rmtree(self.fwdclient.homedir, True)
        shutil.rmtree(self.remoteserver.homedir, True)
        redis.StrictRedis(db=9).flushdb()
        redis.StrictRedis(db=10).flushdb()

    def check_pyzor(self, cmd, homedir, counts=None, msg=None):
        """simplified check_pyzor version from PyzorTestBase

        NOTE(review): the *msg* parameter is immediately overwritten
        below, so callers cannot supply their own message.
        """
        msg = "This is a test message for the forwading feature"
        args = ["pyzor", '--homedir', homedir, cmd]
        pyzor = subprocess.Popen(args,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        stdout, stderr = pyzor.communicate(msg.encode("utf8"))
        if stderr:
            self.fail(stderr)
        try:
            stdout = stdout.decode("utf8")
            results = stdout.strip().split("\t")
            status = eval(results[1])
        except Exception as e:
            self.fail("Parsing error: %s of %r" % (e, stdout))
        self.assertEqual(status[0], 200, status)
        if counts:
            self.assertEqual(counts, (int(results[2]), int(results[3])))
        return stdout
def suite():
    """Gather all the tests from this module in a test suite."""
    test_suite = unittest.TestSuite()
    # unittest.makeSuite() is deprecated (removed in Python 3.13); use
    # the TestLoader API, which builds the same suite.
    test_suite.addTest(
        unittest.TestLoader().loadTestsFromTestCase(ForwarderTest))
    return test_suite


if __name__ == '__main__':
    unittest.main()
| 4,002 | 35.390909 | 191 | py |
pyzor | pyzor-master/tests/functional/test_account.py | import unittest
from tests.util import *
class AccountPyzorTest(PyzorTestBase):
    """Exercise the server ACLs with the accounts set up in PyzorTestBase:
    bob (full access), alice (denied everything), chuck (bob's name with a
    wrong key), dan (mixed permissions) and anonymous (not configured)."""

    # test bob which has access to everything
    def test_ping(self):
        self.check_pyzor("ping", "bob", code=200, exit_code=0)

    def test_pong(self):
        self.check_pyzor("pong", "bob", input=msg, code=200, exit_code=0)

    def test_check(self):
        self.check_pyzor("check", "bob", input=msg, code=200)

    def test_report(self):
        self.check_pyzor("report", "bob", input=msg, code=200, exit_code=0)

    def test_whitelist(self):
        self.check_pyzor("whitelist", "bob", input=msg, code=200, exit_code=0)

    def test_info(self):
        self.check_pyzor("info", "bob", input=msg, code=200, exit_code=0)

    # test alice which does not has access to anything
    # Error should be 403 Forbidden
    def test_ping_forbidden(self):
        self.check_pyzor("ping", "alice", code=403, exit_code=1)

    def test_pong_forbidden(self):
        self.check_pyzor("pong", "alice", input=msg, code=403, exit_code=1)

    def test_check_forbidden(self):
        self.check_pyzor("check", "alice", input=msg, code=403, exit_code=1)

    def test_report_forbidden(self):
        self.check_pyzor("report", "alice", input=msg, code=403, exit_code=1)

    def test_whitelist_forbidden(self):
        self.check_pyzor("whitelist", "alice", input=msg, code=403, exit_code=1)

    def test_info_forbidden(self):
        self.check_pyzor("info", "alice", input=msg, code=403, exit_code=1)

    # test chuck which does tries to steal bob's account but has the wrong key
    # Error should be 401 Unauthorized
    def test_ping_unauthorized(self):
        self.check_pyzor("ping", "chuck", code=401, exit_code=1)

    def test_pong_unauthorized(self):
        self.check_pyzor("pong", "chuck", input=msg, code=401, exit_code=1)

    def test_check_unauthorized(self):
        self.check_pyzor("check", "chuck", input=msg, code=401, exit_code=1)

    def test_report_unauthorized(self):
        self.check_pyzor("report", "chuck", input=msg, code=401, exit_code=1)

    def test_whitelist_unauthorized(self):
        self.check_pyzor("whitelist", "chuck", input=msg, code=401, exit_code=1)

    def test_info_unauthorized(self):
        self.check_pyzor("info", "chuck", input=msg, code=401, exit_code=1)

    # test dan account, which has some access
    def test_ping_combo(self):
        self.check_pyzor("ping", "dan", code=200, exit_code=0)

    def test_pong_combo(self):
        self.check_pyzor("pong", "dan", input=msg, code=403, exit_code=1)

    def test_check_combo(self):
        self.check_pyzor("check", "dan", input=msg, code=200)

    def test_report_combo(self):
        self.check_pyzor("report", "dan", input=msg, code=200, exit_code=0)

    def test_whitelist_combo(self):
        self.check_pyzor("whitelist", "dan", input=msg, code=403, exit_code=1)

    def test_info_combo(self):
        self.check_pyzor("info", "dan", input=msg, code=403, exit_code=1)

    # test anonymous account, which should is not currently set up in the server
    def test_ping_anonymous(self):
        self.check_pyzor("ping", None, code=403, exit_code=1)

    def test_pong_anonymous(self):
        self.check_pyzor("pong", None, input=msg, code=403, exit_code=1)

    def test_check_anonymous(self):
        self.check_pyzor("check", None, input=msg, code=403, exit_code=1)

    def test_report_anonymous(self):
        self.check_pyzor("report", None, input=msg, code=403, exit_code=1)

    def test_whitelist_anonymous(self):
        self.check_pyzor("whitelist", None, input=msg, code=403, exit_code=1)

    def test_info_anonymous(self):
        self.check_pyzor("info", None, input=msg, code=403, exit_code=1)
class AnonymousPyzorTest(PyzorTestBase):
    """Test accounts with no access or password file set-up. And test
    anonymous default access.
    """
    # Overriding both to None means the server falls back to its
    # default (anonymous) access policy.
    access_file = None
    password_file = None

    def test_ping(self):
        self.check_pyzor("ping", None, code=200, exit_code=0)

    def test_pong(self):
        self.check_pyzor("pong", None, input=msg, code=200, exit_code=0)

    def test_check(self):
        self.check_pyzor("check", None, input=msg, code=200)

    def test_report(self):
        self.check_pyzor("report", None, input=msg, code=200, exit_code=0)

    def test_whitelist(self):
        # anonymous account are not allowed to whitelist by default
        self.check_pyzor("whitelist", None, input=msg, code=403, exit_code=1)

    def test_info(self):
        self.check_pyzor("info", None, input=msg, code=200, exit_code=0)
def suite():
    """Gather all the tests from this module in a test suite."""
    test_suite = unittest.TestSuite()
    # unittest.makeSuite() is deprecated (removed in Python 3.13); use
    # the TestLoader API, which builds the same suites.
    loader = unittest.TestLoader()
    test_suite.addTest(loader.loadTestsFromTestCase(AccountPyzorTest))
    test_suite.addTest(loader.loadTestsFromTestCase(AnonymousPyzorTest))
    return test_suite


if __name__ == '__main__':
    unittest.main()
| 4,999 | 34.971223 | 84 | py |
pyzor | pyzor-master/tests/functional/test_pyzor.py | import io
import sys
import redis
import unittest
from tests.util import *
MBOX_FILE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_data", "test.mbx")
class PyzorScriptTest(PyzorTestBase):
    """Tests for the pyzor script's thresholds (-r/-w) and input styles
    (-s digests/mbox), run anonymously against a single test server."""
    password_file = None
    access = """ALL : anonymous : allow
"""

    def test_report_threshold(self):
        input = "Test1 report threshold 1 Test2"
        self.client_args["-r"] = "2"
        self.check_pyzor("report", None, input=input, code=200, exit_code=0)
        self.check_pyzor("check", None, input=input, code=200, exit_code=1,
                         counts=(1, 0))
        self.check_pyzor("report", None, input=input, code=200, exit_code=0)
        self.check_pyzor("check", None, input=input, code=200, exit_code=1,
                         counts=(2, 0))
        # Exit code will be success now, since the report count exceeds the
        # threshold
        self.check_pyzor("report", None, input=input, code=200, exit_code=0)
        self.check_pyzor("check", None, input=input, code=200, exit_code=0,
                         counts=(3, 0))

    def test_whitelist_threshold(self):
        input = "Test1 white list threshold 1 Test2"
        self.client_args["-w"] = "2"
        self.check_pyzor("report", None, input=input, code=200, exit_code=0)
        self.check_pyzor("check", None, input=input, code=200, exit_code=0,
                         counts=(1, 0))
        self.check_pyzor("whitelist", None, input=input, code=200, exit_code=0)
        self.check_pyzor("check", None, input=input, code=200, exit_code=0,
                         counts=(1, 1))
        self.check_pyzor("whitelist", None, input=input, code=200, exit_code=0)
        self.check_pyzor("check", None, input=input, code=200, exit_code=0,
                         counts=(1, 2))
        # Exit code will be failure now, since the whitelist count exceeds the
        # threshold
        self.check_pyzor("whitelist", None, input=input, code=200, exit_code=0)
        self.check_pyzor("check", None, input=input, code=200, exit_code=1,
                         counts=(1, 3))

    def test_report_whitelist_threshold(self):
        input = "Test1 report white list threshold 1 Test2"
        self.client_args["-w"] = "2"
        self.client_args["-r"] = "1"
        self.check_pyzor("report", None, input=input, code=200, exit_code=0)
        self.check_pyzor("check", None, input=input, code=200, exit_code=1,
                         counts=(1, 0))
        # Exit code will be success now, since the report count exceeds the
        # threshold
        self.check_pyzor("report", None, input=input, code=200, exit_code=0)
        self.check_pyzor("check", None, input=input, code=200, exit_code=0,
                         counts=(2, 0))
        self.check_pyzor("whitelist", None, input=input, code=200, exit_code=0)
        self.check_pyzor("check", None, input=input, code=200, exit_code=0,
                         counts=(2, 1))
        self.check_pyzor("whitelist", None, input=input, code=200, exit_code=0)
        self.check_pyzor("check", None, input=input, code=200, exit_code=0,
                         counts=(2, 2))
        # Exit code will be failure now, since the whitelist count exceeds the
        # threshold
        self.check_pyzor("whitelist", None, input=input, code=200, exit_code=0)
        self.check_pyzor("check", None, input=input, code=200, exit_code=1,
                         counts=(2, 3))

    def test_digest_style(self):
        # With -s digests the input is a raw digest, not a message.
        input = "da39a3ee5e6b4b0d3255bfef95601890afd80700"
        self.client_args["-s"] = "digests"
        self.check_pyzor("pong", None, input=input, code=200, exit_code=0,
                         counts=(sys.maxsize, 0))
        self.check_pyzor("check", None, input=input, code=200, exit_code=1,
                         counts=(0, 0))
        self.check_pyzor("report", None, input=input, code=200, exit_code=0)
        self.check_pyzor("check", None, input=input, code=200, exit_code=0,
                         counts=(1, 0))
        self.check_pyzor("whitelist", None, input=input, code=200, exit_code=0)
        self.check_pyzor("check", None, input=input, code=200, exit_code=1,
                         counts=(1, 1))
        r = self.get_record(input, None)
        self.assertEqual(r["Count"], "1")
        self.assertEqual(r["WL-Count"], "1")

    def test_digest_style_multiple(self):
        # Three digests in, but only the first two are reported: the third
        # line's counts must stay at zero.
        input2 = "da39a3ee5e6b4b0d3255bfef95601890afd80705\n"\
                 "da39a3ee5e6b4b0d3255bfef95601890afd80706\n"
        input3 = "da39a3ee5e6b4b0d3255bfef95601890afd80705\n"\
                 "da39a3ee5e6b4b0d3255bfef95601890afd80706\n"\
                 "da39a3ee5e6b4b0d3255bfef95601890afd80707\n"
        self.client_args["-s"] = "digests"
        self.check_pyzor_multiple("pong", None, input=input3, exit_code=0,
                                  code=[200, 200, 200],
                                  counts=[(sys.maxsize, 0),
                                          (sys.maxsize, 0),
                                          (sys.maxsize, 0)])
        self.check_pyzor_multiple("check", None, input=input3, exit_code=1,
                                  code=[200, 200, 200],
                                  counts=[(0, 0), (0, 0), (0, 0)])
        self.check_pyzor_multiple("report", None, input=input2, exit_code=0)
        self.check_pyzor_multiple("check", None, input=input3, exit_code=0,
                                  code=[200, 200, 200],
                                  counts=[(1, 0), (1, 0), (0, 0)])
        self.check_pyzor_multiple("whitelist", None, input=input3, exit_code=0)
        self.check_pyzor_multiple("check", None, input=input3, exit_code=1,
                                  code=[200, 200, 200],
                                  counts=[(1, 1), (1, 1), (0, 1)])

    def test_mbox_style(self):
        # A minimal single-message mbox.
        input = "From MAILER-DAEMON Mon Jan  6 15:13:33 2014\n\nTest1 message 0 Test2\n\n"
        self.client_args["-s"] = "mbox"
        self.check_pyzor("pong", None, input=input, code=200, exit_code=0,
                         counts=(sys.maxsize, 0))
        self.check_pyzor("check", None, input=input, code=200, exit_code=1,
                         counts=(0, 0))
        self.check_pyzor("report", None, input=input, code=200, exit_code=0)
        self.check_pyzor("check", None, input=input, code=200, exit_code=0,
                         counts=(1, 0))
        self.check_pyzor("whitelist", None, input=input, code=200, exit_code=0)
        self.check_pyzor("check", None, input=input, code=200, exit_code=1,
                         counts=(1, 1))
        r = self.get_record(input, None)
        self.assertEqual(r["Count"], "1")
        self.assertEqual(r["WL-Count"], "1")

    def test_mbox_real(self):
        # Same flow, but with a real mbox fixture read from disk.
        with io.open(MBOX_FILE_PATH, 'rt', encoding='latin-1') as mbox_file:
            input = mbox_file.read()
        self.client_args["-s"] = "mbox"
        self.check_pyzor("pong", None, input=input, code=200, exit_code=0,
                         counts=(sys.maxsize, 0))
        self.check_pyzor("check", None, input=input, code=200, exit_code=1,
                         counts=(0, 0))
        self.check_pyzor("report", None, input=input, code=200, exit_code=0)
        self.check_pyzor("check", None, input=input, code=200, exit_code=0,
                         counts=(1, 0))
        self.check_pyzor("whitelist", None, input=input, code=200, exit_code=0)
        self.check_pyzor("check", None, input=input, code=200, exit_code=1,
                         counts=(1, 1))
        r = self.get_record(input, None)
        self.assertEqual(r["Count"], "1")
        self.assertEqual(r["WL-Count"], "1")

    def test_mbox_style_multiple(self):
        # Three-message mbox; only the first two messages get reported.
        input2 = "From MAILER-DAEMON Mon Jan  6 15:08:02 2014\n\nTest1 message 1 Test2\n\n"\
                 "From MAILER-DAEMON Mon Jan  6 15:08:05 2014\n\nTest1 message 2 Test2\n\n"
        input3 = "From MAILER-DAEMON Mon Jan  6 15:08:02 2014\n\nTest1 message 1 Test2\n\n"\
                 "From MAILER-DAEMON Mon Jan  6 15:08:05 2014\n\nTest1 message 2 Test2\n\n"\
                 "From MAILER-DAEMON Mon Jan  6 15:08:08 2014\n\nTest1 message 3 Test2\n\n"
        self.client_args["-s"] = "mbox"
        self.check_pyzor_multiple("pong", None, input=input3, exit_code=0,
                                  code=[200, 200, 200],
                                  counts=[(sys.maxsize, 0),
                                          (sys.maxsize, 0),
                                          (sys.maxsize, 0)])
        self.check_pyzor_multiple("check", None, input=input3, exit_code=1,
                                  code=[200, 200, 200],
                                  counts=[(0, 0), (0, 0), (0, 0)])
        self.check_pyzor_multiple("report", None, input=input2, exit_code=0)
        self.check_pyzor_multiple("check", None, input=input3, exit_code=0,
                                  code=[200, 200, 200],
                                  counts=[(1, 0), (1, 0), (0, 0)])
        self.check_pyzor_multiple("whitelist", None, input=input3, exit_code=0)
        self.check_pyzor_multiple("check", None, input=input3, exit_code=1,
                                  code=[200, 200, 200],
                                  counts=[(1, 1), (1, 1), (0, 1)])

    def test_predigest(self):
        # "predigest" prints the normalized message text before hashing;
        # `msg` and `digest` come from tests.util.
        out = self.check_pyzor("predigest", None, input=msg).strip()
        self.assertEqual(out.decode("utf8"), "TestEmail")

    def test_digest(self):
        out = self.check_pyzor("digest", None, input=msg).strip()
        self.assertEqual(out.decode("utf8"), digest)
class MultipleServerPyzorScriptTest(PyzorTestBase):
    """Run the basic commands against three servers at once; each server
    must answer on its own line of the client's output."""
    password_file = None
    access = """ALL : anonymous : allow
"""
    servers = """127.0.0.1:9999
127.0.0.1:9998
127.0.0.1:9997
"""

    def test_ping(self):
        self.check_pyzor_multiple("ping", None, exit_code=0,
                                  code=[200, 200, 200])

    def test_pong(self):
        input = "Test1 multiple pong Test2"
        self.check_pyzor_multiple("pong", None, input=input, exit_code=0,
                                  code=[200, 200, 200],
                                  counts=[(sys.maxsize, 0),
                                          (sys.maxsize, 0),
                                          (sys.maxsize, 0)])

    def test_check(self):
        input = "Test1 multiple check Test2"
        self.check_pyzor_multiple("check", None, input=input, exit_code=1,
                                  code=[200, 200, 200],
                                  counts=[(0, 0), (0, 0), (0, 0)])

    def test_report(self):
        input = "Test1 multiple report Test2"
        self.check_pyzor_multiple("report", None, input=input, exit_code=0,
                                  code=[200, 200, 200])

    def test_whitelist(self):
        input = "Test1 multiple whitelist Test2"
        self.check_pyzor_multiple("whitelist", None, input=input, exit_code=0,
                                  code=[200, 200, 200])
def suite():
    """Gather all the tests from this module in a test suite."""
    test_suite = unittest.TestSuite()
    # unittest.makeSuite() is deprecated (removed in Python 3.13); use
    # the TestLoader API, which builds the same suites.
    loader = unittest.TestLoader()
    test_suite.addTest(loader.loadTestsFromTestCase(PyzorScriptTest))
    test_suite.addTest(loader.loadTestsFromTestCase(MultipleServerPyzorScriptTest))
    return test_suite


if __name__ == '__main__':
    unittest.main()
| 11,584 | 49.58952 | 112 | py |
pyzor | pyzor-master/tests/functional/test_server.py | import sys
import time
import errno
import unittest
# Python 2/3 compatibility: configparser was named ConfigParser on py2.
try:
    import configparser as ConfigParser
except ImportError:
    import ConfigParser

import pyzor.client

from tests.util import *

# Optional storage backends: tests for a backend are skipped when its
# driver module is not importable.
try:
    import MySQLdb
    has_mysql = True
except ImportError:
    has_mysql = False

try:
    import redis
    has_redis = True
except ImportError:
    has_redis = False

try:
    import gdbm
    has_gdbm = True
except ImportError:
    has_gdbm = False
class BatchedDigestsTest(object):
    """MixIn exercising the BatchClient, which queues report/whitelist
    requests per server address and flushes them in batches of ten:
    after nine calls the server counts are still unchanged, and the
    tenth call makes the whole batch visible."""

    def setUp(self):
        PyzorTestBase.setUp(self)
        self.client = pyzor.client.BatchClient()

    def test_batched_report(self):
        digest = "da39a3ee5e6b4b0d3255bfef95601890afd80709"
        for i in range(9):
            self.client.report(digest, ("127.0.0.1", 9999))
        # Still (0, 0): the batch has not been flushed yet.
        self.check_digest(digest, ("127.0.0.1", 9999))
        self.client.report(digest, ("127.0.0.1", 9999))
        self.check_digest(digest, ("127.0.0.1", 9999), (10, 0))

    def test_batched_whitelist(self):
        digest = "da39a3ee5e6b4b0d3255bfef95601890afd80708"
        for i in range(9):
            self.client.whitelist(digest, ("127.0.0.1", 9999))
        self.check_digest(digest, ("127.0.0.1", 9999))
        self.client.whitelist(digest, ("127.0.0.1", 9999))
        self.check_digest(digest, ("127.0.0.1", 9999), (0, 10))

    def test_batched_combined(self):
        # Report and whitelist batches are flushed independently.
        digest = "da39a3ee5e6b4b0d3255bfef95601890afd80707"
        for i in range(9):
            self.client.report(digest, ("127.0.0.1", 9999))
            self.client.whitelist(digest, ("127.0.0.1", 9999))
        self.check_digest(digest, ("127.0.0.1", 9999))
        self.client.report(digest, ("127.0.0.1", 9999))
        self.check_digest(digest, ("127.0.0.1", 9999), (10, 0))
        self.client.whitelist(digest, ("127.0.0.1", 9999))
        self.check_digest(digest, ("127.0.0.1", 9999), (10, 10))

    def test_batched_multiple_report(self):
        # Ten distinct digests also make a full batch.
        digest = "%sa39a3ee5e6b4b0d3255bfef95601890afd80706"
        for i in range(10):
            self.client.report(digest % i, ("127.0.0.1", 9999))
        for i in range(10):
            self.check_digest(digest % i, ("127.0.0.1", 9999), (1, 0))

    def test_batched_multiple_whitelist(self):
        digest = "%sa39a3ee5e6b4b0d3255bfef95601890afd80705"
        for i in range(10):
            self.client.whitelist(digest % i, ("127.0.0.1", 9999))
        for i in range(10):
            self.check_digest(digest % i, ("127.0.0.1", 9999), (0, 1))

    def test_multiple_addresses_report(self):
        # Batches are kept per server address.
        digest1 = "da39a3ee5e6b4b0d3255bfef95601890afd80704"
        digest2 = "da39a3ee5e6b4b0d3255bfef95601890afd80703"
        for i in range(9):
            self.client.report(digest1, ("127.0.0.1", 9999))
            self.client.report(digest2, ("127.0.0.1", 9998))
        self.check_digest(digest1, ("127.0.0.1", 9999))
        self.check_digest(digest2, ("127.0.0.1", 9998))
        self.client.report(digest1, ("127.0.0.1", 9999))
        self.check_digest(digest1, ("127.0.0.1", 9999), (10, 0))
        self.client.report(digest2, ("127.0.0.1", 9998))
        self.check_digest(digest2, ("127.0.0.1", 9998), (10, 0))

    def test_multiple_addresses_whitelist(self):
        digest1 = "da39a3ee5e6b4b0d3255bfef95601890afd80702"
        digest2 = "da39a3ee5e6b4b0d3255bfef95601890afd80701"
        for i in range(9):
            self.client.whitelist(digest1, ("127.0.0.1", 9999))
            self.client.whitelist(digest2, ("127.0.0.1", 9998))
        self.check_digest(digest1, ("127.0.0.1", 9999))
        self.check_digest(digest2, ("127.0.0.1", 9998))
        self.client.whitelist(digest1, ("127.0.0.1", 9999))
        self.check_digest(digest1, ("127.0.0.1", 9999), (0, 10))
        self.client.whitelist(digest2, ("127.0.0.1", 9998))
        self.check_digest(digest2, ("127.0.0.1", 9998), (0, 10))
# MySQL table schema used by the mysql-engine tests; %s is interpolated
# with the table name read from test.conf.
schema = """
CREATE TABLE IF NOT EXISTS `%s` (
`digest` char(40) default NULL,
`r_count` int(11) default NULL,
`wl_count` int(11) default NULL,
`r_entered` datetime default NULL,
`wl_entered` datetime default NULL,
`r_updated` datetime default NULL,
`wl_updated` datetime default NULL,
PRIMARY KEY (`digest`)
)
"""
@unittest.skipIf(not os.path.exists("./test.conf"),
                 "test.conf is not available")
@unittest.skipIf(not has_mysql, "MySQLdb library not available")
class MySQLdbBatchedPyzorTest(BatchedDigestsTest, PyzorTestBase):
    """Test the mysql engine.

    Reads connection details from ./test.conf, creates the test table
    before starting the servers and drops it afterwards.
    """
    dsn = None
    engine = "mysql"
    password_file = None
    access = """ALL : anonymous : allow
"""
    servers = """127.0.0.1:9999
127.0.0.1:9998
"""

    @classmethod
    def setUpClass(cls):
        conf = ConfigParser.ConfigParser()
        conf.read("./test.conf")
        table = conf.get("test", "table")
        db = MySQLdb.Connect(host=conf.get("test", "host"),
                             user=conf.get("test", "user"),
                             passwd=conf.get("test", "passwd"),
                             db=conf.get("test", "db"))
        c = db.cursor()
        c.execute(schema % table)
        c.close()
        db.close()
        # Build the server DSN before the base class starts the daemons.
        cls.dsn = "%s,%s,%s,%s,%s" % (conf.get("test", "host"),
                                      conf.get("test", "user"),
                                      conf.get("test", "passwd"),
                                      conf.get("test", "db"),
                                      conf.get("test", "table"))
        super(MySQLdbBatchedPyzorTest, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(MySQLdbBatchedPyzorTest, cls).tearDownClass()
        try:
            conf = ConfigParser.ConfigParser()
            conf.read("./test.conf")
            table = conf.get("test", "table")
            db = MySQLdb.Connect(host=conf.get("test", "host"),
                                 user=conf.get("test", "user"),
                                 passwd=conf.get("test", "passwd"),
                                 db=conf.get("test", "db"))
            c = db.cursor()
            c.execute("DROP TABLE %s" % table)
            c.close()
            db.close()
        except Exception:
            # Best-effort cleanup only.  This was a bare "except:", which
            # would also swallow SystemExit/KeyboardInterrupt.
            pass
@unittest.skipIf(not has_redis, "redis library not available")
class RedisBatchedPyzorTest(BatchedDigestsTest, PyzorTestBase):
    """Run the batched-digest tests against the redis engine."""
    # DSN format: host,port,password,db-index; database 10 is reserved
    # for the test run.
    dsn = "localhost,,,10"
    engine = "redis"
    password_file = None
    access = """ALL : anonymous : allow
"""
    servers = """127.0.0.1:9999
127.0.0.1:9998
"""

    @classmethod
    def tearDownClass(cls):
        super(RedisBatchedPyzorTest, cls).tearDownClass()
        # Wipe redis database 10 (the one named in the dsn) after the run.
        connection = redis.StrictRedis(db=10)
        connection.flushdb()
class DetachPyzorTest(PyzorTestBase):
    """Run the server detached (daemonized) and verify pid handling."""
    detach = "/dev/null"
    homedir = os.path.join(os.getcwd(), "pyzor-test")

    def test_pid(self):
        """The detached server must write pyzord.pid into its homedir."""
        pid_path = os.path.join(self.homedir, "pyzord.pid")
        self.assertTrue(os.path.exists(pid_path))

    @staticmethod
    def is_running(pid):
        # Signal 0 performs error checking only.  ESRCH means no such
        # process; any other outcome (including EPERM) means it exists.
        try:
            os.kill(pid, 0)
        except OSError as err:
            return err.errno != errno.ESRCH
        return True

    @classmethod
    def tearDownClass(cls):
        pid_path = os.path.join(cls.homedir, "pyzord.pid")
        with open(pid_path) as pidf:
            pid = int(pidf.read().strip())
        # Signal 15 is SIGTERM; then poll until the daemon has exited.
        os.kill(pid, 15)
        while cls.is_running(pid):
            time.sleep(0.25)
        super(DetachPyzorTest, cls).tearDownClass()
def suite():
    """Gather all the tests from this module in a test suite."""
    test_suite = unittest.TestSuite()
    for case in (MySQLdbBatchedPyzorTest,
                 # GdbmBatchedPyzorTest is currently disabled.
                 RedisBatchedPyzorTest,
                 DetachPyzorTest):
        test_suite.addTest(unittest.makeSuite(case))
    return test_suite
# Allow running this module's tests directly (python test_engines.py).
if __name__ == '__main__':
    unittest.main()
| 7,853 | 31.188525 | 70 | py |
pyzor | pyzor-master/tests/functional/__init__.py | """A suite of functional tests that verify the correct behaviour of the
pyzor client and server as a whole.
Functional tests should not touch real data and are usually safe, but it is
not recommended to run these on production servers.
Note that these tests exercise the installed version of pyzor, not the
version from the source.
"""
import unittest
def suite():
    """Gather all the tests from this package in a test suite."""
    import test_pyzor
    import test_server
    import test_digest
    import test_engines
    import test_account
    import test_forwarder
    # Aggregate the per-module sub-suites in the historical order.
    modules = (test_pyzor, test_digest, test_server,
               test_engines, test_account, test_forwarder)
    test_suite = unittest.TestSuite()
    for module in modules:
        test_suite.addTest(module.suite())
    return test_suite
# Run the whole functional suite when this package is invoked directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| 964 | 26.571429 | 77 | py |
pyzor | pyzor-master/tests/functional/test_digest.py | # -*- coding: utf-8 -*-
import sys
import hashlib
import unittest
from tests.util import *
# Multipart plain-text test message template; %s is substituted with the
# body text by individual test cases before the message is digested.
TEXT = """MIME-Version: 1.0
Sender: chirila@spamexperts.com
Received: by 10.216.90.129 with HTTP; Fri, 23 Aug 2013 01:59:03 -0700 (PDT)
Date: Fri, 23 Aug 2013 11:59:03 +0300
Delivered-To: chirila@spamexperts.com
X-Google-Sender-Auth: p6ay4c-tEtdFpavndA9KBmP0CVs
Message-ID: <CAK-mJS9aV6Kb7Z5XCRJ_z_UOKEaQjRY8gMzsuxUQcN5iqxNWUg@mail.gmail.com>
Subject: Test
From: Alexandru Chirila <chirila@spamexperts.com>
To: Alexandru Chirila <chirila@spamexperts.com>
Content-Type: multipart/alternative; boundary=001a11c2893246a9e604e4999ea3
--001a11c2893246a9e604e4999ea3
Content-Type: text/plain; charset=ISO-8859-1
%s
--001a11c2893246a9e604e4999ea3
"""
# Multipart test message with matching text/plain and text/html parts
# (the HTML part is quoted-printable encoded).
HTML_TEXT = """MIME-Version: 1.0
Sender: chirila@gapps.spamexperts.com
Received: by 10.216.157.70 with HTTP; Thu, 16 Jan 2014 00:43:31 -0800 (PST)
Date: Thu, 16 Jan 2014 10:43:31 +0200
Delivered-To: chirila@gapps.spamexperts.com
X-Google-Sender-Auth: ybCmONS9U9D6ZUfjx-9_tY-hF2Q
Message-ID: <CAK-mJS8sE-V6qtspzzZ+bZ1eSUE_FNMt3K-5kBOG-z3NMgU_Rg@mail.gmail.com>
Subject: Test
From: Alexandru Chirila <chirila@spamexperts.com>
To: Alexandru Chirila <chirila@gapps.spamexperts.com>
Content-Type: multipart/alternative; boundary=001a11c25ff293069304f0126bfd
--001a11c25ff293069304f0126bfd
Content-Type: text/plain; charset=ISO-8859-1
Email spam.
Email spam, also known as junk email or unsolicited bulk email, is a subset
of electronic spam involving nearly identical messages sent to numerous
recipients by email. Clicking on links in spam email may send users to
phishing web sites or sites that are hosting malware.
--001a11c25ff293069304f0126bfd
Content-Type: text/html; charset=ISO-8859-1
Content-Transfer-Encoding: quoted-printable
<div dir=3D"ltr"><div>Email spam.</div><div><br></div><div>Email spam, also=
known as junk email or unsolicited bulk email, is a subset of electronic s=
pam involving nearly identical messages sent to numerous recipients by emai=
l. Clicking on links in spam email may send users to phishing web sites or =
sites that are hosting malware.</div>
</div>
--001a11c25ff293069304f0126bfd--
"""
# Multipart test message whose HTML part carries <style> and <script>
# elements in addition to the visible text.
HTML_TEXT_STYLE_SCRIPT = """MIME-Version: 1.0
Sender: chirila@gapps.spamexperts.com
Received: by 10.216.157.70 with HTTP; Thu, 16 Jan 2014 00:43:31 -0800 (PST)
Date: Thu, 16 Jan 2014 10:43:31 +0200
Delivered-To: chirila@gapps.spamexperts.com
X-Google-Sender-Auth: ybCmONS9U9D6ZUfjx-9_tY-hF2Q
Message-ID: <CAK-mJS8sE-V6qtspzzZ+bZ1eSUE_FNMt3K-5kBOG-z3NMgU_Rg@mail.gmail.com>
Subject: Test
From: Alexandru Chirila <chirila@spamexperts.com>
To: Alexandru Chirila <chirila@gapps.spamexperts.com>
Content-Type: multipart/alternative; boundary=001a11c25ff293069304f0126bfd
--001a11c25ff293069304f0126bfd
Content-Type: text/plain; charset=ISO-8859-1
This is a test.
--001a11c25ff293069304f0126bfd
Content-Type: text/html; charset=ISO-8859-1
Content-Transfer-Encoding: quoted-printable
<div dir=3D"ltr">
<style> This is my style.</style>
<script> This is my script.</script>
<div>This is a test.</div>
</div>
--001a11c25ff293069304f0126bfd--
"""
TEXT_ATTACHMENT = """MIME-Version: 1.0
Received: by 10.76.127.40 with HTTP; Fri, 17 Jan 2014 02:21:43 -0800 (PST)
Date: Fri, 17 Jan 2014 12:21:43 +0200
Delivered-To: chirila.s.alexandru@gmail.com
Message-ID: <CALTHOsuHFaaatiXJKU=LdDCo4NmD_h49yvG2RDsWw17D0-NXJg@mail.gmail.com>
Subject: Test
From: Alexandru Chirila <chirila.s.alexandru@gmail.com>
To: Alexandru Chirila <chirila.s.alexandru@gmail.com>
Content-Type: multipart/mixed; boundary=f46d040a62c49bb1c804f027e8cc
--f46d040a62c49bb1c804f027e8cc
Content-Type: multipart/alternative; boundary=f46d040a62c49bb1c404f027e8ca
--f46d040a62c49bb1c404f027e8ca
Content-Type: text/plain; charset=ISO-8859-1
This is a test mailing
--f46d040a62c49bb1c404f027e8ca--
--f46d040a62c49bb1c804f027e8cc
Content-Type: image/png; name="tar.png"
Content-Disposition: attachment; filename="tar.png"
Content-Transfer-Encoding: base64
X-Attachment-Id: f_hqjas5ad0
iVBORw0KGgoAAAANSUhEUgAAAskAAADlCAAAAACErzVVAAAACXBIWXMAAAsTAAALEwEAmpwYAAAD
GGlDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjaY2BgnuDo4uTKJMDAUFBUUuQe5BgZERmlwH6e
gY2BmYGBgYGBITG5uMAxIMCHgYGBIS8/L5UBFTAyMHy7xsDIwMDAcFnX0cXJlYE0wJpcUFTCwMBw
gIGBwSgltTiZgYHhCwMDQ3p5SUEJAwNjDAMDg0hSdkEJAwNjAQMDg0h2SJAzAwNjCwMDE09JakUJ
AwMDg3N+QWVRZnpGiYKhpaWlgmNKflKqQnBlcUlqbrGCZ15yflFBflFiSWoKAwMD1A4GBgYGXpf8
EgX3xMw8BSMDVQYqg4jIKAUICxE+CDEESC4tKoMHJQODAIMCgwGDA0MAQyJDPcMChqMMbxjFGV0Y
SxlXMN5jEmMKYprAdIFZmDmSeSHzGxZLlg6WW6x6rK2s99gs2aaxfWMPZ9/NocTRxfGFM5HzApcj
1xZuTe4FPFI8U3mFeCfxCfNN45fhXyygI7BD0FXwilCq0A/hXhEVkb2i4aJfxCaJG4lfkaiQlJM8
JpUvLS19QqZMVl32llyfvIv8H4WtioVKekpvldeqFKiaqP5UO6jepRGqqaT5QeuA9iSdVF0rPUG9
V/pHDBYY1hrFGNuayJsym740u2C+02KJ5QSrOutcmzjbQDtXe2sHY0cdJzVnJRcFV3k3BXdlD3VP
XS8Tbxsfd99gvwT//ID6wIlBS4N3hVwMfRnOFCEXaRUVEV0RMzN2T9yDBLZE3aSw5IaUNak30zky
LDIzs+ZmX8xlz7PPryjYVPiuWLskq3RV2ZsK/cqSql01jLVedVPrHzbqNdU0n22VaytsP9op3VXU
fbpXta+x/+5Em0mzJ/+dGj/t8AyNmf2zvs9JmHt6vvmCpYtEFrcu+bYsc/m9lSGrTq9xWbtvveWG
bZtMNm/ZarJt+w6rnft3u+45uy9s/4ODOYd+Hmk/Jn58xUnrU+fOJJ/9dX7SRe1LR68kXv13fc5N
m1t379TfU75/4mHeY7En+59lvhB5efB1/lv5dxc+NH0y/fzq64Lv4T8Ffp360/rP8f9/AA0ADzT6
lvFdAAAAIGNIUk0AAHolAACAgwAA+f8AAIDpAAB1MAAA6mAAADqYAAAXb5JfxUYAAGrVSURBVHja
7J13YE3nG8c/Nzc7BLFX7F01ajcENVrU3qpVtalds2oWNWtTe1Zqj1IzLaVG0VJq7y1myM79/v44
92ZI/FDNoHn/4Nxz3vOcc28+5z3P+7zPoBivT+sko9V6je65nvWeP32N7rm89Z77m16fe34b0tV8
XZqLl/UX9nyNqMhuveeiLq/N75w6hfWea9q/NvecCaip16V5vtYke742v3PFCJJTvDb33DyJ5CSS
k0hORCSn/PDpPXaZ8nokkZxE8mtCcrpKn45Ze+yeJJWNSk+1dmseSjr1uW3G4tX5k7RRO6RqPaRv
FfuIj3mbZk8iOYnkBCP5o5uSpCcXjh7dN8otCjzOku78uHRnkL6iaxko+4eknYC5Qds8QJWldyXp
WM6Ut1eZgU9CdTzy5DKdxyzZuGzmrA+hWiHK9Ju6YtPBP5aXsB6uNXjh4iVfN47xVijewBGwy+Dp
mkRyEskvSXKFKZP7t6mrqUCZbXcO1HCNJPlgLoBs14PzaDSZ74Uv/qRrObBbKoV8BD5aX7tw7e+1
30saB++HXT8vZ+PUCtuDbFdbiJvW5QiWJP8wzbLK3mscDKmKqUbnombodaMkeG6RRpuqrrkjhf/e
PInkJJJfXk9OoflQIzjotPRwZgpjn4N2GBtD1EU+DFJf42MH/dnhYWBu1qoQwDb1ljQ+z92Qsnts
JPd+fPwHrcqeMnv2zA6gvdk1J4+HG3jZxvwzux1T5O4ZoKHmxZK2ubBXp1yd/9Yhbamvez9Omb41
XF2SSE4i+aVJzq7vyXrvbvF0jebe1NFkhvqstcbBpvoqfC9LVZDcH6SF46F5aa8ZjFAFsnecd1Ff
aM453dTnnAqxybMnh+bTcrsT4KjddZ5e0Qj5gYprw7TLY7Sml9+mBeyWuvbUUg/tLqsVAO21O4nk
JJJfmuTimsV3agGQbJmaAVBU842D3dT10V766MTPUnD1KvIBN/875r7qfE5S4IQK+jrfBY3AMeRc
pERvTWRTIEBabe+uKcOnZo08mEmTW0saayoRvsKE41+Wwr+HBf55KTiTo/bk0lJMVdeHW1olkZxE
8j8geYZ70EXDDPG5PjdMFxphHFwsL//fcFwi/TZZGyboxJX79wNVubsu6maPova8r8G45IA82hQp
sYEGcOVvnEq2maoVAyQpyoJ+RQ0o/KekcdtDMwKfatTNs4ukJWTStg80qfIx6a/3k/TkJJJfnuT8
+qGGJhrbPqps1YfbAZDsvp+LfIH875jwu3BIunns+GlN76bRAdpZDOqqFwA1NDlSYm996hZ+b88T
SZrVTz82zB+Fy5ZqC2k/PiWtByigjUH7Cwf6l6CwlrdWzyuyjHdOsl0kPMmWtc1yOznl+/i8JIVK
kq58UdQzp3evvVK49+hESHI2rRmu6gDkCr7kCMBY1QBguL7NpO+tHe9cDTntDrgGnO6l+nk2KrQZ
LdXZyme3SImTVC6vFHp4Rqe+Gt1PjaNxOVQfWl1sPgbIpZ+1nBxpoaYm9lfjTrekOz1MSSQnNMnT
wSlfHleS7dLdRuaShyW9Rfp8acHUTTdonAhJLqylK5QLIM1xtTb2rVYegKrBd9N76dt3CwIU1BWt
AmC/uqk11HryOHsrGRazFuofKXGO8qXXFjegokZ1iMo4MEclALpLtY3Rf56mA/CZ+g1XTey8vw1Q
+ySSE5rkAQwJk0LHUuVJUbyh2oUAB5ZL11Y5my2XqJsISS6qKUuVBUwNzmmhdTA8EeoI5h4BIVX4
RN1O/QQk36ZFGmfVQdrpS6CHhtpIbmhTrAFmqaynlhr69oQGGgEOmTLbVIb1ygFk8rNovhvJugc8
qaIZVh29zZdqCFBZ3yWRnNAkL2W6pIvV6NiftuETK5LhxnoG3v97bx2q6TAdEyHJxfRtb61rN+ys
QobY3uqPzuFS6y/5VYEv1Wh6eAHHcoe1tYzGALDSUk2LAI+wAy0MowfemhIpsYfaptM6Q3fYVkG/
LD8VKoXuywfArypcpdv82xpwVgHXpXsV8+trANqqVxu1BZL/qk+SSE5okr9nYphvV0eq3PFIfl9S
FwYe57OKQLEb+pk+iZDkIprvsk9S6KrIiZlfwIlQaWMGYIq8K0kh0mKnytbpXaYmTnemAYwbVtuq
YWe2XIh0NyqhOXYPtgJwOLi4pJBj69f8eiq7oZVLkgK/sss46cSVvSMzkvJSPQBKak9dDSmX5dPT
WulxsHUSyQlL8kaaZYMMMyx7DZ34Au8cpeOYLJB+nzYyPBGS7DykHHbvd6qTKsow2N8/5PRcbwC+
uZmPtod/X9rQhH2dbLYODjYXurbWpbtxDwtGnG1qk5eMRo/CTWjRtnikpxHmGUfXT2z3rktMRyHT
z2rb8RdJltkuH0Y1hSSRnDAkQ4v1wdK3TJSkMLPnUdpKFweaCmgbIxIhyc9p9uYX6mZK8+o+bxvz
UWD5yq8LkPyUvJNITmiSU5L+qwfSeFZJ0jGq7qCzJBXjzs0SPq8fyQnQPPZFuBslkZxQJG9geDMX
0qzVHIZLstRl8hoGSFIZbib83b4eJDe2zHFMIjmBSd7CQN2f4Gb+6aI540n9XYdSoQuYoNA/2lFU
D8aGvN4kN+oeLyTbpUha40twkn+jnaTf7DM+7os5GVS4q+9IW9AFMpxSZUYlQpI/XLVkZLWo5Dja
Q54Je87/Nb18dMIyPvS3i5i55Snf+JNSUQ5mqPnl9Jkdnklnnur2MXc2O7C5EoBjUsxIoiP5bxpI
UlcWWxYVyVttdpjkg8k1e52ZD6Rh5pmJj+TmFkkag0dZsAP40O9q6nmhYVf+uClNp9YYaF0AAJe9
GmCc8+7swwGS9MgRINXhud/9fF2SdAMgU3bnZvM2bVg0xCvK8stj7c1k3X7rLQB+nPKxRQprCyVD
PgTsK6YE76+m9i7rkERywpP8xFRTks64T4zYFTThqG0zMPHpyWnv+hXOVu7Hrxn8yH7PrlTwUZhU
R3fTASUPq95ykTrwURnAvEY/WO0YV+S/b/GYnt0HlQf4QJIlxH9401L5kmf5cMQey52Tkn+opDkR
lo1j2q4LVvPGXwcBclr+ehBStOKN0JL0UQPAW11cf5Qk3WqTRHKCk6x5Pxv4Js67jUlyIw0xNr6Q
5xMdz1It+KHlrsN62RsZi/rvt0CNsNvZYLx22LSAhSoCwC41B/qoSybXGZas9AiQJP3lo1+yQsYG
Fy0201xl7WGCFhkfVoS5A/10TLOgorawUsWAypqUSz8XSllqSoB6JJGc4CQn7haT5A5qa2x0VGP5
Wa49DGir5cxXFrBr/igk39VbQFcdcqtlORexitdHnwGwSvcywTDVhSGq5fDkypGtExtmwSPcFyDT
7Se2iKeZqofbtaD0AEzWu2A+F7ZXFcB0OtTtXIA9UE1TyhuedXmfHE4iOZGRfDXRk1xFJycdvXJ2
hGNrfar5TUJD6rVUd2ZoVOsx5/WwtosOAqYF2uwXWjqKK/3MblM72jFWWgQzVQm6qmUaTc74Vi4T
cOcUUO6s+tlOOPvEGXpYw/P6qTW8r1VXgxyB5SoT/hdAdY2qYT3jmF8SyYmL5JVudxM7yXwnhV3x
15yO6qUl5MpHN31mrE+GrvbkLS0BcPpNGhjJVi1JUh76yWIpwXQVgubqkEeSNBk4GTx/6XlZ/Y2A
FJZrk5f9ckErAaivYTBDLRTw593wx0H6TqsBmqt3LQ0GSBF6IInkxEXyZZYlepKpXSs56R486KyP
QvcDfKyBTNaYHnXTAy30JQDp74VFcZX4RL99Uak8dNHs8KPO01UUPtYnBXVu4/LN3YCTkkK31Yzo
X0GSFBB2ycjyoo3YXfF/Vwq8cPTYfR02+G2pjtU0FaCdRiaRnMi0i6z9Ez/JABxVbzW/cBmgrBbz
jaoY+4epvrGxTMUje3ezKtef6fMJmj1dReEzNc9udZmHw7qlsOWR12ithRWyubFLyQFMN54kq6Bl
dY25Zj9dNaJKmqtzSS0DMvo/Tp9EciIjuVb1xE5yjqwAyZ6c76jmu0IB3J7csusha3TzChvAtgAp
AAYbHvF8ou5Ox3ROxaGPGuQyvOuBo8FOH5/Sg4a2/q3UEeBblQNgguqPVwvr49BPfmoK0ExfZtMW
sN9mfQ8kkZyISO6XNbGTfOOgEzBQYzqq6XoBsFRVG9hsc9uV3v2jaSva0cUIuzPaN1ase6sJJZ9I
FaG7ambRGmwhJ+DY/XFIhYgxuYPhgd/K6om88UJgqsGqCTBKUgWAUlqeTL+Td5t22CeRnNhInmd6
nMhJHqdfmndZrlsebVVvdjAAdTQml29V4/AGrfaXdM++VtTA0rkqZ1/owxYdh2/PDU3D9QF8oQZO
utj/61GTF256Z588gFo6ZUWyur4wArBHGZ/3SasZojoAC3RRt+763bjd7GJIZv8Hq4O1LnnSGl+i
I/ligfuJnGS3jZL0ZyEyj8/oYey1/65OBEUfSbdHls2Yn3RfZYxka6aOGosgXgBf3csHDYLrcc2Q
HfbJCpU34lo7Gv2T+w0y5pITjM+VpDZ436gIUP7rWmcuXLhw6Ub5kl91lfR73SS/i0RIcmJrsc34
6g3oXObZ3vQVKzjE3NlF4We+H9qtYx/Dochs/cf1g5pVvMoWTkPuTi4A7kNtuYfM1lQCtpimnnNd
YrvYOzO6FkzyIEoi+VVsFy/ZUjj8o9MyJOVPfqrd27fx5yexHQg/fCA4MZBsORRNP1558uWEXvvx
mYf2DAxPBCQn5bT/N0gOnVAJIPXiyF1XN0/+bvkJ6UBOSDfbknAk+/1p/L+QL6L0uW1OfT066Gcl
6fQPP9yTFHjr9N41s8ZaHebOBkpqwQ5b16gPZuhhqR33k0h+Q0h+hFPVLv0+c2aeLnUsuVz6u7kd
gKn2dhe7mp+4MzDhSK5u5ytJ9zKZ9xxftewPaXx3i6Q+TLC9NGY/kDTBdER3qwH1FNrZyPNqhEgp
0HWYpA10ubo2UJKqpQ/b/0D+mQdK0hguqrRzcBLJb8qY7JZTklbgtT8FkO9wDz5Z5us7OzdvmxZJ
5z3sgxKM5HXUl6T2VMkO0OW+0/uStI1W1g4/4iNpCaPUEa9JK0/rJrnqftJ9vM/GW5KkX5gkKcCp
RH+2SLKkcb7E17pDDUnrTG9JTgX0xpDcKd9/XE/Ol14KX5OOKekdv+1fmBLfsl7SAvvMm76VdNrd
PeFIViGn29KvZvfkDm3fKpODdoYvzjW8rcf786ek3bRSMe5I0gn6RRU3jYWSVMalAVMkXaD6X3SW
cruc0zkPp726yvv/OsnODXI+k6LSk0d7AThGTvGqzqj0f7BLnzv6ZLBov0IR22UnZyZa1s5ZT1cW
HTH969rm/w7JhdPPrO2JecpgxkiqRm8m63x7UvwsKXxVWsYl4Izva7rqbh5TWQboyaNVOLBOkkRh
6/Gy5jBJl6mm+uy6tGDZkz18GVVcf36SpCbkJH+ItIrel6grLef9gKKmBdIR2v3rJJfxafQsLtuH
SapKunlnbreBNuMdYYy0M+2MguD++bdFMfX60gXACaDo7AybLArY1igy1WaXMAV1wm72AMN3412A
rIZPR8bbAXlT5ajkbnR8rxjmOZKkY9n+MyRnAdI13q2sTvcljaElPb5zoOTfksJr4TItIW0Xj9Ob
t33G50Xtbko6DvwuSfcoZhwOdC4kSUHk0lKSA2mm41n0vcsR57fkD0NqdpgojWXhXSpJqkZhOkr6
/dVSy8VKcjuf0tGTsLjbhsUyIfcrl95Yyu0Pi2RpwWXNNY3WXa3qp02mj69Ix+kr/ZUdKgcPAr5T
nTu75p6TVtmsxe9ZTmy+YymZU5a3gev3TACrLQ0A0zaNnBYiXc1jX9gOx6BTTos0MmfKwlMse/4z
JLsC7uN1lxKStIRmNGsNrp8HSGsNoBPQCrfV7GGX+6FrXknaCxyRpD+pbRw9Zd3wSKnQrm5eI1pS
FHDZH4XkS5JUiYIkcz2u3myWOb+kq6koHiTpHO3/bZId589zAs8vvlswOCfm+p7t5/rMMfKBcyCs
CsBsdf+iY+DtZJKO6UJ3jR0lHZNllY56+D9crEtpmaPgzPBH6M5jQOXd2mAN9z8WWpg6WlNMmg0e
2gVAqUD/AvC5jp2xHNd6fd9enrBWjY0iI+xViv8Iyf54rW2XijFXqC5JC+hP1UczP8pAuSBNpXtC
r4z0xW7vTePW5mAyzGmTGWQc/JOPJUnpUhmfD5GOoVHPbsI5SaEp0r/HNAoHdcNXqTJLuprScPK1
fLzl3yW5ULOOPpM79HFqs2TkJJ+leUr5+PjM/mrau9YEnCsBSlh+AXzkrYOndDLrMDWeqmBtf7uS
Fg9Wb8ZoEdukRTgHHf3lFoDjQav8DzUVTMdDSkuPklNIy2yuccfNBQIfnVB7j5mOoRfX3jFBOw3T
WMC11l1/+/8IyedoLJ1O4XzN0TNUUj9WOBaRFFqP6XqYpUFCkxyWyxx6h3clqQE1WCBJpUwnjIMn
jTE5yJTX+LyF/NFzYHRhv6SRdG7EhdZ078jvypZSUk2y8LMkyw39qySbFvj4+Pj4LE/lmPXdFNV8
2uTz8alnFxlOUt1wxqwLjFAfrUn/kTublXe1SlU18YW6XAzywO10eO4dfpdDixfRcl85APygAlaf
0GLARHWQNJgKEf7Lq9XtkOYZecXvPXi0GfhAG7R98R83wxTW5Y3VLgIDJeluqBT45cRw7aWbpIF8
15jR0tnMznc9U0vSanpLJ+4lNMmqzi15pAqV/rTLvpFukhbZlAuFuKcNluRL0x4fGcpEN76KevIk
pkl/uiW/2IF9QQVNhTmifIRrIh/utysuhdUwbfp3x+TCFZaPzJ/NhVKLfZZ09Wnytk/vSITOBJsB
Dod7AIM1woiE+iPMfrs8gDVqpYfn/CX1/eVqNR2rqTGblBrgaLgbgJ3fOYBeGqo9AQ/TVbQmEoe8
CtOsNSoJOFmCNBWoogOSQo//OK7MG2u7OOiU4qF03jnTfc0Af01kqqRTVL+elqI13fhGXiy4oeBK
/KgjTu7bEprk5vyl5szTzUKsfujkGaqtLslO2w42Zbp0pATLGyS7oYBO5N9LP0lh1/f9eFSSrtiV
1yVPFmoQP2qfGY6oDKH3PZwuqhG/aRf/uhWuik9twGX+wkbj5vnkL+9TLxKhuzeN/24BLNJww5Xt
7H3+CAY4oIFS4MlNG/X92mA2aKd6/KjsQDVtN9wsDIflbpqgWQM1vaLNqRM26GCyKw+MCJUnGgQ0
1SmdVdhc9zfYnrwb9kp/eb4jzaartPOdy5JU3O7mX+9B9gXSzBSYsnpQzaIlsDShSe6Kr444Onkl
p7P0KfVamZ23Rhw845aso5eJhuE/UrxTNvJfO0DzuW2LOwJvS5KqUSMtQ6RR7JA6wEmVJHQmXaS1
dNJ+bBr3v0ZyK598QFGfZqRZNtnuo6h2jD/DkwGcC3IE0+mwVkZs9JkwhxPXAS74L1MOM2TS1pni
bYvUbLmyQwG/oCLW+pQ7DB/QURrvdDpocCTJLTWUx9cM7/s96gV8pT/Uvt55Xaz4Bq+MnNxnkRQW
JMk/cu8PLW5KfpfCJOnsgHKp0ra8L2np7ATXLrrjK23Pa8oz2SLdLg4FfKMc9XHDruZWydIOUra5
aYRgOhdr1HvkZknStQK4T5fkWzdYupc+xWN920kbqlyRnrhmk3x+sPzLJHf3yQRU8amb/FOf9/nS
J0oI3Th1gre+uqXaMFjrmqgRwPcqceYcwPmA368C2GvXFHnwk/T+fGWn2s0I582rAVkg5S2/TupN
BUtIZHh2A43idGhyYJP6qTeY/7JMV3OcB4YEN0/y6kyQFpPk7z+4LtlKmCl8/97ozmt3d1m9ic4e
DpZ0Y2jXSQfDohwPvxhlifJyVJvi2u2vfLexkFzf55ve3Umz2MfHx6eDadIyuyjrdU/C9/4t3Qp7
PONnXcvQ1Kja1EE9rv0BsEdPtgMk009DVIhGUun5en+2wiIyerbV76Wz79TgPvoU5ioy/0V79WC8
+kEVy4EC2mZisNa1V3eg6pOAzEkkJw6SE3OLhWSXIct9FkLhIcNaT5ifvHGbqIvNXscVvqttslb3
pcOFyPyFM0CGzbWuLAaYLsOs5vT44ICQ1Ji2nXKbI+nvCpFrLCslaadrf5WHZAetpYSBnmpDxgeP
27R+GFaVtfrrkO7kKhY6zkgRMDyJ5CSS/8nKCK6prWt6bulj6KhpnABSeeeJVusxZQaAT+43SwlA
7typ37bmf1s+t1FUvwm7RrMXtjaToaUZcC0TIaTAlNzQLFQKagkpF1vCNueAVCYAp365kkhOIvkf
kZxgLf/QgUa5KTenpPzJSSS/xiT/1/2TT/d5nERyEsmvP8nh+dwfJpGcRPLrT/JvNNBrRPLJqa8D
ycYszcn++Vg5Fcj0/ztUb/l0pb6qU1ctrpZEcozWy4iqeF1InlggMZNcbPLx07v3n/YLvZvb1PCg
RRdHuUG2z1q6QZqCZas16vbN8sVWA0e1ftNX/3LyrrTduWHfRhG2ulI7rDEhKa2BJ75Khlv1XFls
doqSByXJ0hLytstuuHoM750GoMjEXybFjH/K1hegaJNPy5nfbJLDczs/fJ1IHpAmEZOcL0ChjxRy
/9yBza4TdXPD5pvab+4eKl2pstF2yt+GXeGcJCnwyt7Ve25LWpE2tbF4ctlS1uBtSdCHANx5QOnb
klYbsFcO0KpKaYof6k7bUAX8VgnzMknX80OXMEn+BU3DbMla3LIbFR+y0OysJP2e840m+bAR8/na
kNzd/CDxklxUvxkeOykoYpnkCG4HNdByd+h0Hdk07zdN6/tJZU/rmt/7dStvuJgKSp2ybG9ezNdy
5Uo+oxyUNRUiI/SkGOBiOZT+TsDYXceMbIbZ7gc1sWbQCt//41k9fmuUZl1bp4Om6pY79bK2vF0n
h7ESMqWF3aGwhlBZm2kQptPfLzyrvxzfZJLHMvu1IrkHfydeku38npgAaqtIQzUwdIOQ0LLQviH0
1nvR3/vfP4Asl0+XB+ZoTOiJZNhvkI9Ni+gsnU0DJbRwqtpCZS03KpFYU8PZnzzrCk30S/BanNgp
7+PWag+NjR5Bk72ky3YM1KfufpYWgPNWa0bQN5TkDzj1WpH8BX8mYj15mzIaNDX2Uk+gikK1PsJf
rVV0kpcHQk1VAIdeYX7ugzWGSdrtEukUdEMbTHyswX7+TpBOvwN5w0852FIlNgC4KlUFumiLLUJq
qioCTlrxuZ6oDjNU/VNtM8pXN7F/k0nOmzz0tSK5P7sTMcmLNWDEzgG0UNdyGpG99DcBIVO0fvja
3Td6Q131jE7yYqXCW8t3H/XXoxrYHwweoT9TRxytpolb1ZseGqk1gCn0PNBKQ8F+9LQFC/+85wiw
X4/NwIcKUzuYej1o1EV/Z8BdG0dpsPYxU17r1fA/YLsIcCil14rkIUawdCIleaKkkJHU1lAvhUm6
+l4VSQp7FF6cmnqqSvUMvUV5BUqPp+QASlp0NYrbWnEty3QjpNAQTTSCnG49ANpqILj6SdJaAM7r
pJEMXyoJA/7UVS02nPKXj1LNnSo2XZWPqzB4bzj6c8c3meTzicua/HyShyfqMXmQ1lRxAS+N9pak
H1yhzRc10jg10+CYJA/Sh1TSuJnBmmQCOKXCUVO2aA8ttHmIBmgVYPfED/DSZsAudVYZXvbJLfIF
WKkrKg7Ut/6CFNCiUarZWFvHqeZv+gDqWyQ/uzeY5G10e93G5N8SMckdjRKRXhpSSxM2SutSkNkO
aKZBMUlurzaU1Gjy/KFeRiKWHFEPh5zH/KsOqvbNwMyQQ3sB8x9GKvymMlIAfCn9ATS2nPpCw8Hl
L/krA0ARTR+lqnaH9JOaDdJhM5A55NabPCavZMzrRnJiHpNbGeEd5TSxtrrjvVMH0j/pB3ytj2mk
p4Kcm6gf2TQb0voF5wVWKy1EVlK4dh8K+0s1e2hnMqyhTgUfhk31JM35sPMh3XO8M11Xz4Z5JhsQ
4lcw/eOwaa13aNaVy8YSjWaOUm3qK1x93Y5rODBQO99kkqcnMiPcc0nuy8FETPKnqmeUsJn2gQaD
eaGG/2RZXKbU/dBcNH/adlFJ3+Kq1UA3rQU2WuxIdW287fC5+0BPqaZprW5tDgs0Bux3Livs8F2N
fvuuJF0o3FWP7up+eXj3jqSfHK9ftEvZdO6WPPpxqcrBPmkyRYLC61I1PKRSsjeY5CFsfL1I7szh
RExyM1UDyKgfK2gGkPru4/yHJKkLdIkIx7O2gpoEPw0G7LZuBb5/BL100+assekGYP5FNXEc7a9z
tjSI7p8f0pPxZrJ+s3zpp86Y+54+NTUzQPLG/SrZsUyPQ6VrmXbN7HfQETpJk6GrQg+GamifoNpv
Lsk92Pp6kdyEC4mYZI/dRnaJ3lXtO+QCqDkMuwZz1tUBcszPGp1k+/bvRngc2QGVOkDO5RG+QenT
AyRvYAacMkWNMXH+P+5Baacd3TG4REScidOK+1WArk8U2Mf0x6P6bzLJP79eJJd0CknEJCfalqxA
Mkib7g3WLtoYyS9fH5Kz5FQSyUn+yTFbC/a9ViRfSVxu+P8+ySmLlS9RysvLK3fmt3J7ej6tR5iT
JZH8jPbRa0byz/Fb9STeSS4WPQ9f+P371y6cOXn+wl+HfXceOnH9YUi5JJJjb58lKvPs80n+hmWJ
n2THjzz+KWV9n3fFiTbkx//wRVpbFEkBt2eIM+cp4PAfIbkjO18rkutxOfGS/P4GY13jc30dtbTI
4qhcO7Z+OtopHak/HTDo887DPwdaS1LoqZ1Tun1Up8mEWK5oLSXSIETSrVT2zces3HE+NCI5i13U
IiMlTf3vS4988v0nSO7GlteJZEu6bEq8JM9QIYC8AY/eGbRlZg7wrgL00WCAglvzANRUNzIsP7+3
Ldhbs3s3vGPIuACUkqZHOHaWUcjlo9eCdPOhJPmPCpaszLrdDmyUcdj35m6S5P/HktLtD6xMj3la
sO+YNNBkmQtw73x9/Tnxu8u6m5Oiw3674PNmkzyaFa8TyQeiVZhMbCS31TozOB5Q18OSHpZk189A
lrBLAOU1AqCr2tr9psfh+rTRrUyA6VfLfG2uW+eT9o3yAM7SnooVvGm889jJoW1LOwMN5FQ0XFJP
ZknWFe+2GoZR56FdrhRgWqjg8FUMV7h07S3WqjG46sgueYL9MC1ZaVHYtUVvNsmTmfE6kfwlvyZi
ks2+6g5TtWCWZuS8rF+5uwlgnzyAtzQFYKAavqdtdnnD95XWMKC2Vs2M6gPnL0m7GKVHOcDx7Uat
mi96ACsk5XB/LKkHACNlJJMdLW+jgvu+dL025Ak9mrVOT8tph9/1I3jId2u4C1BA0vaGad907WIe
419c3qMrCUyyJUfqkERMMnkCH9VspGP5Qi67YL75xNGIWPJVciCXFhghHZW+1qfwm7L7XzXDT3rv
O207ecGWMPymJPkySuepcyjgifEiIvMDhdi/I0nfAjBWTUr2HlmSIVp14GjhFA/upAMmqRWwRE2u
KywHWeT7vTIC70na3i/bm07ywpepttcoa2jCkryP3krMJPPRY/+7AQU+Vy9gnYpqHcCNABOQWisz
lGkx+LTybVAB1wzj9MliVcQj/JTpKynobm0rZn9J0k+M0lHSpIEOkjaQiXqWU9SSZC0tUl0WSfp4
iCS/oh30JcClB65AI40JDdYyPLV2tooDX2neH1JI+zec5LXPt/so8MhPc4Z2H3Tme1slA8veLRH5
s3cM7TpyR3jMkx7O/O7CC9yiX+npL0Fyc+MP/bwWmmAkM0jqzxKVBObLSxuB5DoCkMN2TmprtZ/V
NTWDTzWCTzQ00ubwuyT5MEG/kr64d6URktYwoBLfrqWPJI21xqsentWunZZNV+vksE5vARm01fDn
XKGDO9TcUwtHqArYHQ9JR7aON0Lzvtkk//K8GqFXG2e0OmfN9Eh1VZJ0+12w8dcZgK8ky5Si/a9G
5odfnRbsvo4Us7umZ5nu352QfpndOH/2mtOtqehC3nv6nfD/SD5vX/kFvvWV2g75YsRf3558XpLk
v2d0i3mRu//aNn+X9TEcWvuZ9r3Zh16Y5OI64chP8gA2qYS2A8WMQKWSOr9mepdqf8ruir+v7/qJ
9/xTPLnjNFfVqK6vIl/9W4yBd4HWMMkqexv9rqd3+oI5irBdGLqM1vygQsB+OQGlNR+guZbJN8/D
wGZaNUgfQhsjVLWPOr3ZJJ97XvTTUYeM7k7DZ6w/cORYcp+TacZLIe+Qzuafv538Ow6saP2H1Aeg
wCXrWUvNLsMmZaspaWaOQcct2uCUq3pWoNJZcMqeGYosC5cU0pDKoS9Ochd+ef6XvpydvOYCYdLt
EZGm8kdvk9nv+NSmuU1AQ9veOx8AFDcYr045XRrnJ0n7mudrbatbcSBEwc9+2mOQnE7bYZPSg/OD
AHftB9oY9FXUSKPOY+q7xw1kc81R273KSPnI3PTgK0n92azVRoluST/TUcsws12y/s7kzQtU0qw1
ygH8qNxAXq0BXA6oijZRKyxAP3RXS8o9elIAYI4avdkk36fic4SEqT5+hlKhMaQ4pt406GTzoGvM
98bGQVNqJ6dM1DE+7XVw3yMFhlkN+UWvvY+/dGJOXvJS86F0611YJ9304t1HL64nX3Wu8vzvHPgO
wzWIU7qUFVPvH4MfFal1VWpLKlaVgNTeny/6O9jaNaikqVHbos4UeLL/3atSBQ6vY7lk6WVHFtr+
4OYvhXxhGqRQ+yovTLKHtsN4jctWYb1mcOeBmeR/GfaF9wxe16jA8esAC1S8QPCtJzehovol827d
b8rwNGAsuQ5mtzaTr/ZnvQeMGrVxHZ0VntY6GRxpFBzxrwAr1WSLMoJTD/l+kKdytauBuSjmqxVo
G7QM05zPNKFfQFj99ivMxaZazpWbnOFNJjmI8s8V05O91q3ekHyGg+fDEmarduDFnYc/zVwepHps
DDv9OIfJYP491hrHb7oXbulaj7dLZwySHo93SN6OEZL2JXeijOVGfhoHvMSMr53pBUL4JvGZdMb9
kqWkUbJ0JxR++LNd0Rb4pjZFT5Uxl8+ka+tbMnMgN6UhzN3CaGkYOQ/o48LjOKK73uQ+ozsvMSbj
fwnSXZOk35MxSzMHXtAPRnUzY4FknBpskyswXYWZIm2EarppPFz1gQOSNIRfdNPKHUUWMlmqjHGp
pdbYlPBNB3TSfpl2bLlmMaZ/mqegW9I6R+4vBfpaqo2U9LAeSyTpZLH7evdNJjnUMe9zxQxjnXWr
Hj0cYbI8bK6VDUgPUPK+c/pwSfWtIfzpkk1bubD9h6s1mJ8UKG94V1sbOpF6y2bG6e5wF9NSbxYU
oaPlJWwXR80vkPcr1NP1lvFaoFTlz4o0Ii/2vJOfzXnNj7Ok2ulzPErfqsb88Sh1K7hLmsuYnfTS
YXP2m9LhQYPYdi4Ptf2k888ueRyT5HOBdpBprM+C5s6Q+bwUNsFwey+rSQBVNXtsaHpglkqQ9o76
QOZAy/F5XRt4f2wH3JGkIwPuSsGnjx3evWnj6uPX5vlL25sZuv9ya8DgSVk25sH7ifTg90Of1Z+1
dEIlU+fTV3d+aoYyeY18ockHzeueDgot3TqvkVOK3X6ub3ReuEyZnytmCj7WrXzcW5K+hUVp3rLu
OF4ka6me0yvyCc0kqSlHJUktrH/WoUUyhEtaDJ7rIVPvG/qOL73scV+u404mWltewgpnKet09vlf
2ZfGtqnoIknhpeGTbiZq3DUVlStgH0WGRwZJUniy/OnzSFrGAF/6qYpRGlsT6ZveNMIi6Vd6vjjJ
m0Oi1nJ0/6i57YhzsxwA5i5vpc4CkL6hHRTv7QKkily2sPv/X8+i3baemdMDuHumfnFv+zc7V6dn
8ueKmcwa64BnzmJsFHMLitphN46MlKSK1tCk0JVLxo3/+YiTg1GW9wy4DaSTJI2hEXZf3jMUvnUv
Y09eaVihntO+sllVSnFLkr6B7qpud+AAbYNx/2JslEjycEobG8lzOL0taS4jVzPwBgWNvQsAw4Vn
RfRS2P+f5IypXsnj8rlxBOeSvDqf0UoQ9jwx39hIfkhxY6MTRg3gsPtWyNIySdJjt0xRhQWaoKsk
/QmcS0/vMOkrNr1PseuSgkqw5yVIDsmZ8e4LfOVWtuXsjB6yLol11+0jWsXgULyiKyKUl7Ty86t4
4yXpCxbOY8wKm2fHGiDzIUmab9i/XozkV2z2S7Yv27hxy+7de44ePXr0zCW/q38fPXr08M6Ny9du
3b1548/7hieR/IzmbUyJ/18bxQZj44HN0PGzdY1kaIonklSbdvSQNMo6bIZvDzDGsreMvDC9MHPy
VF7aS19wKLApua9L6h5rCOGzSJ7Byhf5yp+zy7r+YORZbg+tJGkW40Jtz6GtOeSQVMnlS3qbC0jy
5uhMpk+wYbuGTm1NaY5KOpF8RbyRnBT99I9JbsSJ54mZzdfrxn8z6TvdtJFseYcFxrt8bKCC+5Lj
bvKUfwfOc0hzW5J0gRmSLmc3rTC9ZVHA1+Z0vdmgO/nxUT+OyNKT4mFSdzbc8v7+BUkOzl7shcry
fsNYY6MIDyTdS2OiriQtZqicc0Tv+7a9nzQWTEfzON/VclMhzWD6RCYbR1cxSQtMnnckBSuJ5MRP
csvnZEI5NrZNEWv8gR68Y1NV/07mekvSBXecPd3J8bdGY7bHdYfVtJei9JHT89IyRHWo3DQTaQ5s
YYp03FzA0o9fJTVhk/QtSwdS7wVJnmOz6z1PjbTL4SfJN0dGvpWCa/MxJY11so7K4xT9YejPt1Jo
NTI/mkzOSuZke7SYkVv4SJJ0Zg6zpCF4B0hXNgQmkZzoSe7wnHWzduDwdrPRSzcujZbiZd8SSdIf
nbw983a4J8nn/bJtT9qO9gVwmSz5eUPKjtcUOOaSpMZsn8AiScuZLB0qdvjaUL8XI9mSL98LVkrv
TbbhE5s6OC5xduk/siglniTLIkm3KasPWfZd789bdbDFT12xT/u71riYeDegg5nC+6TdNAxM47Q6
NOxEa1N+lkph9fA6c8rtmSnHkkhOPCR3N9ZHn9meHDns/9IXDl/cpsFgwwX01sVI96Kddt/fHvJI
0o0eZ551auwk74hYun1eC+tjD+TYovUpgHoPlS2/JKmC3fGImjMRRhkTKXDZ2JSJenTdIik0Z2qt
ccDZEYp+zUJJwS0wDcfupySSEz3JXeIzdcuj53eJneQa7o9e+CIXVi3eFy7pjs+S45J2HZMkXVwY
2qN6+7m+B44evR/R9ccahT49oZAZEc/q+RPSXx28322xNPRYYcMK8tNg/3kHkvTk14HkxBWFESvJ
J019E+cvnERy4iG5p+FilbhJbu98I4nkJJL/P8ndXsRT8v+2oDgn+a5rWyWRnETy/yf581dN3bK5
SpyTPClR5UxOIjlxktzLtij2T9vHDeKc5JKFX03m/GVJJP8X7MmvWEupccu4JvkCg15NZr26CUdy
3szglSaJ5LgnueGrZtauXjeuSZ7AsVeTWapWwpF84gpp1TuJ5LgnuTzhr3aREs3imuQPs7+izEz1
E4zk9JZ9jp9oaa4kkuOc5KJur3iRXB/HMcmW1K1fTaSfuUsCkVxl1zmFS9KYJJLjnORcGV/1j9kq
jkk+82z34BdrS1gUryRn7//DqJTgtrtpo4thunxM13umSSI5zkn2yPuKF/GIa5KXPd/v9P+32m4P
4pFkh2Ehkg5ntxus0djfu+vQJVoO2iSS44jkIMq94kVSNYljkoc4hL2SxCNxp1zERvI8netYbL7C
/XQ9NxV0dslNPTieRHKck3w6Mo/JPx2T49p20TT/KwkMLeV6OR5Jdrp3PAWYOx0+vzS3+wI/SQq6
6mtKIjmuSV7Hq/rmZKoTxySXebULdOYrxeeYnCF9xGaah+FB7derSdLKSDyQPJK5r3iRbBXjmOSc
7V5BnH9b6gTHK8lRW4awrVx/6JhEcjyQ3ORVp1PyjGuSM/f5p7Ku+g5KQ4O4zNz5HJIHqENyHYjt
iJtDEsn/LslvO7zqiOVZLo5J9hz+jwRdHp4f7EqvtCjhSE7d0sE8+aMoO+qOGjb862Fjpi72KZ5E
8r9K8h1771e9iId3XJM84h+IudLczlx1yOqrcfwLv6wHUScfW0si+d8leSVDX/EawdSPa5K/fHkp
OzycvrwaD7/wy5Kcv2aVUrlyFyxYqGDqJJL/VZIHxpoH6GXaDdrFMclFX97N/leXAmfi5RdO8upM
LCSXzhH2itf4g35xbU9+72Vl3PfMdUtJJP+XSH5ifuWwopXMimOSh2V9WRmdXE4rieT/FMl7Wf+q
1xjCT3FM8s+cfTkR+80TlETyf4vk/UWDXp28S3FMckjKqS8lIbxEsdAkkv97vnCv2nJkVxyTrCYf
vpSEtf/yWyKJ5P8Eyb/TNs5JXpX8pcbYGuWVACQXvPQvtJOuEa5GhS/FVcufRHJsrf+/nfglFpLD
yl9/CQE3HZYlBMlF/xV5jSMGzrh7ixZ+EZKDL/y2cdaMG68byeFn/qliGZ7RIzTOSX65tt3JP5GQ
fO/33478tnHH8ulfD/98+ovJ+zvS2+huvJMcdP3X2YP6fPRh9RI5rLV1Pk/UJH9bpETFiu81b9Ww
WauPPqhUJKtn9uxOvP0Pr7DpX14X+Yckh++NpPeqt1/Cknx60ajOzWsUSWMXTTl9wQyNXSJOOBiv
JPt+UbOAXQyNum+iJjl9bJMAh4B/doXqT5W4SyCSP8Wl8eagBPiFYyH5jlPEr2qXu0qTFh0/Llkq
O24vGMF+3t528sL4JNnf+i5IVfy9hh069h40bfnPf94uSuvETHKgHTX7dfy8Y/O6DTp2/GLsvJXb
dh9bBCf/0QW20kiJgOTwzAD25RdfSwwkQ41eo+buuq8HjyPegzi+qMT6kR6h8UiyxY3Oc3efjV5y
piZVEjPJT+xjhirvhkP/6AJejicSA8lbcF/bvihgV3na6YQmWalZ9XS3RRD4wvqatX0Qr9pF+Vje
AV3Il6i1i4yMiIXko/9E/ko6Kz5JfparcVWaStr0DgDlht9KWJLLGLUJo7ZfrbVPX6CFZbKSnC9e
Se5B7xgdB5MlUZOcz6iRJ0l6fPnsod27Dy+GaRs33ntZ8dfTJL8ZbyQ/mt84nbt3l+/2hETff+io
5RjGonv4vj65AUyVZl5OQJKb0FrS+YO+vr6+O7evXbNmz5EJ4LN+3vgnLyJymJVkl3gleUmU+uW3
jvmunT9/zjAvnD//uGVwYiU5sCiFWjWsW6NUvuzpnKPN+rq9rPj6TFU8kbyimqvtNot+Ni8qzIVx
yopHhGfGgR7FzAC5ZyYQyeFHK1GwU+M8sc2rX8jQvdfW2y8+Sb7v5Pj7Vp8JAz+qWvSpIq8nEifJ
H7s+exGz+0tKn0KV8DgjOehhlH3BLQFyRNxptSiWFt+KzoC57vjfbIOH/5axnyTDI2FI9o3xA0cY
I9zyxFxECl7bIpuT8zt9LkbuCs1i7X8svki+Nm+gd62n7tslZdpsGSGf90d3EifJVQHSl6/ZoGXv
LweOXLhqx+69J67d/MmZ2g8exiriyeLPvIvk+6DLqhg2rrnmHHHxJa0k/5EnytStNxQvmHZwHjCX
cgH4NOodpsfTDOBS6esDNm/r+U/XQ40vkn8w20OZ7mOXHrx+4fr9R5IUcD9Qo2FHSMzTl2azZcno
Y3sOf+lVwbpvTXyRHLmw6FiweqshMzcdvRAoSQEusSjPEe3y+mlzfwtIMJJ1+4Aplpfc15hi9Wiz
TPKwfUmPgdG5nWTKcFxxR7JW5opYot5vwj5PPt6+WwJch0+rZMYUpbTqUtxuhfzav5IbgEfTudcl
qTrDE4ZkhU4nWczZQ1AGYiaCDGoFFB04ffHwvFDUOgTXjsBqfHyRPCiNZxHou+vK0/aVBlR+hoy7
w403pFvtVWEJRLIWEsv68l2IrTTznepg59158Nc9qjhCsolRbAdzTZn+VlySrNFFbFPQD8jTDzOc
flgUM+3Dzr9F6shMb3WtjIQeGVvBEXDoekon/6lZ8V+Y8ZWmTSwdOxMzk2RLeMuoZB861hXHxdap
V1wblGPTk/vhHMsi5AhSxy5iRSqANM4A+X9JIJL7xWrudqDftPwpyn62IOpjGfAO1LfGw90cnh4a
R8xjZ5syxVGgXOSMr2tZYwn6iol5f9sB9S13itplMNcLPmgXmVcoyIXlkXe8vjqA2QOX0AQi+bIp
1tDIufDthO92RFXhxkLNCHxOemM2BpOsERkF4pHkslSPpeeC2L0/wrqAudX2e7KcmV/FhLlbSIKQ
3DqakmmbuUZmru7pF6Uro6KYwTqaIszHP9vH0YgcleSw1mX8JOk7kvmrE0Bv+RXF067yoyJ4BERO
9aPZj0+Negsgy5zbCUPyIlLGNg9eavUKqLE5wkZholaUN3N4LzzuS9IE258idzySnDWmDVzSNvhT
D3du3nsy6leyNINSEU4Kh8tAeb+EIPn9KObkiLY9yqzVY4F15zqeUvh3Jjc1uCBJt7Kk+EtxTrIs
HXL/LqkNdaRzDgB17jz8AGiQB1Zb+8+L6f94tQ6Ae3iCkNyVqrF1HBuZHfy8sacG7tH06fDS1Fh8
XbprBuwKJ4eg+CPZTGzOekfgo+L2AOnHRY67o+HDKLcWNtqRFglBcs3YSB4UmwdUIYo99drY6EAX
SWpi3qi4Jrlt91X3tLbEI6kynSV1BCDj1Gv5geQwzNp/AAVjyJgFVZy9E2ZMrh67K2SURETpVt6S
dNmOp/J4HAIc2oSoNcDOx6MGhccfyY58G0vPw1GgeM+mGB1z5L3oXOx0oI8l/kluE5vTT+loJJuW
S9JPsO/pfl3wlKSpqxXnJJ/uX8Dx/XnhkvIxSdKjTBEeZm0LQERuglbEDBHZAOdvP0kYkovE+p62
ZI/4cTHhvlcah/3T5qLudsAgHXvazhgPJOeO9flbY73nrJXcIIfV7d6LDDdjYMG8hJjxvRuj30kT
rlGdU8tKUtNYcoRvhCNxzEYU7eJQvzwPJHkyTZK2mG2qfE4AW/h0U2IWdloP8RTtEJPkwlHnFhHt
N8g5d34jp2o5cS4J2QP1HjEzQ/b79AMKSh8AqYLjleRGlIil55RoA1z5UEnaEouZK6BA3Pk7/T8r
XLoY/QaCJ2B6p8si34WA/V0pPAUxY+0DnBgTfyRbWzYmSpLmmKP+rt9HmLJikjwGh6CEIrkgsaWx
a41rx7Tlek1tb00H98sDx1g1005kkX4F/uUsIs8jeRrmWKxwnxqrNu/17F/O3rbY/iFFY2oSHckb
/yQfgacdHx+mBMgz5Kyk+x/jAFuk/cRWcrIUjeOd5AK29/VPeY1ajV07RnEHaBxLXrrqvKeEIrk6
TWN2O2ZPq4EtDA0jZ2Zgxmq4GIu8bnhKqgDkssQnyVdNsT06OQD7Pvf9d4xpmRtoIOmGObaOQ/CM
f5KVnbFPdRuOnXePLX8e2f3juGYpAZgmTcc5LLantEK8k1yZ9rbp/S/jv111Tgr2InPkaFf6aRFn
7ZmcYCR3j214qm5T8UuNmJgPB1f6diWXYiU5j81kZ0Q/HQuJF5JViYIxJpgHwBUKlLG9C1Mb+uXl
xEJyd7JH/3VuuVN9R/vI93aawgyVesTqIFuLSvFFsu+MqX9bX11Pz+n8k0XaB78ig24viqa9tyHl
/Whz65XxSPIGTDFSJm01QGi97Pyi8pBsdTHavRt7itNO5NbNFn1TAs2lkwOyMiR+SN4OM2NXLqK0
+9IoUsYism9EDo0bE+7GG8mHTdEnmmE1ot9uxQst6SJ1jGVmKOUmso7S5cmP45Lkv07sMPLGLrGu
pEYGnU6INCcrPYxJwWdRJJx2jLbQu/kd7PbHH8mBKWKoFzczAaT8rLW3G5jqnFVueuYl1oJUVfHW
PuPvYL4pdyBjaLyQrPdxe0rrPOEA0esInpQ6Els6hI+tS4RXuzrEjfoZe5aABqS5Fn26lyNttrfz
pnMBU6HeB6QmdJJ6USYWfQ82WDcvtDHHQcRIbNrFRZgn/V3fzjZFCs0De63bD1KBa7RQIUtV3KKs
+m0GqGKJN5I1AlP0ZHuh7+FYzhb5W+83KcieKelidXF64MRg6UOsKt5MM7Ayfkg+60rOaD5i4d7G
fM+WSyYXHJOa4xWLyDzWEPscgN2xeCP5hCulIx1w5tnjZSjEgdfuGjP+snwjjYjNw7cTyWwjowfg
eD5OSfY3tKCeUCgsNAsRqQy+JNISNNBqt48EuXfkqokk3asDsDb+SA7Ki8vWqH0+g1E6N+MT70qf
TPaTpD9gl0esasMPsE+67AJu4CXNioun8Bn5LmZD0SgjnKU3eE9Z9lvA4bHtPny/cd/Nl+CM1DK2
AW4f1tDFWSkgVg+OuCFZ35l4yxpLHTrCDtPTKrwHq6Tvn/JnkKSLzpEG9AluEBdvkkiSf8000Gpn
g2GngSJGj+9NUcIvBjzlN3a8ItSIPg8YD6S9EW8k63hyXGZF4BfUBZpHh3EyTgHZY1tq1UdkM+Z9
dL2wYL8UkhHYEj8kazjkPBfxqTcQzcttHfhLw2Mx4qoOGaxWzysZ4sYd9Vk5iCaYcOnxe6Dl2vel
IcZq73U4Il2JJVinDskiiTiTEuKgIHQEyT+4Vjcu1ggwtQCrMrPDBYgoNjkOikARKyoBY5yg3lNu
tpZixvwpvkjWL2mhqtVV0PctKPOUP3olqqoKDWJKO+9IT0m64WazbIyBWF/ncUGyxkJ668vrYTMg
T7Sjg0gnaRvEcLk5YGK0bXsW4Hoyvkh+0KE0gMkal/P0wugy3IIlvU2zpw5shnFRPo4B0t2MK5J9
zP2tVsBvwQmg1FVJtyc6AZGFXX+GM2Wg5z3J8mfPNJBqWoy38X4z8EP8kay+gEOreb7bvykBNH9q
lea0HfP0BVnDYzHWOZ+z8mA6ZShYqYEd8UPywzYNnaHy1L1/7RqaCTCeqohWghqS/F3p9ZS8sHdJ
FzH3DysPlLHEE8lN4K227hEz0lYxfs/akjQKl+jqhX82ikf99YPfBmrEFcmNZkfscmaMN1BleO+m
Bazzj44RdgE4NApwKlgqLWDXMrZkiD2B9DfjjeQpkDtd5Ix/+VPntMP9kf62i5kVYwn0t75GPLEW
IZwElA+PF5IbwKoSUU0VP0c9eslsmOkaku0pa8qgaF/xtLuxHhEfJP8IvKegAwsXb0gJkDb62/h7
60rwLbenbBPNsNsbbcdvDsCSOLddhDswKnRoZIRnqqhx4Bfg6ChSGMZ7jx6xv9eCCoLxeMYHyeeS
wRD/iUXswb0Y0OGpIdmJXpKqUvgp14qHGchu00MG2+IQHycHNsYHyb7AivBlVR2B9GmBDNGIHYzz
HUna8zSna0zRTeOzAKfT8UFyeF7AyfjJhpbh6SfovAd5ja/QC9O2KAdGEWOSMhyias5xRLI8GCLZ
k9HTFexoXjhq6r2LsGsg3tend3Hh2fGT201gWhs/JFu8rVPhwEvXw+oAyaLNqANKkuqmpFOpn7LD
WZpChBv+kywmq6LdC/COD5KrAyMkhVw6f+2eG2B/KspRP3dbkrj3SRXVsX69G9miWe9CiwCVQuOB
5FUA/GpV8h2AdFHu7EIeHK3OnA+ykTwikMcy0ozX0245QbmAJnFOcgqmKQgWbzFDJhZ3zB1Fgw82
s/hrikgjgQX/V2rmgHgheTmAs+HNuwGAUlHS4AY3gKWSpJHYb4oKck+iBgB+Y3ssr7gBB+Ke5KNA
RFb3yfCU5tgZlysR06OGkfuXu+D+VMjkLtO/7wAVG8nhhQCYKylQCn8b4N2Ixd0z2aMkiZwOLmMN
1cO/JeSPmaVqI0SulcQhySN1H2amA5atvifLqShMZmPIZPJop/n/Jmq9kzxufH5jWeMz4vB8JEln
2yUHKB4xaIXUhh5WAxIk2xn5Z+kM+aJ8rTspc0X5QerHPcmdo+aiq2MocpEOOqttlAc3jmr0DOlp
InOM2N+OQMpLcU6yD+QD+kvD7T84dd8lOUAhq6vAMndMg209H5UCyDnx1yPbBqSHIrGpEQ2ADLfi
mORM9NN9yAJw4en+Zei3lmQPc0EsBvDINgJg8D++q49ylWzayLtwUS+v97y8vN9Z82ySp+OcMVI5
fmIo8Dmtz9jR0vCBYe3eYQ84zbBO5s54Q6Gob3PNtkWHHzCD6R/PRrYWzv9BixrFC5f2qlTBy7ts
vdBnkBzgDpDT+un39Ia7egfrI/hjcnIZo10rIAPUuyIp3PddKHQuxjVveQD5/rH/xZpCBWu2eL9Y
4TJelct7eZdp9iySy1G2HlBDs93+1953h0V1dV+vKQxVKYoFFcXEGjHYG2qMxhqV2BWxxl7e2Gvs
FRvGHkti7LHGJPZYYnujRmPBjmhEbFgQBCkz+/vj9joX8Ps9r8/D+Qtm7ty758665+yz99prA16H
1jK7Vbfhp2JOLw4B8gjJ0W5A09Z8ocYg1RqMG24AQrLNv1hbPrhVeOOQ4FqhDUND69cYoI7kQEyj
hywZQcE7roKx14CGAOCnE/zJDAYAtko1OfrQj8vXbjp42WjV6k0ZlaajJpIzAjHQH96omBjW/G+i
N+3yugKAtcehk6cWN7MAQ5jY4t38gFcIUHPHY6K48W5AI6meSOq3bLdiewkAPsz3djw8u2v58o0/
n7tjlCLXVmb5PQ0kb4PFC7BwP/JvYWyPiJGnH1zaFWaC+xluIYHr01aAW4exnf0AdFFrIPAjAExg
zX9wesfy5Zt2nI8x6jo3kpn8Qh3JV4Afy8KEQnesAFCDLo0Xf6qGoMay1wR8R6c6+wHI318rmR4J
gONQZsb++fPylZt2XYg1anRZqc3mDFUkl8BseiBtWyQOcw5PZ2WsP9e7VCHAYzJR8onZrcq78Fd0
/ajl+D23nYY/V8pu7hpNJG+C+Q+gLVwXAAUyiejZRQvMhfjS9U3s8Q0AVEzpCgDe3gBsi7VCbecA
uI4gurt+YA0htmf2q9VrxRmnQvn2wlLDy2jNyfXQsChEKiEzpZ8ryXjqR6wA6pF9JqsoWHyb6lUX
AqgbS45ba/tVE/pzm/OH9vn+L6ez3jtv6aWr2tWRPAZ5UgujItANANCSaAuA4aN9AHgIJexEiYUA
/EJEjrhYnfDEWACNH5P92sqvqwgmmAt+1u/HC053WI9l/QAaa83JMyiWOUIZSmuAPlQKMKsExsUj
Bi5lol+ta+mqpogX2HPaGd1qo6qyD5zUQrI9GG2vA/OBuoBHOhHZawLVE8YVAGCrvThRvC1sQnTw
MzMAWNpo6zmNRNGuj8+PKKHaiqDRiI263YR+lX2ghYaffBRY7ydW8Iqe3BVAgaZuAFByPjNXX83L
KWE+jmpVs9HowxozVgd4TM84M7SImsmuTUdu1XVHN8g+0EXdu8gohJ7khpFsdWRILFF9AN0oLebM
JcnKPRFwXrR3zh5q8Vye8Ue/QmpGe7Qas1NXCSFSRWNRBckfYQid02rhEoH6jjIoHgTgW62J6TXN
3IvpmTN15B3h/uVuzYqp8/KDT2gheQ8sN08DBwCAYXeuAlCb6N0/F+4KU/+7kgAY/tiTHYuXbNOe
Kvq+aOCRfreyXnu9UtO0o7dNZcc200ByTQQ9NgmrKxElNgNgTUyPPn6GA979ILjwm1nt8duTykXS
r1bQM7ncnFjNj9eUHdtJHck7gAvJQCXA1KwUmhLRQwuAporzxecBYHqlZ/HeW1Tqj6DyGf8tpWOz
OWRhnOYZSssOHqqO5GAMcHwJPuQiHZNRegNQB9Cs731Ko2jwHbcyz79w0nExXy8N2eVp8iN/00Jy
U4TRFmA9AOAAET3JBzC0IPFg1u6ZTvc+l6jsytlY8l8nhptqbVA/wVub7MhQdSQfBtafEzm3RM+Z
ZUgsp5TyKcxdACe9l0/R0si+2HjQicnmehps1WcmlWVEBcktUIX+BQD0o2KYTESToEpSGQQA+q3P
e0dQx++6mvdvdGK07Yvf1U9wV35kD3UkV8aAFewRyo3yPPj4AYAncEz9MuNP7t6w+v5SS4nyTruH
un8Ta2Sa0ERyggX7aA5gAWoxK1pvAIBst3yZWRw2OfFxKaap/dtar4Jd6jvve1p5u5qzvwfGkNwW
pTJ2AiV5WlNKdbibpW170prBtLqlymMpGifSqPXtuDnxJSyNTE5NrqXaM309DCH5phUraDUAKzq9
sGADUVoAAFRRgMwNgF7DlPuN3h7L+2bV4DsBbo2c3+cGh9TO8Z1BJFdFfQ8AMMNd+XMtZz/bBFBd
QVbfS+uQeIro4U/duud1bqlHV2WfimcW+VF7NZC8Dj7p1IPx6IELRBeZH1S6LDmqMWc5o4/ka99S
+6XxexyHp9TyM9DDN2SJ0jvqKz+otiqSn9iwkJYDg1GMiQSm1IX7QkDM9HZEAEMSbOCLKtXG1DG0
YwJR5r4pbWoZMLnaaqWj3REqDpESyX2Q/98IAMu6ov424DbRT+oSZMxiPlDb5rSgydRztj09fe/Y
tlUMGF17Q6ZTJw7d1JEcAsACoLpa28C1AGoCrt01mmKk9Hr02kGvhnvVftXHUFdncwe5duNqxTF/
aSC5MzrQBleY+jREZeAoUSv4lQRwSxmkAsy6O+ITD2joxJQndLAGjhwtZMjy4qtk9zhD0bHuK1Uk
T4L7K5qMAgcBtIgnoqGw7F8HFBKYWjQVaJm5AlASn/jxw683vK+Rg/ZUMp/b52fI5FLrZXNTSl61
6U2B5FduaBgIABdnIagzgomoHkoBKCSz6SRzktUaJo9rmU5r8zxNeeHYWMZ6a0seQ0aXlytbv1DE
EUarI7khYBoNVGmiJl/5O9B9NhD+pboe22NKekLJ4/I23eGV32iHco/h0iBpPcURSepIdgRg3lQT
0JdJxByjI8AsC2RiTklMWZ8TBcPfCk5KO0EnavrM6ulvMmp58CnJOfYpDpigiuSP0JVoLAIdA6yA
39fnD5kQSQPxaTPhl9hrQeUUqqPq4HHjjH/yczv9EeK/oEMBw+3gq16QnGOL4oBINSRvBABLBBCz
GhYfTCW6CXQEFKt2UxS2ArigYfLvpgGOtLabaXf5wBXNDQME9aUc7cWKAzaoI7kF0PsgEFlGQY0l
opTKXnEj4XorWM45Y8b0+ZS6uECFA9uLIgujwDJRdDdB4Vz4a2SrTwLFAGAO7QWA/2YE49MzCmLn
eCAQ+arqqsq+uJ8Yf4QuNrONutXdnAXDTWHRsrSxdHyvhuS/gFYHbw9DINHTJV6AxQ+eR+lT9PuG
XwRjvFDhOcWb9NJPV648IDrzhdukmx1MWTDZ3DFGluKVjm1qSG4LoPCBk8CDA2BmiknwnCtKfrEj
1oTuAKwaiSSHY6VlFdGx2p6zrzTNCkAsvcXhps8U759SR3Iv+Cf9APNNq3pdTSZ1wCDyV9VjI8rc
UrTY5pg2yOIovkvuDIjnEQ0kDwSA1sBWJiUZtwbYtQxWD0lh4U1XfBWEvhV1+yR3gmX/o86WnnGr
/bJouLU7v12wBynePaCG5KMcpoKqtBs240tfAHBP9cDcWfDl7cHfRDsAWIdrpaGX5tkSG2btE7c4
bxZNtvXn17g05WR+Vg3JBYCOL+gkEPsIgL+d0gqg52goyFbz4DpXNaHGjP71r9+8f7OJbdjTue5Z
NNptGE+GfqZMUjxUR/KDvofpUaP+MZo+WsLGZ3azRhBujc+ypFluyPpo8JdW5EJknxTJFQHLunvA
30S+AJJLoB71QLmKPMeM2dJ4nQR2N8JnehnnZ/GOAi2vXq6WDcPzT2OzPAeU711UQ3LGxCpiR9wG
AC73gD2/A2zNcRPAO+LgdKYjwzcaaZzMZJ8Oty5WyobJBSMzRBkj6YhRQ3I/cx8H0Z/AFSoIdCfa
Cfy3MwJE8hFERI4gtBsMAF3V6yz+bV885qlnj9jjpbNhdPGV7MK9SPleOul1ljyvIh7Kj5caNYVJ
i722lkO2hiXsHyKif5VLZVt1JL80wX0b/Q3EEtUCApYCx6k8InqLpUPirQgaBLxcBYTq8cXSUj37
DbVkz/ISUanMWqYY0VpcuEdnGiEoclDLsmxJXPU9QHQyL50V38EKwIJq2762AqZmh9R8jHevXUb0
MWfP5FIr04lEvY6FEacau7ATEZ0F/qR6wFai5gih6mhnlTbWe2dF3gIoUxLIO0EjV/A23jypqyl7
RpffaFffR1lIF8nH1ZuhMCMOOKzy8kX3Pdm8tQBg6p9ENEf5eg91JDvCCpwjugicI5oOlCuI5pRs
wqJZcBOiTXctAOC6P2m4CV7j7mt9n+SI7h2zbziCTrLlJrLxQJPVSaOYKtj0k0UAoNMK2N5RFbTm
F8YpQQBga7p8eH4AIT8oAmiJYUOb5sDkMueJntuUr7/W4sIR3QZ20DjgPj0zYwkFo/fHsl58kQBQ
8+VwG2CbohalfVl/Zq0cGF35KtF9JcK89ZG8Sq+U+5pa7unZIlspE3Iyih5R2YOIYlMqTPunwBFi
RJDNF+kYcEpaj7zjm7omAC6fFARgqr1ClfBxsXT5nBluGpB4WeUMz7SRPJbVdrtoQkS9ejHDUIRo
BmzCsmzfz/56o5v6AShcq7nE2btYKIcmW0Ykn1T5Iu+0kfwU2EQ3qwwlWgXzEyqLAfXQUjb9NbIA
nv85Fu4J+C+W3+vMMz5lc2a0dXr6b8pXC+kj+UugqyaSL/AuHRERPUwicixtlQ85Hp1iJrrIX5ui
h+Q0pk1caaYCfDG80l9ZZFyFGvDl+00iYIZ4Yk6LIaJrMy05tzxg++kQBSxIG8ljWCR3QplMIgpB
S6JYs1gb9xmwelE18S+/kYgo9T4R/T3F7Jdjk4N+O6LwBJk9pzqSX/N50s9RjygQoyNQTkn7BeDS
43hHG1Caj0m9iSOi08Mtvjk2utwfuwPlr1XURbLDF+r6luzvABH34KA1756kyfkKak9ZRQxbmnf5
KXkMYJoeklPdMIWIdvnDdIFoACoSleLqRFg6hRvmJO8bG8YFBs3h/LqXGYqwt5s76wC5gKthyxtf
l4fhzDpIbsroW8S7MjfSBdOIKFSszfoYOE0U/11NBsz+Ie53iehdCMLT1rVx9da0o5DVsMlhN7rJ
XvHTQXIiE1YkemzGGqIATJkNmyw7tBfoMtwVsLRb1xhAaaZmOfkj9MuMauGnbVkx43N19ztyFvin
uki+qEdpsgeKM5IvAgCXGWZtW9rdtotIGK3HuOhaWn1fT5siKquJ5JOMKuG7QviKiBqgOVEHTj6J
GcO5IMzdKUEMY+TjJ0ImDfWaaRsesC5NREzwXPqpruGe0zd+LP24DpKLI286EQ1B0XQiessohKyB
SYj2ngbq24mILk8pAwA17rHfBs3qa5tcYku6iJbgs0I/TJB3/tri0l2VDpKvMkRgohWwJhCZEbkL
eKTgW3rFPR2XBzDV+LwQ4LmBWDpMRDlto0v/Yp8u/Fd4WaB+tGhpVEEFPVkbyRPgYoIG54sOAcjD
e0HTAUDbTPNcCQ22ZCKd1w9vWAadkvDoNukhOQrAz0RbmdBmJfQiWgibODPdRuBZ27fUrZG00Ks4
G5t87alveeVYonQBnKsoY4T+hrb03kjx+llaB8muwDGiODemei+Wqc9MchfF8M8LzNnEM3srMMVk
8S76Jtd5xGU1weSI3vXXn+0qHJwq3qpW00HyfsDykIioMWoSEbAiRlSfzowZDBf4dWRJAJ9OsCGI
iG6Z9Y1umkD0wof/+Y/Ri3D9abnmn2PFAekwXSQ3R8NgFfopM/qJg+IZxfRnqq1ERPaPODtPEVFi
PScuxuzNZYT/DuohuR+A6kT1mG4tRTCY6L9SHaGeACTN2G9d10x7SsbnSZJMTTMHEW12EptptK+P
VUIg0kByBoBJRDPg/ZqI6AzwJ5NMExQNrwCw8VP0k9PqjFfpaJkq+V6diIhWOlm4vzwQIXyp5jpI
/p5lVCS6YiERWbDJ4Stv8zIeQN63RJT+Uzkg8nL73zmmp/bo4SAiGsf9O0yNSy8f7feFCV+rtx6S
k1yxaDw8Nbokl0ETQURmp/41l0npg4zsz7OCTizNM2El77xe1kNyU3gADy8wfnuaK2YQZeSXFMOO
ADQE1T/RDxOzmqpszZg/w4Cf4syNC93ZkvuzjTaSHwNoQRnFWV3nHWy8ZSdMvBrOLQBy2We7/rpb
gd1fsXNLIKMRO8yZyY128QTLXjpIHg0gnMmnxBBRcaygqvKyosEoYGU75jhmshq1b7x0r85G/+NY
z78cw8kNd5Z1D9vFt1Abr4fk48C5jfK0Op+rAdYH8tuqZrpXLMkGQlnyfrlUzeySnIN4cjjrLsfq
Ibk4wk34sT0C0ojoGBPoaCVB7mhAtYkL/aVvAKc4ecIMEZkjw2nix232vrKiQLg6kmMB+GZGM8kT
ojWwpBIRpecXIrRXgErII2VXOWHUc8x0JlJlYlf+1OLOTPZcvJvdZn+jg+QIMDTOHqhMRBkBmEfD
ECS9p6NRtTP/m3OkPf2Ln5IcZmH/TXBKlPNZt5HdxM/WQ/JCuKWfA46qInkXcOtrToAqQX+95UNo
Vz0B2LgCr7QAp1B2WXSvFQBxM2clku1WRIWgtSsjXzmXmUkXwlUEgLkI8OZ03yRjqP6qkC7h8EZw
/y1wvruu/nC1L8CKSKkj+QYAXKIZcznueFFuTsvH8Z7vAQuFRojM6KYfCcyUMHj5+XySAbpZ/Hde
YDweTSR/CQvwhKgsxjDJsSr0HSzSmtfpKH5Grs2nP9XxcTx7HQCCUlRf50a3ejLDBoBJNGohuSMq
01sXtqGZfHyNUrQBpnjee9IZfDXpOjOAgjzfb4iBeMtnB6I8IW7Dq0TyNeDgZJhgfsg498D3RNdE
tZ5Ei+A5CAFKknmGPg2ZvysvygPAQC5hHGcgXuQ/5c9aYFGojuQzgE2U8ongkHzNlO8hl+YDDnaG
i5jR+NZb97K81m98SUD0EFw3Eg+fdawSwOrZqiO5HMJM2E7pHtjNnvT4Gbl+zxQEUm1pn6cEfRoO
vwbdLQJAQNwpA0YHRh0pD7YOTwvJZdGHqJKGDnIdRNAVrpFBmH5ShqMwPfHIG7m5qbl4smQBdBq8
rwFYr2VqI3kLcPccUH8M67+bUIGIgsQCSHNh/YepW5cOJ+V604RnrtmWafkF9niwEcNdWrkAg55r
Inkz0BtuPFexEl8e8BdfuvUQOPTYWyLG4MS5WCxM3W22TvQVpH8DjZjs2soCjHmpjeSCGB2AcUTR
i+wsG3tM2sziUuX02ShAG4Abopd+1r8sb2Qj9Ng2Ko/pAJfQ8DFitEdLEzA3SRvJbz3xA1FbdVns
DHfMIYcvuxL5617JN4VfLHYT0RRewSxG7eDAwMDAwKDg4ODg4ODiAp+nnjaSJ8GHyJcVms6wIQy4
RDSKCeVyEQpXqq/SMtCJm8A1+rzrXiuT6FEA7xB2UAFBQGBgYGBgueDg4ODgcsLEaT2uheT1wA0X
oW9oPpWOFv8Ch2mphG7mxE3gCoj/cWlMRDF+/GaiiYo3z5hcPjg4ODi4rOCUuv6theRMK5bUFZTG
VgMIVtzU7wFK8pL0UPqPvtFcGO+gqT0RXXSpT5qkSLgXCQwMDAz8JDg4ODi4jKfg59/URPIx4DzR
UJnOurCpPkLUlOmWGuds/1Omab+RY7+NKuFNRPQE7e7GP46NvX93nQoepJGSzKT4e6c2L5u3qoiX
NpLboybRZ6zC2hngqBemEMX3FpVfLoGZDqgwniKc8BL8a3QdNmrCjEFMBrkXTsc9+ff+vXsxKls+
WSfbtPgnN7ZsWrCkL5ZoIXkBrNQGH7HZ3HdQKbiPBg6QvRJChDKEVk5MLliz2/BRE2b2YkLwbS2X
4p4+uB8Te1tlyyfTRHz36PH1zT8tWBqBn7SQfB3YOwmenHzNRI4iJd/dEXWU4MZJga9LQJ2eI0ZN
nNmeKTyu6XEj7un9BzGxN1RqSmSaiKlxj6I3rVu4rBV+0UTyfNjeEs2Fq5pUzwy4JhOtYBQydxlN
MnqzWNMZV2937DsrMmrRopnjevXo0CmsVdsBQzp0aeIToI3k0hhMNAGuaQxmXd61U7SE3gC8cdRR
doouY9Ty5WxNmPYId8zuPDpyUdSiOTMG9YgI69jiq/Ax3Tq3qIb1Wkgeh0J0gJ+Q7gBwkUc8rwAn
iXaICnKpkFGTNxEpBXAkY2Dm5M7jIhdFLZozfUCPrmEdv2zdbUxE52aVsEcLyYeBy9eFkE47lIQy
dfYDbESbxBwuu6dRo38jpYqTdExNG9l1wryoRYvmTOvXI/yrji1bfD0mPLxJeRzTRHJbNCCiPVKe
EDc+RnsiemLFOpK3DdcbcURsIbTWiG6mG3FU04WzYTnRKVayYAhCaAXcZBSsrUAC/SJrSENEXkYN
H0xEt2x6R0Sc0HjjTy0kd0QtygziqNd/MJEMhR9/gcheWvCUMw3f6wlEdEGXbj14r8Ybl7WQvALW
VPqE36bWRkSwUqTqB1iI3niIWk0kGDY60un2afoGjTfuayHZUQCTiOi5ShsfoutslOULtCa+r5kR
FtPPx5xwgNsqnfwvzOKFV4HkGOAwUYYXE7ypil50QVEP+SNMmUSKQr4k4/zHUcdW6WdySilzlgWr
sOhUR3JldCSazTbxpCjYzIqWyYeZh/97AeNxhk22TTy+RJ93Vl7phharADDED1UkD0NJor58ZW8x
TPoO1tdq3gUNRr63IraGweEx8+R8/Qm8ilJX4OOSACzPtZB8jl33CqsVpa4CHhMRTUZ+O0u6MGRn
fr3UuwaG+tYAqm58ronkHUwNV318QURJFiykFBu/V2PHciCVaCNcpHIEGYatKGbKsuH4ZLgFmHxO
M3aRD9OJYk0sp2QIqn0kqdli9/xviCjVny/+NT69BWb9XqPKYBMQeUkzdtEWjYn+YEq6iN4A2y4r
2wtFwUJEd82CfNWd/69G1+8DuEZFa8YuhiNfGkMVUelb2IHtQHsEuEY03+g1Z2Qc2Xl2QZbM9FlW
ChDvgxVIngJvOxGNQwE70V/Ar0Sfy1PTK2CyEyX5YpR03bEZtKLA2/u79t4pkiXL239jAtzeacaT
45mwYG12uWmMjl0VPSSXAxkMlSEvG7p8axgTmXd2/n4ja6Trnn3B0ZPVkRyCgUT2oqzjcAK4leEv
5c8S0SJmP9RM2JY8MmpARce1nfsv2LJk9NBO4Apg1ZFciW1iOVJUoc9vzPOwDXRS3fCdU9aFEBlO
ISJ6F5AFMz+dZpNS4ZRI7sGwdE4BZ4kWwe0N0XIhhs3dXF+GfuEjXQqNlkWuZcuGjQ/rxJrgY1Sq
SP4NeE5Ey+DygogyvTA9Et6yGz0NTMzmoQvfza6gQQN2CNEFg8N1WgjA67ypITnJBWuIaDAKZTJZ
yTwOGgJfWZ3vbKYV5m7BJco0uOMzn2azbsaH56xSQg5LFcnJLizFaSugUP88yRS/E1EjtCZOn9bp
+J2yCoiGoy38llYLyVWYNdmeD98ShaGBxD6en1qUiOiJTcbb6mTMjNpMdjDReI2G50yGdxGqjeQZ
DG4fMM1abgK/HwdkGqBDuERuOxR8ZyTvy48mTD7yqYfx1W92SXFiUw3JZxgFgePs1nk4ShEdVVQt
D2SorGkFBFWG2sZMYKs1Y4xPyoXnFRGlNlWRvJ/b6F9U7vhpFqwsAf9beGaS3ZjcAgfBNKPKM6YR
X4uCMxpIzvBk81rt8Dkle2I6EaXmkXV5GoBPiIioD0cVE1FpDYxj/Pc2OMqu94MzJLdHDfZJ7EJE
vwJxyTamAZ843l2LZzr9xJWxGprd/hYqe4yNkHV54AzJ7KyWWZSJXjREK6Jki7wXYAs20DQSnonG
CRQAXLi60P5Gja65zAZnSJ7EeJ9ESWYl86Ilr8T4CxCjmvJSGbsEXBnbfC/kA7hntZF8m8P5JPg4
DrHNGavK+nuHsYg4JmPEXTRkSHUSaBCGRp0lHM+gtTaSA9lg1nh4pRJNRAGiFvKEaigvj1AbFRzc
VzAwvhAznI2MxvO4ibCbNpJnsCJw3Znez95YRETVZPf6pTvL1Yox8aU+ewzZ0E605hvbikznooxD
tJFcgwfrx+JWX5zbEylwbHcwSjnOKTV82P+EsXDBkhL831e0kXyAK/H+Dbi0HLZkJjv6kYyKxXyb
jMIyZfOyRiwRcmH1DFnev6+M0KOG5BfACq4u5ABROKoQrYVZWmxWkg/K7uFIiXZD2wyBiVbR0Oo3
TMh2DtBGcigLtpUwvyaKZfzFwTKNw2UwsY1FPuP3fO8Mrdp87YmjmKGo1kQh3zlWE8nvvDCDn8/q
Kuv7zgt7pn5EyUYcyNI80TnZiI5S7SjRUdHaSF4IC7PjSHTH/AFs+d6Psq5UX3DdwkbCP0XqJxkY
K42SHhgXOVKUC4zQRPJ3wE2u7ncyURM0JboJ7JOxW7j1MDOQaxo02ojJWwzyVpmRd1EtaRJIHckJ
VvahvgNsJ/oRuM/4ofektQuNBeYv5+T0MGL0viztX/IvCZamgdSRfFoQVx+r0BWNgkuqUFcUTEQj
DeWqwxYeenw/euegIAPP28gJ4rjiTW0kt+Yf/EaoH8qujY+kkHDwnTFvmqVsg6dGdtWmcuM3/xMf
eyqysYGtSOlNJRV7GBUkOwL5vF0b1CaqgR5c4l2SgOLnqXlwYSo/77kY8ZMrTtx67dG9E3PrG6iy
rrhRLEb5H00kL4U5nvOM+hH1ZBoUJVslqmpXBDptRjG+ZvmsoUm22rTtN+Nijk6rY0D7J3SDWNBu
kiaSl8OFowSvBeLl0eTPRXR8awrRqyC811F0t3SLru1dpHnyZU6z4OXJZcmKShoxHBXaeYShvENW
CPw+h2nABkkGXNO7OCv84IvhlvGvC5YS0UCpfv182BJ5b8Sda7g84f2abB75vYQ9rO1dVOebIPdF
acrMzz52wUwRHXE7Us9k4W+eB9n7/RptnTxPkojX9i76CDz+a7IW90RBIpX184xQ/Fn392impXuU
LLyhjeTdgqfzN8Cr1rfCV5LSan/O/zxngqRFgaP5+7zBHy1rJp1ONJE8CvnShVt4dgEsD5gNtKh5
nf1jcXp9IHyZ9G9GvfdpcrmVDaRZNU0kRwM/CHWbj45yMZ2vUUbklvqJlpVoE++wp1Z5n0ZXXSOL
62kjWSR2mSinOz0T74HSPJgag72292Zm8JnJ8pdiNJHcGR9zc2ymD+D2lstQivT2b1vY1YdxmWtJ
vk1K3fdmuG1CtHxt6qOF5E8QznvDeTG3DkOeTvEQqPJEhyU+0lUTJ9KX+P5Q4T7jknwHOVILyWOR
j5thn5qxpQOKZLKxOdNjMcNF1E6gpsAvel7mvRmdZ+Gf8h3kdC0kvzQhSlQnMEjy5hGJ6FptNpu9
7j2Z6TUn/Y3ixXtaSM7wFmWym/CVxbRLvEvsBlehrHaTvHPDm3LvyfK60TRc/lpfDSTfEVew1EM1
brqrKTSDJQpHMbsk68pR3BNKvCeTG99RyouO0kCyvZgoiFUBbdy4LFO0OFQSLKlniIQbn1SN9X1P
RreLp5YKHoQWkg+KCYZtZbPYeOTJFBtre8MWIr6P8dl1ogzFd9b0Lg6KqgRpCfh7fV+0fY82ix/F
N25ydfP7Rd6H4T7fZarUoGh5Fwvhlkrieka2ufAIFBZWC2+hxRlzgz25Ittb+d6Hyfm/dxB9K39V
y7s4Kq7XGwTA8i/vbQ4V1aqL+2vFmNhQIxHR33neh9EBG9UyLZrexQTks4siVR6SN7+UxO//4bLQ
me1ybqb/WvbZMYrkofDJFEcHuY2dw0sQ3O8Bd3EnqM5SoS0iOuaZY8NNYXGq9EUtJNcQ14es590Q
OiRaS7bLsqsxItbnr245Ntnc+alqFkILyb3EbVo2A3ywjdoJEYA6KC5hYTQWV6ZusebYaEvfN2yy
0SCSGzFlTXzGRdKzpbxkr5rhxuk9ZeQUyuY+r/msqHRc1UJyqHA/idaKSovr8Nmx++7SpsRHJSlD
BsreObT848MalKRwdSQ/hlg/+zJ4zL7z5eP41BalpOrfrVCaf+E3jxyaXJ4VlVBUaAxUR3JKHrHM
wi2ItPpmwTONL4ZeKytxMItU49bnFMpcs583ctHJcRpITrCICb6pVsmWLwFiXVSi+vz04ojyyomZ
LXm8vrIYnJOfmsWmfSXqR9KP1xNpzeecWCuDhM0WN27WydEqvZwvURlsbE7+UVL+lmFBEQ6i3fkV
44mLaJ/KlZUIkmCXc7TtC1iXoVXLqDEnHxD7cUSBsNwXuRSnOacjQEqMS3bHXNG/ZyrkxOgS2/gH
uaHBOfmAqMU9EVUS/CBmM3VRGpF15b36B22yrfJc4RdJOYV0aOX4doolkl66Aq4pwoqdwH2XIfLE
jsdLRYXM6oLZNdw26JmEGi8ZGjm+MIno8Fs3VouNOcE9zkxXWasOeymOFEFElLk42/LJbiNEN0De
+1AjxzcYhcXbzxCR8lCmG7fClJJLf1F7VvqTe2inZ9uTyzvljXCembI3tXJ8U+AlVjkJ57SG2Gyp
h6RKLlbSVudc9pT3fZZK6l7Hyd6+rYHkIeJQ5mqYBSnEO2wtg70y/N/IAjOuWKMsHkienL2QeFNJ
qlbe7KCXKpIfWiQz1W4IigYP+KDt58p2h1PgLe7K+npM9kKfbSQNZB/IJp9hqkh2FOKSf1wS1SQ8
DXXRnoiILilKn2mnvBLt+dBsuRimLs9JujuTjCkaSG4n0QqhObClidnpMjn+UmKNFHKc7J1lSPjO
keoxKeqvNbyLzHziqEQTNCjJrx7prsz2aD2kHV0Yf0NV7TAhKutrXyN5/+D6RryLYbDGS5h6AP9P
Cdarv2biyfViDoFUEuNpZNajtF9elJ01xIh3cU566Z8gDiOOQEl2zQ+Qd9dK81eUdD2YmuWMsKnT
TZJn5wx5F+VEIk9MZlXouXZXLMtNTDGfp7R/c8zQLO2grH0VIorpboZ2fH+LUwfPbFgsahVQH+2J
KLEoSitEtH6B6YoalCljQ40s3eDSyo7mEwzs+OwBYi+BHpkhCnr2Q4CdIdH4JSrOXk5RyZy+JiRr
aSdll6PBRnZ8XZE/VeJwittp72dWzXNM0YB0DEMBRe+41GXls2R0tZOK00YY2/GFShnfKa7Chpp2
wyJz324qtKApZeeQT4zi+Gu1Zlcyj/6IOpKnwiNFFMsy/bsOJs6V+AYlGF66QtiH0n1luR7RuBrZ
xmiAq+x2FSWQw7I5Ww3Jf0o1ZObDwypwwHcxqL5rlsjm8nsSF2WXyX9mtzLqZXz6q0ovNBknt60a
kp+7Yqo42AMEssULRESvrFhOREOQX9mM6LTaL0COv6Y2N+plVD+k8jutVfPiVKJwT6S/USURmXo8
TyMRkXzDVa51oqsB5z5Io2nbculh69WRXFsgZxM1Rx2KEW7bargRXXIRmoJJWC6+qaQ5XkYZmDFc
Wu9RFZbOkCqLlVND8hj4iaap1NL4qqbQmSg1L+YQ0Ri4q8j9xplVZj0iehZZysA+r/1+NREeSpb+
TjXVkDwRnk8kHLJKa2B6Kkr09iJK9VZtQhuiig4iejjVQKrSs8sfqs2OE6TPQRNtpr2UkDw9VuC7
mpfL3/5WovAq8pEuzGugN8H59z9u17jifWmg+aQqkmPNPKWF6K0NC4mK87PtH8Bzagi/Rypnj5YI
eaqMx7v76CkCWkJXPtX6aHdpZFENyZXRXfSBBcCBUSKaSBfUI3rpJ2riIhp1UVWjz2vczz30OPjW
BusStExuLTmyqxqSAyU9yh97YNstk2iuHYWCdvpVEmPix7w8EzXv871NXfU0BV2bbHqj9VFp3HSQ
QSRLxivFM3LR4paodXTGte3jWlRRlgzka7Hgit5VpvXr2Duia69Bozccv3dbXLEkQvJkuAvX3Q7E
EA1EEPvSQ+DPJya1KAUR1UWI0+/59OjSbnU+VjBlTZUH707S+djlfl3De3fu0W/o3F+jY0+lqiD5
irTLYSmE0X6BDk7bi/YiWgDbVbWz/4RiKdrXjj+yOLxWSUUg1Fz9m191PkVn+4V37d25R7//zP/9
+r1T6WpInuwjjndcdqmcRh+JJBe2AX/RPyXUUZSmf5//PTS/Y03lvGGrPeKQ3icP9O/SrVfnHv2/
Wbz/ZszJzOwgWWUkOj0i4crBeX1b163auEVoaJMOk/ZEZ2b3WiIkfyZWxJ2Ohkz0mIvDff1NgmPV
SNWlif7xCE03drl3MX+tH9OpUZ0aLRqGftay76qTL7NhsxTJi/h6XiKitCC/G5SUX1bWeb28+qLh
2HLX6dVS755dN6pDw9q1Wnwe2qDVgNWnE7Nzn+VROOmilkDUQeQ1JE3YmUGUkZF9CKXcOv39yHaf
1wpt0SC0YashP5xNysZJco7k/8shQnJdcc21fesDInJ829fQWdL/T22WIvlGo3HiN5MeEdHtI/9r
91ldP1nszpxJ+V+z+YNF8vNDmR+GzRpdzP6nh1Mk/w+ODxbJH4zNuUjORXIuknORnIvkXCTnIjkX
yblIzkVyLpJzkZyL5Fwk5yI5F8m5SM5Fci6Sc5Gci+RcJOci+X8LyQFdPpThySPZ94Ox2YdHsucH
Y3NBHskuH4zNJYCK+HAGV6rQ/AOymav06PYB2cx1QBxt+nBsrvD/ANIP5RKCEJXDAAAAAElFTkSu
QmCC
--f46d040a62c49bb1c804f027e8cc--"""
# The messages below are variants of TEXT_ATTACHMENT used to check that the
# digest logic copes with NUL bytes appearing in different parts of a
# message.  The \x00 escapes are in non-raw strings, so they produce
# literal NUL characters in the test data.

# A single NUL byte embedded in the text/plain body.
TEXT_ATTACHMENT_W_NULL = """MIME-Version: 1.0
Received: by 10.76.127.40 with HTTP; Fri, 17 Jan 2014 02:21:43 -0800 (PST)
Date: Fri, 17 Jan 2014 12:21:43 +0200
Delivered-To: chirila.s.alexandru@gmail.com
Message-ID: <CALTHOsuHFaaatiXJKU=LdDCo4NmD_h49yvG2RDsWw17D0-NXJg@mail.gmail.com>
Subject: Test
From: Alexandru Chirila <chirila.s.alexandru@gmail.com>
To: Alexandru Chirila <chirila.s.alexandru@gmail.com>
Content-Type: multipart/mixed; boundary=f46d040a62c49bb1c804f027e8cc
--f46d040a62c49bb1c804f027e8cc
Content-Type: multipart/alternative; boundary=f46d040a62c49bb1c404f027e8ca
--f46d040a62c49bb1c404f027e8ca
Content-Type: text/plain; charset=ISO-8859-1
This is a test ma\x00iling
--f46d040a62c49bb1c804f027e8cc--"""

# Several consecutive NUL bytes embedded in the text/plain body.
TEXT_ATTACHMENT_W_MULTIPLE_NULLS = """MIME-Version: 1.0
Received: by 10.76.127.40 with HTTP; Fri, 17 Jan 2014 02:21:43 -0800 (PST)
Date: Fri, 17 Jan 2014 12:21:43 +0200
Delivered-To: chirila.s.alexandru@gmail.com
Message-ID: <CALTHOsuHFaaatiXJKU=LdDCo4NmD_h49yvG2RDsWw17D0-NXJg@mail.gmail.com>
Subject: Test
From: Alexandru Chirila <chirila.s.alexandru@gmail.com>
To: Alexandru Chirila <chirila.s.alexandru@gmail.com>
Content-Type: multipart/mixed; boundary=f46d040a62c49bb1c804f027e8cc
--f46d040a62c49bb1c804f027e8cc
Content-Type: multipart/alternative; boundary=f46d040a62c49bb1c404f027e8ca
--f46d040a62c49bb1c404f027e8ca
Content-Type: text/plain; charset=ISO-8859-1
This is a test ma\x00\x00\x00iling
--f46d040a62c49bb1c804f027e8cc--"""

# NUL bytes embedded in the Subject header value.
TEXT_ATTACHMENT_W_SUBJECT_NULL = """MIME-Version: 1.0
Received: by 10.76.127.40 with HTTP; Fri, 17 Jan 2014 02:21:43 -0800 (PST)
Date: Fri, 17 Jan 2014 12:21:43 +0200
Delivered-To: chirila.s.alexandru@gmail.com
Message-ID: <CALTHOsuHFaaatiXJKU=LdDCo4NmD_h49yvG2RDsWw17D0-NXJg@mail.gmail.com>
Subject: Te\x00\x00\x00st
From: Alexandru Chirila <chirila.s.alexandru@gmail.com>
To: Alexandru Chirila <chirila.s.alexandru@gmail.com>
Content-Type: multipart/mixed; boundary=f46d040a62c49bb1c804f027e8cc
--f46d040a62c49bb1c804f027e8cc
Content-Type: multipart/alternative; boundary=f46d040a62c49bb1c404f027e8ca
--f46d040a62c49bb1c404f027e8ca
Content-Type: text/plain; charset=ISO-8859-1
This is a test mailing
--f46d040a62c49bb1c804f027e8cc--"""

# NUL bytes embedded in the Content-Type charset parameter.
TEXT_ATTACHMENT_W_CONTENTTYPE_NULL = """MIME-Version: 1.0
Received: by 10.76.127.40 with HTTP; Fri, 17 Jan 2014 02:21:43 -0800 (PST)
Date: Fri, 17 Jan 2014 12:21:43 +0200
Delivered-To: chirila.s.alexandru@gmail.com
Message-ID: <CALTHOsuHFaaatiXJKU=LdDCo4NmD_h49yvG2RDsWw17D0-NXJg@mail.gmail.com>
Subject: Test
From: Alexandru Chirila <chirila.s.alexandru@gmail.com>
To: Alexandru Chirila <chirila.s.alexandru@gmail.com>
Content-Type: multipart/mixed; boundary=f46d040a62c49bb1c804f027e8cc
--f46d040a62c49bb1c804f027e8cc
Content-Type: multipart/alternative; boundary=f46d040a62c49bb1c404f027e8ca
--f46d040a62c49bb1c404f027e8ca
Content-Type: text/plain; charset="iso-8859-1\x00\x00\x00"
This is a test mailing
--f46d040a62c49bb1c804f027e8cc--"""
class PyzorPreDigestTest(PyzorTestBase):
    """Checks for the ``predigest`` command.

    The predigest output is produced entirely client side, so no pyzord
    server is required for these tests.
    """

    @classmethod
    def setUpClass(cls):
        # Skip the server setup normally done by the base class.
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        # The predigest command needs no extra client arguments.
        self.client_args = {}

    def test_predigest_email(self):
        """Test email removal in the predigest process"""
        addresses = ("t@abc.ro", "t1@abc.ro", "t+@abc.ro", "t.@abc.ro")
        for address in addresses:
            body = "Test %s Test2" % address
            result = self.check_pyzor("predigest", None, input=TEXT % body)
            self.assertEqual(result, b"TestTest2\n")

    def test_predigest_long(self):
        """Test long "words" removal in the predigest process"""
        long_words = ("0A2D3f%a#S", "3sddkf9jdkd9", "@@#@@@@@@@@@")
        for word in long_words:
            body = "Test %s Test2" % word
            result = self.check_pyzor("predigest", None, input=TEXT % body)
            self.assertEqual(result, b"TestTest2\n")

    def test_predigest_line_length(self):
        """Test small lines removal in the predigest process"""
        body = "\n".join(("This line is included", "not this", "This also"))
        result = self.check_pyzor("predigest", None, input=TEXT % body)
        self.assertEqual(result, b"Thislineisincluded\nThisalso\n")

    def test_predigest_atomic(self):
        """Test atomic messages (lines <= 4) in the predigest process"""
        body = "All this message\nShould be included\nIn the predigest"
        result = self.check_pyzor("predigest", None, input=TEXT % body)
        self.assertEqual(
            result, b"Allthismessage\nShouldbeincluded\nInthepredigest\n")

    def test_predigest_pieced(self):
        """Test pieced messages (lines > 4) in the predigest process"""
        body = "".join("Line%d test test test\n" % i for i in range(100))
        expected = b"".join(("Line%dtesttesttest\n" % i).encode("utf8")
                            for i in (20, 21, 22, 60, 61, 62))
        result = self.check_pyzor("predigest", None, input=TEXT % body)
        self.assertEqual(result, expected)

    def test_predigest_html(self):
        expected = """Emailspam,alsoknownasjunkemailorbulkemail,isasubset
ofspaminvolvingnearlyidenticalmessagessenttonumerous
byemail.Clickingonlinksinspamemailmaysendusersto
byemail.Clickingonlinksinspamemailmaysendusersto
phishingwebsitesorsitesthatarehostingmalware.
Emailspam.Emailspam,alsoknownasjunkemailorbulkemail,isasubsetofspaminvolvingnearlyidenticalmessagessenttonumerousbyemail.Clickingonlinksinspamemailmaysenduserstophishingwebsitesorsitesthatarehostingmalware.
""".encode("utf8")
        result = self.check_pyzor("predigest", None, input=HTML_TEXT)
        self.assertEqual(result, expected)

    def test_predigest_html_style_script(self):
        expected = """Thisisatest.
Thisisatest.
""".encode("utf8")
        result = self.check_pyzor("predigest", None,
                                  input=HTML_TEXT_STYLE_SCRIPT)
        self.assertEqual(result, expected)

    def test_predigest_attachemnt(self):
        # (sic) the misspelled method name is kept so test IDs stay stable.
        result = self.check_pyzor("predigest", None, input=TEXT_ATTACHMENT)
        self.assertEqual(result, b"Thisisatestmailing\n")
class PyzorDigestTest(PyzorTestBase):
    """Checks for the ``digest`` command.

    The digest is computed entirely client side, so no pyzord server is
    required for these tests.
    """

    @classmethod
    def setUpClass(cls):
        # Skip the server setup normally done by the base class.
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        # The digest command needs no extra client arguments.
        self.client_args = {}

    def _check_digest(self, message, expected_cleaned):
        """Digest ``message`` and assert the output equals the SHA1 hex
        digest of ``expected_cleaned`` (the expected predigested bytes)."""
        result = self.check_pyzor("digest", None, input=message)
        self.assertEqual(
            result.decode("utf8"),
            hashlib.sha1(expected_cleaned).hexdigest().lower() + "\n")

    def test_digest_email(self):
        """Test email removal in the digest process"""
        for address in ("t@abc.ro", "t1@abc.ro", "t+@abc.ro", "t.@abc.ro"):
            self._check_digest(TEXT % ("Test %s Test2" % address),
                               b"TestTest2")

    def test_digest_long(self):
        """Test long "words" removal in the digest process"""
        for word in ("0A2D3f%a#S", "3sddkf9jdkd9", "@@#@@@@@@@@@"):
            self._check_digest(TEXT % ("Test %s Test2" % word),
                               b"TestTest2")

    def test_digest_line_length(self):
        """Test small lines removal in the digest process"""
        body = "\n".join(("This line is included", "not this", "This also"))
        self._check_digest(TEXT % body, b"ThislineisincludedThisalso")

    def test_digest_atomic(self):
        """Test atomic messages (lines <= 4) in the digest process"""
        body = "All this message\nShould be included\nIn the digest"
        self._check_digest(TEXT % body,
                           b"AllthismessageShouldbeincludedInthedigest")

    def test_digest_pieced(self):
        """Test pieced messages (lines > 4) in the digest process"""
        body = "".join("Line%d test test test\n" % i for i in range(100))
        expected = b"".join(("Line%dtesttesttest" % i).encode("utf8")
                            for i in (20, 21, 22, 60, 61, 62))
        self._check_digest(TEXT % body, expected)

    def test_digest_html(self):
        expected = """Emailspam,alsoknownasjunkemailorbulkemail,isasubset
ofspaminvolvingnearlyidenticalmessagessenttonumerous
byemail.Clickingonlinksinspamemailmaysendusersto
byemail.Clickingonlinksinspamemailmaysendusersto
phishingwebsitesorsitesthatarehostingmalware.
Emailspam.Emailspam,alsoknownasjunkemailorbulkemail,isasubsetofspaminvolvingnearlyidenticalmessagessenttonumerousbyemail.Clickingonlinksinspamemailmaysenduserstophishingwebsitesorsitesthatarehostingmalware.
""".replace("\n", "").encode("utf8")
        self._check_digest(HTML_TEXT, expected)

    def test_digest_html_style_script(self):
        self._check_digest(HTML_TEXT_STYLE_SCRIPT,
                           "Thisisatest.Thisisatest.".encode("utf8"))

    def test_digest_attachment(self):
        self._check_digest(TEXT_ATTACHMENT, b"Thisisatestmailing")

    def test_digest_attachment_w_null(self):
        self._check_digest(TEXT_ATTACHMENT_W_NULL, b"Thisisatestmailing")

    def test_digest_attachment_w_multiple_nulls(self):
        self._check_digest(TEXT_ATTACHMENT_W_MULTIPLE_NULLS,
                           b"Thisisatestmailing")

    def test_digest_attachment_w_subject_null(self):
        self._check_digest(TEXT_ATTACHMENT_W_SUBJECT_NULL,
                           b"Thisisatestmailing")

    def test_digest_attachment_w_contenttype_null(self):
        self._check_digest(TEXT_ATTACHMENT_W_CONTENTTYPE_NULL,
                           b"Thisisatestmailing")
# Multipart message whose parts use three different, valid charsets
# (iso-8859-1 quoted-printable, utf-8 base64, cp1258 base64); used to
# check that digesting decodes each part with its declared charset.
ENCODING_TEST_EMAIL = """From nobody Tue Apr 1 13:18:54 2014
Content-Type: multipart/related;
boundary="===============0632694142025794937=="
MIME-Version: 1.0
This is a multi-part message in MIME format.
--===============0632694142025794937==
Content-Type: text/plain; charset="iso-8859-1"
MIME-Version: 1.0
Content-Transfer-Encoding: quoted-printable
Thist is a t=E9st
--===============0632694142025794937==
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: base64
VGhpcyBpcyBhIHRlc3Qg5r+A5YWJ6YCZ
--===============0632694142025794937==
MIME-Version: 1.0
Content-Type: text/plain; charset="cp1258"
Content-Transfer-Encoding: base64
VGhpcyBpcyBhIHTpc3Qg4qXG
--===============0632694142025794937==--
"""
# Message with deliberately corrupted headers: NUL bytes (\x00) inside
# header values and boundaries, plus an invalid charset name
# ("us-asciia"); used to check that digesting degrades gracefully.
BAD_ENCODING = """From nobody Tue Apr 1 13:18:54 2014
Content-Type: multipart/related;
boundary="===============0632694142025794937=="
MIME-Version: 1.0\x00\x00\x00
This is a multi-part message in MIME format.
--===============0632694142025794937==
Content-Type: text/plain; charset=ISO-8859-1\x00\x00\x00Content-Transfer-Encoding: quoted-printable
This is a test
--===============0632694142025794937\x00\x00\x00==
Content-Type: text/plain; charset=us-asciia
Content-Transfer-Encoding: quoted-printable
This is a test
--===============0632694142025794937==
"""
class PyzorEncodingTest(PyzorTestBase):
    """Digesting of messages with mixed and with broken charsets.

    The digest is computed entirely client side, so no pyzord server is
    required for these tests.
    """

    @classmethod
    def setUpClass(cls):
        # Skip the server setup normally done by the base class.
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        # The digest command needs no extra client arguments.
        self.client_args = {}

    def test_encodings(self):
        result = self.check_pyzor("digest", None, input=ENCODING_TEST_EMAIL)
        self.assertEqual(result.decode("utf8"),
                         "47a83cd0e5cc9bd2c64c06c00e3853f79e63014f\n")

    def test_bad_encoding(self):
        result = self.check_pyzor("digest", None, input=BAD_ENCODING)
        self.assertEqual(result.decode("utf8"),
                         "2b4dbf2fb521edd21d997f3f04b1c7155ba91fff\n")
def suite():
    """Gather all the tests from this module in a test suite."""
    # unittest.makeSuite() was deprecated in Python 3.11 and removed in
    # 3.13; TestLoader.loadTestsFromTestCase() is the supported equivalent.
    loader = unittest.TestLoader()
    test_suite = unittest.TestSuite()
    for test_case in (PyzorDigestTest, PyzorPreDigestTest,
                      PyzorEncodingTest):
        test_suite.addTest(loader.loadTestsFromTestCase(test_case))
    return test_suite


if __name__ == '__main__':
    unittest.main()
| 56,344 | 56.029352 | 206 | py |
pyzor | pyzor-master/tests/functional/test_engines/test_redis.py | import unittest
# The redis client library is an optional dependency: record whether it
# is importable so the tests below can be skipped when it is missing.
try:
    import redis
    has_redis = True
except ImportError:
    has_redis = False

from tests.util import *
@unittest.skipIf(not has_redis, "redis library not available")
class RedisPyzorTest(PyzorTest, PyzorTestBase):
    """Test the redis engine"""
    # DSN format is host,port,password,db; database 10 is used as a
    # scratch database so real data is not touched.
    dsn = "localhost,,,10"
    engine = "redis"

    @classmethod
    def tearDownClass(cls):
        super(RedisPyzorTest, cls).tearDownClass()
        # Drop everything written to the scratch database during the tests.
        redis.StrictRedis(db=10).flushdb()
class ThreadsRedisPyzorTest(RedisPyzorTest):
    """Test the redis engine with threads activated."""
    threads = "True"


class MaxThreadsRedisPyzorTest(RedisPyzorTest):
    """Test the redis engine with maximum threads set."""
    # NOTE(review): the original docstring said "gdbm engine with with
    # maximum threads" -- a copy/paste from test_gdbm.py; this class
    # configures the redis engine.
    threads = "True"
    max_threads = "10"


class PreForkRedisPyzorTest(RedisPyzorTest):
    """Test the redis engine with preforked worker processes."""
    # NOTE(review): the original docstring said "threads activated", but
    # this class configures preforking, not threading.
    prefork = "4"
def suite():
    """Gather all the tests from this module in a test suite."""
    # unittest.makeSuite() was deprecated in Python 3.11 and removed in
    # 3.13; TestLoader.loadTestsFromTestCase() is the supported equivalent.
    loader = unittest.TestLoader()
    test_suite = unittest.TestSuite()
    for test_case in (RedisPyzorTest, ThreadsRedisPyzorTest,
                      MaxThreadsRedisPyzorTest, PreForkRedisPyzorTest):
        test_suite.addTest(loader.loadTestsFromTestCase(test_case))
    return test_suite


if __name__ == '__main__':
    unittest.main()
| 1,306 | 24.627451 | 68 | py |
pyzor | pyzor-master/tests/functional/test_engines/test_mysql.py | import unittest
# configparser was renamed from ConfigParser in Python 3; alias it so the
# rest of the module works on both major versions.
try:
    import configparser as ConfigParser
except ImportError:
    import ConfigParser

from tests.util import *

# MySQLdb is an optional dependency: record whether it is importable so
# the tests below can be skipped when it is missing.
try:
    import MySQLdb
    has_mysql = True
except ImportError:
    has_mysql = False
# DDL executed by MySQLdbPyzorTest.setUpClass; the %s placeholder is
# filled in with the table name read from ./test.conf.
schema = """
CREATE TABLE IF NOT EXISTS `%s` (
`digest` char(40) default NULL,
`r_count` int(11) default NULL,
`wl_count` int(11) default NULL,
`r_entered` datetime default NULL,
`wl_entered` datetime default NULL,
`r_updated` datetime default NULL,
`wl_updated` datetime default NULL,
PRIMARY KEY (`digest`)
)
"""
@unittest.skipIf(not os.path.exists("./test.conf"),
                 "test.conf is not available")
@unittest.skipIf(not has_mysql, "MySQLdb library not available")
class MySQLdbPyzorTest(PyzorTest, PyzorTestBase):
    """Test the mysql engine.

    Connection details are read from the [test] section of ./test.conf;
    the table is created before the tests run and dropped afterwards.
    """
    dsn = None
    engine = "mysql"

    @staticmethod
    def _connect():
        """Read ./test.conf and return (parsed config, open connection)."""
        conf = ConfigParser.ConfigParser()
        conf.read("./test.conf")
        db = MySQLdb.Connect(host=conf.get("test", "host"),
                             user=conf.get("test", "user"),
                             passwd=conf.get("test", "passwd"),
                             db=conf.get("test", "db"))
        return conf, db

    @classmethod
    def setUpClass(cls):
        # Create the digest table and build the DSN the server will use.
        conf, db = cls._connect()
        table = conf.get("test", "table")
        c = db.cursor()
        c.execute(schema % table)
        c.close()
        db.close()
        cls.dsn = "%s,%s,%s,%s,%s" % (conf.get("test", "host"),
                                      conf.get("test", "user"),
                                      conf.get("test", "passwd"),
                                      conf.get("test", "db"),
                                      conf.get("test", "table"))
        super(MySQLdbPyzorTest, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(MySQLdbPyzorTest, cls).tearDownClass()
        # Best-effort cleanup: the table may not exist if setUpClass
        # failed part-way, so errors are ignored.  Catch Exception rather
        # than using a bare except, which would also swallow
        # KeyboardInterrupt/SystemExit.
        try:
            conf, db = cls._connect()
            table = conf.get("test", "table")
            c = db.cursor()
            c.execute("DROP TABLE %s" % table)
            c.close()
            db.close()
        except Exception:
            pass
class ThreadsMySQLdbPyzorTest(MySQLdbPyzorTest):
    """Test the mysql engine with threads activated."""
    threads = "True"
    max_threads = "0"


class BoundedThreadsMySQLdbPyzorTest(MySQLdbPyzorTest):
    """Test the mysql engine with threads and DBConnections set."""
    threads = "True"
    max_threads = "0"
    db_connections = "10"


class MaxThreadsMySQLdbPyzorTest(MySQLdbPyzorTest):
    """Test the mysql engine with threads and MaxThreads set."""
    threads = "True"
    max_threads = "10"


class BoundedMaxThreadsMySQLdbPyzorTest(MySQLdbPyzorTest):
    """Test the mysql engine with threads, MaxThreads and DBConnections set."""
    threads = "True"
    max_threads = "10"
    db_connections = "10"


class ProcessesMySQLdbPyzorTest(MySQLdbPyzorTest):
    """Test the mysql engine with worker processes activated."""
    processes = "True"
    max_processes = "10"


class PreForkMySQLdbPyzorTest(MySQLdbPyzorTest):
    """Test the mysql engine with preforked worker processes."""
    prefork = "4"
def suite():
    """Gather all the tests from this module in a test suite."""
    # unittest.makeSuite() was deprecated in Python 3.11 and removed in
    # 3.13; TestLoader.loadTestsFromTestCase() is the supported equivalent.
    loader = unittest.TestLoader()
    test_suite = unittest.TestSuite()
    for test_case in (MySQLdbPyzorTest, ThreadsMySQLdbPyzorTest,
                      BoundedThreadsMySQLdbPyzorTest,
                      MaxThreadsMySQLdbPyzorTest,
                      BoundedMaxThreadsMySQLdbPyzorTest,
                      ProcessesMySQLdbPyzorTest, PreForkMySQLdbPyzorTest):
        test_suite.addTest(loader.loadTestsFromTestCase(test_case))
    return test_suite


if __name__ == '__main__':
    unittest.main()
| 3,987 | 31.16129 | 79 | py |
pyzor | pyzor-master/tests/functional/test_engines/test_gdbm.py | import unittest
from tests.util import *

# The gdbm module is optional: record whether it is importable so the
# tests below can be skipped when it is missing.
# NOTE(review): "gdbm" is the Python 2 module name; on Python 3 the
# module is dbm.gnu, so this import fails there and the tests are skipped.
try:
    import gdbm
    has_gdbm = True
except ImportError:
    has_gdbm = False
@unittest.skipIf(not has_gdbm, "gdbm library not available")
class GdbmPyzorTest(PyzorTest, PyzorTestBase):
    """Test the gdbm engine"""
    # For the gdbm engine the DSN is the database file path.
    dsn = "pyzord.db"
    engine = "gdbm"


class ThreadsGdbmPyzorTest(GdbmPyzorTest):
    """Test the gdbm engine with threads activated."""
    threads = "True"
    max_threads = "0"


class MaxThreadsGdbmPyzorTest(GdbmPyzorTest):
    """Test the gdbm engine with maximum threads."""
    threads = "True"
    max_threads = "10"
def suite():
    """Gather all the tests from this module in a test suite."""
    # unittest.makeSuite() was deprecated in Python 3.11 and removed in
    # 3.13; TestLoader.loadTestsFromTestCase() is the supported equivalent.
    loader = unittest.TestLoader()
    test_suite = unittest.TestSuite()
    for test_case in (GdbmPyzorTest, ThreadsGdbmPyzorTest,
                      MaxThreadsGdbmPyzorTest):
        test_suite.addTest(loader.loadTestsFromTestCase(test_case))
    return test_suite


if __name__ == '__main__':
    unittest.main()
| 1,009 | 25.578947 | 67 | py |
pyzor | pyzor-master/tests/functional/test_engines/__init__.py | """A suite of functional tests that verifies the correct behaviour of the
pyzor client and server as a whole.
Functional test should not touch real data and are usually safe, but it's not
recommended to run theses on production servers.
Note these tests run against the installed version of pyzor, not the
version from the source.
"""
import unittest
def suite():
    """Gather all the tests from this package in a test suite."""
    # Plain "import test_gdbm" relies on Python 2's implicit relative
    # imports, which were removed in Python 3 (PEP 328).  Use explicit
    # relative imports so this works on both major versions.
    from . import test_gdbm
    from . import test_mysql
    from . import test_redis

    test_suite = unittest.TestSuite()
    test_suite.addTest(test_gdbm.suite())
    test_suite.addTest(test_mysql.suite())
    test_suite.addTest(test_redis.suite())
    return test_suite


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| 745 | 24.724138 | 77 | py |
pyzor | pyzor-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# Pyzor documentation build configuration file, created by
# sphinx-quickstart on Sat Jun 7 15:20:07 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pyzor'
copyright = u'2014, Frank Tobin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pyzordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Pyzor.tex', u'Pyzor Documentation',
u'Frank Tobin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyzor', u'Pyzor Documentation',
[u'Frank Tobin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Pyzor', u'Pyzor Documentation',
u'Frank Tobin', 'Pyzor', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 8,310 | 30.244361 | 79 | py |
MINDER | MINDER-main/setup.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# ``distutils`` was removed from the standard library in Python 3.12;
# prefer setuptools (the drop-in replacement) and keep distutils as a
# fallback for very old interpreters.
try:
    from setuptools import setup, Extension
except ImportError:
    from distutils.core import setup, Extension

import os

# Optimised, assert-free C++11 build flags for the FM-index extension.
extra_compile_args = ["-std=c++11", "-DNDEBUG", "-O3"]

# SWIG-wrapped C++ FM-index backed by the SDSL succinct data-structure
# library; headers and libs are also searched under the user's home prefix.
extension = Extension(
    "seal.cpp_modules._fm_index",
    include_dirs=["seal/cpp_modules", os.path.expanduser("~/include")],
    libraries=["stdc++", "sdsl", "divsufsort", "divsufsort64"],
    library_dirs=[os.path.expanduser("~/lib")],
    sources=["seal/cpp_modules/fm_index.cpp", "seal/cpp_modules/fm_index.i"],
    swig_opts=["-I../include", "-c++"],
    language="c++11",
    extra_compile_args=extra_compile_args,
)

setup(
    name="SEAL",
    version="1.0",
    ext_modules=[extension],
)
| 810 | 26.965517 | 77 | py |
MINDER | MINDER-main/scripts/build_fm_index.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import csv
import logging
import multiprocessing
import re
import ftfy
import torch
import tqdm
import pickle
from seal.index import FMIndex
from datasets import load_dataset
import json
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
def process(line):
    """Tokenize one raw document string into a list of token ids.

    NOTE(review): relies on the module-level ``tokenize`` callable that is
    only defined inside the ``__main__`` guard below; ``process`` is meant
    to be used via ``multiprocessing.Pool`` after that setup has run.
    """
    tokens = tokenize(line)
    return tokens
def preprocess_file(input_path, labels, format="kilt", lowercase=False, tokenize=False, pid2query=None):
    """Yield one cleaned (and optionally augmented) document string per record.

    Parameters
    ----------
    input_path : str
        Corpus location: a HuggingFace dataset name for ``format="msmarco"``,
        otherwise a TSV file for ``"kilt"``/``"dpr"``.
    labels : list
        Output parameter: document ids are appended here in the same order
        as the yielded documents (the caller consumes this generator lazily).
    format : {"kilt", "dpr", "msmarco"}
        Input record layout.
    lowercase, tokenize : bool
        Post-processing switches; ``tokenize`` requires the module-level
        ``word_tokenize`` defined under ``__main__``.
    pid2query : str or None
        If truthy, the pickled passage-id -> queries mapping is loaded from
        ``args.pid2query`` (mirrors the original design).

    NOTE(review): this function also reads the global ``args`` namespace
    (``include_title``, ``include_query``, ``include_code``, ``delim``,
    ``query_format``, ``id2code``), as the original did.
    """
    if pid2query:
        with open(args.pid2query, "rb") as f:
            pid2query = pickle.load(f)
    id2code = None
    if args.id2code:
        with open(args.id2code, "rb") as f:
            id2code = pickle.load(f)

    def render_query(idx):
        # "free": plain space-joined concatenation of all queries;
        # "stable": " || query @@" markers around each query.
        queries = pid2query[idx]
        if args.query_format == "free":
            return "".join(" " + q for q in queries)
        return "".join(" || " + q + " @@" for q in queries)

    def render_code(idx):
        # Space-separate the individual characters of the lexical code.
        return " || " + " ".join([*id2code[idx].strip()]) + " @@"

    def render(idx, title, text):
        # Clean one record and return the final document string, or None
        # for passages whose text is empty after cleaning.
        idx = idx.strip()
        title = title.strip()
        text = re.sub(r"\s+", " ", text)
        text = ftfy.fix_text(text)
        text = text.replace("BULLET::::", "")
        text = text.replace("SECTION::::", "")
        text = text.strip()
        if not text:
            # Checked before any id lookups (minor robustness improvement
            # over the original, which looked up queries/codes first).
            return None
        if tokenize:
            title = " ".join(word_tokenize(title))
            text = " ".join(word_tokenize(text))
        title = f"{title} {args.delim}"
        if args.include_title and title:
            text = f"{title} {text}"
        if args.include_query:
            query = render_query(idx)
            if query:
                text = f"{text} {query}"
        if args.include_code:
            # BUGFIX: the original msmarco branch built the code string but
            # never appended it (only the kilt/dpr branch did); codes are
            # now appended uniformly for every format.
            code = render_code(idx)
            if code:
                text = f"{text} {code}"
        if lowercase:
            text = text.lower()
        labels.append(idx)
        return text

    def records():
        # Normalise every input format to (id, title, text) triples.
        if format == "msmarco":
            for pp in load_dataset(input_path, split="train"):
                yield pp["docid"], pp["title"], pp["text"]
        else:
            with open(input_path, "r", 2 ** 16) as f:
                if format == "dpr":
                    next(f)  # skip the TSV header row
                    rows = csv.reader(f, delimiter="\t", quotechar='"')
                    # DPR rows are (id, text, title): reorder to (id, title, text).
                    rows = ((pp[0], pp[2], pp[1]) for pp in rows if len(pp) == 3)
                else:
                    # KILT rows are tab-separated (id, title, text).
                    rows = (line.strip().split("\t", 2) for line in f)
                    rows = ((pp[0], pp[1], pp[2]) for pp in rows if len(pp) == 3)
                yield from rows

    for idx, title, text in tqdm.tqdm(records()):
        doc = render(idx, title, text)
        if doc is not None:
            yield doc
def build_index(input_path):
    """Build an FMIndex over the preprocessed corpus at *input_path*.

    Documents are cleaned/augmented by :func:`preprocess_file`, tokenized in
    parallel by ``args.jobs`` worker processes, and streamed into the index.
    The document ids collected during preprocessing are attached as labels.
    """
    labels = []
    index = FMIndex()
    lines = preprocess_file(input_path, labels, args.format, lowercase=args.lowercase, tokenize=args.tokenize, pid2query=args.pid2query)
    print('start build index')
    with multiprocessing.Pool(args.jobs) as p:
        sequences = p.imap(process, lines)
        index.initialize(sequences)
    print('start build index 2')
    # ``labels`` was filled lazily while ``sequences`` was being consumed.
    index.labels = labels
    return index
def parse_args():
    """Define and parse the command-line interface for FM-index building."""
    p = argparse.ArgumentParser()
    # Positional: corpus location and output index path.
    p.add_argument("input")
    p.add_argument("output")
    p.add_argument("--jobs", type=int, default=1)
    p.add_argument("--include_title", action="store_true")
    p.add_argument("--delim", default="@@")
    p.add_argument("--format", choices=["kilt", "dpr", "msmarco"], default="kilt")
    p.add_argument("--hf_model", type=str, default=None)
    p.add_argument("--lowercase", action="store_true")
    p.add_argument("--tokenize", action="store_true")
    p.add_argument("--pid2query", type=str, default=None)
    p.add_argument("--id2code", type=str, default=None)
    p.add_argument("--include_query", action="store_true")
    p.add_argument("--include_code", action="store_true")
    p.add_argument("--query_format", choices=["free", "stable"], default="free")
    return p.parse_args()
if __name__ == "__main__":
    args = parse_args()
    print(args)
    if args.tokenize:
        # Optional word-level pre-tokenization via spaCy's English tokenizer.
        from spacy.lang.en import English
        nlp = English()
        _tokenizer = nlp.tokenizer
        def word_tokenize(text):
            return [t.text.strip() for t in _tokenizer(text)]
    if args.hf_model is not None:
        # Subword tokenization via a HuggingFace tokenizer; BART-style
        # models expect a leading space before the text.
        from transformers import AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained(args.hf_model, use_fast=False)
        is_bart = "bart" in args.hf_model
        def tokenize(text):
            text = text.strip()
            if is_bart:
                text = " " + text
            with tokenizer.as_target_tokenizer():
                return tokenizer(text, add_special_tokens=False)["input_ids"] + [tokenizer.eos_token_id]
    else:
        # Fallback: fairseq BART encoder (downloads the model via torch.hub).
        bart = torch.hub.load("pytorch/fairseq", "bart.large").eval()
        def tokenize(text):
            return bart.encode(" " + text.strip()).tolist()[1:]
    # Token ids of the delimiter, dropping the final id appended by
    # tokenize() — NOTE(review): presumably the EOS token; confirm.
    delim = tokenize(args.delim)[:-1]
    index = build_index(args.input)
    print('start build index 3')
    index.save(args.output)
| 7,216 | 29.974249 | 136 | py |
MINDER | MINDER-main/scripts/training/make_supervised_msmarco_dataset2.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from argparse import ArgumentParser
from collections import defaultdict
import json
import multiprocessing
import random
import re
import tqdm
import math
import ftfy
from fuzzywuzzy import fuzz
from nltk.corpus import stopwords
import nltk
import pickle
from datasets import load_dataset
nltk.download('stopwords')
banned = set(stopwords.words('english'))
def parse_args():
    """Command-line interface for building supervised (source, target) pairs."""
    ap = ArgumentParser()
    ap.add_argument('input')
    ap.add_argument('output')
    # Filtering of retrieved passages.
    ap.add_argument('--min_score', type=float, default=999.0)
    ap.add_argument('--min_score_gold', type=float, default=999.0)
    ap.add_argument('--max_rank', type=int, default=1)
    # Which kind of target string to emit for each positive passage.
    ap.add_argument('--target', default='span',
                    choices=['chunk', 'span', 'title', 'code', 'query'])
    # Span-sampling parameters.
    ap.add_argument('--min_length', type=int, default=10)
    ap.add_argument('--max_length', type=int, default=10)
    ap.add_argument('--temperature', type=float, default=1.0)
    ap.add_argument('--jobs', type=int, default=30)
    ap.add_argument('--mark_target', action="store_true")
    ap.add_argument('--mark_silver', action="store_true")
    ap.add_argument('--n_samples', type=int, default=1)
    ap.add_argument('--id2code', type=str, default=None)
    ap.add_argument('--pid2query', type=str, default=None)
    ap.add_argument('--mode', choices=["w", "a"], default="w")
    return ap.parse_args()
def read_id2code(id2code_path):
    """Load a tab-separated ``passage id -> lexical code`` mapping."""
    mapping = {}
    with open(id2code_path) as handle:
        for raw in tqdm.tqdm(handle):
            raw = raw.strip()
            if not raw:
                continue  # skip blank lines
            idx, code = raw.split("\t")
            mapping[idx] = code
    return mapping
def span_iterator(tokens, ngrams=3, banned=banned):
    """Yield candidate span anchors over *tokens*.

    For every position whose token is not a stopword, yields the pair
    ``(i, i + ngrams)``.  The end index may run past ``len(tokens)``;
    callers only use it for slicing, which Python clamps automatically.

    (A block of dead, commented-out variable-width span code was removed.)
    """
    for i, token in enumerate(tokens):
        if token not in banned:
            yield (i, i + ngrams)
def extract_spans(text, source, n_samples, min_length, max_length, temperature=1.0):
    """Sample *n_samples* spans of *text* that lexically overlap the query.

    Every token trigram of *source* (the query, before any " || " suffix)
    is fuzzy-matched against every non-stopword-anchored trigram of *text*;
    each passage position accumulates the sum of its match ratios.  Span
    start positions are drawn from a softmax (with *temperature*) over
    those scores, and each sampled span gets a random length in
    [min_length, max_length].  Yields the span strings.
    """
    source = source.split("||", 1)[0]
    query_tokens = source.split()
    query_tokens_lower = [t.lower() for t in query_tokens]
    passage_tokens = text.split()
    passage_tokens_lower = [t.lower() for t in passage_tokens]
    matches = defaultdict(int)
    for i1, _ in enumerate(query_tokens_lower):
        j1 = i1+3
        str_1 = " ".join(query_tokens_lower[i1:j1])
        for (i2, j2) in span_iterator(passage_tokens_lower, 3):
            str_2 = " ".join(passage_tokens_lower[i2:j2])
            # fuzz.ratio is 0-100; normalise to [0, 1] before accumulating.
            ratio = fuzz.ratio(str_1, str_2) / 100.0
            matches[i2] += ratio
    if not matches:
        # Empty passage (or all-stopword passage): fall back to position 0.
        indices = [0]
    else:
        indices, weights = zip(*sorted(matches.items(), key=lambda x: -(x[1])))
        weights = list(weights)
        sum_weights = float(sum([0] + weights))
        if sum_weights == 0.0 or not weights:
            indices = [0]
            weights = [1.0]
        else:
            # Softmax over the accumulated fuzzy-match scores.
            weights = [math.exp(float(w) / temperature) for w in weights]
            Z = sum(weights)
            weights = [w / Z for w in weights]
            indices = random.choices(indices, weights=weights, k=n_samples)
    for i in indices:
        subspan_size = random.randint(min_length, max_length)
        span = " ".join(passage_tokens[i:i+subspan_size])
        yield span
def extract_spans_wrapper(args):
    """Pool-friendly adapter: *args* is the positional-argument tuple for
    :func:`extract_spans`; returns ``(source, spans)`` with the span
    generator materialised into a list (args[1] is the source string)."""
    return args[1], list(extract_spans(*args))
def clean(text):
    """Collapse whitespace, repair mojibake, and drop KILT layout markers."""
    text = ftfy.fix_text(re.sub(r'\s+', ' ', text))
    for marker in ('BULLET::::', 'SECTION::::'):
        text = text.replace(marker, '')
    return text.strip()
def _iterator_span_get_arguments(data, min_score, max_rank, mark_target, mark_silver, min_score_gold):
    """Yield (passage_text, marked_source) pairs for span extraction.

    NOTE(review): ``ctx['score']`` is overwritten with 1000.0 *before* the
    ``min_score``/``min_score_gold`` comparisons, so with the default
    thresholds every positive passage passes and is marked " || +" —
    confirm this is the intended behaviour.
    """
    for sample in tqdm.tqdm(data):
        source = sample['query'].strip()
        if mark_target:
            source += " || body"
        for i, ctx in enumerate(sample['positive_passages'], start=1):
            ctx['score']=1000.0
            if i > max_rank:
                continue
            if float(ctx['score']) < min_score:
                continue
            text = ctx['text'].strip()
            if mark_silver and float(ctx['score']) < min_score_gold:
                yield text, source + " || ?"
            elif mark_silver:
                yield text, source + " || +"
            else:
                yield text, source
def iterator_span(args):
    """Yield (source, span_target) training pairs, extracting spans in parallel.

    The train split comes straight from a HF dataset; for dev, queries and
    qrels are joined against the MS MARCO passage corpus.

    NOTE(review): the dev branch reads a hard-coded absolute qrels path, and
    ``data`` is unbound if ``args.output`` contains neither "train" nor "dev".
    """
    if "train" in args.output:
        data = load_dataset(args.input, split="train")
    if "dev" in args.output:
        qid2query={}
        for s in load_dataset("Tevatron/msmarco-passage", split="dev"):
            qid2query[s['query_id']] = s['query']
        passage_corpus = load_dataset("Tevatron/msmarco-passage-corpus", split="train")
        data = []
        with open("/home/v-yongqili/project/GGR/data/MSMARCO/qrels.msmarco-passage.dev-subset.txt",'r') as f:
            for l in f:
                entry = {}
                l = re.split('[\t\s]', l.strip())
                qid = str(l[0])
                entry['query_id'] = qid
                entry['query'] = qid2query[qid]
                entry['positive_passages'] = [passage_corpus[int(l[2])]]
                data.append(entry)
    arg_it = _iterator_span_get_arguments(data, args.min_score, args.max_rank, args.mark_target, args.mark_silver, args.min_score_gold)
    arg_it = ((text, source, args.n_samples, args.min_length, args.max_length, args.temperature) for text, source in arg_it)
    with multiprocessing.Pool(args.jobs) as pool:
        for source, spans in pool.imap(extract_spans_wrapper, arg_it):
            for target in spans:
                yield source, target
def iterator(args):
    """Yield (source, target) pairs for the non-span targets
    (chunk / title / query / code).

    NOTE(review): when ``args.mark_target`` is False the if/elif chain below
    falls through to ``raise ValueError`` even for a valid target — this
    generator appears to require ``--mark_target``.  Also, ``ctx['score']``
    is forced to 1000.0, so the score filters are effectively disabled.
    """
    if args.pid2query and args.target == "query":
        with open(args.pid2query, 'rb') as f:
            pid2query = pickle.load(f)
    if args.target == "code" and args.id2code:
        id2code = read_id2code(args.id2code)
    if "train" in args.output:
        data = load_dataset(args.input, split="train")
    if "dev" in args.output:
        # Join dev queries with the MS MARCO corpus via a hard-coded qrels file.
        qid2query={}
        for s in load_dataset("Tevatron/msmarco-passage", split="dev"):
            qid2query[s['query_id']] = s['query']
        passage_corpus = load_dataset("Tevatron/msmarco-passage-corpus", split="train")
        data = []
        with open("/home/v-yongqili/project/GGR/data/MSMARCO/qrels.msmarco-passage.dev-subset.txt",'r') as f:
            for l in f:
                entry = {}
                l = re.split('[\t\s]', l.strip())
                qid = str(l[0])
                entry['query_id'] = qid
                entry['query'] = qid2query[qid]
                entry['positive_passages'] = [passage_corpus[int(l[2])]]
                data.append(entry)
    for sample in tqdm.tqdm(data):
        source = sample['query'].strip()
        if args.target == "chunk" and args.mark_target:
            source += " || body"
        elif args.target == "title" and args.mark_target:
            source += " || title"
        elif args.target == "query" and args.mark_target:
            source += " || query"
        elif args.target == "code" and args.mark_target:
            source += " || code"
        else:
            raise ValueError("Wrong target")
        for i, ctx in enumerate(sample['positive_passages'], start=1):
            ctx['score']=1000.0
            if i > args.max_rank:
                continue
            if float(ctx['score']) < args.min_score:
                continue
            if args.target == "chunk":
                target = ctx['text'].strip()
                for _ in range(args.n_samples):
                    if args.mark_silver and float(ctx['score']) < args.min_score_gold:
                        yield source + " || ?", target
                    elif args.mark_silver:
                        yield source + " || +", target
                    else:
                        yield source, target
            elif args.target == "title":
                # "-" is the corpus placeholder for a missing title.
                if ctx['title'].strip()=="-":
                    continue
                target = ctx['title'].strip() + " @@"
                for _ in range(args.n_samples):
                    if args.mark_silver and float(ctx['score']) < args.min_score_gold:
                        yield source + " || ?", target
                    elif args.mark_silver:
                        yield source + " || +", target
                    else:
                        yield source, target
            elif args.target == "query":
                # The passage-id field name differs per dataset.
                if "trivia" in args.input:
                    querys = pid2query[str(ctx['psg_id'])]
                if "nq" in args.input:
                    querys = pid2query[str(ctx['passage_id'])]
                if "msmarco" in args.input:
                    querys = pid2query[str(ctx['docid'])]
                for _ in range(args.n_samples):
                    if args.mark_silver and float(ctx['score']) < args.min_score_gold:
                        yield source + " || ?", querys[min(_, len(querys)-1)]
                    elif args.mark_silver:
                        yield source + " || +", querys[min(_, len(querys)-1)]
                    else:
                        yield source, querys[min(_, len(querys)-1)]
            elif args.target == "code":
                idx = ctx['docid']
                code = id2code.get(idx)
                if not code: continue
                target = code.strip() + " ||"
                for _ in range(args.n_samples):
                    if args.mark_silver and float(ctx['score']) < args.min_score_gold:
                        yield source + " || ?", target
                    elif args.mark_silver:
                        yield source + " || +", target
                    else:
                        yield source, target
            else:
                raise ValueError("Wrong target")
def main():
    """Write parallel ``<output>.source`` / ``<output>.target`` files."""
    args = parse_args()
    with open(args.output + '.source', mode=args.mode) as src, open(args.output + '.target', mode=args.mode) as tgt:
        for source, target in iterator_span(args) if args.target == "span" else iterator(args):
            # NOTE(review): the leading space is presumably for BPE/BART
            # tokenization conventions — confirm downstream.
            source = " " + source.strip()
            target = " " + target.strip()
            src.write(source + "\n")
            tgt.write(target + "\n")


if __name__ == '__main__':
    main()
| 10,873 | 33.302839 | 135 | py |
MINDER | MINDER-main/scripts/training/make_supervised_msmarco_dataset3.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from argparse import ArgumentParser
from collections import defaultdict
import json
import multiprocessing
import random
import re
import tqdm
import math
import ftfy
from fuzzywuzzy import fuzz
from nltk.corpus import stopwords
import nltk
import pickle
from datasets import load_dataset
nltk.download('stopwords')
banned = set(stopwords.words('english'))
def parse_args():
    """Command-line interface for building supervised (source, target) pairs."""
    ap = ArgumentParser()
    ap.add_argument('input')
    ap.add_argument('output')
    # Filtering of retrieved passages.
    ap.add_argument('--min_score', type=float, default=999.0)
    ap.add_argument('--min_score_gold', type=float, default=999.0)
    ap.add_argument('--max_rank', type=int, default=1)
    # Which kind of target string to emit for each positive passage.
    ap.add_argument('--target', default='span',
                    choices=['chunk', 'span', 'title', 'code', 'query'])
    # Span-sampling parameters.
    ap.add_argument('--min_length', type=int, default=10)
    ap.add_argument('--max_length', type=int, default=10)
    ap.add_argument('--temperature', type=float, default=1.0)
    ap.add_argument('--jobs', type=int, default=30)
    ap.add_argument('--mark_target', action="store_true")
    ap.add_argument('--mark_silver', action="store_true")
    ap.add_argument('--n_samples', type=int, default=1)
    ap.add_argument('--id2code', type=str, default=None)
    ap.add_argument('--pid2query', type=str, default=None)
    ap.add_argument('--mode', choices=["w", "a"], default="w")
    return ap.parse_args()
def read_id2code(id2code_path):
    """Load a tab-separated ``passage id -> lexical code`` mapping."""
    mapping = {}
    with open(id2code_path) as handle:
        for raw in tqdm.tqdm(handle):
            raw = raw.strip()
            if not raw:
                continue  # skip blank lines
            idx, code = raw.split("\t")
            mapping[idx] = code
    return mapping
def span_iterator(tokens, ngrams=3, banned=banned):
    """Yield candidate span anchors over *tokens*.

    For every position whose token is not a stopword, yields the pair
    ``(i, i + ngrams)``.  The end index may run past ``len(tokens)``;
    callers only use it for slicing, which Python clamps automatically.

    (A block of dead, commented-out variable-width span code was removed.)
    """
    for i, token in enumerate(tokens):
        if token not in banned:
            yield (i, i + ngrams)
def extract_spans(text, source, n_samples, min_length, max_length, temperature=1.0):
    """Sample *n_samples* spans of *text* that lexically overlap the query.

    Every token trigram of *source* (the query, before any " || " suffix)
    is fuzzy-matched against every non-stopword-anchored trigram of *text*;
    each passage position accumulates the sum of its match ratios.  Span
    start positions are drawn from a softmax (with *temperature*) over
    those scores, and each sampled span gets a random length in
    [min_length, max_length].  Yields the span strings.
    """
    source = source.split("||", 1)[0]
    query_tokens = source.split()
    query_tokens_lower = [t.lower() for t in query_tokens]
    passage_tokens = text.split()
    passage_tokens_lower = [t.lower() for t in passage_tokens]
    matches = defaultdict(int)
    for i1, _ in enumerate(query_tokens_lower):
        j1 = i1+3
        str_1 = " ".join(query_tokens_lower[i1:j1])
        for (i2, j2) in span_iterator(passage_tokens_lower, 3):
            str_2 = " ".join(passage_tokens_lower[i2:j2])
            # fuzz.ratio is 0-100; normalise to [0, 1] before accumulating.
            ratio = fuzz.ratio(str_1, str_2) / 100.0
            matches[i2] += ratio
    if not matches:
        # Empty passage (or all-stopword passage): fall back to position 0.
        indices = [0]
    else:
        indices, weights = zip(*sorted(matches.items(), key=lambda x: -(x[1])))
        weights = list(weights)
        sum_weights = float(sum([0] + weights))
        if sum_weights == 0.0 or not weights:
            indices = [0]
            weights = [1.0]
        else:
            # Softmax over the accumulated fuzzy-match scores.
            weights = [math.exp(float(w) / temperature) for w in weights]
            Z = sum(weights)
            weights = [w / Z for w in weights]
            indices = random.choices(indices, weights=weights, k=n_samples)
    for i in indices:
        subspan_size = random.randint(min_length, max_length)
        span = " ".join(passage_tokens[i:i+subspan_size])
        yield span
def extract_spans_wrapper(args):
    """Pool-friendly adapter: *args* is the positional-argument tuple for
    :func:`extract_spans`; returns ``(source, spans)`` with the span
    generator materialised into a list (args[1] is the source string)."""
    return args[1], list(extract_spans(*args))
def clean(text):
    """Collapse whitespace, repair mojibake, and drop KILT layout markers."""
    text = ftfy.fix_text(re.sub(r'\s+', ' ', text))
    for marker in ('BULLET::::', 'SECTION::::'):
        text = text.replace(marker, '')
    return text.strip()
def _iterator_span_get_arguments(data, min_score, max_rank, mark_target, mark_silver, min_score_gold):
    """Yield (passage_text, marked_source) pairs for span extraction.

    NOTE(review): ``ctx['score']`` is overwritten with 1000.0 *before* the
    ``min_score``/``min_score_gold`` comparisons, so with the default
    thresholds every positive passage passes and is marked " || +" —
    confirm this is the intended behaviour.
    """
    for sample in tqdm.tqdm(data):
        source = sample['query'].strip()
        if mark_target:
            source += " || body"
        for i, ctx in enumerate(sample['positive_passages'], start=1):
            ctx['score']=1000.0
            if i > max_rank:
                continue
            if float(ctx['score']) < min_score:
                continue
            text = ctx['text'].strip()
            if mark_silver and float(ctx['score']) < min_score_gold:
                yield text, source + " || ?"
            elif mark_silver:
                yield text, source + " || +"
            else:
                yield text, source
def iterator_span(args):
    """Yield (source, span_target) training pairs, extracting spans in parallel.

    The train split comes straight from a HF dataset; for dev, queries and
    qrels are joined against the MS MARCO passage corpus.

    NOTE(review): the dev branch reads a hard-coded absolute qrels path, and
    ``data`` is unbound if ``args.output`` contains neither "train" nor "dev".
    """
    if "train" in args.output:
        data = load_dataset(args.input, split="train")
    if "dev" in args.output:
        qid2query={}
        for s in load_dataset("Tevatron/msmarco-passage", split="dev"):
            qid2query[s['query_id']] = s['query']
        passage_corpus = load_dataset("Tevatron/msmarco-passage-corpus", split="train")
        data = []
        with open("/home/v-yongqili/project/GGR/data/MSMARCO/qrels.msmarco-passage.dev-subset.txt",'r') as f:
            for l in f:
                entry = {}
                l = re.split('[\t\s]', l.strip())
                qid = str(l[0])
                entry['query_id'] = qid
                entry['query'] = qid2query[qid]
                entry['positive_passages'] = [passage_corpus[int(l[2])]]
                data.append(entry)
    arg_it = _iterator_span_get_arguments(data, args.min_score, args.max_rank, args.mark_target, args.mark_silver, args.min_score_gold)
    arg_it = ((text, source, args.n_samples, args.min_length, args.max_length, args.temperature) for text, source in arg_it)
    with multiprocessing.Pool(args.jobs) as pool:
        for source, spans in pool.imap(extract_spans_wrapper, arg_it):
            for target in spans:
                yield source, target
def iterator(args):
    """Yield (source, target) pairs for the non-span targets
    (chunk / title / query / code).

    NOTE(review): when ``args.mark_target`` is False the if/elif chain below
    falls through to ``raise ValueError`` even for a valid target — this
    generator appears to require ``--mark_target``.  Also, ``ctx['score']``
    is forced to 1000.0, so the score filters are effectively disabled.
    """
    if args.pid2query and args.target == "query":
        with open(args.pid2query, 'rb') as f:
            pid2query = pickle.load(f)
    if args.target == "code" and args.id2code:
        id2code = read_id2code(args.id2code)
    if "train" in args.output:
        data = load_dataset(args.input, split="train")
    if "dev" in args.output:
        # Join dev queries with the MS MARCO corpus via a hard-coded qrels file.
        qid2query={}
        for s in load_dataset("Tevatron/msmarco-passage", split="dev"):
            qid2query[s['query_id']] = s['query']
        passage_corpus = load_dataset("Tevatron/msmarco-passage-corpus", split="train")
        data = []
        with open("/home/v-yongqili/project/GGR/data/MSMARCO/qrels.msmarco-passage.dev-subset.txt",'r') as f:
            for l in f:
                entry = {}
                l = re.split('[\t\s]', l.strip())
                qid = str(l[0])
                entry['query_id'] = qid
                entry['query'] = qid2query[qid]
                entry['positive_passages'] = [passage_corpus[int(l[2])]]
                data.append(entry)
    for sample in tqdm.tqdm(data):
        source = sample['query'].strip()
        if args.target == "chunk" and args.mark_target:
            source += " || body"
        elif args.target == "title" and args.mark_target:
            source += " || title"
        elif args.target == "query" and args.mark_target:
            source += " || query"
        elif args.target == "code" and args.mark_target:
            source += " || code"
        else:
            raise ValueError("Wrong target")
        for i, ctx in enumerate(sample['positive_passages'], start=1):
            ctx['score']=1000.0
            if i > args.max_rank:
                continue
            if float(ctx['score']) < args.min_score:
                continue
            if args.target == "chunk":
                target = ctx['text'].strip()
                for _ in range(args.n_samples):
                    if args.mark_silver and float(ctx['score']) < args.min_score_gold:
                        yield source + " || ?", target
                    elif args.mark_silver:
                        yield source + " || +", target
                    else:
                        yield source, target
            elif args.target == "title":
                # "-" is the corpus placeholder for a missing title.
                if ctx['title'].strip()=="-":
                    continue
                target = ctx['title'].strip() + " @@"
                for _ in range(args.n_samples):
                    if args.mark_silver and float(ctx['score']) < args.min_score_gold:
                        yield source + " || ?", target
                    elif args.mark_silver:
                        yield source + " || +", target
                    else:
                        yield source, target
            elif args.target == "query":
                # The passage-id field name differs per dataset.
                if "trivia" in args.input:
                    querys = pid2query[str(ctx['psg_id'])]
                if "nq" in args.input:
                    querys = pid2query[str(ctx['passage_id'])]
                if "msmarco" in args.input:
                    querys = pid2query[str(ctx['docid'])]
                for _ in range(args.n_samples):
                    if args.mark_silver and float(ctx['score']) < args.min_score_gold:
                        yield source + " || ?", querys[min(_, len(querys)-1)]
                    elif args.mark_silver:
                        yield source + " || +", querys[min(_, len(querys)-1)]
                    else:
                        yield source, querys[min(_, len(querys)-1)]
            elif args.target == "code":
                idx = ctx['docid']
                code = id2code.get(idx)
                if not code: continue
                target = code.strip() + " ||"
                for _ in range(args.n_samples):
                    if args.mark_silver and float(ctx['score']) < args.min_score_gold:
                        yield source + " || ?", target
                    elif args.mark_silver:
                        yield source + " || +", target
                    else:
                        yield source, target
            else:
                raise ValueError("Wrong target")
def main():
    """Write parallel ``<output>.source`` / ``<output>.target`` files."""
    args = parse_args()
    with open(args.output + '.source', mode=args.mode) as src, open(args.output + '.target', mode=args.mode) as tgt:
        for source, target in iterator_span(args) if args.target == "span" else iterator(args):
            # NOTE(review): the leading space is presumably for BPE/BART
            # tokenization conventions — confirm downstream.
            source = " " + source.strip()
            target = " " + target.strip()
            src.write(source + "\n")
            tgt.write(target + "\n")


if __name__ == '__main__':
    main()
| 10,873 | 33.302839 | 135 | py |
MINDER | MINDER-main/scripts/training/make_generated_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import csv
import random
import tqdm
import pickle
from nltk.corpus import stopwords
from random import sample
from fuzzywuzzy import fuzz
from collections import defaultdict
import math
# Tokens that must not anchor a generated span: articles, pronouns,
# conjunctions, demonstratives, and bare quote characters.  (The sibling
# make_supervised_* scripts use NLTK stopwords for the same purpose;
# this list keeps the script self-contained.)
banned = {
    "the", "The",
    "to",
    "a", "A", "an", "An",
    "he", "He", "his", "His", "him", "He's",
    "she", "She", "her", "Her", "she's", "She's",
    "it", "It", "its", "Its", "it's", "It's",
    "and", "And",
    "or", "Or",
    "this", "This",
    "that", "That",
    "those", "Those",
    "these", "These",
    '"', '""', "'", "''",
}
def is_good(token):
    """Return True if *token* may anchor a span: it is not a banned
    stopword, does not end with sentence punctuation ('?', '.', '!'),
    and does not start with an opening bracket ('(' or '[')."""
    if token in banned:
        return False
    return token[-1] not in '?.!' and token[0] not in '(['
def span_iterator(tokens, ngrams=3, banned=banned):
    """Yield ``(start, start + ngrams)`` windows anchored at non-stopword
    tokens; the end index may exceed ``len(tokens)`` (callers only slice)."""
    for position, token in enumerate(tokens):
        if token not in banned:
            yield (position, position + ngrams)
def extract_spans(text, source, n_samples, min_length=10, max_length=10, temperature=1.0):
    """Sample n_samples spans of *text* whose start positions fuzzily match the query.

    Every 3-gram of the query (the part of *source* before '||') is scored
    against every allowed 3-gram start of the passage with fuzz.ratio; the
    accumulated per-position scores are softmax-weighted (with *temperature*)
    and span starts are drawn from that distribution.  Each start is extended
    by a uniformly random length in [min_length, max_length] tokens.
    """
    source = source.split("||", 1)[0]  # drop the '|| body' style hint
    query_tokens = source.split()
    query_tokens_lower = [t.lower() for t in query_tokens]
    passage_tokens = text.split()
    passage_tokens_lower = [t.lower() for t in passage_tokens]
    # matches[i] accumulates fuzzy-match mass for a span starting at token i.
    matches = defaultdict(int)
    for i1, _ in enumerate(query_tokens_lower):
        j1 = i1+3
        str_1 = " ".join(query_tokens_lower[i1:j1])
        for (i2, j2) in span_iterator(passage_tokens_lower, 3):
            str_2 = " ".join(passage_tokens_lower[i2:j2])
            ratio = fuzz.ratio(str_1, str_2) / 100.0
            matches[i2] += ratio
    if not matches:
        # No candidate start at all: fall back to the passage start.
        # NOTE(review): this branch produces a single index, so fewer than
        # n_samples spans are returned — callers cap with min(..., len(spans)).
        indices = [0]
    else:
        # Positions sorted by descending accumulated score.
        indices, weights = zip(*sorted(matches.items(), key=lambda x: -(x[1])))
        weights = list(weights)
        sum_weights = float(sum([0] + weights))
        if sum_weights == 0.0 or not weights:
            indices = [0]
            weights = [1.0]
        else:
            # Softmax over scores; Z renormalizes to a distribution.
            weights = [math.exp(float(w) / temperature) for w in weights]
            Z = sum(weights)
            weights = [w / Z for w in weights]
            indices = random.choices(indices, weights=weights, k=n_samples)
    spans = []
    for i in indices:
        # Slicing clamps at the passage end, so a span may come out shorter.
        subspan_size = random.randint(min_length, max_length)
        span = " ".join(passage_tokens[i:i+subspan_size])
        spans.append(span)
    return spans
def preprocess_file(
    input_path,
    num_samples=1,
    num_title_samples=1,
    num_query_samples=1,
    format="dpr",
    delimiter='@@',
    min_length_input=1,
    max_length_input=15,
    min_length_output=10,
    max_length_output=10,
    full_doc_n=0,
    mark_pretraining=False,
    pid2query=None
):
    """Yield (source, target) pairs from a DPR-style passage TSV.

    For each passage: query->body spans (via extract_spans), query->title
    pairs, and query->other-query pairs when the passage has several
    pseudo-queries.  *pid2query* is a path to a pickled {pid: [query, ...]}
    mapping.
    """
    if format == 'kilt':
        raise NotImplementedError
    elif format == 'dpr':
        if pid2query:
            with open(pid2query, 'rb') as f:
                pid2query = pickle.load(f)
        with open(input_path, 'r', 2 ** 20) as f:
            next(f)  # skip the TSV header line
            f = csv.reader(f, delimiter='\t', quotechar='"')
            f = (l for l in f if len(l) == 3)
            for pid, text, title in tqdm.tqdm(f):
                try:
                    text = text
                    title = title
                    querys = pid2query[str(pid)]
                    if text == title:
                        continue
                    # 1) query -> fuzzy-matched body spans.
                    sampled = 0
                    select_query = sample(querys, 1)[0]
                    a = select_query+ " || body"
                    if mark_pretraining:
                        a += " || p"
                    spans = extract_spans(text, a, num_samples, min_length=10, max_length=10, temperature=1.0)
                    while sampled < min(num_samples, len(spans)):
                        b = spans[sampled]
                        yield a, b
                        sampled += 1
                    # 2) query -> title (delimiter marks the title end).
                    sampled = 0
                    select_querys = sample(querys*num_title_samples, num_title_samples)
                    while sampled < num_title_samples:
                        a=select_querys[sampled]+ " || title"
                        if mark_pretraining:
                            a += " || p"
                        b = title.strip() + " " + delimiter
                        yield a, b
                        sampled += 1
                    # 3) query -> a *different* query of the same passage.
                    if len(querys)>1:
                        sampled = 0
                        select_querys = sample(querys*num_query_samples, num_query_samples)
                        while sampled < num_query_samples:
                            a = select_querys[sampled]+ " || query"
                            if mark_pretraining:
                                a += " || p"
                            # Scan for the first query that differs from the
                            # selected one; fall back to the last on wrap.
                            num=0
                            while select_querys[sampled]==querys[num]:
                                num+=1
                                if(num>=len(querys)):
                                    num-=1
                                    break
                            b = querys[num].strip()
                            yield a, b
                            sampled += 1
                except Exception as e:
                    # Best-effort: log and skip malformed rows / missing pids.
                    print(e)
                    continue
    else:
        raise ValueError
def parse_args():
    """Parse command-line options for the pretraining-pair generator."""
    parser = argparse.ArgumentParser()
    for positional in ('input', 'source', 'target'):
        parser.add_argument(positional)
    parser.add_argument('--delim', default="@@")
    parser.add_argument('--format', choices=['kilt', 'dpr'], default='dpr')
    # All four span-length bounds share the same default.
    for length_opt in ('--min_length_input', '--max_length_input',
                       '--min_length_output', '--max_length_output'):
        parser.add_argument(length_opt, type=int, default=10)
    for count_opt, count_default in (('--num_samples', 10),
                                     ('--num_title_samples', 3),
                                     ('--num_query_samples', 5),
                                     ('--full_doc_n', 1)):
        parser.add_argument(count_opt, type=int, default=count_default)
    parser.add_argument('--mark_pretraining', action="store_true")
    parser.add_argument('--pid2query', default=None, type=str)
    return parser.parse_args()
def main():
    """Generate pretraining pairs and write them to the .source/.target files."""
    args = parse_args()
    pairs = preprocess_file(
        args.input,
        format=args.format,
        num_samples=args.num_samples,
        num_title_samples=args.num_title_samples,
        num_query_samples=args.num_query_samples,
        full_doc_n=args.full_doc_n,
        delimiter=args.delim,
        min_length_input=args.min_length_input,
        max_length_input=args.max_length_input,
        min_length_output=args.min_length_output,
        max_length_output=args.max_length_output,
        mark_pretraining=args.mark_pretraining,
        pid2query=args.pid2query,
    )
    with open(args.source, 'w', 2 ** 20) as src_f, open(args.target, 'w', 2 ** 20) as tgt_f:
        for src_line, tgt_line in pairs:
            # Lowercase ~10% of sources so the model is robust to casing.
            if random.random() < 0.1:
                src_line = src_line.lower()
            src_f.write(" " + src_line + '\n')
            tgt_f.write(" " + tgt_line + '\n')
if __name__ == '__main__':
    main()
| 7,389 | 32.139013 | 110 | py |
MINDER | MINDER-main/scripts/training/make_generated_dataset3.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import csv
import random
import tqdm
import pickle
from nltk.corpus import stopwords
from random import sample
from fuzzywuzzy import fuzz
from collections import defaultdict
import math
import multiprocessing
# Tokens that may not begin a sampled span: frequent function words
# (both capitalizations) plus stray quote characters.
banned = {
    "the", "The",
    "to",
    "a", "A", "an", "An",
    "he", "He", "his", "His", "him", "He's",
    "she", "She", "her", "Her", "she's", "She's",
    "it", "It", "its", "Its", "it's", "It's",
    "and", "And",
    "or", "Or",
    "this", "This",
    "that", "That",
    "those", "Those",
    "these", "These",
    '"', '""', "'", "''",
}
def is_good(token):
    """Return True when *token* may start a span: not banned, does not end a
    sentence ('?', '.', '!'), and does not open a bracket."""
    return not (token in banned or token[-1] in '?.!' or token[0] in '([')
def span_iterator(tokens, ngrams=3, banned=banned):
    """Yield (start, start + ngrams) boundaries for every candidate span whose
    first token is not in *banned*."""
    for start, token in enumerate(tokens):
        if token not in banned:
            yield (start, start + ngrams)
def extract_spans(text, source, n_samples, min_length=10, max_length=10, temperature=1.0):
    """Sample n_samples spans of *text* whose start positions fuzzily match the query.

    Query 3-grams (the part of *source* before '||') are scored against every
    allowed passage position with fuzz.ratio; accumulated scores are
    softmax-weighted (with *temperature*) and span starts drawn from that
    distribution, each extended by a random length in [min_length, max_length].
    """
    source = source.split("||", 1)[0]  # drop the '|| body' style hint
    query_tokens = source.split()
    query_tokens_lower = [t.lower() for t in query_tokens]
    passage_tokens = text.split()
    passage_tokens_lower = [t.lower() for t in passage_tokens]
    # matches[i] accumulates fuzzy-match mass for a span starting at token i.
    matches = defaultdict(int)
    for i1, _ in enumerate(query_tokens_lower):
        j1 = i1+3
        str_1 = " ".join(query_tokens_lower[i1:j1])
        for (i2, j2) in span_iterator(passage_tokens_lower, 3):
            str_2 = " ".join(passage_tokens_lower[i2:j2])
            ratio = fuzz.ratio(str_1, str_2) / 100.0
            matches[i2] += ratio
    if not matches:
        # No candidate start at all: fall back to the passage start.
        # NOTE(review): this branch produces a single index, so fewer than
        # n_samples spans are returned — callers cap with min(..., len(spans)).
        indices = [0]
    else:
        # Positions sorted by descending accumulated score.
        indices, weights = zip(*sorted(matches.items(), key=lambda x: -(x[1])))
        weights = list(weights)
        sum_weights = float(sum([0] + weights))
        if sum_weights == 0.0 or not weights:
            indices = [0]
            weights = [1.0]
        else:
            # Softmax over scores; Z renormalizes to a distribution.
            weights = [math.exp(float(w) / temperature) for w in weights]
            Z = sum(weights)
            weights = [w / Z for w in weights]
            indices = random.choices(indices, weights=weights, k=n_samples)
    spans = []
    for i in indices:
        # Slicing clamps at the passage end, so a span may come out shorter.
        subspan_size = random.randint(min_length, max_length)
        span = " ".join(passage_tokens[i:i+subspan_size])
        spans.append(span)
    return spans
def preprocess_file(
    pid,
    text,
    title,
    querys,
    num_samples,
    num_title_samples,
    num_query_samples,
    format,
    delimiter,
    min_length_input,
    max_length_input,
    min_length_output,
    max_length_output,
    full_doc_n,
    mark_pretraining
):
    """Yield (source, target) pairs for ONE passage (worker-side routine).

    Emits: full-doc->title pairs, random-span<->title pairs, random
    span->span body pairs, and random-span->pseudo-query pairs.  Random
    starts are retried up to 10 times when is_good() rejects the first token.
    """
    if format == 'kilt':
        raise NotImplementedError
    elif format == 'dpr':
        try:
            text = text
            title = title
            tokens = text.split()
            # 1) whole document -> title.
            for _ in range(full_doc_n):
                a = text.strip() + " || title"
                if mark_pretraining:
                    a += " || p"
                b = title.strip() + " " + delimiter
                yield a, b
            # 2) random span -> title, or title -> random span (coin flip).
            sampled = 0
            failures = 0
            while sampled < num_title_samples and failures < 10:
                if random.random() > 0.5:
                    len_a = random.randint(min_length_input, max_length_input)
                    idx_a = random.randint(0, max(0, len(tokens)-len_a))
                    a = ' '.join(tokens[idx_a:idx_a+len_a]).strip() + " || title"
                    if mark_pretraining:
                        a += " || p"
                    b = title.strip() + " " + delimiter
                else:
                    len_b = random.randint(min_length_output, max_length_output)
                    idx_b = random.randint(0, max(0, len(tokens)-len_b))
                    if not is_good(tokens[idx_b]):
                        failures += 1
                        continue
                    b = ' '.join(tokens[idx_b:idx_b+len_b]).strip()
                    a = title.strip() + ' || body'
                    if mark_pretraining:
                        a += " || p"
                yield a, b
                sampled += 1
            # 3) random span -> different random span of the same passage.
            sampled = 0
            failures = 0
            while sampled < num_samples and failures < 10:
                len_a = random.randint(min_length_input, max_length_input)
                len_b = random.randint(min_length_output, max_length_output)
                idx_a = random.randint(0, max(0, len(tokens)-len_a))
                idx_b = random.randint(0, max(0, len(tokens)-len_b))
                if idx_a == idx_b or (not is_good(tokens[idx_b])):
                    failures += 1
                    continue
                a = ' '.join(tokens[idx_a:idx_a+len_a]).strip() + ' || body'
                if mark_pretraining:
                    a += " || p"
                b = ' '.join(tokens[idx_b:idx_b+len_b]).strip()
                yield a, b
                sampled += 1
            # 4) random span -> pseudo-query, or title -> random span.
            sampled = 0
            failures = 0
            while sampled < num_query_samples and failures < 10:
                if random.random() > 0.5:
                    len_a = random.randint(min_length_input, max_length_input)
                    idx_a = random.randint(0, max(0, len(tokens)-len_a))
                    a = ' '.join(tokens[idx_a:idx_a+len_a]).strip() + " || query"
                    if mark_pretraining:
                        a += " || p"
                    b = sample(querys, 1)[0]
                else:
                    len_b = random.randint(min_length_output, max_length_output)
                    idx_b = random.randint(0, max(0, len(tokens)-len_b))
                    if not is_good(tokens[idx_b]):
                        failures += 1
                        continue
                    b = ' '.join(tokens[idx_b:idx_b+len_b]).strip()
                    a = title.strip() + ' || body'
                    if mark_pretraining:
                        a += " || p"
                yield a, b
                sampled += 1
        except Exception as e:
            # Best-effort: log and drop this passage on any error.
            print(e)
    else:
        raise ValueError
def preprocess_file_wrapper(args):
    """Unpack one job tuple and materialize its generator
    (multiprocessing.Pool.imap passes a single picklable argument)."""
    pairs = preprocess_file(*args)
    return list(pairs)
def parse_args():
    """Parse command-line options for the parallel pair generator."""
    parser = argparse.ArgumentParser()
    for positional in ('input', 'source', 'target'):
        parser.add_argument(positional)
    parser.add_argument('--delim', default="@@")
    parser.add_argument('--format', choices=['kilt', 'dpr'], default='dpr')
    # All four span-length bounds share the same default.
    for length_opt in ('--min_length_input', '--max_length_input',
                       '--min_length_output', '--max_length_output'):
        parser.add_argument(length_opt, type=int, default=10)
    for count_opt, count_default in (('--num_samples', 10),
                                     ('--num_title_samples', 3),
                                     ('--num_query_samples', 5),
                                     ('--full_doc_n', 1)):
        parser.add_argument(count_opt, type=int, default=count_default)
    parser.add_argument('--mark_pretraining', action="store_true")
    parser.add_argument('--pid2query', default=None, type=str)
    parser.add_argument("--jobs", type=int, default=20)
    return parser.parse_args()
def data_read(f, pid2query):
    """Yield (pid, text, title, querys) rows, dropping title-only passages and
    passages that have fewer than two pseudo-queries."""
    for pid, text, title in tqdm.tqdm(f):
        querys = pid2query[str(pid)]
        if text == title or len(querys) < 2:
            continue
        yield pid, text, title, querys
def main():
    """Read the passage TSV, fan rows out to a worker pool, and write the
    generated (source, target) pairs to the two output files."""
    args = parse_args()
    with open(args.source, 'w', 2 ** 20) as src, open(args.target, 'w', 2 ** 20) as tgt:
        # NOTE(review): pid2query is only bound when --pid2query is given, but
        # data_read() below always uses it — the option looks effectively
        # mandatory; confirm.
        if args.pid2query:
            with open(args.pid2query, 'rb') as f:
                pid2query = pickle.load(f)
        with open(args.input, 'r', 2 ** 20) as f:
            next(f)
            f = csv.reader(f, delimiter='\t', quotechar='"')
            data = data_read(f, pid2query)
            # One lazily-built argument tuple per surviving row.
            arg_it = ((pid, text, title, querys, args.num_samples, args.num_title_samples, args.num_query_samples, args.format, args.delim,
                       args.min_length_input,args.max_length_input,
                       args.min_length_output, args.max_length_output, args.full_doc_n, args.mark_pretraining ) for pid, text, title, querys in data)
            with multiprocessing.Pool(args.jobs) as pool:
                for sts in pool.imap(preprocess_file_wrapper, arg_it):
                    for s, t in sts:
                        # Lowercase ~10% of sources for casing robustness.
                        if random.random() < 0.1:
                            s = s.lower()
                        s = " " + s
                        t = " " + t
                        src.write(s + '\n')
                        tgt.write(t + '\n')
if __name__ == '__main__':
    main()
| 8,962 | 31.711679 | 140 | py |
MINDER | MINDER-main/scripts/training/make_supervised_dpr_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from argparse import ArgumentParser
from collections import defaultdict
import json
import multiprocessing
import random
import re
import tqdm
import math
import ftfy
from fuzzywuzzy import fuzz
from nltk.corpus import stopwords
import nltk
import pickle
# Fetch the NLTK stopword corpus on first run (no-op once cached).
nltk.download('stopwords')
# Tokens that may not start an extracted span: English stopwords.
banned = set(stopwords.words('english'))
def parse_args():
    """Parse options controlling supervised pair generation from DPR json."""
    parser = ArgumentParser()
    parser.add_argument('input')
    parser.add_argument('output')
    # Score thresholds default to 999.0, i.e. "filter everything" unless set.
    for score_opt in ('--min_score', '--min_score_gold'):
        parser.add_argument(score_opt, default=999.0, type=float)
    parser.add_argument('--max_rank', default=1, type=int)
    parser.add_argument(
        '--target',
        default="span",
        choices=["chunk", "span", "title", "code", "query"])
    parser.add_argument('--min_length', default=10, type=int)
    parser.add_argument('--max_length', default=10, type=int)
    parser.add_argument('--temperature', default=1.0, type=float)
    parser.add_argument('--jobs', default=30, type=int)
    for flag in ('--mark_target', '--mark_silver'):
        parser.add_argument(flag, action="store_true")
    parser.add_argument('--n_samples', default=1, type=int)
    parser.add_argument('--id2code', default=None, type=str)
    parser.add_argument('--pid2query', default=None, type=str)
    parser.add_argument('--mode', choices=["w", "a"], default="w")
    return parser.parse_args()
def read_id2code(id2code_path):
    """Load and return the pickled docid -> code mapping at *id2code_path*."""
    with open(id2code_path, 'rb') as fh:
        return pickle.load(fh)
def span_iterator(tokens, ngrams=3, banned=banned):
    """Yield (start, start + ngrams) boundaries for every candidate span whose
    first token is not in *banned*."""
    for start, token in enumerate(tokens):
        if token not in banned:
            yield (start, start + ngrams)
def extract_spans(text, source, n_samples, min_length, max_length, temperature=1.0):
    """Yield spans of *text* whose start positions fuzzily match the query.

    Query 3-grams (the part of *source* before '||') are scored against every
    allowed passage position with fuzz.ratio; accumulated scores are
    softmax-weighted (with *temperature*) and span starts drawn from that
    distribution, each extended by a random length in [min_length, max_length].
    """
    source = source.split("||", 1)[0]  # drop the '|| body' style hint
    query_tokens = source.split()
    query_tokens_lower = [t.lower() for t in query_tokens]
    passage_tokens = text.split()
    passage_tokens_lower = [t.lower() for t in passage_tokens]
    # matches[i] accumulates fuzzy-match mass for a span starting at token i.
    matches = defaultdict(int)
    for i1, _ in enumerate(query_tokens_lower):
        j1 = i1+3
        str_1 = " ".join(query_tokens_lower[i1:j1])
        for (i2, j2) in span_iterator(passage_tokens_lower, 3):
            str_2 = " ".join(passage_tokens_lower[i2:j2])
            ratio = fuzz.ratio(str_1, str_2) / 100.0
            matches[i2] += ratio
    if not matches:
        # No candidate start at all: fall back to the passage start.
        # NOTE(review): this branch produces a single index, so fewer than
        # n_samples spans are yielded.
        indices = [0]
    else:
        # Positions sorted by descending accumulated score.
        indices, weights = zip(*sorted(matches.items(), key=lambda x: -(x[1])))
        weights = list(weights)
        sum_weights = float(sum([0] + weights))
        if sum_weights == 0.0 or not weights:
            indices = [0]
            weights = [1.0]
        else:
            # Softmax over scores; Z renormalizes to a distribution.
            weights = [math.exp(float(w) / temperature) for w in weights]
            Z = sum(weights)
            weights = [w / Z for w in weights]
            indices = random.choices(indices, weights=weights, k=n_samples)
    for i in indices:
        # Slicing clamps at the passage end, so a span may come out shorter.
        subspan_size = random.randint(min_length, max_length)
        span = " ".join(passage_tokens[i:i+subspan_size])
        yield span
def extract_spans_wrapper(args):
    """Unpack one job tuple for Pool.imap; return (source, list_of_spans).
    args[1] is the annotated source string passed to extract_spans."""
    source = args[1]
    return source, list(extract_spans(*args))
def clean(text):
    """Collapse whitespace, fix mojibake via ftfy, and strip Wikipedia
    structure markers from *text*."""
    text = ftfy.fix_text(re.sub(r'\s+', ' ', text))
    for marker in ('BULLET::::', 'SECTION::::'):
        text = text.replace(marker, '')
    return text.strip()
def _iterator_span_get_arguments(data, min_score, max_rank, mark_target, mark_silver, min_score_gold):
    """Yield (passage_text, annotated_source) pairs for span-extraction jobs,
    filtering positives by rank and score and tagging silver/gold quality."""
    for example in tqdm.tqdm(data):
        source = example['question'].strip()
        if mark_target:
            source += " || body"
        for rank, ctx in enumerate(example['positive_ctxs'], start=1):
            score = float(ctx['score'])
            if rank > max_rank or score < min_score:
                continue
            text = ctx['text'].strip()
            if not mark_silver:
                yield text, source
            elif score < min_score_gold:
                yield text, source + " || ?"
            else:
                yield text, source + " || +"
def iterator_span(args):
    """Yield (source, span) pairs: fuzzy-matched passage spans extracted in
    parallel across args.jobs worker processes."""
    with open(args.input) as fin:
        data = json.load(fin)
    # One job per (passage text, annotated question) pair.
    arg_it = _iterator_span_get_arguments(data, args.min_score, args.max_rank, args.mark_target, args.mark_silver, args.min_score_gold)
    arg_it = ((text, source, args.n_samples, args.min_length, args.max_length, args.temperature) for text, source in arg_it)
    with multiprocessing.Pool(args.jobs) as pool:
        for source, spans in pool.imap(extract_spans_wrapper, arg_it):
            for target in spans:
                yield source, target
def iterator(args):
    """Yield (source, target) pairs for non-span targets (chunk/title/query/code)
    from a DPR-style json file of questions with scored positive contexts."""
    if args.pid2query and args.target == "query":
        with open(args.pid2query, 'rb') as f:
            pid2query = pickle.load(f)
    if args.target == "code" and args.id2code:
        id2code = read_id2code(args.id2code)
    with open(args.input) as fin:
        data = json.load(fin)
    for sample in tqdm.tqdm(data):
        source = sample['question'].strip()
        # NOTE(review): every branch below also requires --mark_target; with
        # it unset all targets fall through to the ValueError — confirm the
        # flag is effectively mandatory for these targets.
        if args.target == "chunk" and args.mark_target:
            source += " || body"
        elif args.target == "title" and args.mark_target:
            source += " || title"
        elif args.target == "query" and args.mark_target:
            source += " || query"
        elif args.target == "code" and args.mark_target:
            source += " || code"
        else:
            raise ValueError("Wrong target")
        for i, ctx in enumerate(sample['positive_ctxs'], start=1):
            # Keep only top-ranked, sufficiently-scored positives.
            if i > args.max_rank:
                continue
            if float(ctx['score']) < args.min_score:
                continue
            if args.target == "chunk":
                target = ctx['text'].strip()
                for _ in range(args.n_samples):
                    if args.mark_silver and float(ctx['score']) < args.min_score_gold:
                        yield source + " || ?", target
                    elif args.mark_silver:
                        yield source + " || +", target
                    else:
                        yield source, target
            elif args.target == "title":
                target = ctx['title'].strip() + " @@"
                for _ in range(args.n_samples):
                    if args.mark_silver and float(ctx['score']) < args.min_score_gold:
                        yield source + " || ?", target
                    elif args.mark_silver:
                        yield source + " || +", target
                    else:
                        yield source, target
            elif args.target == "query":
                # Passage-id field name differs per dataset dump.
                if "trivia" in args.input:
                    querys = pid2query[str(ctx['psg_id'])]
                if "nq" in args.input:
                    querys = pid2query[str(ctx['passage_id'])]
                for _ in range(args.n_samples):
                    if args.mark_silver and float(ctx['score']) < args.min_score_gold:
                        yield source + " || ?", querys[min(_, len(querys)-1)]
                    elif args.mark_silver:
                        yield source + " || +", querys[min(_, len(querys)-1)]
                    else:
                        yield source, querys[min(_, len(querys)-1)]
            elif args.target == "code":
                if "trivia" in args.input:
                    idx = ctx['psg_id']
                if "nq" in args.input:
                    idx = ctx['passage_id']
                code = id2code.get(idx)
                if not code:
                    continue
                # Space-separate the code's characters as the target string.
                target = ' '.join([*code.strip()])
                for _ in range(args.n_samples):
                    if args.mark_silver and float(ctx['score']) < args.min_score_gold:
                        yield source + " || ?", target
                    elif args.mark_silver:
                        yield source + " || +", target
                    else:
                        yield source, target
            else:
                raise ValueError("Wrong target")
def main():
    """Write one (source, target) pair per line to <output>.source / <output>.target."""
    args = parse_args()
    pair_iter = iterator_span(args) if args.target == "span" else iterator(args)
    with open(args.output + '.source', mode=args.mode) as src, open(args.output + '.target', mode=args.mode) as tgt:
        for source, target in pair_iter:
            src.write(" " + source.strip() + "\n")
            tgt.write(" " + target.strip() + "\n")
if __name__ == '__main__':
    main()
| 9,100 | 31.974638 | 135 | py |
MINDER | MINDER-main/scripts/training/make_unsupervised_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import csv
import random
import tqdm
from nltk.corpus import stopwords
# Tokens that may not begin a sampled span: frequent function words
# (both capitalizations) plus stray quote characters.
banned = {
    "the", "The",
    "to",
    "a", "A", "an", "An",
    "he", "He", "his", "His", "him", "He's",
    "she", "She", "her", "Her", "she's", "She's",
    "it", "It", "its", "Its", "it's", "It's",
    "and", "And",
    "or", "Or",
    "this", "This",
    "that", "That",
    "those", "Those",
    "these", "These",
    '"', '""', "'", "''",
}
def is_good(token):
    """Return True when *token* may start a span: not banned, does not end a
    sentence ('?', '.', '!'), and does not open a bracket."""
    return not (token in banned or token[-1] in '?.!' or token[0] in '([')
def preprocess_file(
    input_path,
    num_samples=1,
    num_title_samples=1,
    format="dpr",
    delimiter='@@',
    min_length_input=1,
    max_length_input=15,
    min_length_output=10,
    max_length_output=10,
    full_doc_n=0,
    mark_pretraining=False,
):
    """Yield unsupervised (source, target) pairs from a DPR-style passage TSV.

    Per passage: full-doc->title pairs, random-span<->title pairs, and random
    span->span body pairs.  Random starts are retried up to 10 times when
    is_good() rejects the first token.
    """
    if format == 'kilt':
        raise NotImplementedError
    elif format == 'dpr':
        with open(input_path, 'r', 2 ** 20) as f:
            next(f)  # skip the TSV header line
            f = csv.reader(f, delimiter='\t', quotechar='"')
            f = (l for l in f if len(l) == 3)
            for _, text, title in tqdm.tqdm(f):
                text = text
                title = title
                if text == title:
                    continue
                tokens = text.split()
                # 1) whole document -> title.
                for _ in range(full_doc_n):
                    a = text.strip() + " || title"
                    if mark_pretraining:
                        a += " || p"
                    b = title.strip() + " " + delimiter
                    yield a, b
                # 2) random span -> title, or title -> random span (coin flip).
                sampled = 0
                failures = 0
                while sampled < num_title_samples and failures < 10:
                    if random.random() > 0.5:
                        len_a = random.randint(min_length_input, max_length_input)
                        idx_a = random.randint(0, max(0, len(tokens)-len_a))
                        a = ' '.join(tokens[idx_a:idx_a+len_a]).strip() + " || title"
                        if mark_pretraining:
                            a += " || p"
                        b = title.strip() + " " + delimiter
                    else:
                        len_b = random.randint(min_length_output, max_length_output)
                        idx_b = random.randint(0, max(0, len(tokens)-len_b))
                        if not is_good(tokens[idx_b]):
                            failures += 1
                            continue
                        b = ' '.join(tokens[idx_b:idx_b+len_b]).strip()
                        a = title.strip() + ' || body'
                        if mark_pretraining:
                            a += " || p"
                    yield a, b
                    sampled += 1
                # 3) random span -> different random span of the same passage.
                sampled = 0
                failures = 0
                while sampled < num_samples and failures < 10:
                    len_a = random.randint(min_length_input, max_length_input)
                    len_b = random.randint(min_length_output, max_length_output)
                    idx_a = random.randint(0, max(0, len(tokens)-len_a))
                    idx_b = random.randint(0, max(0, len(tokens)-len_b))
                    if idx_a == idx_b or (not is_good(tokens[idx_b])):
                        failures += 1
                        continue
                    a = ' '.join(tokens[idx_a:idx_a+len_a]).strip() + ' || body'
                    if mark_pretraining:
                        a += " || p"
                    b = ' '.join(tokens[idx_b:idx_b+len_b]).strip()
                    yield a, b
                    sampled += 1
    else:
        raise ValueError
def parse_args():
    """Parse command-line options for the unsupervised pair generator."""
    parser = argparse.ArgumentParser()
    for positional in ('input', 'source', 'target'):
        parser.add_argument(positional)
    parser.add_argument('--delim', default="@@")
    parser.add_argument('--format', choices=['kilt', 'dpr'], default='dpr')
    # All four span-length bounds share the same default.
    for length_opt in ('--min_length_input', '--max_length_input',
                       '--min_length_output', '--max_length_output'):
        parser.add_argument(length_opt, type=int, default=10)
    parser.add_argument('--num_samples', type=int, default=10)
    parser.add_argument('--num_title_samples', type=int, default=3)
    parser.add_argument('--full_doc_n', type=int, default=1)
    parser.add_argument('--mark_pretraining', action="store_true")
    return parser.parse_args()
def main():
    """Generate unsupervised pairs and write them to the .source/.target files."""
    args = parse_args()
    pairs = preprocess_file(
        args.input,
        format=args.format,
        num_samples=args.num_samples,
        num_title_samples=args.num_title_samples,
        full_doc_n=args.full_doc_n,
        delimiter=args.delim,
        min_length_input=args.min_length_input,
        max_length_input=args.max_length_input,
        min_length_output=args.min_length_output,
        max_length_output=args.max_length_output,
        mark_pretraining=args.mark_pretraining,
    )
    with open(args.source, 'w', 2 ** 20) as src_f, open(args.target, 'w', 2 ** 20) as tgt_f:
        for src_line, tgt_line in pairs:
            # Lowercase ~10% of sources so the model is robust to casing.
            if random.random() < 0.1:
                src_line = src_line.lower()
            src_f.write(" " + src_line + '\n')
            tgt_f.write(" " + tgt_line + '\n')
if __name__ == '__main__':
    main()
| 5,756 | 31.525424 | 88 | py |
MINDER | MINDER-main/scripts/training/make_generated_dataset2.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import csv
import random
import tqdm
import pickle
from nltk.corpus import stopwords
from random import sample
from fuzzywuzzy import fuzz
from collections import defaultdict
import math
import multiprocessing
# Tokens that may not begin a sampled span: frequent function words
# (both capitalizations) plus stray quote characters.
banned = {
    "the", "The",
    "to",
    "a", "A", "an", "An",
    "he", "He", "his", "His", "him", "He's",
    "she", "She", "her", "Her", "she's", "She's",
    "it", "It", "its", "Its", "it's", "It's",
    "and", "And",
    "or", "Or",
    "this", "This",
    "that", "That",
    "those", "Those",
    "these", "These",
    '"', '""', "'", "''",
}
def is_good(token):
    """Return True when *token* may start a span: not banned, does not end a
    sentence ('?', '.', '!'), and does not open a bracket."""
    return not (token in banned or token[-1] in '?.!' or token[0] in '([')
def span_iterator(tokens, ngrams=3, banned=banned):
    """Yield (start, start + ngrams) boundaries for every candidate span whose
    first token is not in *banned*."""
    for start, token in enumerate(tokens):
        if token not in banned:
            yield (start, start + ngrams)
def extract_spans(text, source, n_samples, min_length=10, max_length=10, temperature=1.0):
    """Sample n_samples spans of *text* whose start positions fuzzily match the query.

    Query 3-grams (the part of *source* before '||') are scored against every
    allowed passage position with fuzz.ratio; accumulated scores are
    softmax-weighted (with *temperature*) and span starts drawn from that
    distribution, each extended by a random length in [min_length, max_length].
    """
    source = source.split("||", 1)[0]  # drop the '|| body' style hint
    query_tokens = source.split()
    query_tokens_lower = [t.lower() for t in query_tokens]
    passage_tokens = text.split()
    passage_tokens_lower = [t.lower() for t in passage_tokens]
    # matches[i] accumulates fuzzy-match mass for a span starting at token i.
    matches = defaultdict(int)
    for i1, _ in enumerate(query_tokens_lower):
        j1 = i1+3
        str_1 = " ".join(query_tokens_lower[i1:j1])
        for (i2, j2) in span_iterator(passage_tokens_lower, 3):
            str_2 = " ".join(passage_tokens_lower[i2:j2])
            ratio = fuzz.ratio(str_1, str_2) / 100.0
            matches[i2] += ratio
    if not matches:
        # No candidate start at all: fall back to the passage start.
        # NOTE(review): this branch produces a single index, so fewer than
        # n_samples spans are returned — callers cap with min(..., len(spans)).
        indices = [0]
    else:
        # Positions sorted by descending accumulated score.
        indices, weights = zip(*sorted(matches.items(), key=lambda x: -(x[1])))
        weights = list(weights)
        sum_weights = float(sum([0] + weights))
        if sum_weights == 0.0 or not weights:
            indices = [0]
            weights = [1.0]
        else:
            # Softmax over scores; Z renormalizes to a distribution.
            weights = [math.exp(float(w) / temperature) for w in weights]
            Z = sum(weights)
            weights = [w / Z for w in weights]
            indices = random.choices(indices, weights=weights, k=n_samples)
    spans = []
    for i in indices:
        # Slicing clamps at the passage end, so a span may come out shorter.
        subspan_size = random.randint(min_length, max_length)
        span = " ".join(passage_tokens[i:i+subspan_size])
        spans.append(span)
    return spans
def preprocess_file(
    pid,
    text,
    title,
    querys,
    num_samples,
    num_title_samples,
    num_query_samples,
    format,
    delimiter,
    min_length_input,
    max_length_input,
    min_length_output,
    max_length_output,
    full_doc_n,
    mark_pretraining
):
    """Yield (source, target) pairs for ONE passage (worker-side routine).

    Emits: query->fuzzy body spans, query->title pairs, and query->other-query
    pairs when the passage has several pseudo-queries.
    """
    if format == 'kilt':
        raise NotImplementedError
    elif format == 'dpr':
        try:
            text = text
            title = title
            # 1) query -> fuzzy-matched body spans.
            sampled = 0
            select_query = sample(querys, 1)[0]
            a = select_query+ " || body"
            if mark_pretraining:
                a += " || p"
            spans = extract_spans(text, a, num_samples, min_length=10, max_length=10, temperature=1.0)
            while sampled < min(num_samples, len(spans)):
                b = spans[sampled]
                yield a, b
                sampled += 1
            # 2) query -> title (delimiter marks the title end).
            sampled = 0
            select_querys = sample(querys*num_title_samples, num_title_samples)
            while sampled < num_title_samples:
                a=select_querys[sampled]+ " || title"
                if mark_pretraining:
                    a += " || p"
                b = title.strip() + " " + delimiter
                yield a, b
                sampled += 1
            # 3) query -> a *different* query of the same passage.
            if len(querys)>1:
                sampled = 0
                select_querys = sample(querys*num_query_samples, num_query_samples)
                while sampled < num_query_samples:
                    a = select_querys[sampled]+ " || query"
                    if mark_pretraining:
                        a += " || p"
                    # First query differing from the selected one; fall back
                    # to the last query on wrap-around.
                    num=0
                    while select_querys[sampled]==querys[num]:
                        num+=1
                        if(num>=len(querys)):
                            num-=1
                            break
                    b = querys[num].strip()
                    yield a, b
                    sampled += 1
        except Exception as e:
            # Best-effort: log and drop this passage on any error.
            print(e)
    else:
        raise ValueError
def preprocess_file_wrapper(args):
    """Unpack one job tuple and materialize its generator
    (multiprocessing.Pool.imap passes a single picklable argument)."""
    pairs = preprocess_file(*args)
    return list(pairs)
def parse_args():
    """Parse command-line options for the parallel pair generator."""
    parser = argparse.ArgumentParser()
    for positional in ('input', 'source', 'target'):
        parser.add_argument(positional)
    parser.add_argument('--delim', default="@@")
    parser.add_argument('--format', choices=['kilt', 'dpr'], default='dpr')
    # All four span-length bounds share the same default.
    for length_opt in ('--min_length_input', '--max_length_input',
                       '--min_length_output', '--max_length_output'):
        parser.add_argument(length_opt, type=int, default=10)
    for count_opt, count_default in (('--num_samples', 10),
                                     ('--num_title_samples', 3),
                                     ('--num_query_samples', 5),
                                     ('--full_doc_n', 1)):
        parser.add_argument(count_opt, type=int, default=count_default)
    parser.add_argument('--mark_pretraining', action="store_true")
    parser.add_argument('--pid2query', default=None, type=str)
    parser.add_argument("--jobs", type=int, default=20)
    return parser.parse_args()
def data_read(f, pid2query):
    """Yield (pid, text, title, querys) rows, dropping title-only passages and
    passages that have fewer than two pseudo-queries."""
    for pid, text, title in tqdm.tqdm(f):
        querys = pid2query[str(pid)]
        if text == title or len(querys) < 2:
            continue
        yield pid, text, title, querys
def main():
    """Read the passage TSV, fan rows out to a worker pool, and write the
    generated (source, target) pairs to the two output files."""
    args = parse_args()
    with open(args.source, 'w', 2 ** 20) as src, open(args.target, 'w', 2 ** 20) as tgt:
        # NOTE(review): pid2query is only bound when --pid2query is given, but
        # data_read() below always uses it — the option looks effectively
        # mandatory; confirm.
        if args.pid2query:
            with open(args.pid2query, 'rb') as f:
                pid2query = pickle.load(f)
        with open(args.input, 'r', 2 ** 20) as f:
            next(f)
            f = csv.reader(f, delimiter='\t', quotechar='"')
            data = data_read(f, pid2query)
            # One lazily-built argument tuple per surviving row.
            arg_it = ((pid, text, title, querys, args.num_samples, args.num_title_samples, args.num_query_samples, args.format, args.delim,
                       args.min_length_input,args.max_length_input,
                       args.min_length_output, args.max_length_output, args.full_doc_n, args.mark_pretraining ) for pid, text, title, querys in data)
            with multiprocessing.Pool(args.jobs) as pool:
                for sts in pool.imap(preprocess_file_wrapper, arg_it):
                    for s, t in sts:
                        # Lowercase ~10% of sources for casing robustness.
                        if random.random() < 0.1:
                            s = s.lower()
                        s = " " + s
                        t = " " + t
                        src.write(s + '\n')
                        tgt.write(t + '\n')
if __name__ == '__main__':
    main()
| 7,171 | 30.594714 | 140 | py |
MINDER | MINDER-main/scripts/training/make_generated_dataset_for_mamarco.py | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import csv
import random
import tqdm
import pickle
from nltk.corpus import stopwords
from random import sample
from fuzzywuzzy import fuzz
from collections import defaultdict
import math
from datasets import load_dataset
import multiprocessing
# Tokens that may not begin a sampled span: frequent function words
# (both capitalizations) plus stray quote characters.
banned = {
    "the", "The",
    "to",
    "a", "A", "an", "An",
    "he", "He", "his", "His", "him", "He's",
    "she", "She", "her", "Her", "she's", "She's",
    "it", "It", "its", "Its", "it's", "It's",
    "and", "And",
    "or", "Or",
    "this", "This",
    "that", "That",
    "those", "Those",
    "these", "These",
    '"', '""', "'", "''",
}
def is_good(token):
    """Return True when *token* may start a span: not banned, does not end a
    sentence ('?', '.', '!'), and does not open a bracket."""
    return not (token in banned or token[-1] in '?.!' or token[0] in '([')
def span_iterator(tokens, ngrams=3, banned=banned):
    """Yield (start, start + ngrams) boundaries for every candidate span whose
    first token is not in *banned*."""
    for start, token in enumerate(tokens):
        if token not in banned:
            yield (start, start + ngrams)
def extract_spans(text, source, n_samples, min_length=10, max_length=10, temperature=1.0):
    """Sample n_samples spans of *text* whose start positions fuzzily match the query.

    Query 3-grams (the part of *source* before '||') are scored against every
    allowed passage position with fuzz.ratio; accumulated scores are
    softmax-weighted (with *temperature*) and span starts drawn from that
    distribution, each extended by a random length in [min_length, max_length].
    """
    source = source.split("||", 1)[0]  # drop the '|| body' style hint
    query_tokens = source.split()
    query_tokens_lower = [t.lower() for t in query_tokens]
    passage_tokens = text.split()
    passage_tokens_lower = [t.lower() for t in passage_tokens]
    # matches[i] accumulates fuzzy-match mass for a span starting at token i.
    matches = defaultdict(int)
    for i1, _ in enumerate(query_tokens_lower):
        j1 = i1+3
        str_1 = " ".join(query_tokens_lower[i1:j1])
        for (i2, j2) in span_iterator(passage_tokens_lower, 3):
            str_2 = " ".join(passage_tokens_lower[i2:j2])
            ratio = fuzz.ratio(str_1, str_2) / 100.0
            matches[i2] += ratio
    if not matches:
        # No candidate start at all: fall back to the passage start.
        # NOTE(review): this branch produces a single index, so fewer than
        # n_samples spans are returned — callers cap with min(..., len(spans)).
        indices = [0]
    else:
        # Positions sorted by descending accumulated score.
        indices, weights = zip(*sorted(matches.items(), key=lambda x: -(x[1])))
        weights = list(weights)
        sum_weights = float(sum([0] + weights))
        if sum_weights == 0.0 or not weights:
            indices = [0]
            weights = [1.0]
        else:
            # Softmax over scores; Z renormalizes to a distribution.
            weights = [math.exp(float(w) / temperature) for w in weights]
            Z = sum(weights)
            weights = [w / Z for w in weights]
            indices = random.choices(indices, weights=weights, k=n_samples)
    spans = []
    for i in indices:
        # Slicing clamps at the passage end, so a span may come out shorter.
        subspan_size = random.randint(min_length, max_length)
        span = " ".join(passage_tokens[i:i+subspan_size])
        spans.append(span)
    return spans
def preprocess_file(
    pid,
    text,
    title,
    querys,
    num_samples,
    num_title_samples,
    num_query_samples,
    format,
    delimiter,
    min_length_input,
    max_length_input,
    min_length_output,
    max_length_output,
    full_doc_n,
    mark_pretraining
):
    """Yield (source, target) pairs for ONE MS MARCO passage (worker-side).

    Like the Wikipedia variant, but title pairs are skipped when the corpus
    uses "-" as a placeholder title.
    """
    if format == 'kilt':
        raise NotImplementedError
    elif format == 'dpr':
        try:
            text = text
            title = title
            # 1) query -> fuzzy-matched body spans.
            sampled = 0
            select_query = sample(querys, 1)[0]
            a = select_query+ " || body"
            if mark_pretraining:
                a += " || p"
            spans = extract_spans(text, a, num_samples, min_length=10, max_length=10, temperature=1.0)
            while sampled < min(num_samples, len(spans)):
                b = spans[sampled]
                yield a, b
                sampled += 1
            # 2) query -> title, unless the title is the "-" placeholder.
            sampled = 0
            select_querys = sample(querys*num_title_samples, num_title_samples)
            if title != "-":
                while sampled < num_title_samples:
                    a=select_querys[sampled]+ " || title"
                    if mark_pretraining:
                        a += " || p"
                    b = title.strip() + " " + delimiter
                    yield a, b
                    sampled += 1
            # 3) query -> a *different* query of the same passage.
            if len(querys)>1:
                sampled = 0
                select_querys = sample(querys*num_query_samples, num_query_samples)
                while sampled < num_query_samples:
                    a = select_querys[sampled]+ " || query"
                    if mark_pretraining:
                        a += " || p"
                    # First query differing from the selected one; fall back
                    # to the last query on wrap-around.
                    num=0
                    while select_querys[sampled]==querys[num]:
                        num+=1
                        if(num>=len(querys)):
                            num-=1
                            break
                    b = querys[num].strip()
                    yield a, b
                    sampled += 1
        except Exception as e:
            # Best-effort: log and drop this passage on any error.
            print(e)
    else:
        raise ValueError
def preprocess_file_wrapper(args):
    """Unpack one job tuple and materialize its generator
    (multiprocessing.Pool.imap passes a single picklable argument)."""
    pairs = preprocess_file(*args)
    return list(pairs)
def parse_args():
    """Build and evaluate the command-line interface for this script.

    Positional arguments name the input corpus and the two output files;
    the optional flags control sampling counts, span lengths, formatting,
    and parallelism.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('input')
    ap.add_argument('source')
    ap.add_argument('target')
    ap.add_argument('--delim', default="@@")
    ap.add_argument('--format', choices=['kilt', 'dpr'], default='dpr')
    # All plain integer knobs share the same shape; declare them in one pass.
    for flag, dflt in (
        ('--min_length_input', 10),
        ('--max_length_input', 10),
        ('--min_length_output', 10),
        ('--max_length_output', 10),
        ('--num_samples', 10),
        ('--num_title_samples', 3),
        ('--num_query_samples', 5),
        ('--full_doc_n', 1),
    ):
        ap.add_argument(flag, type=int, default=dflt)
    ap.add_argument('--mark_pretraining', action="store_true")
    ap.add_argument('--pid2query', default=None, type=str)
    ap.add_argument("--jobs", type=int, default=20)
    return ap.parse_args()
def data_read(f, pid2query):
    """Iterate corpus entries, yielding (pid, text, title, querys) tuples.

    Entries whose body text equals the title, and entries with fewer than
    two associated queries, are filtered out.  Progress is shown via tqdm.
    """
    for record in tqdm.tqdm(f):
        doc_id = record['docid']
        body = record['text']
        heading = record['title']
        queries = pid2query[str(doc_id)]
        # Skip degenerate passages (title-only) and under-queried ones.
        if body == heading or len(queries) < 2:
            continue
        yield doc_id, body, heading, queries
def main():
    """Drive corpus -> (source, target) pretraining-pair generation.

    Loads the passage corpus and the pickled pid->queries mapping, fans the
    per-passage work out over a process pool, and streams the resulting
    pairs to the source/target output files (1 MiB write buffers).
    """
    args = parse_args()
    # BUG FIX: previously a missing --pid2query left `pid2query` unbound and
    # the script died later with a confusing NameError; fail fast instead.
    if not args.pid2query:
        raise ValueError(
            "--pid2query is required (path to a pickled {pid: [query, ...]} mapping)"
        )
    with open(args.source, 'w', 2 ** 20) as src, open(args.target, 'w', 2 ** 20) as tgt:
        with open(args.pid2query, 'rb') as f:
            pid2query = pickle.load(f)
        passage_corpus = load_dataset(args.input, split="train")
        data = data_read(passage_corpus, pid2query)
        # Lazily build one argument tuple per surviving passage.
        arg_it = (
            (pid, text, title, querys, args.num_samples, args.num_title_samples,
             args.num_query_samples, args.format, args.delim,
             args.min_length_input, args.max_length_input,
             args.min_length_output, args.max_length_output,
             args.full_doc_n, args.mark_pretraining)
            for pid, text, title, querys in data
        )
        with multiprocessing.Pool(args.jobs) as pool:
            for pairs in pool.imap(preprocess_file_wrapper, arg_it):
                for s, t in pairs:
                    # Lowercase ~10% of sources as light augmentation.
                    if random.random() < 0.1:
                        s = s.lower()
                    src.write(" " + s + '\n')
                    tgt.write(" " + t + '\n')
# Script entry point: run only when executed directly, not when imported.
if __name__ == '__main__':
    main()
| 7,234 | 30.872247 | 136 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.