repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
deephyper | deephyper-master/deephyper/skopt/acquisition.py | import numpy as np
import warnings
from scipy.stats import norm
def gaussian_acquisition_1D(
    X, model, y_opt=None, acq_func="LCB", acq_func_kwargs=None, return_grad=True
):
    """Evaluate the acquisition function for a single 1-D point.

    ``fmin_l_bfgs_b`` only handles 1-D inputs, so this wrapper promotes
    ``X`` to a 2-D batch of size one before delegating to
    :func:`_gaussian_acquisition`.
    """
    X_batch = np.expand_dims(X, axis=0)
    return _gaussian_acquisition(
        X_batch,
        model,
        y_opt,
        acq_func=acq_func,
        acq_func_kwargs=acq_func_kwargs,
        return_grad=return_grad,
    )
def _gaussian_acquisition(
    X, model, y_opt=None, acq_func="LCB", return_grad=False, acq_func_kwargs=None
):
    """
    Wrapper so that the output of this function can be
    directly passed to a minimizer.

    Dispatches on ``acq_func`` ("LCB", "EI", "PI" and the per-second
    variants "EIps"/"PIps").  EI and PI are improvement measures meant to
    be maximized, so they are negated here to fit a minimization routine.
    """
    # Check inputs
    X = np.asarray(X)
    if X.ndim != 2:
        raise ValueError(
            "X is {}-dimensional, however," " it must be 2-dimensional.".format(X.ndim)
        )
    if acq_func_kwargs is None:
        acq_func_kwargs = dict()
    # xi: improvement margin for EI/PI; kappa: exploration weight for LCB.
    xi = acq_func_kwargs.get("xi", 0.01)
    kappa = acq_func_kwargs.get("kappa", 1.96)
    # Evaluate acquisition function
    per_second = acq_func.endswith("ps")
    if per_second:
        # "ps" variants expect a compound model whose ``estimators_`` holds
        # (objective model, evaluation-time model) in that order.
        model, time_model = model.estimators_
    if acq_func == "LCB":
        func_and_grad = gaussian_lcb(X, model, kappa, return_grad)
        if return_grad:
            acq_vals, acq_grad = func_and_grad
        else:
            acq_vals = func_and_grad
    elif acq_func in ["EI", "PI", "EIps", "PIps"]:
        if acq_func in ["EI", "EIps"]:
            func_and_grad = gaussian_ei(X, model, y_opt, xi, return_grad)
        else:
            func_and_grad = gaussian_pi(X, model, y_opt, xi, return_grad)
        # Negate so that minimizing the returned values maximizes EI/PI.
        if return_grad:
            acq_vals = -func_and_grad[0]
            acq_grad = -func_and_grad[1]
        else:
            acq_vals = -func_and_grad
        if acq_func in ["EIps", "PIps"]:
            if return_grad:
                mu, std, mu_grad, std_grad = time_model.predict(
                    X, return_std=True, return_mean_grad=True, return_std_grad=True
                )
            else:
                mu, std = time_model.predict(X, return_std=True)
            # acq = acq / E(t)
            # NOTE(review): this formula assumes time_model predicts the LOG
            # of the evaluation time (lognormal E[1/t] = exp(-mu + std^2/2))
            # — confirm against the model-fitting code.
            inv_t = np.exp(-mu + 0.5 * std**2)
            acq_vals *= inv_t
            # grad = d(acq_func) * inv_t + (acq_vals *d(inv_t))
            # inv_t = exp(g)
            # d(inv_t) = inv_t * grad(g)
            # d(inv_t) = inv_t * (-mu_grad + std * std_grad)
            if return_grad:
                acq_grad *= inv_t
                acq_grad += acq_vals * (-mu_grad + std * std_grad)
    else:
        raise ValueError("Acquisition function not implemented.")
    if return_grad:
        return acq_vals, acq_grad
    return acq_vals
def gaussian_lcb(X, model, kappa=1.96, return_grad=False):
    """Lower-confidence-bound acquisition values at ``X``.

    Computes ``mu - kappa * std`` from the surrogate posterior.  The
    special value ``kappa="inf"`` returns ``-std``, i.e. pure
    variance-driven exploration.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Values where the acquisition function should be computed.
    model : sklearn estimator that implements predict with ``return_std``
        The fit surrogate; ``predict`` must accept ``return_std``.
    kappa : float or 'inf', default 1.96
        Exploration/exploitation trade-off: larger values favour
        exploration over exploitation.
    return_grad : boolean, optional
        Whether to also return the gradient.  Implemented only for the
        case where ``X`` is a single sample.

    Returns
    -------
    values : array-like, shape (X.shape[0],)
        Acquisition function values computed at X.
    grad : array-like, shape (n_samples, n_features)
        Gradient at X (only when ``return_grad`` is True).
    """
    # Silence surrogate warnings (e.g. ill-conditioned kernels) while
    # querying the posterior.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        if not return_grad:
            mu, std = model.predict(X, return_std=True)
            if kappa == "inf":
                return -std
            return mu - kappa * std

        mu, std, mu_grad, std_grad = model.predict(
            X, return_std=True, return_mean_grad=True, return_std_grad=True
        )
        if kappa == "inf":
            return -std, -std_grad
        return mu - kappa * std, mu_grad - kappa * std_grad
def gaussian_pi(X, model, y_opt=0.0, xi=0.01, return_grad=False):
    """Probability-of-improvement acquisition values at ``X``.

    Treats ``f(x)`` as Gaussian with mean and standard deviation taken
    from the surrogate, and returns ``P(f(x) < y_opt - xi)``.  All
    improvements are rewarded equally, regardless of magnitude.  The
    returned values should be maximized to obtain the most promising
    ``X``.

    Parameters
    ----------
    X : array-like, shape=(n_samples, n_features)
        Values where the acquisition function should be computed.
    model : sklearn estimator that implements predict with ``return_std``
        The fit surrogate; ``predict`` must accept ``return_std``.
    y_opt : float, default 0
        Previous minimum value which we would like to improve upon.
    xi : float, default=0.01
        Margin of improvement demanded over the previous best value.
    return_grad : boolean, optional
        Whether to also return the gradient.  Implemented only for the
        case where ``X`` is a single sample.

    Returns
    -------
    values : array-like, shape=(X.shape[0],)
        Acquisition function values computed at X.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        if return_grad:
            mu, std, mu_grad, std_grad = model.predict(
                X, return_std=True, return_mean_grad=True, return_std_grad=True
            )
        else:
            mu, std = model.predict(X, return_std=True)

    # Both posterior moments must be 1-D so they can be divided elementwise.
    if mu.ndim != 1 or std.ndim != 1:
        raise ValueError(
            "mu and std are {}-dimensional and {}-dimensional, "
            "however both must be 1-dimensional. Did you train "
            "your model with an (N, 1) vector instead of an "
            "(N,) vector?".format(mu.ndim, std.ndim)
        )

    acq = np.zeros_like(mu)
    positive_std = std > 0
    gain = y_opt - xi - mu[positive_std]
    normed_gain = gain / std[positive_std]
    # Points with zero predicted variance keep acquisition value 0.
    acq[positive_std] = norm.cdf(normed_gain)

    if return_grad:
        if not np.all(positive_std):
            return acq, np.zeros_like(std_grad)
        # Substitute t = (y_opt - xi - mu) / sigma and apply the chain
        # rule; gain_grad is d(t)/dx via the quotient rule.
        gain_grad = (-mu_grad * std - std_grad * gain) / std**2
        return acq, gain_grad * norm.pdf(normed_gain)

    return acq
def gaussian_ei(X, model, y_opt=0.0, xi=0.01, return_grad=False):
    """Expected-improvement acquisition values at ``X``.

    Treats ``f(x)`` as Gaussian with mean and standard deviation taken
    from the surrogate, and returns ``E[max(y_opt - xi - f(x), 0)]``.
    Unlike probability of improvement, the reward is proportional to the
    amount of improvement.  The returned values should be maximized to
    obtain the most promising ``X``.

    Parameters
    ----------
    X : array-like, shape=(n_samples, n_features)
        Values where the acquisition function should be computed.
    model : sklearn estimator that implements predict with ``return_std``
        The fit surrogate; ``predict`` must accept ``return_std``.
    y_opt : float, default 0
        Previous minimum value which we would like to improve upon.
    xi : float, default=0.01
        Margin of improvement demanded over the previous best value.
    return_grad : boolean, optional
        Whether to also return the gradient.  Implemented only for the
        case where ``X`` is a single sample.

    Returns
    -------
    values : array-like, shape=(X.shape[0],)
        Acquisition function values computed at X.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        if return_grad:
            mu, std, mu_grad, std_grad = model.predict(
                X, return_std=True, return_mean_grad=True, return_std_grad=True
            )
        else:
            mu, std = model.predict(X, return_std=True)

    # Both posterior moments must be 1-D so they can be divided elementwise.
    if mu.ndim != 1 or std.ndim != 1:
        raise ValueError(
            "mu and std are {}-dimensional and {}-dimensional, "
            "however both must be 1-dimensional. Did you train "
            "your model with an (N, 1) vector instead of an "
            "(N,) vector?".format(mu.ndim, std.ndim)
        )

    acq = np.zeros_like(mu)
    positive_std = std > 0
    gain = y_opt - xi - mu[positive_std]
    normed_gain = gain / std[positive_std]
    gain_cdf = norm.cdf(normed_gain)
    gain_pdf = norm.pdf(normed_gain)
    # EI = (y_opt - xi - mu) * Phi(t) + std * phi(t); points with zero
    # predicted variance keep acquisition value 0.
    acq[positive_std] = gain * gain_cdf + std[positive_std] * gain_pdf

    if return_grad:
        if not np.all(positive_std):
            return acq, np.zeros_like(std_grad)
        # Substitute t = (y_opt - xi - mu) / sigma and apply the chain
        # rule; gain_grad is d(t)/dx via the quotient rule.
        gain_grad = (-mu_grad * std - std_grad * gain) / std**2
        cdf_grad = gain_grad * gain_pdf
        pdf_grad = -gain * cdf_grad
        exploit_grad = -mu_grad * gain_cdf - pdf_grad
        explore_grad = std_grad * gain_pdf + pdf_grad
        return acq, exploit_grad + explore_grad

    return acq
| 10,975 | 32.160121 | 87 | py |
deephyper | deephyper-master/deephyper/skopt/benchmarks.py | # -*- coding: utf-8 -*-
"""A collection of benchmark problems."""
import numpy as np
def bench1(x):
    """Quadratic test function ``f(x) = x[0] ** 2``.

    Single global minimum f(x*) = 0 at x* = 0.
    """
    value = x[0]
    return value ** 2
def bench1_with_time(x):
    """Like ``bench1`` but also reports a constant evaluation time."""
    return x[0] ** 2, 2.22
def bench2(x):
    """Piecewise quadratic test function.

    Returns ``x[0] ** 2`` for negative inputs and ``(x[0] - 5) ** 2 - 5``
    otherwise.  Global minimum f(x*) = -5 at x* = 5.
    """
    value = x[0]
    return value ** 2 if value < 0 else (value - 5) ** 2 - 5
def bench3(x):
    """Oscillating test function ``sin(5 x) * (1 - tanh(x ** 2))``.

    Global minimum f(x*) ~= -0.9 near x* ~= -0.3.
    """
    value = x[0]
    damping = 1 - np.tanh(value ** 2)
    return np.sin(5 * value) * damping
def bench4(x):
    """Quadratic over a string-encoded input: ``f(x) = float(x[0]) ** 2``.

    Exercises categorical-variable support; minimum 0 at x* = "0".
    """
    numeric = float(x[0])
    return numeric ** 2
def bench5(x):
    """Mixed-space test function ``float(x[0]) ** 2 + x[1] ** 2``.

    Minimum 0 at x[0] = "0" and x[1] = 0; exercises mixed (categorical +
    numeric) search spaces.
    """
    categorical_part = float(x[0]) ** 2
    numeric_part = x[1] ** 2
    return categorical_part + numeric_part
def branin(
    x, a=1, b=5.1 / (4 * np.pi**2), c=5.0 / np.pi, r=6, s=10, t=1.0 / (8 * np.pi)
):
    """Branin-Hoo test function on ``x1 in [-5, 10], x2 in [0, 15]``.

    Three global minima with f(x*) = 0.397887 at x* = (-pi, 12.275),
    (+pi, 2.275), and (9.42478, 2.475).
    More details: <http://www.sfu.ca/~ssurjano/branin.html>
    """
    parabola = x[1] - b * x[0] ** 2 + c * x[0] - r
    return a * parabola**2 + s * (1 - t) * np.cos(x[0]) + s
def hart6(
    x,
    alpha=np.asarray([1.0, 1.2, 3.0, 3.2]),
    P=10**-4
    * np.asarray(
        [
            [1312, 1696, 5569, 124, 8283, 5886],
            [2329, 4135, 8307, 3736, 1004, 9991],
            [2348, 1451, 3522, 2883, 3047, 6650],
            [4047, 8828, 8732, 5743, 1091, 381],
        ]
    ),
    A=np.asarray(
        [
            [10, 3, 17, 3.50, 1.7, 8],
            [0.05, 10, 17, 0.1, 8, 14],
            [3, 3.5, 1.7, 10, 17, 8],
            [17, 8, 0.05, 10, 0.1, 14],
        ]
    ),
):
    """Six-dimensional Hartmann test function on the unit hypercube.

    Six local minima and one global minimum f(x*) = -3.32237 at
    x* = (0.20169, 0.15001, 0.476874, 0.275332, 0.311652, 0.6573).
    More details: <http://www.sfu.ca/~ssurjano/hart6.html>
    """
    # Weighted squared distance of x to each of the four anchor rows of P.
    weighted_sq_dist = np.sum(A * (np.array(x) - P) ** 2, axis=1)
    return -np.dot(alpha, np.exp(-weighted_sq_dist))
| 2,888 | 24.342105 | 87 | py |
deephyper | deephyper-master/deephyper/skopt/searchcv.py | import warnings
import numpy as np
from scipy.stats import rankdata
from sklearn.model_selection._search import BaseSearchCV
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_is_fitted
from . import Optimizer
from .utils import point_asdict, dimensions_aslist, eval_callbacks
from .space import check_dimension
from .callbacks import check_callback
class BayesSearchCV(BaseSearchCV):
    """Bayesian optimization over hyper parameters.
    BayesSearchCV implements a "fit" and a "score" method.
    It also implements "predict", "predict_proba", "decision_function",
    "transform" and "inverse_transform" if they are implemented in the
    estimator used.
    The parameters of the estimator used to apply these methods are optimized
    by cross-validated search over parameter settings.
    In contrast to GridSearchCV, not all parameter values are tried out, but
    rather a fixed number of parameter settings is sampled from the specified
    distributions. The number of parameter settings that are tried is
    given by n_iter.
    Parameters are presented as a list of deephyper.skopt.space.Dimension objects.
    Parameters
    ----------
    estimator : estimator object.
        A object of that type is instantiated for each search point.
        This object is assumed to implement the scikit-learn estimator api.
        Either estimator needs to provide a ``score`` function,
        or ``scoring`` must be passed.
    search_spaces : dict, list of dict or list of tuple containing (dict, int).
        One of these cases:
        1. dictionary, where keys are parameter names (strings)
        and values are deephyper.skopt.space.Dimension instances (Real, Integer
        or Categorical) or any other valid value that defines skopt
        dimension (see deephyper.skopt.Optimizer docs). Represents search space
        over parameters of the provided estimator.
        2. list of dictionaries: a list of dictionaries, where every
        dictionary fits the description given in case 1 above.
        If a list of dictionary objects is given, then the search is
        performed sequentially for every parameter space with maximum
        number of evaluations set to self.n_iter.
        3. list of (dict, int > 0): an extension of case 2 above,
        where first element of every tuple is a dictionary representing
        some search subspace, similarly as in case 2, and second element
        is a number of iterations that will be spent optimizing over
        this subspace.
    n_iter : int, default=50
        Number of parameter settings that are sampled. n_iter trades
        off runtime vs quality of the solution. Consider increasing
        ``n_points`` if you want to try more parameter settings in
        parallel.
    optimizer_kwargs : dict, optional
        Dict of arguments passed to :class:`Optimizer`.  For example,
        ``{'base_estimator': 'RF'}`` would use a Random Forest surrogate
        instead of the default Gaussian Process.
    scoring : string, callable or None, default=None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
        If ``None``, the ``score`` method of the estimator is used.
    fit_params : dict, optional
        Parameters to pass to the fit method.
    n_jobs : int, default=1
        Number of jobs to run in parallel. At maximum there are
        ``n_points`` times ``cv`` jobs available during each iteration.
    n_points : int, default=1
        Number of parameter settings to sample in parallel. If this does
        not align with ``n_iter``, the last iteration will sample less
        points. See also :func:`~Optimizer.ask`
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
        - None, in which case all the jobs are immediately
        created and spawned. Use this for lightweight and
        fast-running jobs, to avoid delays due to on-demand
        spawning of the jobs
        - An int, giving the exact number of total jobs that are
        spawned
        - A string, giving an expression as a function of n_jobs,
        as in '2*n_jobs'
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross validation,
        - integer, to specify the number of folds in a `(Stratified)KFold`,
        - An object to be used as a cross-validation generator.
        - An iterable yielding train, test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.
    refit : boolean, default=True
        Refit the best estimator with the entire dataset.
        If "False", it is impossible to make predictions using
        this RandomizedSearchCV instance after fitting.
    verbose : integer
        Controls the verbosity: the higher, the more messages.
    random_state : int or RandomState
        Pseudo random number generator state used for random uniform sampling
        from lists of possible values instead of scipy.stats distributions.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    return_train_score : boolean, default=False
        If ``'True'``, the ``cv_results_`` attribute will include training
        scores.
    Examples
    --------
    >>> from deephyper.skopt import BayesSearchCV
    >>> # parameter ranges are specified by one of below
    >>> from deephyper.skopt.space import Real, Categorical, Integer
    >>>
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.svm import SVC
    >>> from sklearn.model_selection import train_test_split
    >>>
    >>> X, y = load_iris(True)
    >>> X_train, X_test, y_train, y_test = train_test_split(X, y,
    ...                                                     train_size=0.75,
    ...                                                     random_state=0)
    >>>
    >>> # log-uniform: understand as search over p = exp(x) by varying x
    >>> opt = BayesSearchCV(
    ...     SVC(),
    ...     {
    ...         'C': Real(1e-6, 1e+6, prior='log-uniform'),
    ...         'gamma': Real(1e-6, 1e+1, prior='log-uniform'),
    ...         'degree': Integer(1,8),
    ...         'kernel': Categorical(['linear', 'poly', 'rbf']),
    ...     },
    ...     n_iter=32,
    ...     random_state=0
    ... )
    >>>
    >>> # executes bayesian optimization
    >>> _ = opt.fit(X_train, y_train)
    >>>
    >>> # model can be saved, used for predictions or scoring
    >>> print(opt.score(X_test, y_test))
    0.973...
    Attributes
    ----------
    cv_results_ : dict of numpy (masked) ndarrays
        A dict with keys as column headers and values as columns, that can be
        imported into a pandas ``DataFrame``.
        For instance the below given table
        +--------------+-------------+-------------------+---+---------------+
        | param_kernel | param_gamma | split0_test_score |...|rank_test_score|
        +==============+=============+===================+===+===============+
        | 'rbf'        | 0.1         | 0.8               |...| 2             |
        +--------------+-------------+-------------------+---+---------------+
        | 'rbf'        | 0.2         | 0.9               |...| 1             |
        +--------------+-------------+-------------------+---+---------------+
        | 'rbf'        | 0.3         | 0.7               |...| 1             |
        +--------------+-------------+-------------------+---+---------------+
        will be represented by a ``cv_results_`` dict of::
            {
            'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'],
                                          mask = False),
            'param_gamma'  : masked_array(data = [0.1 0.2 0.3], mask = False),
            'split0_test_score'  : [0.8, 0.9, 0.7],
            'split1_test_score'  : [0.82, 0.5, 0.7],
            'mean_test_score'    : [0.81, 0.7, 0.7],
            'std_test_score'     : [0.02, 0.2, 0.],
            'rank_test_score'    : [3, 1, 1],
            'split0_train_score' : [0.8, 0.9, 0.7],
            'split1_train_score' : [0.82, 0.5, 0.7],
            'mean_train_score'   : [0.81, 0.7, 0.7],
            'std_train_score'    : [0.03, 0.03, 0.04],
            'mean_fit_time'      : [0.73, 0.63, 0.43, 0.49],
            'std_fit_time'       : [0.01, 0.02, 0.01, 0.01],
            'mean_score_time'    : [0.007, 0.06, 0.04, 0.04],
            'std_score_time'     : [0.001, 0.002, 0.003, 0.005],
            'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...],
            }
        NOTE that the key ``'params'`` is used to store a list of parameter
        settings dict for all the parameter candidates.
        The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
        ``std_score_time`` are all in seconds.
    best_estimator_ : estimator
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if refit=False.
    optimizer_results_ : list of `OptimizeResult`
        Contains a `OptimizeResult` for each search space. The search space
        parameter are sorted by its name.
    best_score_ : float
        Score of best_estimator on the left out data.
    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.
    best_index_ : int
        The index (of the ``cv_results_`` arrays) which corresponds to the best
        candidate parameter setting.
        The dict at ``search.cv_results_['params'][search.best_index_]`` gives
        the parameter setting for the best model, that gives the highest
        mean score (``search.best_score_``).
    scorer_ : function
        Scorer function used on the held out data to choose the best
        parameters for the model.
    n_splits_ : int
        The number of cross-validation splits (folds/iterations).
    Notes
    -----
    The parameters selected are those that maximize the score of the held-out
    data, according to the scoring parameter.
    If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting(and not `n_jobs` times). This is done for efficiency
    reasons if individual jobs take very little time, but may raise errors if
    the dataset is large and not enough memory is available. A workaround in
    this case is to set `pre_dispatch`. Then, the memory is copied only
    `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
    n_jobs`.
    See Also
    --------
    :class:`GridSearchCV`:
        Does exhaustive search over a grid of parameters.
    """

    def __init__(
        self,
        estimator,
        search_spaces,
        optimizer_kwargs=None,
        n_iter=50,
        scoring=None,
        fit_params=None,
        n_jobs=1,
        n_points=1,
        iid="deprecated",
        refit=True,
        cv=None,
        verbose=0,
        pre_dispatch="2*n_jobs",
        random_state=None,
        error_score="raise",
        return_train_score=False,
    ):
        self.search_spaces = search_spaces
        self.n_iter = n_iter
        self.n_points = n_points
        self.random_state = random_state
        self.optimizer_kwargs = optimizer_kwargs
        # Validate the search space eagerly so malformed spaces fail at
        # construction time rather than deep inside ``fit``.
        self._check_search_space(self.search_spaces)
        # Temporary fix for compatibility with sklearn 0.20 and 0.21
        # See scikit-optimize#762
        # To be consistent with sklearn 0.21+, fit_params should be deprecated
        # in the constructor and be passed in ``fit``.
        self.fit_params = fit_params
        if iid != "deprecated":
            warnings.warn(
                "The `iid` parameter has been deprecated " "and will be ignored."
            )
        self.iid = iid  # For sklearn repr pprint
        super(BayesSearchCV, self).__init__(
            estimator=estimator,
            scoring=scoring,
            n_jobs=n_jobs,
            refit=refit,
            cv=cv,
            verbose=verbose,
            pre_dispatch=pre_dispatch,
            error_score=error_score,
            return_train_score=return_train_score,
        )

    def _check_search_space(self, search_space):
        """Checks whether the search space argument is correct"""
        if len(search_space) == 0:
            raise ValueError(
                "The search_spaces parameter should contain at least one"
                "non-empty search space, got %s" % search_space
            )
        # check if space is a single dict, convert to list if so
        if isinstance(search_space, dict):
            search_space = [search_space]
        # check if the structure of the space is proper
        if isinstance(search_space, list):
            # convert to just a list of dicts
            dicts_only = []
            # 1. check the case when a tuple of space, n_iter is provided
            for elem in search_space:
                if isinstance(elem, tuple):
                    if len(elem) != 2:
                        raise ValueError(
                            "All tuples in list of search spaces should have"
                            "length 2, and contain (dict, int), got %s" % elem
                        )
                    subspace, n_iter = elem
                    if (not isinstance(n_iter, int)) or n_iter < 0:
                        raise ValueError(
                            "Number of iterations in search space should be"
                            "positive integer, got %s in tuple %s " % (n_iter, elem)
                        )
                    # save subspaces here for further checking
                    dicts_only.append(subspace)
                elif isinstance(elem, dict):
                    dicts_only.append(elem)
                else:
                    raise TypeError(
                        "A search space should be provided as a dict or"
                        "tuple (dict, int), got %s" % elem
                    )
            # 2. check all the dicts for correctness of contents
            for subspace in dicts_only:
                for k, v in subspace.items():
                    # Each value must be a valid skopt dimension definition.
                    check_dimension(v)
        else:
            raise TypeError(
                "Search space should be provided as a dict or list of dict,"
                "got %s" % search_space
            )

    @property
    def optimizer_results_(self):
        """list of `OptimizeResult`: one per search space; fit-only attribute."""
        check_is_fitted(self, "_optim_results")
        return self._optim_results

    def _make_optimizer(self, params_space):
        """Instantiate skopt Optimizer class.
        Parameters
        ----------
        params_space : dict
            Represents parameter search space. The keys are parameter
            names (strings) and values are deephyper.skopt.space.Dimension instances,
            one of Real, Integer or Categorical.
        Returns
        -------
        optimizer: Instance of the `Optimizer` class used for for search
            in some parameter space.
        """
        kwargs = self.optimizer_kwargs_.copy()
        kwargs["dimensions"] = dimensions_aslist(params_space)
        optimizer = Optimizer(**kwargs)
        # ``dimensions_aslist`` orders dimensions by sorted parameter name,
        # so unnamed dimensions are labelled with the matching dict key.
        for i in range(len(optimizer.space.dimensions)):
            if optimizer.space.dimensions[i].name is not None:
                continue
            optimizer.space.dimensions[i].name = list(sorted(params_space.keys()))[i]
        return optimizer

    def _step(self, search_space, optimizer, evaluate_candidates, n_points=1):
        """Ask for up to ``n_points`` settings, evaluate them, and tell
        the optimizer the (negated) scores."""
        # get parameter values to evaluate
        params = optimizer.ask(n_points=n_points)
        # convert parameters to python native types
        params = [[np.array(v).item() for v in p] for p in params]
        # make lists into dictionaries
        params_dict = [point_asdict(search_space, p) for p in params]
        all_results = evaluate_candidates(params_dict)
        # Feed the point and objective value back into optimizer
        # Optimizer minimizes objective, hence provide negative score
        local_results = all_results["mean_test_score"][-len(params) :]
        return optimizer.tell(params, [-score for score in local_results])

    @property
    def total_iterations(self):
        """
        Count total iterations that will be taken to explore
        all subspaces with `fit` method.
        Returns
        -------
        max_iter: int, total number of iterations to explore
        """
        total_iter = 0
        for elem in self.search_spaces:
            # A (dict, int) tuple carries its own per-subspace budget.
            if isinstance(elem, tuple):
                space, n_iter = elem
            else:
                n_iter = self.n_iter
            total_iter += n_iter
        return total_iter

    # TODO: Accept callbacks via the constructor?
    def fit(self, X, y=None, *, groups=None, callback=None, **fit_params):
        """Run fit on the estimator with randomly drawn parameters.
        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The training input samples.
        y : array-like, shape = [n_samples] or [n_samples, n_output]
            Target relative to X for classification or regression (class
            labels should be integers or strings).
        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.
        callback: [callable, list of callables, optional]
            If callable then `callback(res)` is called after each parameter
            combination tested. If list of callables, then each callable in
            the list is called.
        **fit_params : dict of string -> object
            Parameters passed to the ``fit`` method of the estimator.
        Returns
        -------
        self : BayesSearchCV
            The fitted search instance.
        """
        self._callbacks = check_callback(callback)
        if self.optimizer_kwargs is None:
            self.optimizer_kwargs_ = {}
        else:
            self.optimizer_kwargs_ = dict(self.optimizer_kwargs)
        # BaseSearchCV.fit drives cross-validation and calls our
        # ``_run_search`` with an ``evaluate_candidates`` closure.
        super().fit(X=X, y=y, groups=groups, **fit_params)
        # BaseSearchCV never ranked train scores,
        # but apparently we used to ship this (back-compat)
        if self.return_train_score:
            self.cv_results_["rank_train_score"] = rankdata(
                -np.array(self.cv_results_["mean_train_score"]), method="min"
            ).astype(int)
        return self

    def _run_search(self, evaluate_candidates):
        """Run the Bayesian-optimization loop over every search (sub)space."""
        # check if space is a single dict, convert to list if so
        search_spaces = self.search_spaces
        if isinstance(search_spaces, dict):
            search_spaces = [search_spaces]
        callbacks = self._callbacks
        random_state = check_random_state(self.random_state)
        self.optimizer_kwargs_["random_state"] = random_state
        # Instantiate optimizers for all the search spaces.
        optimizers = []
        for search_space in search_spaces:
            if isinstance(search_space, tuple):
                search_space = search_space[0]
            optimizers.append(self._make_optimizer(search_space))
        self.optimizers_ = optimizers  # will save the states of the optimizers
        self._optim_results = []
        n_points = self.n_points
        for search_space, optimizer in zip(search_spaces, optimizers):
            # if not provided with search subspace, n_iter is taken as
            # self.n_iter
            if isinstance(search_space, tuple):
                search_space, n_iter = search_space
            else:
                n_iter = self.n_iter
            # do the optimization for particular search space
            while n_iter > 0:
                # when n_iter < n_points points left for evaluation
                n_points_adjusted = min(n_iter, n_points)
                optim_result = self._step(
                    search_space,
                    optimizer,
                    evaluate_candidates,
                    n_points=n_points_adjusted,
                )
                # Budget is decremented by the requested batch size; the
                # final (possibly smaller) batch drives it to <= 0.
                n_iter -= n_points
                # A truthy callback decision aborts this subspace early.
                if eval_callbacks(callbacks, optim_result):
                    break
            self._optim_results.append(optim_result)
| 20,969 | 38.269663 | 85 | py |
deephyper | deephyper-master/deephyper/skopt/utils.py | from copy import deepcopy
from functools import wraps
import numpy as np
from scipy.optimize import OptimizeResult
from scipy.optimize import minimize as sp_minimize
from sklearn.base import is_regressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.preprocessing import FunctionTransformer
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import make_pipeline
from joblib import dump as dump_
from joblib import load as load_
from collections import OrderedDict
from .learning import ExtraTreesRegressor
from .learning import GaussianProcessRegressor
from .learning import GradientBoostingQuantileRegressor
from .learning import RandomForestRegressor
from .learning.gaussian_process.kernels import ConstantKernel
from .learning.gaussian_process.kernels import HammingKernel
from .learning.gaussian_process.kernels import Matern
from .sampler import Sobol, Lhs, Hammersly, Halton, Grid
from .sampler import InitialPointGenerator
from .space import Space, Dimension
# Try to import Mondrian Forest
MF_INSTALLED = False
try:
from .learning import MondrianForestRegressor
MF_INSTALLED = True
except ImportError:
MF_INSTALLED = False
__all__ = (
"load",
"dump",
)
def create_result(Xi, yi, space=None, rng=None, specs=None, models=None):
    """Bundle an optimization trace into a scipy `OptimizeResult`.

    Parameters
    ----------
    Xi : list of lists, shape (n_iters, n_features)
        Points evaluated at every iteration.
    yi : array-like, shape (n_iters,) or (n_iters, 2)
        Objective value at every iteration; a second column, when
        present, is split off into ``res.log_time``.
    space : Space instance, optional
        Search space.
    rng : RandomState instance, optional
        State of the random state.
    specs : dict, optional
        Call specifications.
    models : list, optional
        List of fit surrogate models.

    Returns
    -------
    res : `OptimizeResult`, scipy object
        Result exposing the best point (``x``, ``fun``) and the full
        history (``x_iters``, ``func_vals``) plus the metadata above.
    """
    result = OptimizeResult()
    yi = np.asarray(yi)
    # A two-column yi carries (objective, time) pairs: split them apart.
    if yi.ndim == 2:
        result.log_time = np.ravel(yi[:, 1])
        yi = np.ravel(yi[:, 0])
    best_idx = np.argmin(yi)
    result.x = Xi[best_idx]
    result.fun = yi[best_idx]
    result.func_vals = yi
    result.x_iters = Xi
    result.models = models
    result.space = space
    result.random_state = rng
    result.specs = specs
    return result
def eval_callbacks(callbacks, result):
    """Run every callback on ``result`` and combine their verdicts.

    Each callback may return a truthy value to request that the
    optimization stop; ``None`` return values are ignored and the
    individual decisions are OR-ed together.

    Parameters
    ----------
    callbacks : list of callables
        Callbacks to evaluate.
    result : `OptimizeResult`, scipy object
        Optimization result object passed to each callback.

    Returns
    -------
    decision : bool
        Whether or not the optimization procedure should stop.
    """
    stop = False
    if not callbacks:
        return stop
    for callback in callbacks:
        verdict = callback(result)
        if verdict is not None:
            stop = stop or verdict
    return stop
def dump(res, filename, store_objective=True, **kwargs):
    """Persist an skopt optimization result to a file.

    Parameters
    ----------
    res : `OptimizeResult`, scipy object
        Optimization result object to be stored.
    filename : string or `pathlib.Path`
        The path of the file in which it is to be stored. The compression
        method corresponding to one of the supported filename extensions
        ('.z', '.gz', '.bz2', '.xz' or '.lzma') will be used automatically.
    store_objective : boolean, default=True
        Whether the objective function should be stored. Set it to `False`
        if your objective function (`.specs['args']['func']`) is
        unserializable. Note that this triggers a deep copy of `res`, which
        may be costly for large results; if the objective is not critical,
        delete it before calling `deephyper.skopt.dump()` instead.
    **kwargs : other keyword arguments
        All other keyword arguments will be passed to `joblib.dump`.
    """
    if store_objective:
        dump_(res, filename, **kwargs)
        return
    if "func" in res.specs["args"]:
        # Deep-copy so the caller's result object keeps its objective,
        # then strip the function before serializing.
        stripped = deepcopy(res)
        del stripped.specs["args"]["func"]
        dump_(stripped, filename, **kwargs)
    else:
        # Objective already absent: no copy needed.
        dump_(res, filename, **kwargs)
def load(filename, **kwargs):
    """Reconstruct an optimization result saved with `deephyper.skopt.dump`.

    .. note::
        The loaded result may be missing the objective function
        (`.specs['args']['func']`) if `deephyper.skopt.dump` was called
        with `store_objective=False`.

    Parameters
    ----------
    filename : string or `pathlib.Path`
        The path of the file from which to load the optimization result.
    **kwargs : other keyword arguments
        All other keyword arguments will be passed to `joblib.load`.

    Returns
    -------
    res : `OptimizeResult`, scipy object
        Reconstructed OptimizeResult instance.
    """
    result = load_(filename, **kwargs)
    return result
def is_listlike(x):
    """Return True when `x` is a list or a tuple."""
    return isinstance(x, (tuple, list))


def is_2Dlistlike(x):
    """Return True when every element of `x` is itself list-like."""
    flags = [is_listlike(row) for row in x]
    return np.all(flags)
def check_x_in_space(x, space):
    """Validate that one point (or a list of points) lies within `space`.

    Parameters
    ----------
    x : list or list of lists
        A single point, or a 2-D collection of points.
    space : Space instance
        The search space to validate against.

    Raises
    ------
    ValueError
        If any point is out of bounds or has the wrong dimensionality.
    """
    # Fix: removed a stray debug `print(x)` that leaked every checked
    # point to stdout on the single-point path.
    if is_2Dlistlike(x):
        if not np.all([p in space for p in x]):
            raise ValueError("Not all points are within the bounds of" " the space.")
        if any([len(p) != len(space.dimensions) for p in x]):
            raise ValueError("Not all points have the same dimensions as" " the space.")
    elif is_listlike(x):
        if x not in space:
            raise ValueError(
                "Point (%s) is not within the bounds of"
                " the space (%s)." % (x, space.bounds)
            )
        if len(x) != len(space.dimensions):
            raise ValueError(
                "Dimensions of point (%s) and space (%s) do not match"
                % (x, space.bounds)
            )
def expected_minimum(res, n_random_starts=20, random_state=None):
    """Compute the minimum over the predictions of the last surrogate model.

    Falls back to `expected_minimum_random_sampling` with
    `n_random_starts=100000` when the space contains categorical values.

    .. note::
        The returned minimum may not necessarily be an accurate
        prediction of the minimum of the true objective function.

    Parameters
    ----------
    res : `OptimizeResult`, scipy object
        The optimization result returned by a `skopt` minimizer.
    n_random_starts : int, default=20
        The number of random starts for the minimization of the surrogate
        model.
    random_state : int, RandomState instance, or None (default)
        Set random state to something other than None for reproducible
        results.

    Returns
    -------
    x : list
        location of the minimum.
    fun : float
        the surrogate function value at the minimum.
    """
    if res.space.is_partly_categorical:
        # Gradient-based minimization cannot handle categorical axes.
        return expected_minimum_random_sampling(
            res, n_random_starts=100000, random_state=random_state
        )

    surrogate = res.models[-1]

    def surrogate_value(x):
        # Map the point into the model's transformed space before predicting.
        x_t = res.space.transform(x.reshape(1, -1))
        return surrogate.predict(x_t.reshape(1, -1))[0]

    starts = [res.x]
    if n_random_starts > 0:
        starts.extend(res.space.rvs(n_random_starts, random_state=random_state))

    best_x, best_fun = None, np.inf
    for start in starts:
        outcome = sp_minimize(surrogate_value, x0=start, bounds=res.space.bounds)
        if outcome.fun < best_fun:
            best_x, best_fun = outcome.x, outcome.fun

    return [v for v in best_x], best_fun
def expected_minimum_random_sampling(res, n_random_starts=100000, random_state=None):
    """Search for the surrogate minimum by naive random sampling.

    Returns the parameters that gave the minimum predicted value. Unlike
    `expected_minimum`, this also works when the space contains
    categorical values.

    .. note::
        The returned minimum may not necessarily be an accurate
        prediction of the minimum of the true objective function.

    Parameters
    ----------
    res : `OptimizeResult`, scipy object
        The optimization result returned by a `skopt` minimizer.
    n_random_starts : int, default=100000
        The number of random samples drawn from the space.
    random_state : int, RandomState instance, or None (default)
        Set random state to something other than None for reproducible
        results.

    Returns
    -------
    x : list
        location of the minimum.
    fun : float
        the surrogate function value at the minimum.
    """
    # Draw candidate points, score them all with the last surrogate,
    # and keep the best one.
    samples = res.space.rvs(n_random_starts, random_state=random_state)
    surrogate = res.models[-1]
    predictions = surrogate.predict(res.space.transform(samples))
    best = np.argmin(predictions)
    return samples[best], predictions[best]
def has_gradients(estimator):
    """Check whether an estimator's ``predict`` method provides gradients.

    Tree ensembles never do; Gaussian processes do unless they use a
    Hamming kernel (categorical spaces).

    Parameters
    ----------
    estimator :
        sklearn BaseEstimator instance, or None.
    """
    # cook_estimator() returns None for "dummy minimize" aka random values only
    if estimator is None:
        return False

    tree_based = [
        ExtraTreesRegressor,
        RandomForestRegressor,
        GradientBoostingQuantileRegressor,
    ]
    if MF_INSTALLED:
        tree_based.append(MondrianForestRegressor)
    if isinstance(estimator, tuple(tree_based)):
        return False

    if hasattr(estimator, "kernel"):
        params = estimator.get_params()
        # A Hamming kernel (top-level or nested in any parameter) means
        # the GP operates on categorical data and has no usable gradients.
        if isinstance(estimator.kernel, HammingKernel):
            return False
        if any([isinstance(params[name], HammingKernel) for name in params]):
            return False
    return True
def cook_estimator(base_estimator, space=None, **kwargs):
    """Build a default surrogate model from a shorthand name.

    For the special base_estimator called "DUMMY" the return value is None,
    which corresponds to sampling points at random (no surrogate needed).

    Parameters
    ----------
    base_estimator : "GP", "RF", "ET", "GBRT", "DUMMY" or sklearn regressor
        Should inherit from `sklearn.base.RegressorMixin`. In addition the
        `predict` method should have an optional `return_std` argument,
        which returns ``std(Y | x)`` along with `E[Y | x]`.
    space : Space instance
        Has to be provided if the base_estimator is a gaussian process.
        Ignored otherwise.
    kwargs : dict
        Extra parameters provided to the base_estimator at init time.
    """
    if isinstance(base_estimator, str):
        base_estimator = base_estimator.upper()
        if base_estimator not in ["GP", "ET", "RF", "GBRT", "DUMMY"]:
            raise ValueError(
                "Valid strings for the base_estimator parameter "
                " are: 'RF', 'ET', 'GP', 'GBRT' or 'DUMMY' not "
                "%s." % base_estimator
            )
    elif not is_regressor(base_estimator):
        raise ValueError("base_estimator has to be a regressor.")

    if base_estimator == "GP":
        if space is None:
            raise ValueError("Expected a Space instance, not None.")
        # Normalize every dimension so the GP kernel sees [0, 1] inputs.
        space = Space(normalize_dimensions(Space(space).dimensions))
        n_dims = space.transformed_n_dims
        cov_amplitude = ConstantKernel(1.0, (0.01, 1000.0))
        # Hamming kernel only when *all* dimensions are categorical.
        if space.is_categorical:
            main_kernel = HammingKernel(length_scale=np.ones(n_dims))
        else:
            main_kernel = Matern(
                length_scale=np.ones(n_dims),
                length_scale_bounds=[(0.01, 100)] * n_dims,
                nu=2.5,
            )
        base_estimator = GaussianProcessRegressor(
            kernel=cov_amplitude * main_kernel,
            normalize_y=True,
            noise="gaussian",
            n_restarts_optimizer=2,
        )
    elif base_estimator == "RF":
        base_estimator = RandomForestRegressor(n_estimators=100, min_samples_leaf=3)
    elif base_estimator == "ET":
        base_estimator = ExtraTreesRegressor(n_estimators=100, min_samples_leaf=3)
    elif base_estimator == "GBRT":
        gbrt = GradientBoostingRegressor(n_estimators=30, loss="quantile")
        base_estimator = GradientBoostingQuantileRegressor(base_estimator=gbrt)
    elif base_estimator == "DUMMY":
        return None

    # Drop n_jobs silently when the chosen estimator does not support it.
    if "n_jobs" in kwargs and not hasattr(base_estimator, "n_jobs"):
        del kwargs["n_jobs"]
    base_estimator.set_params(**kwargs)
    return base_estimator
def cook_initial_point_generator(generator, **kwargs):
    """Build a default initial point generator from a shorthand name.

    For the special generator called "random" the return value is None.

    Parameters
    ----------
    generator : "lhs", "sobol", "halton", "hammersly", "grid", "random" \
            or InitialPointGenerator instance"
        Should inherit from `deephyper.skopt.sampler.InitialPointGenerator`.
    kwargs : dict
        Extra parameters provided to the generator at init time.
    """
    if generator is None:
        generator = "random"
    elif isinstance(generator, str):
        generator = generator.lower()
        if generator not in ["sobol", "halton", "hammersly", "lhs", "random", "grid"]:
            raise ValueError(
                "Valid strings for the generator parameter "
                " are: 'sobol', 'lhs', 'halton', 'hammersly',"
                "'random', or 'grid' not "
                "%s." % generator
            )
    elif not isinstance(generator, InitialPointGenerator):
        raise ValueError(
            "generator has to be an InitialPointGenerator."
            "Got %s" % (str(type(generator)))
        )

    if isinstance(generator, str):
        # "random" means no generator at all.
        if generator == "random":
            return None
        factories = {
            "sobol": Sobol,
            "halton": Halton,
            "hammersly": Hammersly,
            "lhs": Lhs,
            "grid": Grid,
        }
        generator = factories[generator]()
    generator.set_params(**kwargs)
    return generator
def dimensions_aslist(search_space):
    """Convert a dict representation of a search space into a list of
    dimensions, ordered by sorted(search_space.keys()).

    Parameters
    ----------
    search_space : dict
        Represents search space. The keys are dimension names (strings)
        and values are instances of classes that inherit from the class
        :class:`deephyper.skopt.space.Dimension` (Real, Integer or
        Categorical).

    Returns
    -------
    params_space_list: list
        list of deephyper.skopt.space.Dimension instances.

    Examples
    --------
    >>> from deephyper.skopt.space.space import Real, Integer
    >>> from deephyper.skopt.utils import dimensions_aslist
    >>> search_space = {'name1': Real(0,1),
    ...                 'name2': Integer(2,4), 'name3': Real(-1,1)}
    >>> dimensions_aslist(search_space)[0]
    Real(low=0, high=1, prior='uniform', transform='identity')
    >>> dimensions_aslist(search_space)[1]
    Integer(low=2, high=4, prior='uniform', transform='identity')
    >>> dimensions_aslist(search_space)[2]
    Real(low=-1, high=1, prior='uniform', transform='identity')
    """
    ordered_names = sorted(search_space.keys())
    return [search_space[name] for name in ordered_names]
def point_asdict(search_space, point_as_list):
    """Convert the list representation of a point from a search space
    to its dictionary representation.

    Keys are dimension names; values are taken from `point_as_list` in
    the order given by ``sorted(search_space.keys())``.

    .. seealso:: :class:`deephyper.skopt.utils.point_aslist`

    Parameters
    ----------
    search_space : dict
        Represents search space. The keys are dimension names (strings)
        and values are instances of classes that inherit from the class
        :class:`deephyper.skopt.space.Dimension` (Real, Integer or
        Categorical).
    point_as_list : list
        list with parameter values. The order of parameters in the list
        is given by sorted(params_space.keys()).

    Returns
    -------
    params_dict : OrderedDict
        dictionary with parameter names as keys to which
        corresponding parameter values are assigned.

    Examples
    --------
    >>> from deephyper.skopt.space.space import Real, Integer
    >>> from deephyper.skopt.utils import point_asdict
    >>> search_space = {'name1': Real(0,1),
    ...                 'name2': Integer(2,4), 'name3': Real(-1,1)}
    >>> point_as_list = [0.66, 3, -0.15]
    >>> point_asdict(search_space, point_as_list)
    OrderedDict([('name1', 0.66), ('name2', 3), ('name3', -0.15)])
    """
    ordered_names = sorted(search_space.keys())
    return OrderedDict(zip(ordered_names, point_as_list))
def point_aslist(search_space, point_as_dict):
    """Convert a dictionary representation of a point from a search space
    to its list representation.

    Values are ordered by ``sorted(search_space.keys())``.

    .. seealso:: :class:`deephyper.skopt.utils.point_asdict`

    Parameters
    ----------
    search_space : dict
        Represents search space. The keys are dimension names (strings)
        and values are instances of classes that inherit from the class
        :class:`deephyper.skopt.space.Dimension` (Real, Integer or
        Categorical).
    point_as_dict : dict
        dict with parameter names as keys to which corresponding
        parameter values are assigned.

    Returns
    -------
    point_as_list : list
        list with point values. The order of
        parameters in the list is given by sorted(params_space.keys()).

    Examples
    --------
    >>> from deephyper.skopt.space.space import Real, Integer
    >>> from deephyper.skopt.utils import point_aslist
    >>> search_space = {'name1': Real(0,1),
    ...                 'name2': Integer(2,4), 'name3': Real(-1,1)}
    >>> point_as_dict = {'name1': 0.66, 'name2': 3, 'name3': -0.15}
    >>> point_aslist(search_space, point_as_dict)
    [0.66, 3, -0.15]
    """
    ordered_names = sorted(search_space.keys())
    return [point_as_dict[name] for name in ordered_names]
def normalize_dimensions(dimensions):
    """Create a ``Space`` where all dimensions are normalized to unit range.

    This is particularly useful for Gaussian process based regressors and
    is used internally by ``gp_minimize``.

    Parameters
    ----------
    dimensions : list, shape (n_dims,)
        List of search space dimensions.
        Each search dimension can be defined either as

        - a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
          dimensions),
        - a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
          dimensions),
        - as a list of categories (for `Categorical` dimensions), or
        - an instance of a `Dimension` object (`Real`, `Integer` or
          `Categorical`).

        NOTE: The upper and lower bounds are inclusive for `Integer`
        dimensions.
    """
    space = Space(dimensions)
    normalized = []
    for dim in space.dimensions:
        if not isinstance(dim, Dimension):
            raise RuntimeError("Unknown dimension type " "(%s)" % type(dim))
        # Switch the transformer so values map to the unit interval.
        dim.set_transformer("normalize")
        normalized.append(dim)
    return Space(normalized)
def check_list_types(x, types):
    """Raise ValueError unless every element of `x` is of the given type(s).

    Note that `types` can be either a single object-type or a tuple
    of object-types.

    Parameters
    ----------
    x : list
        List of objects.
    types : object or list(object)
        Either a single object-type or a tuple of object-types.

    Raises
    ------
    ValueError
        If one or more element in the list `x` is not of the correct
        type(s).
    """
    mistyped = [element for element in x if not isinstance(element, types)]
    if mistyped:
        raise ValueError(
            "All elements in list must be instances of {}, but found: {}".format(
                types, mistyped
            )
        )
def check_dimension_names(dimensions):
    """Raise ValueError if any dimension is unnamed.

    Parameters
    ----------
    dimensions : list(Dimension)
        List of Dimension-objects.

    Raises
    ------
    ValueError
        If one or more dimensions have a `name` of None.
    """
    unnamed = [dim for dim in dimensions if dim.name is None]
    if unnamed:
        raise ValueError(
            "All dimensions must have names, but found: {}".format(unnamed)
        )
def use_named_args(dimensions):
    """
    Wrapper / decorator for an objective function that uses named arguments
    to make it compatible with optimizers that use a single list of parameters.
    Your objective function can be defined as being callable using named
    arguments: `func(foo=123, bar=3.0, baz='hello')` for a search-space
    with dimensions named `['foo', 'bar', 'baz']`. But the optimizer
    will only pass a single list `x` of unnamed arguments when calling
    the objective function: `func(x=[123, 3.0, 'hello'])`. This wrapper
    converts your objective function with named arguments into one that
    accepts a list as argument, while doing the conversion automatically.
    The advantage of this is that you don't have to unpack the list of
    arguments `x` yourself, which makes the code easier to read and
    also reduces the risk of bugs if you change the number of dimensions
    or their order in the search-space.
    Examples
    --------
    >>> # Define the search-space dimensions. They must all have names!
    >>> from deephyper.skopt.space import Real
    >>> from deephyper.skopt import forest_minimize
    >>> from deephyper.skopt.utils import use_named_args
    >>> dim1 = Real(name='foo', low=0.0, high=1.0)
    >>> dim2 = Real(name='bar', low=0.0, high=1.0)
    >>> dim3 = Real(name='baz', low=0.0, high=1.0)
    >>>
    >>> # Gather the search-space dimensions in a list.
    >>> dimensions = [dim1, dim2, dim3]
    >>>
    >>> # Define the objective function with named arguments
    >>> # and use this function-decorator to specify the
    >>> # search-space dimensions.
    >>> @use_named_args(dimensions=dimensions)
    ... def my_objective_function(foo, bar, baz):
    ...     return foo ** 2 + bar ** 4 + baz ** 8
    >>>
    >>> # Not the function is callable from the outside as
    >>> # `my_objective_function(x)` where `x` is a list of unnamed arguments,
    >>> # which then wraps your objective function that is callable as
    >>> # `my_objective_function(foo, bar, baz)`.
    >>> # The conversion from a list `x` to named parameters `foo`,
    >>> # `bar`, `baz`
    >>> # is done automatically.
    >>>
    >>> # Run the optimizer on the wrapped objective function which is called
    >>> # as `my_objective_function(x)` as expected by `forest_minimize()`.
    >>> result = forest_minimize(func=my_objective_function,
    ...                          dimensions=dimensions,
    ...                          n_calls=20, base_estimator="ET",
    ...                          random_state=4)
    >>>
    >>> # Print the best-found results in same format as the expected result.
    >>> print("Best fitness: " + str(result.fun))
    Best fitness: 0.1948080835239698
    >>> print("Best parameters: {}".format(result.x))
    Best parameters: [0.44134853091052617, 0.06570954323368307, 0.17586123323419825]
    Parameters
    ----------
    dimensions : list(Dimension)
        List of `Dimension`-objects for the search-space dimensions.
    Returns
    -------
    wrapped_func : callable
        Wrapped objective function.
    """
    def decorator(func):
        """
        This uses more advanced Python features to wrap `func` using a
        function-decorator, which are not explained so well in the
        official Python documentation.
        A good video tutorial explaining how this works is found here:
        https://www.youtube.com/watch?v=KlBPCzcQNU8
        Parameters
        ----------
        func : callable
            Function to minimize. Should take *named arguments*
            and return the objective value.
        """
        # Ensure all dimensions are correctly typed.
        check_list_types(dimensions, Dimension)
        # Ensure all dimensions have names.
        check_dimension_names(dimensions)
        # @wraps preserves `func`'s name and docstring on the wrapper.
        @wraps(func)
        def wrapper(x):
            """
            This is the code that will be executed every time the
            wrapped / decorated `func` is being called.
            It takes `x` as a single list of parameters and
            converts them to named arguments and calls `func` with them.
            Parameters
            ----------
            x : list
                A single list of parameters e.g. `[123, 3.0, 'linear']`
                which will be converted to named arguments and passed
                to `func`.
            Returns
            -------
            objective_value
                The objective value returned by `func`.
            """
            # Ensure the number of dimensions match
            # the number of parameters in the list x.
            if len(x) != len(dimensions):
                msg = (
                    "Mismatch in number of search-space dimensions. "
                    "len(dimensions)=={} and len(x)=={}"
                )
                msg = msg.format(len(dimensions), len(x))
                raise ValueError(msg)
            # Create a dict where the keys are the names of the dimensions
            # and the values are taken from the list of parameters x.
            arg_dict = {dim.name: value for dim, value in zip(dimensions, x)}
            # Call the wrapped objective function with the named arguments.
            objective_value = func(**arg_dict)
            return objective_value
        return wrapper
    return decorator
def cook_objective_scaler(scaler, base_estimator):
    """Prepare a Scikit-Learn preprocessing pipeline to map the output objective to a different space.

    Parameters
    ----------
    scaler : str or sklearn transformer
        One of ``"identity"``, ``"minmaxlog"``, ``"auto"`` or an already
        constructed transformer (returned unchanged).
    base_estimator :
        The surrogate model; used only when ``scaler == "auto"`` to pick
        a sensible default.
    """
    identity = FunctionTransformer(func=lambda x: x, inverse_func=lambda x: x)
    # log of min-max-scaled values; the epsilon keeps log(0) finite.
    log_transform = FunctionTransformer(
        func=lambda x: np.log(x + 1e-12),
        inverse_func=lambda x: np.exp(x) - 1e-12,
        check_inverse=True,
    )
    scalers = {
        "identity": identity,
        "minmaxlog": make_pipeline(MinMaxScaler(), log_transform),
    }
    if scaler == "auto":
        if isinstance(base_estimator, RandomForestRegressor):
            scaler = "minmaxlog"
        else:
            scaler = "identity"
    if type(scaler) is not str:
        # Assume a ready-made transformer/pipeline was passed in.
        return scaler
    if scaler not in scalers:
        raise ValueError(
            f"Objective scaler should be a sklearn.pipeline.Pipeline or a value in {list(scalers.keys())}"
        )
    return scalers[scaler]
| 28,832 | 32.881316 | 110 | py |
deephyper | deephyper-master/deephyper/skopt/plots.py | # -*- encoding: UTF-8 -*-
"""Plotting functions."""
import sys
import numpy as np
from itertools import count
from functools import partial
from scipy.optimize import OptimizeResult
from .acquisition import _gaussian_acquisition
from deephyper.skopt import expected_minimum, expected_minimum_random_sampling
from .space import Categorical
from collections import Counter
# For plot tests, matplotlib must be set to headless mode early
if "pytest" in sys.modules:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
from matplotlib.ticker import LogLocator
from matplotlib.ticker import MaxNLocator, FuncFormatter # noqa: E402
def plot_convergence(*args, **kwargs):
    """Plot one or several convergence traces.

    Parameters
    ----------
    args[i] : `OptimizeResult`, list of `OptimizeResult`, or tuple
        The result(s) for which to plot the convergence trace.

        - if `OptimizeResult`, then draw the corresponding single trace;
        - if list of `OptimizeResult`, then draw the corresponding convergence
          traces in transparency, along with the average convergence trace;
        - if tuple, then `args[i][0]` should be a string label and `args[i][1]`
          an `OptimizeResult` or a list of `OptimizeResult`.
    ax : `Axes`, optional
        The matplotlib axes on which to draw the plot, or `None` to create
        a new one.
    true_minimum : float, optional
        The true minimum value of the function, if known.
    yscale : None or string, optional
        The scale for the y-axis.

    Returns
    -------
    ax : `Axes`
        The matplotlib axes.
    """
    ax = kwargs.get("ax", None)
    true_minimum = kwargs.get("true_minimum", None)
    yscale = kwargs.get("yscale", None)
    if ax is None:
        ax = plt.gca()
    ax.set_title("Convergence plot")
    ax.set_xlabel("Number of calls $n$")
    ax.set_ylabel(r"$\min f(x)$ after $n$ calls")
    ax.grid()
    if yscale is not None:
        ax.set_yscale(yscale)
    colors = cm.viridis(np.linspace(0.25, 1.0, len(args)))
    # Fix: initialize `name` so the legend check below cannot raise a
    # NameError when no results are given.
    name = None
    for results, color in zip(args, colors):
        if isinstance(results, tuple):
            name, results = results
        else:
            name = None
        if isinstance(results, OptimizeResult):
            n_calls = len(results.x_iters)
            mins = [np.min(results.func_vals[:i]) for i in range(1, n_calls + 1)]
            ax.plot(
                range(1, n_calls + 1),
                mins,
                c=color,
                marker=".",
                markersize=12,
                lw=2,
                label=name,
            )
        elif isinstance(results, list):
            n_calls = len(results[0].x_iters)
            iterations = range(1, n_calls + 1)
            mins = [[np.min(r.func_vals[:i]) for i in iterations] for r in results]
            # Individual runs in transparency, then the average on top.
            for m in mins:
                ax.plot(iterations, m, c=color, alpha=0.2)
            ax.plot(
                iterations,
                np.mean(mins, axis=0),
                c=color,
                marker=".",
                markersize=12,
                lw=2,
                label=name,
            )
    # Fix: compare against None explicitly — a true minimum of exactly 0.0
    # is falsy and was silently skipped by the previous truthiness test.
    if true_minimum is not None:
        ax.axhline(true_minimum, linestyle="--", color="r", lw=1, label="True minimum")
    if true_minimum is not None or name:
        ax.legend(loc="best")
    return ax
def plot_gaussian_process(res, **kwargs):
    """Plots the optimization results and the gaussian process
    for 1-D objective functions.
    Parameters
    ----------
    res : `OptimizeResult`
        The result for which to plot the gaussian process.
    ax : `Axes`, optional
        The matplotlib axes on which to draw the plot, or `None` to create
        a new one.
    n_calls : int, default: -1
        Can be used to evaluate the model at call `n_calls`.
    objective : func, default: None
        Defines the true objective function. Must have one input parameter.
    n_points : int, default: 1000
        Number of data points used to create the plots
    noise_level : float, default: 0
        Sets the estimated noise level
    show_legend : boolean, default: True
        When True, a legend is plotted.
    show_title : boolean, default: True
        When True, a title containing the found minimum value
        is shown
    show_acq_func : boolean, default: False
        When True, the acquisition function is plotted
    show_next_point : boolean, default: False
        When True, the next evaluated point is plotted
    show_observations : boolean, default: True
        When True, observations are plotted as dots.
    show_mu : boolean, default: True
        When True, the predicted model is shown.
    Returns
    -------
    ax : `Axes`
        The matplotlib axes.
    """
    # Parse keyword options; defaults mirror the docstring above.
    ax = kwargs.get("ax", None)
    n_calls = kwargs.get("n_calls", -1)
    objective = kwargs.get("objective", None)
    noise_level = kwargs.get("noise_level", 0)
    show_legend = kwargs.get("show_legend", True)
    show_title = kwargs.get("show_title", True)
    show_acq_func = kwargs.get("show_acq_func", False)
    show_next_point = kwargs.get("show_next_point", False)
    show_observations = kwargs.get("show_observations", True)
    show_mu = kwargs.get("show_mu", True)
    n_points = kwargs.get("n_points", 1000)
    if ax is None:
        ax = plt.gca()
    # This plot only makes sense for a 1-D search space.
    n_dims = res.space.n_dims
    assert n_dims == 1, "Space dimension must be 1"
    dimension = res.space.dimensions[0]
    # `x` is in the original space, `x_model` in the model's transformed space.
    x, x_model = _evenly_sample(dimension, n_points)
    x = x.reshape(-1, 1)
    x_model = x_model.reshape(-1, 1)
    # Recover the call specification to know which acquisition was used.
    if res.specs is not None and "args" in res.specs:
        n_random = res.specs["args"].get("n_random_starts", None)
        acq_func = res.specs["args"].get("acq_func", "EI")
        acq_func_kwargs = res.specs["args"].get("acq_func_kwargs", {})
    if acq_func_kwargs is None:
        acq_func_kwargs = {}
    # "gp_hedge" mixes several acquisitions, so there is no single curve to
    # draw; fall back to EI for display purposes.
    if acq_func is None or acq_func == "gp_hedge":
        acq_func = "EI"
    if n_random is None:
        # Initial random points are the evaluations made before the first
        # surrogate model was fit.
        n_random = len(res.x_iters) - len(res.models)
    if objective is not None:
        fx = np.array([objective(x_i) for x_i in x])
    if n_calls < 0:
        # Default: use the final model and the full history.
        model = res.models[-1]
        curr_x_iters = res.x_iters
        curr_func_vals = res.func_vals
    else:
        # Replay the state as it was at iteration `n_calls`.
        model = res.models[n_calls]
        curr_x_iters = res.x_iters[: n_random + n_calls]
        curr_func_vals = res.func_vals[: n_random + n_calls]
    # Plot true function.
    if objective is not None:
        ax.plot(x, fx, "r--", label="True (unknown)")
        # 1.96 * noise_level gives the ~95% band of the observation noise.
        ax.fill(
            np.concatenate([x, x[::-1]]),
            np.concatenate(
                (
                    [fx_i - 1.9600 * noise_level for fx_i in fx],
                    [fx_i + 1.9600 * noise_level for fx_i in fx[::-1]],
                )
            ),
            alpha=0.2,
            fc="r",
            ec="None",
        )
    # Plot GP(x) + contours
    if show_mu:
        y_pred, sigma = model.predict(x_model, return_std=True)
        ax.plot(x, y_pred, "g--", label=r"$\mu_{GP}(x)$")
        # Shade the ~95% confidence band around the GP mean.
        ax.fill(
            np.concatenate([x, x[::-1]]),
            np.concatenate([y_pred - 1.9600 * sigma, (y_pred + 1.9600 * sigma)[::-1]]),
            alpha=0.2,
            fc="g",
            ec="None",
        )
    # Plot sampled points
    if show_observations:
        ax.plot(curr_x_iters, curr_func_vals, "r.", markersize=8, label="Observations")
    # When both the model/observations and the acquisition are drawn, put the
    # acquisition on a secondary y-axis so the scales do not clash.
    if (show_mu or show_observations or objective is not None) and show_acq_func:
        ax_ei = ax.twinx()
        ax_ei.set_ylabel(str(acq_func) + "(x)")
        plot_both = True
    else:
        ax_ei = ax
        plot_both = False
    if show_acq_func:
        acq = _gaussian_acquisition(
            x_model,
            model,
            y_opt=np.min(curr_func_vals),
            acq_func=acq_func,
            acq_func_kwargs=acq_func_kwargs,
        )
        next_x = x[np.argmin(acq)]
        next_acq = acq[np.argmin(acq)]
        # The acquisition is minimized internally; flip the sign for display.
        acq = -acq
        next_acq = -next_acq
        ax_ei.plot(x, acq, "b", label=str(acq_func) + "(x)")
        if not plot_both:
            ax_ei.fill_between(x.ravel(), 0, acq.ravel(), alpha=0.3, color="blue")
        if show_next_point and next_x is not None:
            ax_ei.plot(next_x, next_acq, "bo", markersize=6, label="Next query point")
    if show_title:
        ax.set_title(r"x* = %.4f, f(x*) = %.4f" % (res.x[0], res.fun))
    # Adjust plot layout
    ax.grid()
    ax.set_xlabel("x")
    ax.set_ylabel("f(x)")
    if show_legend:
        if plot_both:
            # Merge the legend entries of both y-axes into a single legend.
            lines, labels = ax.get_legend_handles_labels()
            lines2, labels2 = ax_ei.get_legend_handles_labels()
            ax_ei.legend(
                lines + lines2,
                labels + labels2,
                loc="best",
                prop={"size": 6},
                numpoints=1,
            )
        else:
            ax.legend(loc="best", prop={"size": 6}, numpoints=1)
    return ax
def plot_regret(*args, **kwargs):
    """Plot one or several cumulative regret traces.

    Parameters
    ----------
    args[i] : `OptimizeResult`, list of `OptimizeResult`, or tuple
        The result(s) for which to plot the cumulative regret trace.

        - if `OptimizeResult`, then draw the corresponding single trace;
        - if list of `OptimizeResult`, then draw the corresponding cumulative
          regret traces in transparency, along with the average cumulative
          regret trace;
        - if tuple, then `args[i][0]` should be a string label and
          `args[i][1]` an `OptimizeResult` or a list of `OptimizeResult`.
    ax : Axes`, optional
        The matplotlib axes on which to draw the plot, or `None` to create
        a new one.
    true_minimum : float, optional
        The true minimum value of the function, if known. When omitted it
        is estimated as the smallest observed value across all results.
    yscale : None or string, optional
        The scale for the y-axis.

    Returns
    -------
    ax : `Axes`
        The matplotlib axes.
    """
    ax = kwargs.get("ax", None)
    true_minimum = kwargs.get("true_minimum", None)
    yscale = kwargs.get("yscale", None)
    if ax is None:
        ax = plt.gca()
    ax.set_title("Cumulative regret plot")
    ax.set_xlabel("Number of calls $n$")
    ax.set_ylabel(r"$\sum_{i=0}^n(f(x_i) - optimum)$ after $n$ calls")
    ax.grid()
    if yscale is not None:
        ax.set_yscale(yscale)
    colors = cm.viridis(np.linspace(0.25, 1.0, len(args)))
    if true_minimum is None:
        # No reference optimum given: use the best value seen in any result.
        flattened = []
        for arg in args:
            candidate = arg[1] if isinstance(arg, tuple) else arg
            if isinstance(candidate, OptimizeResult):
                flattened.append(candidate)
            elif isinstance(candidate, list):
                flattened.extend(candidate)
        true_minimum = np.min([np.min(r.func_vals) for r in flattened])
    for entry, color in zip(args, colors):
        if isinstance(entry, tuple):
            name, entry = entry
        else:
            name = None
        if isinstance(entry, OptimizeResult):
            n_calls = len(entry.x_iters)
            cumulative = [
                np.sum(entry.func_vals[:i] - true_minimum)
                for i in range(1, n_calls + 1)
            ]
            ax.plot(
                range(1, n_calls + 1),
                cumulative,
                c=color,
                marker=".",
                markersize=12,
                lw=2,
                label=name,
            )
        elif isinstance(entry, list):
            n_calls = len(entry[0].x_iters)
            iterations = range(1, n_calls + 1)
            per_run = [
                [np.sum(r.func_vals[:i] - true_minimum) for i in iterations]
                for r in entry
            ]
            # Individual runs in transparency, then the average on top.
            for trace in per_run:
                ax.plot(iterations, trace, c=color, alpha=0.2)
            ax.plot(
                iterations,
                np.mean(per_run, axis=0),
                c=color,
                marker=".",
                markersize=12,
                lw=2,
                label=name,
            )
    if name:
        ax.legend(loc="best")
    return ax
def _format_scatter_plot_axes(ax, space, ylabel, plot_dims, dim_labels=None):
    # Shared axis formatting for the matrix plots produced by
    # plot_objective() and plot_evaluations(): sets limits, tick
    # locators/formatters, log scales and axis labels for every subplot.
    #
    # Parameters
    # ----------
    # ax : `Matplotlib.Axes` or 2-d matrix of Axes
    #     The subplot matrix to format (a single Axes for 1-d spaces).
    # space : `Space`
    #     The search-space the plots were drawn from.
    # ylabel : str
    #     Label for the y-axis of the diagonal plots.
    # plot_dims : list of tuples (index, `Dimension`)
    #     The search-space dimensions being plotted.
    # dim_labels : list of str, optional
    #     Axis labels; defaults to dimension names or "$X_i$".

    # Work out min, max of y axis for the diagonal so we can adjust
    # them all to the same value
    diagonal_ylim = _get_ylim_diagonal(ax)
    # Number of search-space dimensions we are using.
    if isinstance(ax, (list, np.ndarray)):
        n_dims = len(plot_dims)
    else:
        n_dims = 1
    if dim_labels is None:
        dim_labels = [
            "$X_{%i}$" % i if d.name is None else d.name for i, d in plot_dims
        ]
    # Axes for categorical dimensions are really integers; we have to
    # label them with the category names
    iscat = [isinstance(dim[1], Categorical) for dim in plot_dims]
    # Deal with formatting of the axes
    for i in range(n_dims):  # rows
        for j in range(n_dims):  # columns
            if n_dims > 1:
                ax_ = ax[i, j]
            else:
                ax_ = ax
            index_i, dim_i = plot_dims[i]
            index_j, dim_j = plot_dims[j]
            if j > i:
                # Upper triangle is unused.
                ax_.axis("off")
            elif i > j:  # off-diagonal plots
                # plots on the diagonal are special, like Texas. They have
                # their own range so do not mess with them.
                if not iscat[i]:  # bounds not meaningful for categoricals
                    ax_.set_ylim(*dim_i.bounds)
                if iscat[j]:
                    # partial() avoids creating closures in a loop
                    ax_.xaxis.set_major_formatter(
                        FuncFormatter(partial(_cat_format, dim_j))
                    )
                else:
                    ax_.set_xlim(*dim_j.bounds)
                if j == 0:  # only leftmost column (0) gets y labels
                    ax_.set_ylabel(dim_labels[i])
                    if iscat[i]:  # Set category labels for left column
                        ax_.yaxis.set_major_formatter(
                            FuncFormatter(partial(_cat_format, dim_i))
                        )
                else:
                    ax_.set_yticklabels([])
                # for all rows except ...
                if i < n_dims - 1:
                    ax_.set_xticklabels([])
                # ... the bottom row
                else:
                    [l.set_rotation(45) for l in ax_.get_xticklabels()]
                    ax_.set_xlabel(dim_labels[j])
                # configure plot for linear vs log-scale
                if dim_j.prior == "log-uniform":
                    ax_.set_xscale("log")
                else:
                    ax_.xaxis.set_major_locator(
                        MaxNLocator(6, prune="both", integer=iscat[j])
                    )
                if dim_i.prior == "log-uniform":
                    ax_.set_yscale("log")
                else:
                    ax_.yaxis.set_major_locator(
                        MaxNLocator(6, prune="both", integer=iscat[i])
                    )
            else:  # diagonal plots
                ax_.set_ylim(*diagonal_ylim)
                if not iscat[i]:
                    low, high = dim_i.bounds
                    ax_.set_xlim(low, high)
                # Diagonal plots keep ticks/labels on the right and top so
                # they do not collide with the off-diagonal subplots.
                ax_.yaxis.tick_right()
                ax_.yaxis.set_label_position("right")
                ax_.yaxis.set_ticks_position("both")
                ax_.set_ylabel(ylabel)
                ax_.xaxis.tick_top()
                ax_.xaxis.set_label_position("top")
                ax_.set_xlabel(dim_labels[j])
                if dim_i.prior == "log-uniform":
                    ax_.set_xscale("log")
                else:
                    ax_.xaxis.set_major_locator(
                        MaxNLocator(6, prune="both", integer=iscat[i])
                    )
                    if iscat[i]:
                        ax_.xaxis.set_major_formatter(
                            FuncFormatter(partial(_cat_format, dim_i))
                        )
    return ax
def partial_dependence(
    space, model, i, j=None, sample_points=None, n_samples=250, n_points=40, x_eval=None
):
    """Calculate the partial dependence for dimensions `i` and `j` with
    respect to the objective value, as approximated by `model`.

    The partial dependence plot shows how the value of the dimensions
    `i` and `j` influence the `model` predictions after "averaging out"
    the influence of all other dimensions.

    When `x_eval` is not `None`, the given values are used instead of
    random samples. In this case, `n_samples` will be ignored.

    Parameters
    ----------
    space : `Space`
        The parameter space over which the minimization was performed.
    model
        Surrogate model for the objective function.
    i : int
        The first dimension for which to calculate the partial dependence.
    j : int, default=None
        The second dimension for which to calculate the partial dependence.
        To calculate the 1D partial dependence on `i` alone set `j=None`.
    sample_points : np.array, shape=(n_points, n_dims), default=None
        Only used when `x_eval=None`, i.e in case partial dependence should
        be calculated.
        Randomly sampled and transformed points to use when averaging
        the model function at each of the `n_points` when using partial
        dependence.
    n_samples : int, default=250
        Number of random samples to use for averaging the model function
        at each of the `n_points` when using partial dependence. Only used
        when `sample_points=None` and `x_eval=None`.
    n_points : int, default=40
        Number of points at which to evaluate the partial dependence
        along each dimension `i` and `j`.
    x_eval : list, default=None
        `x_eval` is a list of parameter values or None. In case `x_eval`
        is not None, the partial dependence will be calculated using these
        values.
        Otherwise, random selected samples will be used.

    Returns
    -------
    For 1D partial dependence:

    xi : np.array
        The points at which the partial dependence was evaluated.
    yi : np.array
        The value of the model at each point `xi`.

    For 2D partial dependence:

    xi : np.array, shape=n_points
        The points at which the partial dependence was evaluated.
    yi : np.array, shape=n_points
        The points at which the partial dependence was evaluated.
    zi : np.array, shape=(n_points, n_points)
        The value of the model at each point `(xi, yi)`.

    For Categorical variables, the `xi` (and `yi` for 2D) returned are
    the indices of the variable in `Dimension.categories`.
    """
    # If we haven't parsed an x_eval list we use random sampled values instead
    if x_eval is None and sample_points is None:
        sample_points = space.transform(space.rvs(n_samples=n_samples))
    elif sample_points is None:
        sample_points = space.transform([x_eval])

    # Delegate to the 1D or 2D implementation depending on whether a
    # second dimension was requested.
    if j is None:
        return partial_dependence_1D(space, model, i, sample_points, n_points)
    else:
        return partial_dependence_2D(space, model, i, j, sample_points, n_points)
def plot_objective(
    result,
    levels=10,
    n_points=40,
    n_samples=250,
    size=2,
    zscale="linear",
    dimensions=None,
    sample_source="random",
    minimum="result",
    n_minimum_search=None,
    plot_dims=None,
    show_points=True,
    cmap="viridis_r",
):
    """Plot a 2-d matrix with so-called Partial Dependence plots
    of the objective function. This shows the influence of each
    search-space dimension on the objective function.

    This uses the last fitted model for estimating the objective function.

    The diagonal shows the effect of a single dimension on the
    objective function, while the plots below the diagonal show
    the effect on the objective function when varying two dimensions.

    The Partial Dependence is calculated by averaging the objective value
    for a number of random samples in the search-space,
    while keeping one or two dimensions fixed at regular intervals. This
    averages out the effect of varying the other dimensions and shows
    the influence of one or two dimensions on the objective function.

    Also shown are small black dots for the points that were sampled
    during optimization.

    A red star indicates per default the best observed minimum, but
    this can be changed by changing argument ´minimum´.

    .. note::
        The Partial Dependence plot is only an estimation of the surrogate
        model which in turn is only an estimation of the true objective
        function that has been optimized. This means the plots show
        an "estimate of an estimate" and may therefore be quite imprecise,
        especially if few samples have been collected during the
        optimization
        (e.g. less than 100-200 samples), and in regions of the search-space
        that have been sparsely sampled (e.g. regions away from the optimum).
        This means that the plots may change each time you run the
        optimization and they should not be considered completely reliable.
        These compromises are necessary because we cannot evaluate the
        expensive objective function in order to plot it, so we have to use
        the cheaper surrogate model to plot its contour. And in order to
        show search-spaces with 3 dimensions or more in a 2-dimensional
        plot,
        we further need to map those dimensions to only 2-dimensions using
        the Partial Dependence, which also causes distortions in the plots.

    Parameters
    ----------
    result : `OptimizeResult`
        The optimization results from calling e.g. `gp_minimize()`.
    levels : int, default=10
        Number of levels to draw on the contour plot, passed directly
        to `plt.contourf()`.
    n_points : int, default=40
        Number of points at which to evaluate the partial dependence
        along each dimension.
    n_samples : int, default=250
        Number of samples to use for averaging the model function
        at each of the `n_points` when `sample_method` is set to 'random'.
    size : float, default=2
        Height (in inches) of each facet.
    zscale : str, default='linear'
        Scale to use for the z axis of the contour plots. Either 'linear'
        or 'log'.
    dimensions : list of str, default=None
        Labels of the dimension
        variables. `None` defaults to `space.dimensions[i].name`, or
        if also `None` to `['X_0', 'X_1', ..]`.
    plot_dims : list of str and int, default=None
        List of dimension names or dimension indices from the
        search-space dimensions to be included in the plot.
        If `None` then use all dimensions except constant ones
        from the search-space.
    sample_source : str or list of floats, default='random'
        Defines to samples generation to use for averaging the model function
        at each of the `n_points`.
        A partial dependence plot is only generated, when `sample_source`
        is set to 'random' and `n_samples` is sufficient.
        `sample_source` can also be a list of
        floats, which is then used for averaging.
        Valid strings:

        - 'random' - `n_samples` random samples will used
        - 'result' - Use only the best observed parameters
        - 'expected_minimum' - Parameters that gives the best
          minimum Calculated using scipy's minimize method.
          This method currently does not work with categorical values.
        - 'expected_minimum_random' - Parameters that gives the
          best minimum when using naive random sampling.
          Works with categorical values.
    minimum : str or list of floats, default = 'result'
        Defines the values for the red points in the plots.
        Valid strings:

        - 'result' - Use best observed parameters
        - 'expected_minimum' - Parameters that gives the best
          minimum Calculated using scipy's minimize method.
          This method currently does not work with categorical values.
        - 'expected_minimum_random' - Parameters that gives the
          best minimum when using naive random sampling.
          Works with categorical values
    n_minimum_search : int, default = None
        Determines how many points should be evaluated
        to find the minimum when using 'expected_minimum' or
        'expected_minimum_random'. Parameter is used when
        `sample_source` and/or `minimum` is set to
        'expected_minimum' or 'expected_minimum_random'.
    show_points: bool, default = True
        Choose whether to show evaluated points in the
        contour plots.
    cmap: str or Colormap, default = 'viridis_r'
        Color map for contour plots. Passed directly to
        `plt.contourf()`

    Returns
    -------
    ax : `Matplotlib.Axes`
        A 2-d matrix of Axes-objects with the sub-plots.
    """
    # Here we define the values for which to plot the red dot (2d plot) and
    # the red dotted line (1d plot).
    # These same values will be used for evaluating the plots when
    # calculating dependence. (Unless partial
    # dependence is to be used instead).
    space = result.space
    # Get the relevant search-space dimensions.
    if plot_dims is None:
        # Get all dimensions.
        plot_dims = []
        for row in range(space.n_dims):
            if space.dimensions[row].is_constant:
                continue
            plot_dims.append((row, space.dimensions[row]))
    else:
        plot_dims = space[plot_dims]
    # Number of search-space dimensions we are using.
    n_dims = len(plot_dims)
    if dimensions is not None:
        assert len(dimensions) == n_dims
    x_vals = _evaluate_min_params(result, minimum, n_minimum_search)
    if sample_source == "random":
        x_eval = None
        samples = space.transform(space.rvs(n_samples=n_samples))
    else:
        x_eval = _evaluate_min_params(result, sample_source, n_minimum_search)
        samples = space.transform([x_eval])
    # Map categorical values to integer indices for plotting; `minimum`
    # is rebound here to the mapped coordinates of the red-star point.
    x_samples, minimum, _ = _map_categories(space, result.x_iters, x_vals)
    if zscale == "log":
        locator = LogLocator()
    elif zscale == "linear":
        locator = None
    else:
        raise ValueError(
            "Valid values for zscale are 'linear' and 'log'," " not '%s'." % zscale
        )
    fig, ax = plt.subplots(n_dims, n_dims, figsize=(size * n_dims, size * n_dims))
    fig.subplots_adjust(
        left=0.05, right=0.95, bottom=0.05, top=0.95, hspace=0.1, wspace=0.1
    )
    for i in range(n_dims):
        for j in range(n_dims):
            if i == j:
                # Diagonal: 1D partial dependence of a single dimension.
                index, dim = plot_dims[i]
                xi, yi = partial_dependence_1D(
                    space, result.models[-1], index, samples=samples, n_points=n_points
                )
                if n_dims > 1:
                    ax_ = ax[i, i]
                else:
                    ax_ = ax
                ax_.plot(xi, yi)
                ax_.axvline(minimum[index], linestyle="--", color="r", lw=1)
            # lower triangle
            elif i > j:
                # 2D partial dependence contour for a pair of dimensions.
                index1, dim1 = plot_dims[i]
                index2, dim2 = plot_dims[j]
                ax_ = ax[i, j]
                xi, yi, zi = partial_dependence_2D(
                    space, result.models[-1], index1, index2, samples, n_points
                )
                ax_.contourf(xi, yi, zi, levels, locator=locator, cmap=cmap)
                if show_points:
                    ax_.scatter(
                        x_samples[:, index2], x_samples[:, index1], c="k", s=10, lw=0.0
                    )
                # The red star marking the (estimated) minimum.
                ax_.scatter(
                    minimum[index2], minimum[index1], c=["r"], s=100, lw=0.0, marker="*"
                )
    ylabel = "Partial dependence"
    # Make various adjustments to the plots.
    return _format_scatter_plot_axes(
        ax, space, ylabel=ylabel, plot_dims=plot_dims, dim_labels=dimensions
    )
def plot_evaluations(result, bins=20, dimensions=None, plot_dims=None):
    """Visualize the order in which points were sampled during optimization.

    This creates a 2-d matrix plot where the diagonal plots are histograms
    that show the distribution of samples for each search-space dimension.

    The plots below the diagonal are scatter-plots of the samples for
    all combinations of search-space dimensions.

    The order in which samples
    were evaluated is encoded in each point's color.

    A red star shows the best found parameters.

    Parameters
    ----------
    result : `OptimizeResult`
        The optimization results from calling e.g. `gp_minimize()`.
    bins : int, bins=20
        Number of bins to use for histograms on the diagonal.
    dimensions : list of str, default=None
        Labels of the dimension
        variables. `None` defaults to `space.dimensions[i].name`, or
        if also `None` to `['X_0', 'X_1', ..]`.
    plot_dims : list of str and int, default=None
        List of dimension names or dimension indices from the
        search-space dimensions to be included in the plot.
        If `None` then use all dimensions except constant ones
        from the search-space.

    Returns
    -------
    ax : `Matplotlib.Axes`
        A 2-d matrix of Axes-objects with the sub-plots.
    """
    space = result.space
    # Convert categoricals to integers, so we can ensure consistent ordering.
    # Assign indices to categories in the order they appear in the Dimension.
    # Matplotlib's categorical plotting functions are only present in v 2.1+,
    # and may order categoricals differently in different plots anyway.
    samples, minimum, iscat = _map_categories(space, result.x_iters, result.x)
    # Color encodes evaluation order: earlier points darker, later brighter.
    order = range(samples.shape[0])
    if plot_dims is None:
        # Get all dimensions.
        plot_dims = []
        for row in range(space.n_dims):
            if space.dimensions[row].is_constant:
                continue
            plot_dims.append((row, space.dimensions[row]))
    else:
        plot_dims = space[plot_dims]
    # Number of search-space dimensions we are using.
    n_dims = len(plot_dims)
    if dimensions is not None:
        assert len(dimensions) == n_dims
    fig, ax = plt.subplots(n_dims, n_dims, figsize=(2 * n_dims, 2 * n_dims))
    fig.subplots_adjust(
        left=0.05, right=0.95, bottom=0.05, top=0.95, hspace=0.1, wspace=0.1
    )
    for i in range(n_dims):
        for j in range(n_dims):
            if i == j:
                # Diagonal: histogram of the sampled values.
                index, dim = plot_dims[i]
                if iscat[j]:
                    # One bin per category.
                    bins_ = len(dim.categories)
                elif dim.prior == "log-uniform":
                    # Log-spaced bin edges for log-uniform dimensions.
                    low, high = space.bounds[index]
                    bins_ = np.logspace(np.log10(low), np.log10(high), bins)
                else:
                    bins_ = bins
                if n_dims == 1:
                    ax_ = ax
                else:
                    ax_ = ax[i, i]
                ax_.hist(
                    samples[:, index],
                    bins=bins_,
                    range=None if iscat[j] else dim.bounds,
                )
            # lower triangle
            elif i > j:
                # Scatter plot of samples for this pair of dimensions.
                index_i, dim_i = plot_dims[i]
                index_j, dim_j = plot_dims[j]
                ax_ = ax[i, j]
                ax_.scatter(
                    samples[:, index_j],
                    samples[:, index_i],
                    c=order,
                    s=40,
                    lw=0.0,
                    cmap="viridis",
                )
                # Best found parameters shown as a big red star.
                ax_.scatter(
                    minimum[index_j],
                    minimum[index_i],
                    c=["r"],
                    s=100,
                    lw=0.0,
                    marker="*",
                )
    # Make various adjustments to the plots.
    return _format_scatter_plot_axes(
        ax,
        space,
        ylabel="Number of samples",
        plot_dims=plot_dims,
        dim_labels=dimensions,
    )
def _get_ylim_diagonal(ax):
"""Get the min / max of the ylim for all diagonal plots.
This is used in _adjust_fig() so the ylim is the same
for all diagonal plots.
Parameters
----------
ax : `Matplotlib.Axes`
2-dimensional matrix with Matplotlib Axes objects.
Returns
-------
ylim_diagonal : tuple(int)
The common min and max ylim for the diagonal plots.
"""
# Number of search-space dimensions used in this plot.
if isinstance(ax, (list, np.ndarray)):
n_dims = len(ax)
# Get ylim for all diagonal plots.
ylim = [ax[row, row].get_ylim() for row in range(n_dims)]
else:
n_dim = 1 # noqa: F841
ylim = [ax.get_ylim()]
# Separate into two lists with low and high ylim.
ylim_lo, ylim_hi = zip(*ylim)
# Min and max ylim for all diagonal plots.
ylim_min = np.min(ylim_lo)
ylim_max = np.max(ylim_hi)
return ylim_min, ylim_max
def partial_dependence_1D(space, model, i, samples, n_points=40):
    """
    Calculate the partial dependence for a single dimension.

    The given dimension `i` is fixed at `n_points` regular intervals
    between its bounds; at each value the `model` predictions over all
    `samples` are averaged. This shows how dimension `i` affects the
    objective value when the influence of all other dimensions is
    averaged out.

    Parameters
    ----------
    space : `Space`
        The parameter space over which the minimization was performed.
    model
        Surrogate model for the objective function.
    i : int
        The dimension for which to calculate the partial dependence.
    samples : np.array, shape=(n_points, n_dims)
        Randomly sampled and transformed points to use when averaging
        the model function at each of the `n_points` when using partial
        dependence.
    n_points : int, default=40
        Number of points at which to evaluate the partial dependence
        along each dimension `i`.

    Returns
    -------
    xi : np.array
        The points at which the partial dependence was evaluated.
    yi : np.array
        The average value of the modelled objective function at
        each point `xi`.
    """
    # Column offsets of each dimension within the transformed sample
    # matrix: categorical dimensions may be one-hot encoded and thus
    # span several columns, so dim_locs[i]:dim_locs[i + 1] selects the
    # columns belonging to dimension i.
    dim_locs = np.cumsum([0] + [d.transformed_size for d in space.dimensions])
    col_lo, col_hi = dim_locs[i], dim_locs[i + 1]

    def _avg_prediction(value):
        # Fix dimension i at `value` for every sample, then average the
        # model predictions over all samples. Averaging over (random)
        # samples is exactly how partial dependence is defined.
        fixed = np.array(samples)  # copy so the caller's samples survive
        fixed[:, col_lo:col_hi] = value
        return np.mean(model.predict(fixed))

    xi, xi_transformed = _evenly_sample(space.dimensions[i], n_points)
    # Evaluate the partial dependence at every grid point.
    yi = [_avg_prediction(v) for v in xi_transformed]
    return xi, yi
def partial_dependence_2D(space, model, i, j, samples, n_points=40):
    """
    Calculate the partial dependence for two dimensions in the search-space.

    Dimensions `i` and `j` are fixed on a regular 2-d grid between their
    bounds; at each grid point the `model` predictions over all `samples`
    are averaged. This shows how the two dimensions affect the objective
    value when the influence of all other dimensions is averaged out.

    Parameters
    ----------
    space : `Space`
        The parameter space over which the minimization was performed.
    model
        Surrogate model for the objective function.
    i : int
        The first dimension for which to calculate the partial dependence.
    j : int
        The second dimension for which to calculate the partial dependence.
    samples : np.array, shape=(n_points, n_dims)
        Randomly sampled and transformed points to use when averaging
        the model function at each of the `n_points` when using partial
        dependence.
    n_points : int, default=40
        Number of points at which to evaluate the partial dependence
        along each dimension `i` and `j`.

    Returns
    -------
    xi : np.array, shape=n_points
        The points at which the partial dependence was evaluated.
    yi : np.array, shape=n_points
        The points at which the partial dependence was evaluated.
    zi : np.array, shape=(n_points, n_points)
        The average value of the objective function at each point `(xi, yi)`.
    """
    # Column offsets of each dimension within the transformed sample
    # matrix; one-hot encoded categoricals span several columns, so a
    # slice selects all columns belonging to one dimension.
    dim_locs = np.cumsum([0] + [d.transformed_size for d in space.dimensions])
    cols_i = slice(dim_locs[i], dim_locs[i + 1])
    cols_j = slice(dim_locs[j], dim_locs[j + 1])

    def _avg_prediction(x, y):
        # Fix dimension j at x and dimension i at y for every sample,
        # then average the model predictions over all samples.
        fixed = np.array(samples)  # copy so the caller's samples survive
        fixed[:, cols_j] = x
        fixed[:, cols_i] = y
        return np.mean(model.predict(fixed))

    xi, xi_transformed = _evenly_sample(space.dimensions[j], n_points)
    yi, yi_transformed = _evenly_sample(space.dimensions[i], n_points)
    # Evaluate the partial dependence on the full 2-d grid and return it
    # as a numpy array (rows follow yi, columns follow xi).
    zi = np.array(
        [[_avg_prediction(x, y) for x in xi_transformed] for y in yi_transformed]
    )
    return xi, yi, zi
def plot_objective_2D(
    result,
    dimension_identifier1,
    dimension_identifier2,
    n_points=40,
    n_samples=250,
    levels=10,
    zscale="linear",
    sample_source="random",
    minimum="result",
    n_minimum_search=None,
    ax=None,
):
    """
    Create and return a Matplotlib figure and axes with a landscape
    contour-plot of the last fitted model of the search-space,
    overlaid with all the samples from the optimization results,
    for the two given dimensions of the search-space.

    This is similar to `plot_objective()` but only for 2 dimensions
    whose doc-string also has a more extensive explanation.

    Parameters
    ----------
    result : `OptimizeResult`
        The optimization results e.g. from calling `gp_minimize()`.
    dimension_identifier1 : str or int
        Name or index of a dimension in the search-space.
    dimension_identifier2 : str or int
        Name or index of a dimension in the search-space.
    n_samples : int, default=250
        Number of random samples used for estimating the contour-plot
        of the objective function.
    n_points : int, default=40
        Number of points along each dimension where the partial dependence
        is evaluated when generating the contour-plots.
    levels : int, default=10
        Number of levels to draw on the contour plot.
    zscale : str, default='linear'
        Scale to use for the z axis of the contour plots.
        Either 'log' or linear for all other choices.
    sample_source : str or list of floats, default='random'
        Strategy for generating the samples used for averaging; see
        `plot_objective` for the valid strings.
    minimum : str or list of floats, default='result'
        Defines the values for the red star in the plot; see
        `plot_objective` for the valid strings.
    n_minimum_search : int, default=None
        Number of points evaluated to find the minimum when using
        'expected_minimum' or 'expected_minimum_random'.
    ax : `Matplotlib.Axes`, default: None
        When set, everything is plotted inside this axis.

    Returns
    -------
    ax : `Matplotlib.Axes`
        The Matplotlib Figure-object.
        For example, you can save the plot by calling
        `fig.savefig('file.png')`
    """
    # Get the search-space instance from the optimization results.
    space = result.space
    x_vals = _evaluate_min_params(result, minimum, n_minimum_search)
    if sample_source == "random":
        x_eval = None
        samples = space.transform(space.rvs(n_samples=n_samples))
    else:
        x_eval = _evaluate_min_params(result, sample_source, n_minimum_search)
        samples = space.transform([x_eval])
    # Map categorical values to integer indices for plotting.
    x_samples, x_minimum, _ = _map_categories(space, result.x_iters, x_vals)
    # Get the dimension-object, its index in the search-space, and its name.
    index1, dimension1 = space[dimension_identifier1]
    index2, dimension2 = space[dimension_identifier2]
    # Get the samples from the optimization-log for the relevant dimensions.
    # samples1 = get_samples_dimension(result=result, index=index1)
    samples1 = x_samples[:, index1]
    samples2 = x_samples[:, index2]
    # samples2 = get_samples_dimension(result=result, index=index2)
    # Get the best-found samples for the relevant dimensions.
    best_sample1 = x_minimum[index1]
    best_sample2 = x_minimum[index2]
    # Get the last fitted model for the search-space.
    last_model = result.models[-1]
    # Estimate the objective function for these sampled points
    # using the last fitted model for the search-space.
    xi, yi, zi = partial_dependence_2D(
        space, last_model, index2, index1, samples, n_points=n_points
    )
    if ax is None:
        ax = plt.gca()
    # Scale for the z-axis of the contour-plot. Either Log or Linear (None).
    locator = LogLocator() if zscale == "log" else None
    # Plot the contour-landscape for the objective function.
    ax.contourf(xi, yi, zi, levels, locator=locator, cmap="viridis_r")
    # Plot all the parameters that were sampled during optimization.
    # These are plotted as small black dots.
    ax.scatter(samples1, samples2, c="black", s=10, linewidths=1)
    # Plot the best parameters that were sampled during optimization.
    # These are plotted as a big red star.
    ax.scatter(best_sample1, best_sample2, c="red", s=50, linewidths=1, marker="*")
    # Use the dimension-names as the labels for the plot-axes.
    ax.set_xlabel(dimension1.name)
    ax.set_ylabel(dimension2.name)
    ax.autoscale(enable=True, axis="x", tight=True)
    ax.autoscale(enable=True, axis="y", tight=True)
    # Use log-scale on the x-axis?
    if dimension1.prior == "log-uniform":
        ax.set_xscale("log")
    # Use log-scale on the y-axis?
    if dimension2.prior == "log-uniform":
        ax.set_yscale("log")
    return ax
def plot_histogram(result, dimension_identifier, bins=20, rotate_labels=0, ax=None):
    """
    Create and return a Matplotlib figure with a histogram
    of the samples from the optimization results,
    for a given dimension of the search-space.

    Parameters
    ----------
    result : `OptimizeResult`
        The optimization results e.g. from calling `gp_minimize()`.
    dimension_identifier : str or int
        Name or index of a dimension in the search-space.
    bins : int, bins=20
        Number of bins in the histogram.
    rotate_labels : int, rotate_labels=0
        Degree to rotate category-names on the x-axis.
        Only used for Categorical dimensions.
    ax : `Matplotlib.Axes`, default: None
        When set, the histogram is drawn inside this axis.

    Returns
    -------
    ax : `Matplotlib.Axes`
        The Matplotlib Axes-object.
    """
    # Get the search-space instance from the optimization results.
    space = result.space
    # Get the dimension-object.
    index, dimension = space[dimension_identifier]
    # Get the samples from the optimization-log for that particular dimension.
    samples = [x[index] for x in result.x_iters]
    if ax is None:
        ax = plt.gca()
    if isinstance(dimension, Categorical):
        # When the search-space dimension is Categorical, it means
        # that the possible values are strings. Matplotlib's histogram
        # does not support this, so we have to make a bar-plot instead.
        # NOTE: This only shows the categories that are in the samples.
        # So if a category was not sampled, it will not be shown here.
        # Count the number of occurrences of the string-categories.
        counter = Counter(samples)
        # The counter returns a dict where the keys are the category-names
        # and the values are the number of occurrences for each category.
        names = list(counter.keys())
        counts = list(counter.values())
        # Although Matplotlib's docs indicate that the bar() function
        # can take a list of strings for the x-axis, it doesn't appear to work.
        # So we hack it by creating a list of integers and setting the
        # tick-labels with the category-names instead.
        x = np.arange(len(counts))
        # Plot using bars.
        ax.bar(x, counts, tick_label=names)
        # Adjust the rotation of the category-names on the x-axis.
        ax.set_xticklabels(labels=names, rotation=rotate_labels)
    else:
        # Otherwise the search-space Dimension is either integer or float,
        # in which case the histogram can be plotted more easily.
        if dimension.prior == "log-uniform":
            # Map the number of bins to a log-space for the dimension bounds.
            bins_mapped = np.logspace(*np.log10(dimension.bounds), bins)
        else:
            # Use the original number of bins.
            bins_mapped = bins
        # Plot the histogram.
        ax.hist(samples, bins=bins_mapped, range=dimension.bounds)
        # Use log-scale on the x-axis?
        if dimension.prior == "log-uniform":
            ax.set_xscale("log")
    # Set the labels.
    ax.set_xlabel(dimension.name)
    ax.set_ylabel("Sample Count")
    return ax
def _map_categories(space, points, minimum):
    """
    Map categorical values to integers in a set of points.

    Returns
    -------
    mapped_points : np.array, shape=points.shape
        A copy of `points` with categoricals replaced with their indices in
        the corresponding `Dimension`.
    mapped_minimum : np.array, shape (space.n_dims,)
        A copy of `minimum` with categoricals replaced with their indices in
        the corresponding `Dimension`.
    iscat : np.array, shape (space.n_dims,)
        Boolean array indicating whether dimension `i` in the `space` is
        categorical.
    """
    # Object dtype so slicing works while categorical values survive.
    points = np.asarray(points, dtype=object)
    n_dims = space.n_dims
    iscat = np.repeat(False, n_dims)
    min_ = np.zeros(n_dims)
    pts_ = np.zeros(points.shape)
    for idx, dim in enumerate(space.dimensions):
        if not isinstance(dim, Categorical):
            # Numeric dimension: values carry over unchanged.
            pts_[:, idx] = points[:, idx]
            min_[idx] = minimum[idx]
        else:
            iscat[idx] = True
            # Replace each category with its position in dim.categories.
            catmap = {cat: pos for pos, cat in enumerate(dim.categories)}
            pts_[:, idx] = [catmap[cat] for cat in points[:, idx]]
            min_[idx] = catmap[minimum[idx]]
    return pts_, min_, iscat
def _evenly_sample(dim, n_points):
"""Return `n_points` evenly spaced points from a Dimension.
Parameters
----------
dim : `Dimension`
The Dimension to sample from. Can be categorical; evenly-spaced
category indices are chosen in order without replacement (result
may be smaller than `n_points`).
n_points : int
The number of points to sample from `dim`.
Returns
-------
xi : np.array
The sampled points in the Dimension. For Categorical
dimensions, returns the index of the value in
`dim.categories`.
xi_transformed : np.array
The transformed values of `xi`, for feeding to a model.
"""
cats = np.array(getattr(dim, "categories", []), dtype=object)
if len(cats): # Sample categoricals while maintaining order
xi = np.linspace(0, len(cats) - 1, min(len(cats), n_points), dtype=int)
xi_transformed = dim.transform(cats[xi])
else:
bounds = dim.bounds
# XXX use linspace(*bounds, n_points) after python2 support ends
xi = np.linspace(bounds[0], bounds[1], n_points)
xi_transformed = dim.transform(xi)
return xi, xi_transformed
def _cat_format(dimension, x, _):
"""Categorical axis tick formatter function. Returns the name of category
`x` in `dimension`. Used with `matplotlib.ticker.FuncFormatter`."""
return str(dimension.categories[int(x)])
def _evaluate_min_params(
result, params="result", n_minimum_search=None, random_state=None
):
"""Returns the minimum based on `params`"""
x_vals = None
if isinstance(params, str):
if params == "result":
# Using the best observed result
x_vals = result.x
elif params == "expected_minimum":
if result.space.is_partly_categorical:
# space is also categorical
raise ValueError(
"expected_minimum does not support any" "categorical values"
)
# Do a gradient based minimum search using scipys own minimizer
if n_minimum_search:
# If a value for
# expected_minimum_samples has been parsed
x_vals, _ = expected_minimum(
result, n_random_starts=n_minimum_search, random_state=random_state
)
else: # Use standard of 20 random starting points
x_vals, _ = expected_minimum(
result, n_random_starts=20, random_state=random_state
)
elif params == "expected_minimum_random":
# Do a minimum search by evaluating the function with
# n_samples sample values
if n_minimum_search is not None:
# If a value for
# n_minimum_samples has been parsed
x_vals, _ = expected_minimum_random_sampling(
result, n_random_starts=n_minimum_search, random_state=random_state
)
else:
# Use standard of 10^n_parameters. Note this
# becomes very slow for many parameters
n_minimum_search = 10 ** len(result.x)
x_vals, _ = expected_minimum_random_sampling(
result, n_random_starts=n_minimum_search, random_state=random_state
)
else:
raise ValueError(
"Argument ´eval_min_params´ must be a valid" "string (´result´)"
)
elif isinstance(params, list):
assert len(params) == len(result.x), (
"Argument"
"´eval_min_params´ of type list must have same length as"
"number of features"
)
# Using defined x_values
x_vals = params
else:
raise ValueError("Argument ´eval_min_params´ must" "be a string or a list")
return x_vals
| 51,772 | 34.076558 | 88 | py |
deephyper | deephyper-master/deephyper/skopt/__init__.py | """
Scikit-Optimize, or `skopt`, is a simple and efficient library to
minimize (very) expensive and noisy black-box functions. It implements
several methods for sequential model-based optimization. `skopt` is reusable
in many contexts and accessible.
"""
try:
    # This variable is injected in the __builtins__ by the build
    # process. It is used to enable importing subpackages of sklearn when
    # the binaries are not built
    __SKOPT_SETUP__
except NameError:
    # Normal import (not a build): run the full package initialization.
    __SKOPT_SETUP__ = False

# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = "0.9.8"

if __SKOPT_SETUP__:
    import sys

    sys.stderr.write("Partial import of skopt during the build process.\n")
    # We are not importing the rest of scikit-optimize during the build
    # process, as it may not be compiled yet
else:
    import platform
    import struct

    from . import acquisition
    from . import benchmarks
    from . import callbacks
    from . import learning
    from . import optimizer
    from . import space
    from . import sampler
    from .optimizer import dummy_minimize
    from .optimizer import forest_minimize
    from .optimizer import gbrt_minimize
    from .optimizer import gp_minimize
    from .optimizer import Optimizer
    from .searchcv import BayesSearchCV
    from .space import Space
    from .utils import dump
    from .utils import expected_minimum
    from .utils import load

    # Public API of the package.
    # NOTE(review): "plots" is listed in __all__ but not imported above —
    # presumably deliberate to avoid a hard matplotlib dependency; confirm.
    __all__ = (
        "acquisition",
        "benchmarks",
        "callbacks",
        "learning",
        "optimizer",
        "plots",
        "sampler",
        "space",
        "gp_minimize",
        "dummy_minimize",
        "forest_minimize",
        "gbrt_minimize",
        "Optimizer",
        "dump",
        "load",
        "expected_minimum",
        "BayesSearchCV",
        "Space",
    )
    # Interpreter/platform flags used elsewhere in the package.
    IS_PYPY = platform.python_implementation() == "PyPy"
    _IS_32BIT = 8 * struct.calcsize("P") == 32
| 2,304 | 26.440476 | 76 | py |
deephyper | deephyper-master/deephyper/skopt/space/transformers.py | from __future__ import division
import numpy as np
from sklearn.preprocessing import LabelBinarizer
class Transformer(object):
    """Abstract interface shared by every 1-D space transformer.

    Subclasses must override :meth:`transform` and
    :meth:`inverse_transform`; :meth:`fit` is a no-op unless a subclass
    needs to learn state from the data.
    """

    def fit(self, X):
        """Default no-op fit; returns ``self`` to allow call chaining."""
        return self

    def transform(self, X):
        """Map samples to the warped space (must be overridden)."""
        raise NotImplementedError

    def inverse_transform(self, X):
        """Map warped samples back to the original space (must be overridden)."""
        raise NotImplementedError
class Identity(Transformer):
    """Pass samples through unchanged.

    When ``type_func`` is given, ``inverse_transform`` converts only the
    first element and returns it wrapped in a one-element list.
    """

    def __init__(self, type_func=None) -> None:
        self.type_func = type_func

    def transform(self, X):
        """Identity mapping: return ``X`` untouched."""
        return X

    def inverse_transform(self, Xt):
        """Return ``Xt`` untouched, or ``[type_func(Xt[0])]`` when set.

        NOTE(review): only ``Xt[0]`` is converted -- presumably callers
        pass a single sample at a time; verify before using on batches.
        """
        if self.type_func:
            return [self.type_func(Xt[0])]
        return Xt
class ToInteger(Transformer):
    """Truncate a scalar sample to ``int`` on the forward pass.

    ``inverse_transform`` is a pass-through (values come back unchanged).
    """

    def transform(self, X):
        """Return ``X`` cast to ``int`` (expects a scalar)."""
        return int(X)

    def inverse_transform(self, Xt):
        """Pass-through: no inverse conversion is applied."""
        return Xt
class StringEncoder(Transformer):
    """Encode categories as strings.

    ``transform`` casts everything to ``str``; ``inverse_transform``
    casts back to the type recorded by :meth:`fit` (``dtype``, default
    ``str``).
    """

    def __init__(self, dtype=str):
        super(StringEncoder, self).__init__()
        self.dtype = dtype

    def fit(self, X):
        """Record the category type from the first element.

        Parameters
        ----------
        X : array-like, shape=(n_categories,)
            List of categories. All elements must be of the same type.
        """
        if len(X) > 0:
            # The inverse cast uses the type of the first category.
            self.dtype = type(X[0])

    def transform(self, X):
        """Return the string representation of each sample.

        Parameters
        ----------
        X : array-like, shape=(n_samples,)
            List of categories.

        Returns
        -------
        Xt : array-like, shape=(n_samples,)
            The string encoded categories.
        """
        return [str(item) for item in X]

    def inverse_transform(self, Xt):
        """Cast string-encoded samples back to the fitted type.

        Parameters
        ----------
        Xt : array-like, shape=(n_samples,)
            String encoded categories.

        Returns
        -------
        X : array-like, shape=(n_samples,)
            The original categories.
        """
        return [self.dtype(item) for item in Xt]
class LogN(Transformer):
    """Base-N logarithm transform (computed from base-10 log ratios)."""

    def __init__(self, base):
        self._base = base

    def transform(self, X):
        # log_b(x) = log10(x) / log10(b)
        values = np.asarray(X, dtype=float)
        return np.log10(values) / np.log10(self._base)

    def inverse_transform(self, Xt):
        exponents = np.asarray(Xt, dtype=float)
        return self._base ** exponents
class CategoricalEncoder(Transformer):
    """One-hot encoder for categorical variables, backed by
    sklearn's ``LabelBinarizer``."""

    def __init__(self):
        """Convert labeled categories into one-hot encoded features."""
        self._lb = LabelBinarizer()

    def fit(self, X):
        """Fit a list or array of categories.

        Parameters
        ----------
        X : array-like, shape=(n_categories,)
            List of categories.
        """
        # Map each category to the integer label the binarizer will see.
        self.mapping_ = {category: index for index, category in enumerate(X)}
        self.inverse_mapping_ = {
            index: category for category, index in self.mapping_.items()
        }
        labels = [self.mapping_[category] for category in X]
        self._lb.fit(labels)
        self.n_classes = len(self._lb.classes_)
        return self

    def transform(self, X):
        """Transform an array of categories to a one-hot encoded
        representation.

        Parameters
        ----------
        X : array-like, shape=(n_samples,)
            List of categories.

        Returns
        -------
        Xt : array-like, shape=(n_samples, n_categories)
            The one-hot encoded categories.
        """
        labels = [self.mapping_[category] for category in X]
        return self._lb.transform(labels)

    def inverse_transform(self, Xt):
        """Inverse transform one-hot encoded categories back to their
        original representation.

        Parameters
        ----------
        Xt : array-like, shape=(n_samples, n_categories)
            One-hot encoded categories.

        Returns
        -------
        X : array-like, shape=(n_samples,)
            The original categories.
        """
        labels = self._lb.inverse_transform(np.asarray(Xt))
        return [self.inverse_mapping_[label] for label in labels]
class LabelEncoder(Transformer):
    """Integer (label) encoder for categorical variables."""

    def __init__(self, X=None):
        if X is not None:
            self.fit(X)

    def fit(self, X):
        """Fit a list or array of categories.

        Parameters
        ----------
        X : array-like, shape=(n_categories,)
            List of categories.
        """
        X = np.asarray(X)
        if X.dtype == object:
            # Object arrays keep the caller's ordering.
            self.mapping_ = {category: index for index, category in enumerate(X)}
        else:
            # Homogeneous dtypes are deduplicated and sorted via np.unique.
            self.mapping_ = {
                category: index for index, category in enumerate(np.unique(X))
            }
        self.inverse_mapping_ = {
            index: category for category, index in self.mapping_.items()
        }
        return self

    def transform(self, X):
        """Transform an array of categories to an integer encoded
        representation.

        Parameters
        ----------
        X : array-like, shape=(n_samples,)
            List of categories.

        Returns
        -------
        Xt : array-like, shape=(n_samples,)
            The integer categories.
        """
        return [self.mapping_[category] for category in np.asarray(X)]

    def inverse_transform(self, Xt):
        """Inverse transform integer categories back to their original
        representation.

        Parameters
        ----------
        Xt : array-like, shape=(n_samples,)
            Integer categories.

        Returns
        -------
        X : array-like, shape=(n_samples,)
            The original categories.
        """
        if isinstance(Xt, (float, np.float64)):
            # Accept a bare float scalar as a single sample.
            Xt = [Xt]
        else:
            Xt = np.asarray(Xt)
        return [self.inverse_mapping_[int(np.round(value))] for value in Xt]
class Normalize(Transformer):
    """
    Scales each dimension into the interval [0, 1].

    Parameters
    ----------
    low : float
        Lower bound.
    high : float
        Higher bound.
    is_int : bool, default=False
        Round and cast the return value of `inverse_transform` to integer. Set
        to `True` when applying this transform to integers.
    """

    def __init__(self, low, high, is_int=False):
        self.low = float(low)
        self.high = float(high)
        self.is_int = is_int
        # Tolerance for float bound checks.
        self._eps = 1e-8

    def transform(self, X):
        values = np.asarray(X)
        span = self.high - self.low
        # Validate bounds first (integers are compared after rounding,
        # floats with an epsilon tolerance). The error strings below keep
        # the original (missing-space) wording for compatibility.
        if self.is_int:
            rounded = np.round(values)
            if np.any(rounded > self.high):
                raise ValueError(
                    "All integer values shouldbe less than %f" % self.high
                )
            if np.any(rounded < self.low):
                raise ValueError(
                    "All integer values shouldbe greater than %f" % self.low
                )
        else:
            if np.any(values > self.high + self._eps):
                raise ValueError("All values shouldbe less than %f" % self.high)
            if np.any(values < self.low - self._eps):
                raise ValueError("All values shouldbe greater than %f" % self.low)
        if span == 0.0:
            # Degenerate dimension: every value maps to 0.
            return values * 0.0
        if self.is_int:
            return (np.round(values).astype(int) - self.low) / span
        return (values - self.low) / span

    def inverse_transform(self, X):
        values = np.asarray(X)
        if np.any(values > 1.0 + self._eps):
            raise ValueError("All values should be less than 1.0")
        if np.any(values < 0.0 - self._eps):
            raise ValueError("All values should be greater than 0.0")
        original = values * (self.high - self.low) + self.low
        if self.is_int:
            return np.round(original).astype(int)
        return original
class Pipeline(Transformer):
    """
    A lightweight pipeline to chain transformers.

    Transforms are applied in order; the inverse applies them in reverse.

    Parameters
    ----------
    transformers : list
        A list of Transformer instances.
    """

    def __init__(self, transformers):
        self.transformers = list(transformers)
        for step in self.transformers:
            if not isinstance(step, Transformer):
                raise ValueError(
                    "Provided transformers should be a Transformer "
                    "instance. Got %s" % step
                )

    def fit(self, X):
        for step in self.transformers:
            step.fit(X)
        return self

    def transform(self, X):
        for step in self.transformers:
            X = step.transform(X)
        return X

    def inverse_transform(self, X):
        # Undo each step in reverse order.
        for step in reversed(self.transformers):
            X = step.inverse_transform(X)
        return X
| 8,914 | 26.600619 | 85 | py |
deephyper | deephyper-master/deephyper/skopt/space/space.py | import numbers
import numpy as np
import yaml
from scipy.stats.distributions import randint
from scipy.stats.distributions import rv_discrete
from scipy.stats.distributions import uniform, truncnorm
from sklearn.utils import check_random_state
from sklearn.utils.fixes import sp_version
# Newer scikit-learn exposes the scipy version as a Version object rather
# than a tuple; normalize it to the plain release tuple either way.
if type(sp_version) is not tuple:  # Version object since sklearn>=2.3.x
    if hasattr(sp_version, "release"):
        sp_version = sp_version.release
    else:
        # Older Version objects keep the release tuple on a private field.
        sp_version = sp_version._version.release
from .transformers import CategoricalEncoder
from .transformers import StringEncoder
from .transformers import LabelEncoder
from .transformers import Normalize
from .transformers import Identity
from .transformers import LogN
from .transformers import Pipeline
from .transformers import ToInteger
import ConfigSpace as CS
from ConfigSpace.util import deactivate_inactive_hyperparameters
from sklearn.impute import SimpleImputer
# helper class to be able to print [1, ..., 4] instead of [1, '...', 4]
class _Ellipsis:
def __repr__(self):
return "..."
def _transpose_list_array(x):
"""Transposes a list matrix"""
n_dims = len(x)
assert n_dims > 0
n_samples = len(x[0])
rows = [None] * n_samples
for i in range(n_samples):
r = [None] * n_dims
for j in range(n_dims):
r[j] = x[j][i]
rows[i] = r
return rows
def check_dimension(dimension, transform=None):
    """Turn a provided dimension description into a dimension object.

    Checks that the provided dimension falls into one of the
    supported types. For a list of supported types, look at
    the documentation of ``dimension`` below.

    If ``dimension`` is already a ``Dimension`` instance, return it.

    Parameters
    ----------
    dimension : Dimension
        Search space Dimension.
        Each search dimension can be defined either as

        - a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
          dimensions),
        - a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
          dimensions),
        - as a list of categories (for `Categorical` dimensions), or
        - an instance of a `Dimension` object (`Real`, `Integer` or
          `Categorical`).

    transform : "identity", "normalize", "string", "label", "onehot" optional
        - For `Categorical` dimensions, the following transformations are
          supported.

          - "onehot" (default) one-hot transformation of the original space.
          - "label" integer transformation of the original space
          - "string" string transformation of the original space.
          - "identity" same as the original space.

        - For `Real` and `Integer` dimensions, the following transformations
          are supported.

          - "identity", (default) the transformed space is the same as the
            original space.
          - "normalize", the transformed space is scaled to be between 0 and 1.

    Returns
    -------
    dimension : Dimension
        Dimension instance.
    """
    # Already a Dimension object: nothing to do.
    if isinstance(dimension, Dimension):
        return dimension
    if not isinstance(dimension, (list, tuple, np.ndarray)):
        raise ValueError("Dimension has to be a list or tuple.")
    # A `Dimension` described by a single value is assumed to be
    # a `Categorical` dimension. This can be used in `BayesSearchCV`
    # to define subspaces that fix one value, e.g. to choose the
    # model type, see "sklearn-gridsearchcv-replacement.py"
    # for examples.
    if len(dimension) == 1:
        return Categorical(dimension, transform=transform)
    # Two elements: interpreted as (low, high) bounds unless either
    # element is a string/bool, in which case it is a category list.
    if len(dimension) == 2:
        if any(
            [isinstance(d, (str, bool)) or isinstance(d, np.bool_) for d in dimension]
        ):
            return Categorical(dimension, transform=transform)
        elif all([isinstance(dim, numbers.Integral) for dim in dimension]):
            return Integer(*dimension, transform=transform)
        elif any([isinstance(dim, numbers.Real) for dim in dimension]):
            return Real(*dimension, transform=transform)
        else:
            raise ValueError(
                "Invalid dimension {}. Read the documentation for"
                " supported types.".format(dimension)
            )
    # Three elements: (low, high, prior) when the third element is a valid
    # prior name; otherwise treated as three categories.
    if len(dimension) == 3:
        if any([isinstance(dim, int) for dim in dimension[:2]]) and dimension[2] in [
            "uniform",
            "log-uniform",
        ]:
            return Integer(*dimension, transform=transform)
        elif any(
            [isinstance(dim, (float, int)) for dim in dimension[:2]]
        ) and dimension[2] in ["uniform", "log-uniform"]:
            return Real(*dimension, transform=transform)
        else:
            return Categorical(dimension, transform=transform)
    # Four elements: (low, high, "log-uniform", base). If the pattern does
    # not match, execution falls through to the Categorical case below.
    if len(dimension) == 4:
        if (
            any([isinstance(dim, int) for dim in dimension[:2]])
            and dimension[2] == "log-uniform"
            and isinstance(dimension[3], int)
        ):
            return Integer(*dimension, transform=transform)
        elif (
            any([isinstance(dim, (float, int)) for dim in dimension[:2]])
            and dimension[2] == "log-uniform"
            and isinstance(dimension[3], int)
        ):
            return Real(*dimension, transform=transform)
    # Anything longer than three elements (including unmatched 4-tuples)
    # is treated as a list of categories.
    if len(dimension) > 3:
        return Categorical(dimension, transform=transform)
    raise ValueError(
        "Invalid dimension {}. Read the documentation for "
        "supported types.".format(dimension)
    )
class Dimension(object):
    """Base class for search space dimensions."""

    # Default sampling prior; subclasses override (e.g. "uniform").
    prior = None

    def rvs(self, n_samples=1, random_state=None):
        """Draw random samples.

        Parameters
        ----------
        n_samples : int or None
            The number of samples to be drawn.
        random_state : int, RandomState instance, or None (default)
            Set random state to something other than None for reproducible
            results.
        """
        rng = check_random_state(random_state)
        # Samples are drawn in the warped space, then mapped back.
        warped = self._rvs.rvs(size=n_samples, random_state=rng)
        return self.inverse_transform(warped)

    def transform(self, X):
        """Transform samples from the original space to a warped space."""
        return self.transformer.transform(X)

    def inverse_transform(self, Xt):
        """Inverse transform samples from the warped space back into the
        original space.
        """
        return self.transformer.inverse_transform(Xt)

    def set_transformer(self):
        """Install the `transformer`/`_rvs` pair (subclass responsibility)."""
        raise NotImplementedError

    @property
    def size(self):
        return 1

    @property
    def transformed_size(self):
        return 1

    @property
    def bounds(self):
        raise NotImplementedError

    @property
    def is_constant(self):
        raise NotImplementedError

    @property
    def transformed_bounds(self):
        raise NotImplementedError

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        # Only strings or None are accepted as dimension names.
        if value is not None and not isinstance(value, str):
            raise ValueError("Dimension's name must be either string or None.")
        self._name = value
def _uniform_inclusive(loc=0.0, scale=1.0):
# like scipy.stats.distributions but inclusive of `high`
# XXX scale + 1. might not actually be a float after scale if
# XXX scale is very large.
return uniform(loc=loc, scale=np.nextafter(scale, scale + 1.0))
def _normal_inclusive(loc=0.0, scale=1.0, lower=-2, upper=2):
assert lower <= upper
a, b = (lower - loc) / scale, (upper - loc) / scale
return truncnorm(a, b, loc=loc, scale=scale)
class Real(Dimension):
    """Search space dimension that can take on any real value.

    Parameters
    ----------
    low : float
        Lower bound (inclusive).
    high : float
        Upper bound (inclusive).
    prior : "uniform", "log-uniform" or "normal", default="uniform"
        Distribution to use when sampling random points for this dimension.

        - If `"uniform"`, points are sampled uniformly between the lower
          and upper bounds.
        - If `"log-uniform"`, points are sampled uniformly between
          `log(lower, base)` and `log(upper, base)` where log
          has base `base`.
        - If `"normal"`, points are sampled from a normal distribution
          centered at `loc` with scale `scale`, truncated to the bounds
          (only supported with the "identity" transform).
    base : int
        The logarithmic base to use for a log-uniform prior.
        Default 10, otherwise commonly 2.
    transform : "identity", "normalize", optional
        The following transformations are supported.

        - "identity", (default) the transformed space is the same as the
          original space.
        - "normalize", the transformed space is scaled to be between
          0 and 1.
    name : str or None
        Name associated with the dimension, e.g., "learning rate".
    dtype : str or dtype, default=float
        float type which will be used in inverse_transform,
        can be float.
    loc : float or None
        Center of the truncated normal (used only when ``prior="normal"``).
    scale : float or None
        Scale of the truncated normal (used only when ``prior="normal"``).
    """

    def __init__(
        self,
        low,
        high,
        prior="uniform",
        base=10,
        transform=None,
        name=None,
        dtype=float,
        loc=None,
        scale=None,
    ):
        if high <= low:
            raise ValueError(
                "the lower bound {} has to be less than the"
                " upper bound {}".format(low, high)
            )
        if prior not in ["uniform", "log-uniform", "normal"]:
            raise ValueError(
                "prior should be 'normal', 'uniform' or 'log-uniform'"
                " got {}".format(prior)
            )
        self.low = low
        self.high = high
        self.prior = prior
        self.base = base
        self.log_base = np.log10(base)
        self.name = name
        self.dtype = dtype
        self.loc = loc
        self.scale = scale
        self._rvs = None
        self.transformer = None
        self.transform_ = transform
        if isinstance(self.dtype, str) and self.dtype not in [
            "float",
            "float16",
            "float32",
            "float64",
        ]:
            raise ValueError(
                "dtype must be 'float', 'float16', 'float32'"
                "or 'float64'"
                " got {}".format(self.dtype)
            )
        elif isinstance(self.dtype, type) and not np.issubdtype(
            self.dtype, np.floating
        ):
            raise ValueError(
                "dtype must be a np.floating subtype;" " got {}".format(self.dtype)
            )
        if transform is None:
            transform = "identity"
        self.set_transformer(transform)

    def set_transformer(self, transform="identity"):
        """Define rvs and transformer spaces.

        Parameters
        ----------
        transform : str
            Can be 'normalize' or 'identity'
        """
        self.transform_ = transform
        if self.transform_ not in ["normalize", "identity"]:
            raise ValueError(
                "transform should be 'normalize' or 'identity'"
                " got {}".format(self.transform_)
            )
        # XXX: The _rvs is for sampling in the transformed space.
        # The rvs on Dimension calls inverse_transform on the points sampled
        # using _rvs
        if self.transform_ == "normalize":
            # Set upper bound to next float after 1. to make the numbers
            # inclusive of upper edge.
            self._rvs = _uniform_inclusive(0.0, 1.0)
            # The "normal" prior is not supported with the normalize
            # transform (only identity).
            assert self.prior in ["uniform", "log-uniform"]
            if self.prior == "uniform":
                self.transformer = Pipeline(
                    [Identity(), Normalize(self.low, self.high)]
                )
            else:
                self.transformer = Pipeline(
                    [
                        LogN(self.base),
                        Normalize(
                            np.log10(self.low) / self.log_base,
                            np.log10(self.high) / self.log_base,
                        ),
                    ]
                )
        else:
            if self.prior == "uniform":
                self._rvs = _uniform_inclusive(self.low, self.high - self.low)
                self.transformer = Identity()
            elif self.prior == "normal":
                self._rvs = _normal_inclusive(self.loc, self.scale, self.low, self.high)
                self.transformer = Identity()
            else:
                # log-uniform: sample uniformly in log space.
                self._rvs = _uniform_inclusive(
                    np.log10(self.low) / self.log_base,
                    np.log10(self.high) / self.log_base
                    - np.log10(self.low) / self.log_base,
                )
                self.transformer = LogN(self.base)

    def __eq__(self, other):
        return (
            type(self) is type(other)
            and np.allclose([self.low], [other.low])
            and np.allclose([self.high], [other.high])
            and self.prior == other.prior
            and self.transform_ == other.transform_
            and self.loc == other.loc
            and self.scale == other.scale
        )

    def __repr__(self):
        return "Real(low={}, high={}, prior='{}', transform='{}', loc='{}', scale='{}')".format(
            self.low, self.high, self.prior, self.transform_, self.loc, self.scale
        )

    def inverse_transform(self, Xt):
        """Inverse transform samples from the warped space back into the
        original space.
        """
        inv_transform = super(Real, self).inverse_transform(Xt)
        if isinstance(inv_transform, list):
            inv_transform = np.array(inv_transform)
        # NOTE(review): clipping to [low, high] was deliberately disabled
        # here (original "PB commenting clip" comment) -- presumably so
        # NaN/out-of-range values survive the round trip; do not re-enable
        # without checking callers.
        # inv_transform = np.clip(inv_transform, self.low, self.high).astype(self.dtype)
        if self.dtype == float or self.dtype == "float":
            # necessary, otherwise the type is converted to a numpy type
            return getattr(inv_transform, "tolist")()
        else:
            return inv_transform

    @property
    def bounds(self):
        return (self.low, self.high)

    @property
    def is_constant(self):
        return self.low == self.high

    def __contains__(self, point):
        if isinstance(point, list):
            point = np.array(point)
        # BUGFIX: the original check `point == np.nan` was always False
        # (NaN never compares equal to itself), so the intended
        # "NaN is contained" branch was dead. Use np.isnan instead.
        if np.isnan(point):
            return True
        else:
            return self.low <= point <= self.high

    @property
    def transformed_bounds(self):
        if self.transform_ == "normalize":
            return 0.0, 1.0
        else:
            if self.prior == "uniform":
                return self.low, self.high
            else:
                return np.log10(self.low), np.log10(self.high)

    def distance(self, a, b):
        """Compute distance between point `a` and `b`.

        Parameters
        ----------
        a : float
            First point.
        b : float
            Second point.
        """
        if not (a in self and b in self):
            raise RuntimeError(
                "Can only compute distance for values within "
                "the space, not %s and %s." % (a, b)
            )
        return abs(a - b)
class Integer(Dimension):
    """Search space dimension that can take on integer values.

    Parameters
    ----------
    low : int
        Lower bound (inclusive).
    high : int
        Upper bound (inclusive).
    prior : "uniform" or "log-uniform", default="uniform"
        Distribution to use when sampling random integers for
        this dimension.

        - If `"uniform"`, integers are sampled uniformly between the lower
          and upper bounds.
        - If `"log-uniform"`, integers are sampled uniformly between
          `log(lower, base)` and `log(upper, base)` where log
          has base `base`.
    base : int
        The logarithmic base to use for a log-uniform prior.
        Default 10, otherwise commonly 2.
    transform : "identity", "normalize", optional
        The following transformations are supported.

        - "identity", (default) the transformed space is the same as the
          original space.
        - "normalize", the transformed space is scaled to be between
          0 and 1.
    name : str or None
        Name associated with dimension, e.g., "number of trees".
    dtype : str or dtype, default=np.int64
        integer type which will be used in inverse_transform,
        can be int, np.int16, np.uint32, np.int32, np.int64 (default).
        When set to int, `inverse_transform` returns a list instead of
        a numpy array
    loc : float or None
        Center for a truncated-normal prior; currently unreachable (see
        note in :meth:`set_transformer`).
    scale : float or None
        Scale for a truncated-normal prior; currently unreachable.
    """

    def __init__(
        self,
        low,
        high,
        prior="uniform",
        base=10,
        transform=None,
        name=None,
        dtype=np.int64,
        loc=None,
        scale=None,
    ):
        if high <= low:
            raise ValueError(
                "the lower bound {} has to be less than the"
                " upper bound {}".format(low, high)
            )
        if prior not in ["uniform", "log-uniform"]:
            raise ValueError(
                "prior should be 'uniform' or 'log-uniform'" " got {}".format(prior)
            )
        self.low = low
        self.high = high
        self.prior = prior
        self.base = base
        self.log_base = np.log10(base)
        self.name = name
        self.dtype = dtype
        self.transform_ = transform
        self._rvs = None
        self.transformer = None
        self.loc = loc
        self.scale = scale
        if isinstance(self.dtype, str) and self.dtype not in [
            "int",
            "int8",
            "int16",
            "int32",
            "int64",
            "uint8",
            "uint16",
            "uint32",
            "uint64",
        ]:
            raise ValueError(
                "dtype must be 'int', 'int8', 'int16',"
                "'int32', 'int64', 'uint8',"
                "'uint16', 'uint32', or"
                "'uint64', but got {}".format(self.dtype)
            )
        elif isinstance(self.dtype, type) and self.dtype not in [
            int,
            np.int8,
            np.int16,
            np.int32,
            np.int64,
            np.uint8,
            np.uint16,
            np.uint32,
            np.uint64,
        ]:
            raise ValueError(
                "dtype must be 'int', 'np.int8', 'np.int16',"
                "'np.int32', 'np.int64', 'np.uint8',"
                "'np.uint16', 'np.uint32', or"
                "'np.uint64', but got {}".format(self.dtype)
            )
        if transform is None:
            transform = "identity"
        self.set_transformer(transform)

    def set_transformer(self, transform="identity"):
        """Define _rvs and transformer spaces.

        Parameters
        ----------
        transform : str
            Can be 'normalize' or 'identity'
        """
        self.transform_ = transform
        if transform not in ["normalize", "identity"]:
            raise ValueError(
                "transform should be 'normalize' or 'identity'"
                " got {}".format(self.transform_)
            )
        if self.transform_ == "normalize":
            self._rvs = _uniform_inclusive(0.0, 1.0)
            assert self.prior in ["uniform", "log-uniform"]
            if self.prior == "uniform":
                self.transformer = Pipeline(
                    [Identity(), Normalize(self.low, self.high, is_int=True)]
                )
            else:
                self.transformer = Pipeline(
                    [
                        LogN(self.base),
                        Normalize(
                            np.log10(self.low) / self.log_base,
                            np.log10(self.high) / self.log_base,
                        ),
                    ]
                )
        else:
            if self.prior == "uniform":
                # randint's upper bound is exclusive, hence high + 1.
                self._rvs = randint(self.low, self.high + 1)
                self.transformer = Identity()
            elif self.prior == "normal":
                # NOTE(review): this branch is currently unreachable --
                # __init__ rejects prior="normal" -- and ToInteger's
                # inverse_transform would return floats; kept as-is until
                # normal priors for Integer are completed.
                self._rvs = _normal_inclusive(self.loc, self.scale, self.low, self.high)
                self.transformer = ToInteger()
            else:
                self._rvs = _uniform_inclusive(
                    np.log10(self.low) / self.log_base,
                    np.log10(self.high) / self.log_base
                    - np.log10(self.low) / self.log_base,
                )
                self.transformer = LogN(self.base)

    def __eq__(self, other):
        return (
            type(self) is type(other)
            and np.allclose([self.low], [other.low])
            and np.allclose([self.high], [other.high])
        )

    def __repr__(self):
        return "Integer(low={}, high={}, prior='{}', transform='{}')".format(
            self.low, self.high, self.prior, self.transform_
        )

    def inverse_transform(self, Xt):
        """Inverse transform samples from the warped space back into the
        original space.
        """
        # The concatenation of all transformed dimensions makes Xt to be
        # of type float, hence the required cast back to int.
        inv_transform = super(Integer, self).inverse_transform(Xt)
        if isinstance(inv_transform, list):
            inv_transform = np.array(inv_transform)
        inv_transform = np.clip(inv_transform, self.low, self.high)
        # NaN encodes an inactive value and cannot be cast to int, so
        # return the float array early with NaNs preserved.
        if any(np.isnan(inv_transform)):
            nan_values = np.isnan(inv_transform)
            inv_transform[nan_values] = np.round(inv_transform[nan_values])
            return inv_transform
        if self.dtype == int or self.dtype == "int":
            # necessary, otherwise the type is converted to a numpy type
            return getattr(np.round(inv_transform).astype(self.dtype), "tolist")()
        else:
            return np.round(inv_transform).astype(self.dtype)

    @property
    def bounds(self):
        return (self.low, self.high)

    @property
    def is_constant(self):
        return self.low == self.high

    def __contains__(self, point):
        if isinstance(point, list):
            point = np.array(point)
        # BUGFIX: the original check `point == np.nan` was always False
        # (NaN never compares equal to itself), so the intended
        # "NaN is contained" branch was dead. Use np.isnan instead.
        if np.isnan(point):
            return True
        else:
            return self.low <= point <= self.high

    @property
    def transformed_bounds(self):
        if self.transform_ == "normalize":
            return 0.0, 1.0
        else:
            if self.prior == "uniform":
                return self.low, self.high
            else:
                return np.log10(self.low), np.log10(self.high)

    def distance(self, a, b):
        """Compute distance between point `a` and `b`.

        Parameters
        ----------
        a : int
            First point.
        b : int
            Second point.
        """
        if not (a in self and b in self):
            raise RuntimeError(
                "Can only compute distance for values within "
                "the space, not %s and %s." % (a, b)
            )
        return abs(a - b)
class Categorical(Dimension):
    """Search space dimension that can take on categorical values.

    Parameters
    ----------
    categories : list, shape=(n_categories,)
        Sequence of possible categories.

    prior : list, shape=(categories,), default=None
        Prior probabilities for each category. By default all categories
        are equally likely.

    transform : "onehot", "string", "identity", "label", default="onehot"
        - "identity", the transformed space is the same as the original
          space.
        - "string", the transformed space is a string encoded
          representation of the original space.
        - "label", the transformed space is a label encoded
          representation (integer) of the original space.
        - "onehot", the transformed space is a one-hot encoded
          representation of the original space.

    name : str or None
        Name associated with dimension, e.g., "colors".
    """

    def __init__(self, categories, prior=None, transform=None, name=None):
        self.categories = tuple(categories)
        self.name = name
        if transform is None:
            transform = "onehot"
        self.transform_ = transform
        self.transformer = None
        self._rvs = None
        self.prior = prior
        if prior is None:
            # Uniform prior: equal probability for every category.
            self.prior_ = np.tile(1.0 / len(self.categories), len(self.categories))
        else:
            self.prior_ = prior
        self.set_transformer(transform)

    def set_transformer(self, transform="onehot"):
        """Define _rvs and transformer spaces.

        Parameters
        ----------
        transform : str
            Can be 'normalize', 'onehot', 'string', 'label', or 'identity'
        """
        self.transform_ = transform
        if transform not in ["identity", "onehot", "string", "normalize", "label"]:
            raise ValueError(
                "Expected transform to be 'identity', 'string',"
                "'label' or 'onehot' got {}".format(transform)
            )
        if transform == "onehot":
            self.transformer = CategoricalEncoder()
            self.transformer.fit(self.categories)
        elif transform == "string":
            self.transformer = StringEncoder()
            self.transformer.fit(self.categories)
        elif transform == "label":
            self.transformer = LabelEncoder()
            self.transformer.fit(self.categories)
        elif transform == "normalize":
            # Label-encode first, then scale the labels into [0, 1].
            self.transformer = Pipeline(
                [
                    LabelEncoder(list(self.categories)),
                    Normalize(0, len(self.categories) - 1, is_int=True),
                ]
            )
        else:
            # "identity": integer categories keep an int cast on the way
            # back; everything else passes through untouched.
            if all(isinstance(x, (int, np.integer)) for x in self.categories):
                self.transformer = Identity(type_func=lambda x: int(x))
            else:
                self.transformer = Identity()
            self.transformer.fit(self.categories)
        if transform == "normalize":
            self._rvs = _uniform_inclusive(0.0, 1.0)
        else:
            # XXX check that sum(prior) == 1
            # Sample category *indices* from the (possibly weighted) prior.
            self._rvs = rv_discrete(values=(range(len(self.categories)), self.prior_))

    def __eq__(self, other):
        return (
            type(self) is type(other)
            and self.categories == other.categories
            and np.allclose(self.prior_, other.prior_)
        )

    def __repr__(self):
        # Abbreviate long category/prior lists with an ellipsis placeholder.
        if len(self.categories) > 7:
            cats = self.categories[:3] + (_Ellipsis(),) + self.categories[-3:]
        else:
            cats = self.categories
        if self.prior is not None and len(self.prior) > 7:
            prior = self.prior[:3] + [_Ellipsis()] + self.prior[-3:]
        else:
            prior = self.prior
        return "Categorical(categories={}, prior={}, transform={})".format(
            cats, prior, self.transform_
        )

    def inverse_transform(self, Xt):
        """Inverse transform samples from the warped space back into the
        original space.
        """
        # The concatenation of all transformed dimensions makes Xt to be
        # of type float, hence the required cast back to int.
        inv_transform = super(Categorical, self).inverse_transform(Xt)
        if isinstance(inv_transform, list):
            inv_transform = np.array(inv_transform)
        return inv_transform

    def rvs(self, n_samples=None, random_state=None):
        """Draw random category samples (indices are mapped back to
        category values, except for the normalize transform which goes
        through inverse_transform)."""
        choices = self._rvs.rvs(size=n_samples, random_state=random_state)
        if isinstance(choices, numbers.Integral):
            # n_samples=None yields a bare scalar index.
            return self.categories[choices]
        elif self.transform_ == "normalize" and isinstance(choices, float):
            return self.inverse_transform([(choices)])
        elif self.transform_ == "normalize":
            return self.inverse_transform(list(choices))
        else:
            return [self.categories[c] for c in choices]

    @property
    def transformed_size(self):
        if self.transform_ == "onehot":
            size = len(self.categories)
            # when len(categories) == 2, CategoricalEncoder outputs a
            # single value
            return size if size != 2 else 1
        return 1

    @property
    def bounds(self):
        return self.categories

    @property
    def is_constant(self):
        return len(self.categories) <= 1

    def __contains__(self, point):
        return point in self.categories

    @property
    def transformed_bounds(self):
        if self.transformed_size == 1:
            N = len(self.categories)
            if self.transform_ == "label":
                return 0.0, float(N - 1)
            else:
                return 0.0, 1.0
        else:
            # One (0, 1) pair per one-hot output column.
            return [(0.0, 1.0) for i in range(self.transformed_size)]

    def distance(self, a, b):
        """Compute distance between category `a` and `b`.

        As categories have no order the distance between two points is one
        if a != b and zero otherwise.

        Parameters
        ----------
        a : category
            First category.
        b : category
            Second category.
        """
        if not (a in self and b in self):
            raise RuntimeError(
                "Can only compute distance for values within"
                " the space, not {} and {}.".format(a, b)
            )
        return 1 if a != b else 0
class Space(object):
"""Initialize a search space from given specifications.
Parameters
----------
dimensions : list, shape=(n_dims,)
List of search space dimensions.
Each search dimension can be defined either as
- a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
dimensions),
- a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
dimensions),
- as a list of categories (for `Categorical` dimensions), or
- an instance of a `Dimension` object (`Real`, `Integer` or
`Categorical`).
.. note::
The upper and lower bounds are inclusive for `Integer`
dimensions.
"""
    def __init__(self, dimensions, model_sdv=None):
        """Build the search space.

        Parameters
        ----------
        dimensions : list or ConfigSpace.ConfigurationSpace
            Either a list of dimension specifications (see the class
            docstring) or a ConfigSpace ``ConfigurationSpace``, which is
            converted hyperparameter by hyperparameter.
        model_sdv : object, optional
            Generative (SDV) model used to sample configurations instead
            of drawing them independently at random.
        """
        # attributes used when a ConfigurationSpace from ConfigSpace is given
        self.is_config_space = False
        self.config_space_samples = None
        self.config_space_explored = False
        # Imputers that encode/decode inactive (conditional) hyperparameters:
        # NaN values are replaced by the constant -1000 before modeling and
        # mapped back to NaN afterwards.
        self.imp_const = SimpleImputer(
            missing_values=np.nan, strategy="constant", fill_value=-1000
        )
        self.imp_const_inv = SimpleImputer(
            missing_values=-1000, strategy="constant", fill_value=np.nan
        )
        # attribute used when a generative model is used to sample
        self.model_sdv = model_sdv
        self.hps_names = []
        if isinstance(dimensions, CS.ConfigurationSpace):
            self.is_config_space = True
            self.config_space = dimensions
            self.hps_type = {}
            hps = self.config_space.get_hyperparameters()
            cond_hps = self.config_space.get_all_conditional_hyperparameters()
            space = []
            # Convert each ConfigSpace hyperparameter into the matching
            # skopt Dimension; conditional hyperparameters get an extra
            # explicit "NA" category for the inactive case.
            for x in hps:
                self.hps_names.append(x.name)
                if isinstance(x, CS.hyperparameters.CategoricalHyperparameter):
                    categories = list(x.choices)
                    prior = list(x.probabilities)
                    if x.name in cond_hps:
                        categories.append("NA")
                        # remove p from prior: take the "NA" category's mass
                        # p uniformly from the original categories so the
                        # prior still sums to one.
                        # NOTE(review): this can yield negative weights when
                        # an original prior entry is smaller than `pi` --
                        # confirm intended behavior.
                        p = 1 / len(categories)
                        pi = p / (len(categories) - 1)
                        prior = [prior_i - pi for prior_i in prior]
                        prior.append(p)
                    param = Categorical(categories, prior=prior, name=x.name)
                    space.append(param)
                    self.hps_type[x.name] = "Categorical"
                elif isinstance(x, CS.hyperparameters.OrdinalHyperparameter):
                    # Ordinals are modeled as categoricals (order is dropped).
                    vals = list(x.sequence)
                    if x.name in cond_hps:
                        vals.append("NA")
                    param = Categorical(vals, name=x.name)
                    space.append(param)
                    self.hps_type[x.name] = "Categorical"
                elif isinstance(x, CS.hyperparameters.UniformIntegerHyperparameter):
                    prior = "uniform"
                    if x.log:
                        prior = "log-uniform"
                    param = Integer(x.lower, x.upper, prior=prior, name=x.name)
                    space.append(param)
                    self.hps_type[x.name] = "Integer"
                elif isinstance(x, CS.hyperparameters.NormalIntegerHyperparameter):
                    # TODO: normal integer priors are approximated as uniform
                    # for now.
                    prior = "uniform"
                    if x.log:
                        prior = "log-uniform"
                    param = Integer(x.lower, x.upper, prior=prior, name=x.name)
                    space.append(param)
                    self.hps_type[x.name] = "Integer"
                    # raise ValueError("NormalIntegerHyperparameter not implemented")
                elif isinstance(x, CS.hyperparameters.UniformFloatHyperparameter):
                    prior = "uniform"
                    if x.log:
                        prior = "log-uniform"
                    param = Real(x.lower, x.upper, prior=prior, name=x.name)
                    space.append(param)
                    self.hps_type[x.name] = "Real"
                elif isinstance(x, CS.hyperparameters.NormalFloatHyperparameter):
                    prior = "normal"
                    if x.log:
                        raise ValueError(
                            "Unsupported 'log' transformation for NormalFloatHyperparameter."
                        )
                    param = Real(
                        x.lower,
                        x.upper,
                        prior=prior,
                        name=x.name,
                        loc=x.mu,
                        scale=x.sigma,
                    )
                    space.append(param)
                    self.hps_type[x.name] = "Real"
                else:
                    raise ValueError("Unknown Hyperparameter type.")
            dimensions = space
        self.dimensions = [check_dimension(dim) for dim in dimensions]
def __eq__(self, other):
return all([a == b for a, b in zip(self.dimensions, other.dimensions)])
    def __repr__(self):
        # Abbreviate very long spaces: show only the first and last 15
        # dimensions with an ellipsis placeholder in between.
        if len(self.dimensions) > 31:
            dims = self.dimensions[:15] + [_Ellipsis()] + self.dimensions[-15:]
        else:
            dims = self.dimensions
        return "Space([{}])".format(",\n       ".join(map(str, dims)))
    def __iter__(self):
        # Iterating over a Space yields its Dimension objects in order.
        return iter(self.dimensions)
@property
def dimension_names(self):
"""
Names of all the dimensions in the search-space.
"""
index = 0
names = []
for dim in self.dimensions:
if dim.name is None:
names.append("X_%d" % index)
else:
names.append(dim.name)
index += 1
return names
@property
def is_real(self):
"""
Returns true if all dimensions are Real
"""
return all([isinstance(dim, Real) for dim in self.dimensions])
@classmethod
def from_yaml(cls, yml_path, namespace=None):
"""Create Space from yaml configuration file
Parameters
----------
yml_path : str
Full path to yaml configuration file, example YaML below:
Space:
- Integer:
low: -5
high: 5
- Categorical:
categories:
- a
- b
- Real:
low: 1.0
high: 5.0
prior: log-uniform
namespace : str, default=None
Namespace within configuration file to use, will use first
namespace if not provided
Returns
-------
space : Space
Instantiated Space object
"""
with open(yml_path, "rb") as f:
config = yaml.safe_load(f)
dimension_classes = {
"real": Real,
"integer": Integer,
"categorical": Categorical,
}
# Extract space options for configuration file
if isinstance(config, dict):
if namespace is None:
options = next(iter(config.values()))
else:
options = config[namespace]
elif isinstance(config, list):
options = config
else:
raise TypeError("YaML does not specify a list or dictionary")
# Populate list with Dimension objects
dimensions = []
for option in options:
key = next(iter(option.keys()))
# Make configuration case insensitive
dimension_class = key.lower()
values = {k.lower(): v for k, v in option[key].items()}
if dimension_class in dimension_classes:
# Instantiate Dimension subclass and add it to the list
dimension = dimension_classes[dimension_class](**values)
dimensions.append(dimension)
space = cls(dimensions=dimensions)
return space
    def rvs(self, n_samples=1, random_state=None):
        """Draw random samples.

        The samples are in the original space. They need to be transformed
        before being passed to a model or minimizer by `space.transform()`.

        Parameters
        ----------
        n_samples : int, default=1
            Number of samples to be drawn from the space.
        random_state : int, RandomState instance, or None (default)
            Set random state to something other than None for reproducible
            results.

        Returns
        -------
        points : list of lists, shape=(n_points, n_dims)
            Points sampled from the space.
        """
        rng = check_random_state(random_state)
        if self.is_config_space:
            req_points = []
            hps_names = self.config_space.get_hyperparameter_names()
            if self.model_sdv is None:
                # Plain random sampling straight from the ConfigurationSpace.
                confs = self.config_space.sample_configuration(n_samples)
                if n_samples == 1:
                    confs = [confs]
            else:
                # Sample from the generative model, then fill in any
                # hyperparameters the SDV model does not know about.
                confs = self.model_sdv.sample(n_samples)
                sdv_names = confs.columns
                new_hps_names = list(set(hps_names) - set(sdv_names))
                # randomly sample the new hyperparameters
                for name in new_hps_names:
                    hp = self.config_space.get_hyperparameter(name)
                    rvs = []
                    for i in range(n_samples):
                        v = hp._sample(rng)
                        rv = hp._transform(v)
                        rvs.append(rv)
                    confs[name] = rvs
                # reorder the column names
                confs = confs[hps_names]
                confs = confs.to_dict("records")
                for idx, conf in enumerate(confs):
                    cf = deactivate_inactive_hyperparameters(conf, self.config_space)
                    confs[idx] = cf.get_dictionary()
                # TODO: remove because debug instructions
                # check if other conditions are not met; generate valid 1-exchange neighbor; need to test and develop the logic
                # print('conf invalid...generating valid 1-exchange neighbor')
                # neighborhood = get_one_exchange_neighbourhood(cf,1)
                # for new_config in neighborhood:
                # print(new_config)
                # print(new_config.is_valid_configuration())
                # confs[idx] = new_config.get_dictionary()
            # Flatten each configuration dict into a fixed-order point;
            # inactive hyperparameters become NaN (or "NA" for categoricals).
            for idx, conf in enumerate(confs):
                point = []
                for hps_name in hps_names:
                    val = np.nan
                    if self.hps_type[hps_name] == "Categorical":
                        val = "NA"
                    if hps_name in conf.keys():
                        val = conf[hps_name]
                    point.append(val)
                req_points.append(point)
            return req_points
        else:
            if self.model_sdv is None:
                # Draw
                columns = []
                for dim in self.dimensions:
                    columns.append(dim.rvs(n_samples=n_samples, random_state=rng))
                # Transpose
                return _transpose_list_array(columns)
            else:
                confs = self.model_sdv.sample(n_samples)  # sample from SDV
                columns = []
                for dim in self.dimensions:
                    # Use SDV-provided values when available, otherwise draw
                    # from the dimension's own prior.
                    if dim.name in confs.columns:
                        columns.append(confs[dim.name].values.tolist())
                    else:
                        columns.append(dim.rvs(n_samples=n_samples, random_state=rng))
                # Transpose
                return _transpose_list_array(columns)
def set_transformer(self, transform):
"""Sets the transformer of all dimension objects to `transform`
Parameters
----------
transform : str or list of str
Sets all transformer,, when `transform` is a string.
Otherwise, transform must be a list with strings with
the same length as `dimensions`
"""
# Transform
for j in range(self.n_dims):
if isinstance(transform, list):
self.dimensions[j].set_transformer(transform[j])
else:
self.dimensions[j].set_transformer(transform)
def set_transformer_by_type(self, transform, dim_type):
"""Sets the transformer of `dim_type` objects to `transform`
Parameters
----------
transform : str
Sets all transformer of type `dim_type` to `transform`
dim_type : type
Can be `deephyper.skopt.space.Real`, `deephyper.skopt.space.Integer` or
`deephyper.skopt.space.Categorical`
"""
# Transform
for j in range(self.n_dims):
if isinstance(self.dimensions[j], dim_type):
self.dimensions[j].set_transformer(transform)
def get_transformer(self):
"""Returns all transformers as list"""
return [self.dimensions[j].transform_ for j in range(self.n_dims)]
def transform(self, X):
"""Transform samples from the original space into a warped space.
Note: this transformation is expected to be used to project samples
into a suitable space for numerical optimization.
Parameters
----------
X : list of lists, shape=(n_samples, n_dims)
The samples to transform.
Returns
-------
Xt : array of floats, shape=(n_samples, transformed_n_dims)
The transformed samples.
"""
# Pack by dimension
columns = []
for dim in self.dimensions:
columns.append([])
for i in range(len(X)):
for j in range(self.n_dims):
columns[j].append(X[i][j])
# Transform
for j in range(self.n_dims):
columns[j] = self.dimensions[j].transform(columns[j])
# Repack as an array
Xt = np.hstack([np.asarray(c).reshape((len(X), -1)) for c in columns])
if False and self.is_config_space:
self.imp_const.fit(Xt)
Xtt = self.imp_const.transform(Xt)
Xt = Xtt
return Xt
def inverse_transform(self, Xt):
"""Inverse transform samples from the warped space back to the
original space.
Parameters
----------
Xt : array of floats, shape=(n_samples, transformed_n_dims)
The samples to inverse transform.
Returns
-------
X : list of lists, shape=(n_samples, n_dims)
The original samples.
"""
Xt = self.imp_const_inv.fit_transform(Xt)
# Inverse transform
columns = []
start = 0
Xt = np.asarray(Xt)
for j in range(self.n_dims):
dim = self.dimensions[j]
offset = dim.transformed_size
if offset == 1:
columns.append(dim.inverse_transform(Xt[:, start]))
else:
columns.append(dim.inverse_transform(Xt[:, start : start + offset]))
start += offset
# Transpose
return _transpose_list_array(columns)
    @property
    def n_dims(self):
        """The dimensionality of the original space.

        Number of `Dimension` objects, regardless of how many columns each
        occupies once transformed (see `transformed_n_dims`).
        """
        return len(self.dimensions)
@property
def transformed_n_dims(self):
"""The dimensionality of the warped space."""
return sum([dim.transformed_size for dim in self.dimensions])
@property
def bounds(self):
"""The dimension bounds, in the original space."""
b = []
for dim in self.dimensions:
if dim.size == 1:
b.append(dim.bounds)
else:
b.extend(dim.bounds)
return b
def __contains__(self, point):
"""Check that `point` is within the bounds of the space."""
for component, dim in zip(point, self.dimensions):
if component not in dim:
return False
return True
def __getitem__(self, dimension_names):
"""
Lookup and return the search-space dimension with the given name.
This allows for dict-like lookup of dimensions, for example:
`space['foo']` returns the dimension named 'foo' if it exists,
otherwise `None` is returned.
It also allows for lookup of a list of dimension-names, for example:
`space[['foo', 'bar']]` returns the two dimensions named
'foo' and 'bar' if they exist.
Parameters
----------
dimension_names : str or list(str)
Name of a single search-space dimension (str).
List of names for search-space dimensions (list(str)).
Returns
-------
dims tuple (index, Dimension), list(tuple(index, Dimension)), \
(None, None)
A single search-space dimension with the given name,
or a list of search-space dimensions with the given names.
"""
def _get(dimension_name):
"""Helper-function for getting a single dimension."""
index = 0
# Get the index of the search-space dimension using its name.
for dim in self.dimensions:
if dimension_name == dim.name:
return (index, dim)
elif dimension_name == index:
return (index, dim)
index += 1
return (None, None)
if isinstance(dimension_names, (str, int)):
# Get a single search-space dimension.
dims = _get(dimension_name=dimension_names)
elif isinstance(dimension_names, (list, tuple)):
# Get a list of search-space dimensions.
# Note that we do not check whether the names are really strings.
dims = [_get(dimension_name=name) for name in dimension_names]
else:
msg = (
"Dimension name should be either string or"
"list of strings, but got {}."
)
raise ValueError(msg.format(type(dimension_names)))
return dims
@property
def transformed_bounds(self):
"""The dimension bounds, in the warped space."""
b = []
for dim in self.dimensions:
if dim.transformed_size == 1:
b.append(dim.transformed_bounds)
else:
b.extend(dim.transformed_bounds)
return b
@property
def is_categorical(self):
"""Space contains exclusively categorical dimensions"""
return all([isinstance(dim, Categorical) for dim in self.dimensions])
@property
def is_partly_categorical(self):
"""Space contains any categorical dimensions"""
return any([isinstance(dim, Categorical) for dim in self.dimensions])
@property
def n_constant_dimensions(self):
"""Returns the number of constant dimensions which have zero degree of
freedom, e.g. an Integer dimensions with (0., 0.) as bounds.
"""
n = 0
for dim in self.dimensions:
if dim.is_constant:
n += 1
return n
def distance(self, point_a, point_b):
"""Compute distance between two points in this space.
Parameters
----------
point_a : array
First point.
point_b : array
Second point.
"""
distance = 0.0
for a, b, dim in zip(point_a, point_b, self.dimensions):
distance += dim.distance(a, b)
return distance
| 48,902 | 32.154576 | 131 | py |
deephyper | deephyper-master/deephyper/skopt/space/__init__.py | """
Utilities to define a search space.
"""
from .space import * # noqa: F401, F403
| 86 | 13.5 | 40 | py |
deephyper | deephyper-master/deephyper/skopt/sampler/base.py | class InitialPointGenerator(object):
    def generate(self, dimensions, n_samples, random_state=None):
        """Abstract hook: produce ``n_samples`` initial points for the given
        search-space ``dimensions``. Concrete generators must override this.
        """
        raise NotImplementedError
def set_params(self, **params):
"""
Set the parameters of this initial point generator.
Parameters
----------
**params : dict
Generator parameters.
Returns
-------
self : object
Generator instance.
"""
if not params:
# Simple optimization to gain speed (inspect is slow)
return self
for key, value in params.items():
setattr(self, key, value)
return self
| 655 | 25.24 | 65 | py |
deephyper | deephyper-master/deephyper/skopt/sampler/lhs.py | """
Lhs functions are inspired by
https://github.com/clicumu/pyDOE2/blob/
master/pyDOE2/doe_lhs.py
"""
import numpy as np
from sklearn.utils import check_random_state
from scipy import spatial
from ..space import Space
from .base import InitialPointGenerator
def _random_permute_matrix(h, random_state=None):
    """Independently shuffle the rows within each column of ``h``."""
    rng = check_random_state(random_state)
    n_rows, n_cols = h.shape
    permuted = np.zeros_like(h)
    for col in range(n_cols):
        order = rng.permutation(range(n_rows))
        permuted[:, col] = h[order, col]
    return permuted
class Lhs(InitialPointGenerator):
    """Latin hypercube sampling

    Parameters
    ----------
    lhs_type : str, default='classic'
        - 'classic' - a small random number is added
        - 'centered' - points are set uniformly in each interval

    criterion : str or None, default='maximin'
        When set to None, the LHS is not optimized

        - 'correlation' : optimized LHS by minimizing the correlation
        - 'maximin' : optimized LHS by maximizing the minimal pdist
        - 'ratio' : optimized LHS by minimizing the ratio
          `max(pdist) / min(pdist)`

    iterations : int
        Defines the number of iterations for optimizing LHS
    """

    def __init__(self, lhs_type="classic", criterion="maximin", iterations=1000):
        self.lhs_type = lhs_type
        self.criterion = criterion
        self.iterations = iterations

    def generate(self, dimensions, n_samples, random_state=None):
        """Creates latin hypercube samples.

        Parameters
        ----------
        dimensions : list, shape (n_dims,)
            List of search space dimensions.
            Each search dimension can be defined either as

            - a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
              dimensions),
            - a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
              dimensions),
            - as a list of categories (for `Categorical` dimensions), or
            - an instance of a `Dimension` object (`Real`, `Integer` or
              `Categorical`).

        n_samples : int
            The order of the LHS sequence. Defines the number of samples.
        random_state : int, RandomState instance, or None (default)
            Set random state to something other than None for reproducible
            results.

        Returns
        -------
        np.array, shape=(n_dim, n_samples)
            LHS set
        """
        rng = check_random_state(random_state)
        space = Space(dimensions)
        # Work in the normalized unit hypercube, restoring the original
        # transformer before returning.
        transformer = space.get_transformer()
        n_dim = space.n_dims
        space.set_transformer("normalize")
        if self.criterion is None or n_samples == 1:
            # No optimization requested (or trivially a single sample).
            h = self._lhs_normalized(n_dim, n_samples, rng)
            h = space.inverse_transform(h)
            space.set_transformer(transformer)
            return h
        else:
            # Start from one candidate, then keep the best of `iterations`
            # random LHS designs according to the chosen criterion.
            h_opt = self._lhs_normalized(n_dim, n_samples, rng)
            h_opt = space.inverse_transform(h_opt)
            if self.criterion == "correlation":
                mincorr = np.inf
                for i in range(self.iterations):
                    # Generate a random LHS
                    h = self._lhs_normalized(n_dim, n_samples, rng)
                    r = np.corrcoef(np.array(h).T)
                    # NOTE(review): the acceptance test uses
                    # max|r[r != 1]| while the stored score uses
                    # max|r - I|; these differ subtly -- confirm intended.
                    if (
                        len(np.abs(r[r != 1])) > 0
                        and np.max(np.abs(r[r != 1])) < mincorr
                    ):
                        mincorr = np.max(np.abs(r - np.eye(r.shape[0])))
                        h_opt = h.copy()
                h_opt = space.inverse_transform(h_opt)
            elif self.criterion == "maximin":
                maxdist = 0
                # Maximize the minimum distance between points
                for i in range(self.iterations):
                    h = self._lhs_normalized(n_dim, n_samples, rng)
                    d = spatial.distance.pdist(np.array(h), "euclidean")
                    if maxdist < np.min(d):
                        maxdist = np.min(d)
                        h_opt = h.copy()
                h_opt = space.inverse_transform(h_opt)
            elif self.criterion == "ratio":
                minratio = np.inf
                # Minimize the ratio max(pdist) / min(pdist); a zero minimum
                # distance is guarded with a small epsilon.
                for i in range(self.iterations):
                    h = self._lhs_normalized(n_dim, n_samples, rng)
                    p = spatial.distance.pdist(np.array(h), "euclidean")
                    if np.min(p) == 0:
                        ratio = np.max(p) / 1e-8
                    else:
                        ratio = np.max(p) / np.min(p)
                    if minratio > ratio:
                        minratio = ratio
                        h_opt = h.copy()
                h_opt = space.inverse_transform(h_opt)
            else:
                raise ValueError("Wrong criterion." "Got {}".format(self.criterion))
            space.set_transformer(transformer)
            return h_opt

    def _lhs_normalized(self, n_dim, n_samples, random_state):
        # Build one LHS design on [0, 1]^n_dim: split each axis into
        # n_samples equal strata and place one point per stratum, either at
        # its center ('centered') or uniformly within it ('classic'); the
        # strata are then randomly paired across axes by column permutation.
        rng = check_random_state(random_state)
        x = np.linspace(0, 1, n_samples + 1)
        u = rng.rand(n_samples, n_dim)
        h = np.zeros_like(u)
        if self.lhs_type == "centered":
            for j in range(n_dim):
                h[:, j] = np.diff(x) / 2.0 + x[:n_samples]
        elif self.lhs_type == "classic":
            for j in range(n_dim):
                h[:, j] = u[:, j] * np.diff(x) + x[:n_samples]
        else:
            raise ValueError("Wrong lhs_type. Got {}".format(self.lhs_type))
        return _random_permute_matrix(h, random_state=rng)
deephyper | deephyper-master/deephyper/skopt/sampler/hammersly.py | # -*- coding: utf-8 -*-
""" Inspired by https://github.com/jonathf/chaospy/blob/master/chaospy/
distributions/sampler/sequences/hammersley.py
"""
import numpy as np
from .halton import Halton
from ..space import Space
from .base import InitialPointGenerator
from sklearn.utils import check_random_state
class Hammersly(InitialPointGenerator):
    """Creates `Hammersley` sequence samples.

    The Hammersley set is equivalent to the Halton sequence, except for one
    dimension is replaced with a regular grid. It is not recommended to
    generate a Hammersley sequence with more than 10 dimension.

    For ``dim == 1`` the sequence falls back to Van Der Corput sequence.

    References
    ----------
    T-T. Wong, W-S. Luk, and P-A. Heng, "Sampling with Hammersley and Halton
    Points," Journal of Graphics Tools, vol. 2, no. 2, 1997, pp. 9 - 24.

    Parameters
    ----------
    min_skip : int, default=0
        Minimum skipped seed number. When `min_skip != max_skip` and
        both are > -1, a random number is picked.
    max_skip : int, default=0
        Maximum skipped seed number. When `min_skip != max_skip` and
        both are > -1, a random number is picked.
    primes : tuple, default=None
        The (non-)prime base to calculate values along each axis. If
        empty, growing prime values starting from 2 will be used.
    """

    def __init__(self, min_skip=0, max_skip=0, primes=None):
        self.primes = primes
        self.min_skip = min_skip
        self.max_skip = max_skip

    def generate(self, dimensions, n_samples, random_state=None):
        """Creates samples from Hammersly set.

        Parameters
        ----------
        dimensions : list, shape (n_dims,)
            List of search space dimensions.
            Each search dimension can be defined either as

            - a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
              dimensions),
            - a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
              dimensions),
            - as a list of categories (for `Categorical` dimensions), or
            - an instance of a `Dimension` object (`Real`, `Integer` or
              `Categorical`).

        n_samples : int
            The order of the Hammersley sequence.
            Defines the number of samples.
        random_state : int, RandomState instance, or None (default)
            Set random state to something other than None for reproducible
            results.

        Returns
        -------
        np.array, shape=(n_dim, n_samples)
            Hammersley set.
        """
        rng = check_random_state(random_state)
        # Delegate all but the last axis to a Halton sequence.
        halton = Halton(
            min_skip=self.min_skip, max_skip=self.max_skip, primes=self.primes
        )
        space = Space(dimensions)
        n_dim = space.n_dims
        transformer = space.get_transformer()
        space.set_transformer("normalize")
        if n_dim == 1:
            # One dimension: a plain Halton (Van der Corput) sequence,
            # already returned in the original space by `halton.generate`.
            out = halton.generate(dimensions, n_samples, random_state=rng)
        else:
            # First n_dim-1 axes come from a unit-cube Halton sequence, the
            # last axis is a regular grid on [0, 1).
            out = np.empty((n_dim, n_samples), dtype=float)
            out[: n_dim - 1] = np.array(
                halton.generate(
                    [
                        (0.0, 1.0),
                    ]
                    * (n_dim - 1),
                    n_samples,
                    random_state=rng,
                )
            ).T
            out[n_dim - 1] = np.linspace(0, 1, n_samples + 1)[:-1]
            out = space.inverse_transform(out.T)
        space.set_transformer(transformer)
        return out
| 3,556 | 34.217822 | 78 | py |
deephyper | deephyper-master/deephyper/skopt/sampler/sobol.py | """
Authors:
Original FORTRAN77 version of i4_sobol by Bennett Fox.
MATLAB version by John Burkardt.
PYTHON version by Corrado Chisari
Original Python version of is_prime by Corrado Chisari
Original MATLAB versions of other functions by John Burkardt.
PYTHON versions by Corrado Chisari
Modified Python version by Holger Nahrstaedt
Original code is available from
http://people.sc.fsu.edu/~jburkardt/py_src/sobol/sobol.html
"""
from __future__ import division
import warnings
import numpy as np
from .base import InitialPointGenerator
from ..space import Space
from sklearn.utils import check_random_state
class Sobol(InitialPointGenerator):
    """Generates a new quasirandom Sobol' vector with each call.

    Parameters
    ----------
    skip : int
        Skipped seed number.
    randomize : bool, default=False
        When set to True, random shift is applied.

    Notes
    -----
    Sobol' sequences [1]_ provide :math:`n=2^m` low discrepancy points in
    :math:`[0,1)^{dim}`. Scrambling them makes them suitable for singular
    integrands, provides a means of error estimation, and can improve their
    rate of convergence.

    There are many versions of Sobol' sequences depending on their
    'direction numbers'. Here, the maximum number of dimension is 40.
    The routine adapts the ideas of Antonov and Saleev [2]_.

    .. warning::

       Sobol' sequences are a quadrature rule and they lose their balance
       properties if one uses a sample size that is not a power of 2, or
       skips the first point, or thins the sequence [5]_. If :math:`n=2^m`
       points are not enough then one should take :math:`2^M` points for
       :math:`M>m`. Sobol' sequences are generated to some number
       :math:`B` of bits; after :math:`2^B` points the sequence repeats.
       Currently :math:`B=30`.

    References
    ----------
    .. [1] I. M. Sobol. The distribution of points in a cube and the accurate
       evaluation of integrals. Zh. Vychisl. Mat. i Mat. Phys., 7:784-802,
       1967.

    .. [2] Antonov, Saleev,
       USSR Computational Mathematics and Mathematical Physics,
       Volume 19, 1980, pages 252 - 256.

    .. [3] Paul Bratley, Bennett Fox,
       Algorithm 659:
       Implementing Sobol's Quasirandom Sequence Generator,
       ACM Transactions on Mathematical Software,
       Volume 14, Number 1, pages 88-100, 1988.

    .. [4] Bennett Fox,
       Algorithm 647:
       Implementation and Relative Efficiency of Quasirandom
       Sequence Generators,

    .. [5] Art B. Owen. On dropping the first Sobol' point. arXiv 2008.08051,
       2020.
    """

    def __init__(self, skip=0, randomize=True):
        # Only a power-of-2 number of skipped points keeps the sequence's
        # balance properties.
        if not (skip & (skip - 1) == 0):
            raise ValueError(
                "The balance properties of Sobol' points require"
                " skipping a power of 2."
            )
        if skip != 0:
            warnings.warn(
                f"{skip} points have been skipped: "
                f"{skip} points can be generated before the "
                f"sequence repeats."
            )
        self.skip = skip
        self.num_generated = 0
        self.randomize = randomize
        # Hard limits of the direction-number tables below: at most 40
        # dimensions, direction numbers carried to 30 bits.
        self.dim_max = 40
        self.log_max = 30
        self.atmost = 2**self.log_max - 1
        # Lazily (re)built generator state -- see `init`.
        self.lastq = None
        self.maxcol = None
        self.poly = None
        self.recipd = None
        self.seed_save = -1
        self.v = np.zeros((self.dim_max, self.log_max))
        self.dim_num_save = -1

    def init(self, dim_num):
        """(Re)build the direction-number table for ``dim_num`` dimensions."""
        self.dim_num_save = dim_num
        # Seed columns of the direction numbers (Bratley & Fox tables).
        self.v = np.zeros((self.dim_max, self.log_max))
        self.v[0:40, 0] = 1
        self.v[2:40, 1] = [
            1, 3, 1, 3, 1, 3, 3, 1, 3, 1, 3, 1, 3, 1, 1, 3, 1, 3, 1,
            3, 1, 3, 3, 1, 3, 1, 3, 1, 3, 1, 1, 3, 1, 3, 1, 3, 1, 3,
        ]
        self.v[3:40, 2] = [
            7, 5, 1, 3, 3, 7, 5, 5, 7, 7, 1, 3, 3, 7, 5, 1, 1, 5, 3,
            3, 1, 7, 5, 1, 3, 3, 7, 5, 1, 1, 5, 7, 7, 5, 1, 3, 3,
        ]
        self.v[5:40, 3] = [
            1, 7, 9, 13, 11, 1, 3, 7, 9, 5, 13, 13, 11, 3, 15, 5, 3,
            15, 7, 9, 13, 9, 1, 11, 7, 5, 15, 1, 15, 11, 5, 3, 1, 7, 9,
        ]
        self.v[7:40, 4] = [
            9, 3, 27, 15, 29, 21, 23, 19, 11, 25, 7, 13, 17, 1, 25,
            29, 3, 31, 11, 5, 23, 27, 19, 21, 5, 1, 17, 13, 7, 15, 9,
            31, 9,
        ]
        self.v[13:40, 5] = [
            37, 33, 7, 5, 11, 39, 63, 27, 17, 15, 23, 29, 3, 21, 13,
            31, 25, 9, 49, 33, 19, 29, 11, 19, 27, 15, 25,
        ]
        self.v[19:40, 6] = [
            13, 33, 115, 41, 79, 17, 29, 119, 75, 73, 105, 7, 59, 65,
            21, 3, 113, 61, 89, 45, 107,
        ]
        self.v[37:40, 7] = [7, 23, 39]
        # Primitive polynomials (binary-encoded) used to extend each row.
        self.poly = [
            1, 3, 7, 11, 13, 19, 25, 37, 59, 47, 61, 55, 41, 67, 97,
            91, 109, 103, 115, 131, 193, 137, 145, 143, 241, 157, 185,
            167, 229, 171, 213, 191, 253, 203, 211, 239, 247, 285, 369,
            299,
        ]
        # Find the number of bits in ATMOST.
        self.maxcol = _bit_hi1(self.atmost)
        # Initialize row 1 of V.
        self.v[0, 0 : self.maxcol] = 1
        # Check parameters.
        if dim_num < 1 or self.dim_max < dim_num:
            raise ValueError(
                f"I4_SOBOL - Fatal error!\n"
                f"  The spatial dimension DIM_NUM should "
                f"satisfy:\n"
                f"  1 <= DIM_NUM <= {self.dim_max}\n"
                f"  But this input value is DIM_NUM = {dim_num}"
            )
        # Initialize the remaining rows of V.
        for i in range(2, dim_num + 1):
            # The bits of the integer POLY(I) gives the form of polynomial I.
            # Find the degree of polynomial I from binary encoding.
            j = self.poly[i - 1]
            m = 0
            j //= 2
            while j > 0:
                j //= 2
                m += 1
            # Expand this bit pattern to separate components
            # of the logical array INCLUD.
            j = self.poly[i - 1]
            includ = np.zeros(m)
            for k in range(m, 0, -1):
                j2 = j // 2
                includ[k - 1] = j != 2 * j2
                j = j2
            # Calculate the remaining elements of row I as explained
            # in Bratley and Fox, section 2.
            for j in range(m + 1, self.maxcol + 1):
                newv = self.v[i - 1, j - m - 1]
                p2 = 1
                for k in range(1, m + 1):
                    p2 *= 2
                    if includ[k - 1]:
                        newv = np.bitwise_xor(
                            int(newv), int(p2 * self.v[i - 1, j - k - 1])
                        )
                self.v[i - 1, j - 1] = newv
        # Multiply columns of V by appropriate power of 2.
        p2 = 1
        for j in range(self.maxcol - 1, 0, -1):
            p2 *= 2
            self.v[0:dim_num, j - 1] = self.v[0:dim_num, j - 1] * p2
        # RECIPD is 1/(common denominator of the elements in V).
        self.recipd = 1.0 / (2 * p2)
        self.lastq = np.zeros(dim_num)

    def generate(self, dimensions, n_samples, random_state=None):
        """Creates samples from Sobol' set.

        Parameters
        ----------
        dimensions : list, shape (n_dims,)
            List of search space dimensions.
            Each search dimension can be defined either as

            - a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
              dimensions),
            - a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
              dimensions),
            - as a list of categories (for `Categorical` dimensions), or
            - an instance of a `Dimension` object (`Real`, `Integer` or
              `Categorical`).

        n_samples : int
            The order of the Sobol' sequence. Defines the number of samples.
        random_state : int, RandomState instance, or None (default)
            Set random state to something other than None for reproducible
            results.

        Returns
        -------
        sample : array_like (n_samples, dim)
            Sobol' set.
        """
        total_n_samples = self.num_generated + n_samples
        if not (total_n_samples & (total_n_samples - 1) == 0):
            warnings.warn(
                "The balance properties of Sobol' points require "
                "n to be a power of 2. {0} points have been "
                "previously generated, then: n={0}+{1}={2}. ".format(
                    self.num_generated, n_samples, total_n_samples
                )
            )
        if self.skip != 0 and total_n_samples > self.skip:
            raise ValueError(
                f"{self.skip} points have been skipped: "
                f"generating "
                f"{n_samples} more points would cause the "
                f"sequence to repeat."
            )
        rng = check_random_state(random_state)
        space = Space(dimensions)
        n_dim = space.n_dims
        transformer = space.get_transformer()
        space.set_transformer("normalize")
        # Draw successive quasirandom vectors in the unit hypercube.
        sample = np.full((n_samples, n_dim), np.nan)
        seed = self.skip
        for row in range(n_samples):
            sample[row, 0:n_dim], seed = self._sobol(n_dim, seed)
        if self.randomize:
            # Cranley-Patterson rotation for scrambling.
            sample = _random_shift(sample, rng)
        sample = space.inverse_transform(sample)
        space.set_transformer(transformer)
        self.num_generated += n_samples
        return sample

    def _sobol(self, dim_num, seed):
        """Generates a new quasirandom Sobol' vector with each call.

        Parameters
        ----------
        dim_num : int
            Number of spatial dimensions.
            `dim_num` must satisfy 1 <= DIM_NUM <= 40.

        seed : int
            the `seed` for the sequence.
            This is essentially the index in the sequence of the quasirandom
            value to be generated. On output, `seed` has been set to the
            appropriate next value, usually simply `seed`+1.
            If `seed` is less than 0 on input, it is treated as though it were 0.
            An input value of 0 requests the first (0-th) element of
            the sequence.

        Returns
        -------
        vector, seed : np.array (n_dim,), int
            The next quasirandom vector and the seed of its next vector.
        """
        # Rebuild the direction numbers only when the dimension changed.
        if dim_num != self.dim_num_save:
            self.init(dim_num)
        seed = int(np.floor(seed))
        if seed < 0:
            seed = 0
        pos_lo0 = 1
        if seed == 0:
            # First element of the sequence: reset the running XOR state.
            self.lastq = np.zeros(dim_num)
        elif seed == self.seed_save + 1:
            # Find the position of the right-hand zero in SEED.
            pos_lo0 = _bit_lo0(seed)
        elif seed <= self.seed_save:
            # Seed moved backwards: replay the recurrence from scratch.
            self.seed_save = 0
            self.lastq = np.zeros(dim_num)
            for seed_temp in range(int(self.seed_save), int(seed)):
                pos_lo0 = _bit_lo0(seed_temp)
                for i in range(1, dim_num + 1):
                    self.lastq[i - 1] = np.bitwise_xor(
                        int(self.lastq[i - 1]), int(self.v[i - 1, pos_lo0 - 1])
                    )
            pos_lo0 = _bit_lo0(seed)
        elif self.seed_save + 1 < seed:
            # Seed jumped forwards: fast-forward the recurrence.
            for seed_temp in range(int(self.seed_save + 1), int(seed)):
                pos_lo0 = _bit_lo0(seed_temp)
                for i in range(1, dim_num + 1):
                    self.lastq[i - 1] = np.bitwise_xor(
                        int(self.lastq[i - 1]), int(self.v[i - 1, pos_lo0 - 1])
                    )
            pos_lo0 = _bit_lo0(seed)
        # Check that the user is not calling too many times!
        if self.maxcol < pos_lo0:
            raise ValueError(
                f"I4_SOBOL - Fatal error!\n"
                f"  Too many calls!\n"
                f"  MAXCOL = {self.maxcol}\n"
                f"  L = {pos_lo0}\n"
            )
        # Calculate the new components of QUASI and advance the XOR state.
        quasi = np.zeros(dim_num)
        for i in range(1, dim_num + 1):
            quasi[i - 1] = self.lastq[i - 1] * self.recipd
            self.lastq[i - 1] = np.bitwise_xor(
                int(self.lastq[i - 1]), int(self.v[i - 1, pos_lo0 - 1])
            )
        self.seed_save = seed
        seed += 1
        return [quasi, seed]
def _bit_hi1(n):
    """Returns the position of the high 1 bit base 2 in an integer
    (0 when no bit is set).

    Parameters
    ----------
    n : int
        Input, should be positive.
    """
    bits = np.binary_repr(n)
    first_one = bits.find("1")
    return 0 if first_one == -1 else len(bits) - first_one
def _bit_lo0(n):
    """Returns the position of the low 0 bit base 2 in an integer.

    Parameters
    ----------
    n : int
        Input, should be positive.
    """
    reversed_bits = np.binary_repr(n)[::-1]
    pos = reversed_bits.find("0")
    if pos == -1:
        # All bits set: the lowest zero sits just above the top bit.
        pos = len(reversed_bits)
    return pos + 1
def _random_shift(dm, random_state=None):
    """Randomly shift a quasi-MC design matrix (Cranley-Patterson rotation).

    Randomization of quasi-MC samples is achieved by adding a single
    uniform offset per column and wrapping back into the unit hypercube.

    References
    -----------
    .. [1] C. Lemieux, "Monte Carlo and Quasi-Monte Carlo Sampling," Springer
       Series in Statistics 692, Springer Science+Business Media, New York,
       2009

    Parameters
    ----------
    dm : array, shape(n, d)
        Input matrix.
    random_state : int, RandomState instance, or None (default)
        Set random state to something other than None for reproducible
        results.

    Returns
    -------
    dm : array, shape(n, d)
        Randomized Sobol' design matrix.
    """
    rng = check_random_state(random_state)
    n_rows, n_cols = dm.shape
    # One uniform offset per column, broadcast down every row.
    offsets = np.tile(rng.rand(1, n_cols), (n_rows, 1))
    # Shift and wrap back into [0, 1).
    return np.mod(dm + offsets, 1)
| 18,637 | 25.103641 | 79 | py |
deephyper | deephyper-master/deephyper/skopt/sampler/grid.py | """
Inspired by https://github.com/jonathf/chaospy/blob/master/chaospy/
distributions/sampler/sequences/grid.py
"""
import numpy as np
from .base import InitialPointGenerator
from ..space import Space
from sklearn.utils import check_random_state
def _quadrature_combine(args):
args = [np.asarray(arg).reshape(len(arg), -1) for arg in args]
shapes = [arg.shape for arg in args]
size = np.prod(shapes, 0)[0] * np.sum(shapes, 0)[1]
if size > 10**9:
raise MemoryError("Too large sets")
out = args[0]
for arg in args[1:]:
out = np.hstack(
[
np.tile(out, len(arg)).reshape(-1, out.shape[1]),
np.tile(arg.T, len(out)).reshape(arg.shape[1], -1).T,
]
)
return out
def _create_uniform_grid_exclude_border(n_dim, order):
    """Full-factorial grid on (0, 1)^n_dim with ``order`` interior levels."""
    assert order > 0
    assert n_dim > 0
    # Levels 1/(order+1), ..., order/(order+1): endpoints 0 and 1 excluded.
    levels = np.arange(1, order + 1) / (order + 1.0)
    return _quadrature_combine([levels] * n_dim)
def _create_uniform_grid_include_border(n_dim, order):
    """Full-factorial grid on [0, 1]^n_dim with ``order`` levels per axis."""
    assert order > 1
    assert n_dim > 0
    # Levels 0, 1/(order-1), ..., 1: both endpoints included.
    levels = np.arange(0, order) / (order - 1.0)
    return _quadrature_combine([levels] * n_dim)
def _create_uniform_grid_only_border(n_dim, order):
    """Grid restricted to the border: every axis but the last is {0, 1}."""
    assert n_dim > 0
    assert order > 1
    axes = [[0.0, 1.0]] * (n_dim - 1)
    # The final axis keeps a full set of ``order`` levels incl. endpoints.
    axes.append(list(np.arange(0, order) / (order - 1.0)))
    return _quadrature_combine(axes)
class Grid(InitialPointGenerator):
    """Generate samples from a regular grid.
    Parameters
    ----------
    border : str, default='exclude'
        defines how the samples are generated:
        - 'include' : Includes the border into the grid layout
        - 'exclude' : Excludes the border from the grid layout
        - 'only' : Selects only points at the border of the dimension
    use_full_layout : boolean, default=True
        When True, a full factorial design is generated and
        missing points are taken from the next larger full factorial
        design, depending on `append_border`
        When False, the next larger full factorial design is
        generated and points are randomly selected from it.
    append_border : str, default="only"
        When use_full_layout is True, this parameter defines how the missing
        points will be generated from the next larger grid layout:
        - 'include' : Includes the border into the grid layout
        - 'exclude' : Excludes the border from the grid layout
        - 'only' : Selects only points at the border of the dimension
    """
    def __init__(self, border="exclude", use_full_layout=True, append_border="only"):
        # Only stores configuration; all sampling happens in generate().
        self.border = border
        self.use_full_layout = use_full_layout
        self.append_border = append_border
    def generate(self, dimensions, n_samples, random_state=None):
        """Creates samples from a regular grid.
        Parameters
        ----------
        dimensions : list, shape (n_dims,)
            List of search space dimensions.
            Each search dimension can be defined either as
            - a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
            dimensions),
            - a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
            dimensions),
            - as a list of categories (for `Categorical` dimensions), or
            - an instance of a `Dimension` object (`Real`, `Integer` or
            `Categorical`).
        n_samples : int
            The order of the Halton sequence. Defines the number of samples.
        random_state : int, RandomState instance, or None (default)
            Set random state to something other than None for reproducible
            results.
        Returns
        -------
        np.array, shape=(n_dim, n_samples)
            grid set
        """
        rng = check_random_state(random_state)
        space = Space(dimensions)
        n_dim = space.n_dims
        # Remember the active transformer so it can be restored before
        # returning; the grid is built in the normalized unit hypercube.
        transformer = space.get_transformer()
        space.set_transformer("normalize")
        # NOTE(review): the per-axis order below is derived with np.sqrt,
        # which matches a full factorial only for n_dim == 2 -- for other
        # n_dim the layout has order**n_dim points; confirm intended.
        if self.border == "include":
            if self.use_full_layout:
                order = int(np.floor(np.sqrt(n_samples)))
            else:
                order = int(np.ceil(np.sqrt(n_samples)))
            # A grid that includes the border needs >= 2 levels per axis.
            if order < 2:
                order = 2
            h = _create_uniform_grid_include_border(n_dim, order)
        elif self.border == "exclude":
            if self.use_full_layout:
                order = int(np.floor(np.sqrt(n_samples)))
            else:
                order = int(np.ceil(np.sqrt(n_samples)))
            if order < 1:
                order = 1
            h = _create_uniform_grid_exclude_border(n_dim, order)
        elif self.border == "only":
            if self.use_full_layout:
                order = int(np.floor(n_samples / 2.0))
            else:
                order = int(np.ceil(n_samples / 2.0))
            if order < 2:
                order = 2
            # NOTE(review): this branch calls the *exclude*-border builder
            # even though border == "only" -- verify against intended design.
            h = _create_uniform_grid_exclude_border(n_dim, order)
        else:
            raise ValueError("Wrong value for border")
        if np.size(h, 0) > n_samples:
            # Too many grid points: keep a random subset of n_samples rows.
            rng.shuffle(h)
            h = h[:n_samples, :]
        elif np.size(h, 0) < n_samples:
            # Too few grid points: top up from a second grid whose layout
            # is controlled by append_border, then reshuffle the union.
            if self.append_border == "only":
                order = int(np.ceil((n_samples - np.size(h, 0)) / 2.0))
                if order < 2:
                    order = 2
                h2 = _create_uniform_grid_only_border(n_dim, order)
            elif self.append_border == "include":
                order = int(np.ceil(np.sqrt(n_samples - np.size(h, 0))))
                if order < 2:
                    order = 2
                h2 = _create_uniform_grid_include_border(n_dim, order)
            elif self.append_border == "exclude":
                order = int(np.ceil(np.sqrt(n_samples - np.size(h, 0))))
                if order < 1:
                    order = 1
                h2 = _create_uniform_grid_exclude_border(n_dim, order)
            else:
                raise ValueError("Wrong value for append_border")
            h = np.vstack((h, h2[: (n_samples - np.size(h, 0))]))
            rng.shuffle(h)
        else:
            rng.shuffle(h)
        # Map the unit-cube points back to the original search space.
        h = space.inverse_transform(h)
        space.set_transformer(transformer)
        return h
| 6,274 | 35.482558 | 85 | py |
deephyper | deephyper-master/deephyper/skopt/sampler/halton.py | """
Inspired by https://github.com/jonathf/chaospy/blob/master/chaospy/
distributions/sampler/sequences/halton.py
"""
import numpy as np
from .base import InitialPointGenerator
from ..space import Space
from sklearn.utils import check_random_state
class Halton(InitialPointGenerator):
    """Creates `Halton` sequence samples.
    In statistics, Halton sequences are sequences used to generate
    points in space for numerical methods such as Monte Carlo simulations.
    Although these sequences are deterministic, they are of low discrepancy,
    that is, appear to be random
    for many purposes. They were first introduced in 1960 and are an example
    of a quasi-random number sequence. They generalise the one-dimensional
    van der Corput sequences.
    For ``dim == 1`` the sequence falls back to Van Der Corput sequence.
    Parameters
    ----------
    min_skip : int, default=0
        Minimum skipped seed number. When `min_skip != max_skip`
        a random number is picked.
    max_skip : int, default=0
        Maximum skipped seed number. When `min_skip != max_skip`
        a random number is picked.
    primes : tuple, default=None
        The (non-)prime base to calculate values along each axis. If
        empty or None, growing prime values starting from 2 will be used.
    """
    def __init__(self, min_skip=0, max_skip=0, primes=None):
        self.primes = primes
        self.min_skip = min_skip
        self.max_skip = max_skip
    def generate(self, dimensions, n_samples, random_state=None):
        """Creates samples from Halton set.
        Parameters
        ----------
        dimensions : list, shape (n_dims,)
            List of search space dimensions.
            Each search dimension can be defined either as
            - a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
            dimensions),
            - a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
            dimensions),
            - as a list of categories (for `Categorical` dimensions), or
            - an instance of a `Dimension` object (`Real`, `Integer` or
            `Categorical`).
        n_samples : int
            The order of the Halton sequence. Defines the number of samples.
        random_state : int, RandomState instance, or None (default)
            Set random state to something other than None for reproducible
            results.
        Returns
        -------
        np.array, shape=(n_dim, n_samples)
            Halton set.
        """
        rng = check_random_state(random_state)
        if self.primes is None:
            primes = []
        else:
            primes = list(self.primes)
        space = Space(dimensions)
        n_dim = space.n_dims
        # Work in the normalized unit hypercube; the original transformer
        # is restored before returning.
        transformer = space.get_transformer()
        space.set_transformer("normalize")
        if len(primes) < n_dim:
            # Grow the sieve bound until one prime per dimension exists.
            prime_order = 10 * n_dim
            while len(primes) < n_dim:
                primes = _create_primes(prime_order)
                prime_order *= 2
            primes = primes[:n_dim]
        assert len(primes) == n_dim, "not enough primes"
        # Determine how many leading sequence elements to skip.
        if self.min_skip == self.max_skip:
            skip = self.min_skip
        elif self.min_skip < 0 and self.max_skip < 0:
            skip = max(primes)
        elif self.min_skip < 0 or self.max_skip < 0:
            # BUG FIX: the previous code called np.max(self.min_skip,
            # self.max_skip), which passes max_skip as the ``axis`` argument
            # and raises AxisError on scalar input. The builtin max picks
            # the non-negative bound as intended.
            skip = max(self.min_skip, self.max_skip)
        else:
            skip = rng.randint(self.min_skip, self.max_skip)
        out = np.empty((n_dim, n_samples))
        indices = [idx + skip for idx in range(n_samples)]
        for dim_ in range(n_dim):
            # One Van der Corput sequence per axis, each in its own base.
            out[dim_] = _van_der_corput_samples(indices, number_base=primes[dim_])
        out = space.inverse_transform(np.transpose(out))
        space.set_transformer(transformer)
        return out
def _van_der_corput_samples(idx, number_base=2):
"""Create `Van Der Corput` low discrepancy sequence samples.
A van der Corput sequence is an example of the simplest one-dimensional
low-discrepancy sequence over the unit interval; it was first described in
1935 by the Dutch mathematician J. G. van der Corput. It is constructed by
reversing the base-n representation of the sequence of natural numbers
(1, 2, 3, ...).
In practice, use Halton sequence instead of Van Der Corput, as it is the
same, but generalized to work in multiple dimensions.
Parameters
----------
idx (int, numpy.ndarray):
The index of the sequence. If array is provided, all values in
array is returned.
number_base : int
The numerical base from where to create the samples from.
Returns
-------
float, numpy.ndarray
Van der Corput samples.
"""
assert number_base > 1
idx = np.asarray(idx).flatten()
out = np.zeros(len(idx), dtype=float)
base = float(number_base)
active = np.ones(len(idx), dtype=bool)
while np.any(active):
out[active] += (idx[active] % number_base) / base
idx //= number_base
base *= number_base
active = idx > 0
return out
def _create_primes(threshold):
"""
Generate prime values using sieve of Eratosthenes method.
Parameters
----------
threshold : int
The upper bound for the size of the prime values.
Returns
------
List
All primes from 2 and up to ``threshold``.
"""
if threshold == 2:
return [2]
elif threshold < 2:
return []
numbers = list(range(3, threshold + 1, 2))
root_of_threshold = threshold**0.5
half = int((threshold + 1) / 2 - 1)
idx = 0
counter = 3
while counter <= root_of_threshold:
if numbers[idx]:
idy = int((counter * counter - 3) / 2)
numbers[idy] = 0
while idy < half:
numbers[idy] = 0
idy += counter
idx += 1
counter = 2 * idx + 3
return [2] + [number for number in numbers if number]
| 5,951 | 31.52459 | 82 | py |
deephyper | deephyper-master/deephyper/skopt/sampler/__init__.py | """
Utilities for generating initial sequences
"""
from .lhs import Lhs
from .sobol import Sobol
from .halton import Halton
from .hammersly import Hammersly
from .grid import Grid
from .base import InitialPointGenerator
__all__ = ["Lhs", "Sobol", "Halton", "Hammersly", "Grid", "InitialPointGenerator"]
| 305 | 22.538462 | 82 | py |
deephyper | deephyper-master/deephyper/skopt/learning/gbrt.py | import numpy as np
from sklearn.base import clone
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.utils import check_random_state
from joblib import Parallel, delayed
def _parallel_fit(regressor, X, y):
return regressor.fit(X, y)
class GradientBoostingQuantileRegressor(BaseEstimator, RegressorMixin):
    """Predict several quantiles with one estimator.
    This is a wrapper around `GradientBoostingRegressor`'s quantile
    regression that allows you to predict several `quantiles` in
    one go.
    Parameters
    ----------
    quantiles : array-like
        Quantiles to predict. By default the 16, 50 and 84%
        quantiles are predicted.
    base_estimator : GradientBoostingRegressor instance or None (default)
        Quantile regressor used to make predictions. Only instances
        of `GradientBoostingRegressor` are supported. Use this to change
        the hyper-parameters of the estimator.
    n_jobs : int, default=1
        The number of jobs to run in parallel for `fit`.
        If -1, then the number of jobs is set to the number of cores.
    random_state : int, RandomState instance, or None (default)
        Set random state to something other than None for reproducible
        results.
    """
    def __init__(
        self,
        quantiles=[0.16, 0.5, 0.84],
        base_estimator=None,
        n_jobs=1,
        random_state=None,
    ):
        # NOTE(review): mutable default for `quantiles` is shared across
        # instances; it is never mutated here (sklearn estimator
        # convention), but confirm callers do not modify it in place.
        self.quantiles = quantiles
        self.random_state = random_state
        self.base_estimator = base_estimator
        self.n_jobs = n_jobs
    def fit(self, X, y):
        """Fit one regressor for each quantile.
        Parameters
        ----------
        X : array-like, shape=(n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        y : array-like, shape=(n_samples,)
            Target values (real numbers in regression)
        """
        rng = check_random_state(self.random_state)
        if self.base_estimator is None:
            base_estimator = GradientBoostingRegressor(loss="quantile")
        else:
            base_estimator = self.base_estimator
            # Only GradientBoostingRegressor with quantile loss can yield
            # per-quantile predictions.
            if not isinstance(base_estimator, GradientBoostingRegressor):
                raise ValueError(
                    "base_estimator has to be of type" " GradientBoostingRegressor."
                )
            if not base_estimator.loss == "quantile":
                raise ValueError(
                    "base_estimator has to use quantile"
                    " loss not %s" % base_estimator.loss
                )
        # The predictions for different quantiles should be sorted.
        # Therefore each of the regressors need the same seed.
        base_estimator.set_params(random_state=rng)
        # One clone per quantile, differing only in `alpha`.
        regressors = []
        for q in self.quantiles:
            regressor = clone(base_estimator)
            regressor.set_params(alpha=q)
            regressors.append(regressor)
        # Fit all quantile regressors in parallel threads.
        self.regressors_ = Parallel(n_jobs=self.n_jobs, backend="threading")(
            delayed(_parallel_fit)(regressor, X, y) for regressor in regressors
        )
        return self
    def predict(self, X, return_std=False, return_quantiles=False):
        """Predict.
        Predict `X` at every quantile if `return_std` is set to False.
        If `return_std` is set to True, then return the mean
        and the predicted standard deviation, which is approximated as
        the (0.84th quantile - 0.16th quantile) divided by 2.0
        Parameters
        ----------
        X : array-like, shape=(n_samples, n_features)
            where `n_samples` is the number of samples
            and `n_features` is the number of features.
        return_std : bool, default=False
            Whether to return the median prediction together with the
            approximate standard deviation.
        return_quantiles : bool, default=False
            Whether to return predictions for every fitted quantile;
            takes precedence over `return_std`.
        """
        predicted_quantiles = np.asarray([rgr.predict(X) for rgr in self.regressors_])
        if return_quantiles:
            return predicted_quantiles.T
        elif return_std:
            # std is only well-defined when the 16/50/84% quantiles were fit.
            std_quantiles = [0.16, 0.5, 0.84]
            is_present_mask = np.in1d(std_quantiles, self.quantiles)
            if not np.all(is_present_mask):
                raise ValueError(
                    "return_std works only if the quantiles during "
                    "instantiation include 0.16, 0.5 and 0.84"
                )
            low = self.regressors_[self.quantiles.index(0.16)].predict(X)
            high = self.regressors_[self.quantiles.index(0.84)].predict(X)
            mean = self.regressors_[self.quantiles.index(0.5)].predict(X)
            return mean, ((high - low) / 2.0)
        # return the mean
        return self.regressors_[self.quantiles.index(0.5)].predict(X)
| 4,710 | 34.689394 | 86 | py |
deephyper | deephyper-master/deephyper/skopt/learning/__init__.py | """Machine learning extensions for model-based optimization."""
from .forest import RandomForestRegressor
from .forest import ExtraTreesRegressor
from .gaussian_process import GaussianProcessRegressor
from .gbrt import GradientBoostingQuantileRegressor
__all__ = [
"RandomForestRegressor",
"ExtraTreesRegressor",
"GradientBoostingQuantileRegressor",
"GaussianProcessRegressor",
]
try:
from skgarden.mondrian import MondrianForestRegressor # noqa: F401
__all__.append("MondrianForestRegressor")
except ImportError:
pass
| 553 | 24.181818 | 71 | py |
deephyper | deephyper-master/deephyper/skopt/learning/forest.py | import numpy as np
from sklearn.ensemble import ExtraTreesRegressor as _sk_ExtraTreesRegressor
from sklearn.ensemble._forest import ForestRegressor, DecisionTreeRegressor
def _return_std(X, n_outputs, trees, predictions, min_variance):
"""
Returns `std(Y | X)`.
Can be calculated by E[Var(Y | Tree)] + Var(E[Y | Tree]) where
P(Tree) is `1 / len(trees)`.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input data.
n_outputs: int.
Number of outputs.
trees : list, shape=(n_estimators,)
List of fit sklearn trees as obtained from the ``estimators_``
attribute of a fit RandomForestRegressor or ExtraTreesRegressor.
predictions : array-like, shape=(n_samples,)
Prediction of each data point as returned by RandomForestRegressor
or ExtraTreesRegressor.
Returns
-------
std : array-like, shape=(n_samples,)
Standard deviation of `y` at `X`. If criterion
is set to "mse", then `std[i] ~= std(y | X[i])`.
"""
# This derives std(y | x) as described in 4.3.2 of arXiv:1211.0906
flat = len(predictions.shape) == 1
if flat:
predictions = predictions.reshape(-1, 1)
std = np.zeros((n_outputs, len(X)))
for tree in trees:
var_tree = tree.tree_.impurity[tree.apply(X)]
# This rounding off is done in accordance with the
# adjustment done in section 4.3.3
# of http://arxiv.org/pdf/1211.0906v2.pdf to account
# for cases such as leaves with 1 sample in which there
# is zero variance.
var_tree[var_tree < min_variance] = min_variance
mean_tree = tree.predict(X).T
std += var_tree + mean_tree**2
std = std.T
std /= len(trees)
std -= predictions**2.0
std[std < 0.0] = 0.0
std = std**0.5
if flat:
std = std.reshape(-1)
return std
class RandomForestRegressor(ForestRegressor):
    """
    RandomForestRegressor that supports conditional std computation.
    Parameters
    ----------
    n_estimators : integer, optional (default=100)
        The number of trees in the forest.
    criterion : string, optional (default="squared_error")
        The function to measure the quality of a split. Supported criteria
        are "squared_error" for the mean squared error, which is equal to
        variance reduction as feature selection criterion, and "mae" for the
        mean absolute error.
    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        .. note::
            The search for a split does not stop until at least one
            valid partition of the node samples is found, even if it
            requires to effectively inspect more than ``max_features``
            features.
    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
    min_samples_split : int, float, optional (default=2)
        The minimum number of samples required to split an internal node:
        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a percentage and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.
    min_samples_leaf : int, float, optional (default=1)
        The minimum number of samples required to be at a leaf node:
        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a percentage and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the sum total of weights (of all
        the input samples) required to be at a leaf node. Samples have
        equal weight when sample_weight is not provided.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
    min_impurity_decrease : float, optional (default=0.)
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.
        The weighted impurity decrease equation is the following::
            N_t / N * (impurity - N_t_R / N_t * right_impurity
                - N_t_L / N_t * left_impurity)
        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.
        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.
    bootstrap : boolean, optional (default=True)
        Whether bootstrap samples are used when building trees.
    oob_score : bool, optional (default=False)
        whether to use out-of-bag samples to estimate
        the R^2 on unseen data.
    n_jobs : integer, optional (default=None)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.
    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.
    min_variance : float, optional (default=0.)
        Lower bound on each leaf's variance used when computing the
        conditional standard deviation in :meth:`predict`.
    splitter : str, optional (default="random")
        Split strategy passed to each underlying DecisionTreeRegressor.
    Attributes
    ----------
    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.
    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
    oob_prediction_ : array of shape = [n_samples]
        Prediction computed with out-of-bag estimate on the training set.
    Notes
    -----
    The default values for the parameters controlling the size of the trees
    (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
    unpruned trees which can potentially be very large on some data sets. To
    reduce memory consumption, the complexity and size of the trees should be
    controlled by setting those parameter values.
    The features are always randomly permuted at each split. Therefore,
    the best found split may vary, even with the same training data,
    ``max_features=n_features`` and ``bootstrap=False``, if the improvement
    of the criterion is identical for several splits enumerated during the
    search of the best split. To obtain a deterministic behaviour during
    fitting, ``random_state`` has to be fixed.
    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
    """
    def __init__(
        self,
        n_estimators=100,
        *,
        criterion="squared_error",
        max_depth=None,
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_features="auto",
        max_leaf_nodes=None,
        min_impurity_decrease=0.0,
        bootstrap=True,
        oob_score=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
        warm_start=False,
        ccp_alpha=0.0,
        max_samples=None,
        min_variance=0.0,
        splitter="random"
    ):
        super().__init__(
            # !keyword-argument changing from sklearn==1.2.0, positional fixed it!
            DecisionTreeRegressor(splitter=splitter),
            n_estimators=n_estimators,
            # Parameters forwarded from the forest to each tree clone.
            estimator_params=(
                "criterion",
                "max_depth",
                "min_samples_split",
                "min_samples_leaf",
                "min_weight_fraction_leaf",
                "max_features",
                "max_leaf_nodes",
                "min_impurity_decrease",
                "random_state",
                "ccp_alpha",
            ),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            max_samples=max_samples,
        )
        # Stored on self so sklearn's get_params/clone can see them and so
        # estimator_params above can forward them to each tree.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.min_impurity_decrease = min_impurity_decrease
        self.ccp_alpha = ccp_alpha
        self.min_variance = min_variance
        self.splitter = splitter
    def predict(self, X, return_std=False, forestci=False):
        """Predict continuous output for X.
        Parameters
        ----------
        X : array of shape = (n_samples, n_features)
            Input data.
        return_std : boolean
            Whether or not to return the standard deviation.
        forestci : boolean
            Accepted but not used in this method -- presumably kept for
            API compatibility; TODO confirm.
        Returns
        -------
        predictions : array-like of shape = (n_samples,)
            Predicted values for X. If criterion is set to "squared_error",
            then `predictions[i] ~= mean(y | X[i])`.
        std : array-like of shape=(n_samples,)
            Standard deviation of `y` at `X`. If criterion
            is set to "squared_error", then `std[i] ~= std(y | X[i])`.
        """
        mean = super(RandomForestRegressor, self).predict(X)
        if return_std:
            # _return_std interprets leaf impurity as variance, which only
            # holds for the squared-error criterion.
            if self.criterion != "squared_error":
                raise ValueError(
                    "Expected impurity to be 'squared_error', got %s instead"
                    % self.criterion
                )
            std = _return_std(
                X, self.n_outputs_, self.estimators_, mean, self.min_variance
            )
            return mean, std
        return mean
class ExtraTreesRegressor(_sk_ExtraTreesRegressor):
    """
    ExtraTreesRegressor that supports conditional standard deviation.
    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.
    criterion : string, optional (default="squared_error")
        The function to measure the quality of a split. Supported criteria
        are "squared_error" for the mean squared error, which is equal to variance
        reduction as feature selection criterion, and "mae" for the mean
        absolute error.
    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        .. note::
            The search for a split does not stop until at least one
            valid partition of the node samples is found, even if it
            requires to effectively inspect more than ``max_features``
            features.
    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
    min_samples_split : int, float, optional (default=2)
        The minimum number of samples required to split an internal node:
        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a percentage and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.
    min_samples_leaf : int, float, optional (default=1)
        The minimum number of samples required to be at a leaf node:
        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a percentage and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the sum total of weights (of all
        the input samples) required to be at a leaf node. Samples have
        equal weight when sample_weight is not provided.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
    min_impurity_decrease : float, optional (default=0.)
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.
        The weighted impurity decrease equation is the following::
            N_t / N * (impurity - N_t_R / N_t * right_impurity
                - N_t_L / N_t * left_impurity)
        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.
        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.
    bootstrap : boolean, optional (default=False)
        Whether bootstrap samples are used when building trees.
    oob_score : bool, optional (default=False)
        whether to use out-of-bag samples to estimate
        the R^2 on unseen data.
    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.
    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.
    min_variance : float, optional (default=0.)
        Lower bound on each leaf's variance used when computing the
        conditional standard deviation in :meth:`predict`.
    Attributes
    ----------
    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.
    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
    oob_prediction_ : array of shape = [n_samples]
        Prediction computed with out-of-bag estimate on the training set.
    Notes
    -----
    The default values for the parameters controlling the size of the trees
    (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
    unpruned trees which can potentially be very large on some data sets. To
    reduce memory consumption, the complexity and size of the trees should be
    controlled by setting those parameter values.
    The features are always randomly permuted at each split. Therefore,
    the best found split may vary, even with the same training data,
    ``max_features=n_features`` and ``bootstrap=False``, if the improvement
    of the criterion is identical for several splits enumerated during the
    search of the best split. To obtain a deterministic behaviour during
    fitting, ``random_state`` has to be fixed.
    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
    """
    def __init__(
        self,
        n_estimators=10,
        criterion="squared_error",
        max_depth=None,
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_features="auto",
        max_leaf_nodes=None,
        min_impurity_decrease=0.0,
        bootstrap=False,
        oob_score=False,
        n_jobs=1,
        random_state=None,
        verbose=0,
        warm_start=False,
        min_variance=0.0,
    ):
        # min_variance is the only parameter not understood by the sklearn
        # base class; everything else is forwarded verbatim.
        self.min_variance = min_variance
        super(ExtraTreesRegressor, self).__init__(
            n_estimators=n_estimators,
            criterion=criterion,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            min_impurity_decrease=min_impurity_decrease,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
        )
    def predict(self, X, return_std=False):
        """
        Predict continuous output for X.
        Parameters
        ----------
        X : array-like of shape=(n_samples, n_features)
            Input data.
        return_std : boolean
            Whether or not to return the standard deviation.
        Returns
        -------
        predictions : array-like of shape=(n_samples,)
            Predicted values for X. If criterion is set to "squared_error",
            then `predictions[i] ~= mean(y | X[i])`.
        std : array-like of shape=(n_samples,)
            Standard deviation of `y` at `X`. If criterion
            is set to "squared_error", then `std[i] ~= std(y | X[i])`.
        """
        mean = super(ExtraTreesRegressor, self).predict(X)
        if return_std:
            # _return_std interprets leaf impurity as variance, which only
            # holds for the squared-error criterion.
            if self.criterion != "squared_error":
                raise ValueError(
                    "Expected impurity to be 'squared_error', got %s instead"
                    % self.criterion
                )
            std = _return_std(
                X, self.n_outputs_, self.estimators_, mean, self.min_variance
            )
            return mean, std
        return mean
| 19,918 | 36.725379 | 82 | py |
deephyper | deephyper-master/deephyper/skopt/learning/gaussian_process/kernels.py | from math import sqrt
import numpy as np
from sklearn.gaussian_process.kernels import Kernel as sk_Kernel
from sklearn.gaussian_process.kernels import ConstantKernel as sk_ConstantKernel
from sklearn.gaussian_process.kernels import DotProduct as sk_DotProduct
from sklearn.gaussian_process.kernels import Exponentiation as sk_Exponentiation
from sklearn.gaussian_process.kernels import ExpSineSquared as sk_ExpSineSquared
from sklearn.gaussian_process.kernels import Hyperparameter
from sklearn.gaussian_process.kernels import Matern as sk_Matern
from sklearn.gaussian_process.kernels import (
NormalizedKernelMixin as sk_NormalizedKernelMixin,
)
from sklearn.gaussian_process.kernels import Product as sk_Product
from sklearn.gaussian_process.kernels import RationalQuadratic as sk_RationalQuadratic
from sklearn.gaussian_process.kernels import RBF as sk_RBF
from sklearn.gaussian_process.kernels import (
StationaryKernelMixin as sk_StationaryKernelMixin,
)
from sklearn.gaussian_process.kernels import Sum as sk_Sum
from sklearn.gaussian_process.kernels import WhiteKernel as sk_WhiteKernel
class Kernel(sk_Kernel):
    """
    Base class for deephyper.skopt.gaussian_process kernels.

    Overloads the arithmetic operators so that combining kernels (or a
    kernel with a scalar) yields the gradient-aware Sum/Product/
    Exponentiation wrappers defined in this module, and declares the
    ``gradient_x`` interface (gradient of the kernel with respect to X).
    """

    def __add__(self, b):
        other = b if isinstance(b, Kernel) else ConstantKernel(b)
        return Sum(self, other)

    def __radd__(self, b):
        other = b if isinstance(b, Kernel) else ConstantKernel(b)
        return Sum(other, self)

    def __mul__(self, b):
        other = b if isinstance(b, Kernel) else ConstantKernel(b)
        return Product(self, other)

    def __rmul__(self, b):
        other = b if isinstance(b, Kernel) else ConstantKernel(b)
        return Product(other, self)

    def __pow__(self, b):
        return Exponentiation(self, b)

    def gradient_x(self, x, X_train):
        """
        Computes gradient of K(x, X_train) with respect to x

        Parameters
        ----------
        x: array-like, shape=(n_features,)
            A single test point.

        X_train: array-like, shape=(n_samples, n_features)
            Training data used to fit the gaussian process.

        Returns
        -------
        gradient_x: array-like, shape=(n_samples, n_features)
            Gradient of K(x, X_train) with respect to x.
        """
        raise NotImplementedError
class RBF(Kernel, sk_RBF):
    def gradient_x(self, x, X_train):
        """Gradient of K(x, X_train) with respect to x.

        With ``d = (x - X_train) / length_scale`` the kernel value is
        ``K = exp(-0.5 * ||d||^2)`` and its gradient w.r.t. ``x`` is
        ``-K * d / length_scale``.
        """
        x = np.asarray(x)
        X_train = np.asarray(X_train)
        length_scale = np.asarray(self.length_scale)

        # Scaled difference, shape (n_train_samples, n_dimensions).
        scaled_diff = (x - X_train) / length_scale
        # Kernel values, one per training sample.
        kernel_val = np.exp(-0.5 * np.sum(scaled_diff**2, axis=1))
        return -kernel_val[:, np.newaxis] * scaled_diff / length_scale
class Matern(Kernel, sk_Matern):
    def gradient_x(self, x, X_train):
        """Gradient of K(x, X_train) with respect to x.

        Only the half-integer smoothness values nu in {0.5, 1.5, 2.5}
        have closed-form gradients; any other nu now raises instead of
        silently falling through and returning ``None``.
        """
        x = np.asarray(x)
        X_train = np.asarray(X_train)
        length_scale = np.asarray(self.length_scale)
        # diff = (x - X_train) / length_scale
        # size = (n_train_samples, n_dimensions)
        diff = x - X_train
        diff /= length_scale
        # dist_sq = \sum_{i=1}^d (diff ^ 2)
        # dist = sqrt(dist_sq)
        # size = (n_train_samples,)
        dist_sq = np.sum(diff**2, axis=1)
        dist = np.sqrt(dist_sq)
        if self.nu == 0.5:
            # e = -np.exp(-dist) / dist
            # size = (n_train_samples, 1)
            scaled_exp_dist = -dist
            scaled_exp_dist = np.exp(scaled_exp_dist, scaled_exp_dist)
            scaled_exp_dist *= -1
            # grad = (e * diff) / length_scale
            # For all i in [0, D) if x_i equals y_i.
            # 1. e -> -1
            # 2. (x_i - y_i) / \sum_{j=1}^D (x_i - y_i)**2 approaches 1.
            # Hence the gradient when for all i in [0, D),
            # x_i equals y_i is -1 / length_scale[i].
            gradient = -np.ones((X_train.shape[0], x.shape[0]))
            mask = dist != 0.0
            scaled_exp_dist[mask] /= dist[mask]
            scaled_exp_dist = np.expand_dims(scaled_exp_dist, axis=1)
            gradient[mask] = scaled_exp_dist[mask] * diff[mask]
            gradient /= length_scale
            return gradient
        elif self.nu == 1.5:
            # grad(fg) = f'g + fg'
            # where f = 1 + sqrt(3) * euclidean((X - Y) / length_scale)
            # where g = exp(-sqrt(3) * euclidean((X - Y) / length_scale))
            sqrt_3_dist = sqrt(3) * dist
            f = np.expand_dims(1 + sqrt_3_dist, axis=1)
            # When all of x_i equals y_i, f equals 1.0, (1 - f) equals
            # zero, hence from below
            # f * g_grad + g * f_grad (where g_grad = -g * f_grad)
            # -f * g * f_grad + g * f_grad
            # g * f_grad * (1 - f) equals zero.
            # sqrt_3_by_dist can be set to any value since diff equals
            # zero for this corner case.
            sqrt_3_by_dist = np.zeros_like(dist)
            nzd = dist != 0.0
            sqrt_3_by_dist[nzd] = sqrt(3) / dist[nzd]
            dist_expand = np.expand_dims(sqrt_3_by_dist, axis=1)
            f_grad = diff / length_scale
            f_grad *= dist_expand
            sqrt_3_dist *= -1
            exp_sqrt_3_dist = np.exp(sqrt_3_dist, sqrt_3_dist)
            g = np.expand_dims(exp_sqrt_3_dist, axis=1)
            g_grad = -g * f_grad
            # f * g_grad + g * f_grad (where g_grad = -g * f_grad)
            f *= -1
            f += 1
            return g * f_grad * f
        elif self.nu == 2.5:
            # grad(fg) = f'g + fg'
            # where f = (1 + sqrt(5) * euclidean((X - Y) / length_scale) +
            #            5 / 3 * sqeuclidean((X - Y) / length_scale))
            # where g = exp(-sqrt(5) * euclidean((X - Y) / length_scale))
            sqrt_5_dist = sqrt(5) * dist
            f2 = (5.0 / 3.0) * dist_sq
            f2 += sqrt_5_dist
            f2 += 1
            f = np.expand_dims(f2, axis=1)
            # For i in [0, D) if x_i equals y_i
            # f = 1 and g = 1
            # Grad = f'g + fg' = f' + g'
            # f' = f_1' + f_2'
            # Also g' = -g * f1'
            # Grad = f'g - g * f1' * f
            # Grad = g * (f' - f1' * f)
            # Grad = f' - f1'
            # Grad = f2' which equals zero when x = y
            # Since for this corner case, diff equals zero,
            # dist can be set to anything.
            nzd_mask = dist != 0.0
            nzd = dist[nzd_mask]
            dist[nzd_mask] = np.reciprocal(nzd, nzd)
            dist *= sqrt(5)
            dist = np.expand_dims(dist, axis=1)
            diff /= length_scale
            f1_grad = dist * diff
            f2_grad = (10.0 / 3.0) * diff
            f_grad = f1_grad + f2_grad
            sqrt_5_dist *= -1
            g = np.exp(sqrt_5_dist, sqrt_5_dist)
            g = np.expand_dims(g, axis=1)
            g_grad = -g * f1_grad
            return f * g_grad + g * f_grad
        else:
            # BUGFIX: previously this method fell off the end and returned
            # ``None`` for any other nu, crashing callers far from the cause.
            raise NotImplementedError(
                "gradient_x is only implemented for nu in (0.5, 1.5, 2.5), "
                "got nu=%r" % self.nu
            )
class RationalQuadratic(Kernel, sk_RationalQuadratic):
    def gradient_x(self, x, X_train):
        """Gradient of K(x, X_train) with respect to x.

        With ``d = (x - X_train) / length_scale`` the gradient is
        ``-(1 + ||d||^2 / (2 alpha))^(-alpha - 1) * d / length_scale``.
        """
        x = np.asarray(x)
        X_train = np.asarray(X_train)
        alpha = self.alpha
        length_scale = self.length_scale

        # Scaled difference, shape (n_train_samples, n_dimensions).
        scaled_diff = (x - X_train) / length_scale
        base = np.sum(scaled_diff**2, axis=1) / (2 * alpha) + 1
        factor = -(base ** (-alpha - 1))
        return np.expand_dims(factor, axis=1) * (scaled_diff / length_scale)
class ExpSineSquared(Kernel, sk_ExpSineSquared):
    def gradient_x(self, x, X_train):
        """Gradient of K(x, X_train) with respect to x."""
        x = np.asarray(x)
        X_train = np.asarray(X_train)
        length_scale = self.length_scale
        periodicity = self.periodicity

        diff = x - X_train
        dist = np.sqrt(np.sum(diff**2, axis=1))
        angle = dist * (np.pi / periodicity)

        # K = exp(-2 * sin(angle)^2 / length_scale^2)
        kernel_val = np.exp(-2 * (np.sin(angle) / length_scale) ** 2)
        # dK / d(angle) factor.
        outer_grad = -2 * np.sin(2 * angle) / length_scale**2
        # When x_i -> y_i for all i in [0, D), the gradient becomes
        # zero. See https://github.com/MechCoder/Notebooks/blob/master/ExpSineSquared%20Kernel%20gradient%20computation.ipynb
        # for a detailed math explanation
        # The 1 / dist factor is undefined at dist == 0, so it is set to
        # zero explicitly (diff is zero there anyway).
        inv_dist_term = np.zeros_like(dist)
        nonzero = dist != 0.0
        inv_dist_term[nonzero] = np.pi / (periodicity * dist[nonzero])
        return (
            np.expand_dims(inv_dist_term * kernel_val * outer_grad, axis=1)
            * diff
        )
class ConstantKernel(Kernel, sk_ConstantKernel):
    def gradient_x(self, x, X_train):
        """A constant kernel does not depend on ``x``; the gradient is zero."""
        return np.zeros_like(X_train)
class WhiteKernel(Kernel, sk_WhiteKernel):
    def gradient_x(self, x, X_train):
        """White noise contributes nothing off the diagonal; gradient is zero."""
        return np.zeros_like(X_train)
class Exponentiation(Kernel, sk_Exponentiation):
    def gradient_x(self, x, X_train):
        """Chain rule for k(x, y) ** p: grad = p * k**(p - 1) * grad(k)."""
        x = np.asarray(x)
        X_train = np.asarray(X_train)
        base_kernel = self.kernel
        power = self.exponent

        # Column vector of base-kernel values, shape (n_train_samples, 1).
        k_col = np.expand_dims(
            base_kernel(np.expand_dims(x, axis=0), X_train)[0], axis=1
        )
        return power * k_col ** (power - 1) * base_kernel.gradient_x(x, X_train)
class Sum(Kernel, sk_Sum):
    def gradient_x(self, x, X_train):
        """Gradient of k1 + k2 is the sum of the two gradients."""
        grad_first = self.k1.gradient_x(x, X_train)
        grad_second = self.k2.gradient_x(x, X_train)
        return grad_first + grad_second
class Product(Kernel, sk_Product):
    def gradient_x(self, x, X_train):
        """Product rule: grad(k1 * k2) = k1 * grad(k2) + k2 * grad(k1)."""
        x_row = np.expand_dims(np.asarray(x), axis=0)
        X_train = np.asarray(X_train)

        # Kernel values as column vectors, shape (n_train_samples, 1).
        k1_col = np.expand_dims(self.k1(x_row, X_train)[0], axis=1)
        k2_col = np.expand_dims(self.k2(x_row, X_train)[0], axis=1)
        return k1_col * self.k2.gradient_x(x_row, X_train) + k2_col * self.k1.gradient_x(
            x_row, X_train
        )
class DotProduct(Kernel, sk_DotProduct):
    def gradient_x(self, x, X_train):
        """The gradient of x . y with respect to x is simply y."""
        return np.asarray(X_train)
class HammingKernel(sk_StationaryKernelMixin, sk_NormalizedKernelMixin, Kernel):
    r"""
    The HammingKernel is used to handle categorical inputs.
    ``K(x_1, x_2) = exp(\sum_{j=1}^{d} -ls_j * (I(x_1j != x_2j)))``
    Parameters
    -----------
    * `length_scale` [float, array-like, shape=[n_features,], 1.0 (default)]
        The length scale of the kernel. If a float, an isotropic kernel is
        used. If an array, an anisotropic kernel is used where each dimension
        of l defines the length-scale of the respective feature dimension.
    * `length_scale_bounds` [array-like, [1e-5, 1e5] (default)]
        The lower and upper bound on length_scale
    """

    def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
        self.length_scale = length_scale
        self.length_scale_bounds = length_scale_bounds

    @property
    def hyperparameter_length_scale(self):
        # Anisotropic means one length scale per feature dimension.
        length_scale = self.length_scale
        anisotropic = np.iterable(length_scale) and len(length_scale) > 1
        if anisotropic:
            return Hyperparameter(
                "length_scale", "numeric", self.length_scale_bounds, len(length_scale)
            )
        return Hyperparameter("length_scale", "numeric", self.length_scale_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.
        Parameters
        ----------
        * `X` [array-like, shape=(n_samples_X, n_features)]
            Left argument of the returned kernel k(X, Y)
        * `Y` [array-like, shape=(n_samples_Y, n_features) or None(default)]
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            if evaluated instead.
        * `eval_gradient` [bool, False(default)]
            Determines whether the gradient with respect to the kernel
            hyperparameter is determined. Only supported when Y is None.
        Returns
        -------
        * `K` [array-like, shape=(n_samples_X, n_samples_Y)]
            Kernel k(X, Y)
        * `K_gradient` [array-like, shape=(n_samples_X, n_samples_X, n_dims)]
            The gradient of the kernel k(X, X) with respect to the
            hyperparameter of the kernel. Only returned when eval_gradient
            is True.
        """
        length_scale = self.length_scale
        anisotropic = np.iterable(length_scale) and len(length_scale) > 1
        if np.iterable(length_scale):
            if len(length_scale) > 1:
                length_scale = np.asarray(length_scale, dtype=float)
            else:
                length_scale = float(length_scale[0])
        else:
            length_scale = float(length_scale)
        X = np.atleast_2d(X)
        if anisotropic and X.shape[1] != len(length_scale):
            raise ValueError(
                "Expected X to have %d features, got %d"
                % (len(length_scale), X.shape[1])
            )
        n_samples, n_dim = X.shape
        Y_is_None = Y is None
        if Y_is_None:
            Y = X
        elif eval_gradient:
            raise ValueError("gradient can be evaluated only when Y != X")
        else:
            Y = np.atleast_2d(Y)
        # indicator[i, j, k] is True when X[i, k] differs from Y[j, k].
        indicator = np.expand_dims(X, axis=1) != Y
        kernel_prod = np.exp(-np.sum(length_scale * indicator, axis=2))
        # dK / d theta = (dK / dl) * (dl / d theta)
        # theta = log(l) => dl / d (theta) = e^theta = l
        # dK / d theta = l * dK / dl
        # dK / dL computation
        if anisotropic:
            grad = -np.expand_dims(kernel_prod, axis=-1) * np.array(
                indicator, dtype=np.float32
            )
        else:
            grad = -np.expand_dims(kernel_prod * np.sum(indicator, axis=2), axis=-1)
        # BUGFIX: the chain-rule factor ``l`` (see the derivation above)
        # applies to BOTH cases; previously it was applied only in the
        # isotropic branch, leaving anisotropic gradients off by the
        # length-scale factor.
        grad *= length_scale
        if eval_gradient:
            return kernel_prod, grad
        return kernel_prod
| 14,672 | 34.442029 | 125 | py |
deephyper | deephyper-master/deephyper/skopt/learning/gaussian_process/gpr.py | import warnings
import numpy as np
import sklearn
from packaging import version
from scipy.linalg import cho_solve, solve_triangular
from sklearn.gaussian_process import (
GaussianProcessRegressor as sk_GaussianProcessRegressor,
)
from sklearn.utils import check_array
from .kernels import RBF, ConstantKernel, Sum, WhiteKernel
def _param_for_white_kernel_in_Sum(kernel, kernel_str=""):
    """
    Search a (possibly nested) Sum kernel for a WhiteKernel component.

    Returns ``(True, param_key)`` where ``param_key`` is the key in
    ``kernel.get_params()`` addressing the WhiteKernel, or ``(False, "_")``
    when no WhiteKernel is reachable through Sum nodes.
    """
    prefix = kernel_str + "__" if kernel_str != "" else ""
    if isinstance(kernel, Sum):
        for param, child in kernel.get_params(deep=False).items():
            if isinstance(child, WhiteKernel):
                return True, prefix + param
            # Recurse: the WhiteKernel may sit deeper inside a nested Sum.
            present, child_key = _param_for_white_kernel_in_Sum(
                child, prefix + param
            )
            if present:
                return True, child_key
    return False, "_"
class GaussianProcessRegressor(sk_GaussianProcessRegressor):
    """
    GaussianProcessRegressor that allows noise tunability.
    The implementation is based on Algorithm 2.1 of Gaussian Processes
    for Machine Learning (GPML) by Rasmussen and Williams.
    In addition to standard scikit-learn estimator API,
    GaussianProcessRegressor:
    * allows prediction without prior fitting (based on the GP prior);
    * provides an additional method sample_y(X), which evaluates samples
      drawn from the GPR (prior or posterior) at given inputs;
    * exposes a method log_marginal_likelihood(theta), which can be used
      externally for other ways of selecting hyperparameters, e.g., via
      Markov chain Monte Carlo.
    Parameters
    ----------
    kernel : kernel object
        The kernel specifying the covariance function of the GP. If None is
        passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
        the kernel's hyperparameters are optimized during fitting.
    alpha : float or array-like, optional (default: 1e-10)
        Value added to the diagonal of the kernel matrix during fitting.
        Larger values correspond to increased noise level in the observations
        and reduce potential numerical issue during fitting. If an array is
        passed, it must have the same number of entries as the data used for
        fitting and is used as datapoint-dependent noise level. Note that this
        is equivalent to adding a WhiteKernel with c=alpha. Allowing to specify
        the noise level directly as a parameter is mainly for convenience and
        for consistency with Ridge.
    optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
        Can either be one of the internally supported optimizers for optimizing
        the kernel's parameters, specified by a string, or an externally
        defined optimizer passed as a callable. If a callable is passed, it
        must have the signature::
            def optimizer(obj_func, initial_theta, bounds):
                # * 'obj_func' is the objective function to be maximized, which
                #   takes the hyperparameters theta as parameter and an
                #   optional flag eval_gradient, which determines if the
                #   gradient is returned additionally to the function value
                # * 'initial_theta': the initial value for theta, which can be
                #   used by local optimizers
                # * 'bounds': the bounds on the values of theta
                ....
                # Returned are the best found hyperparameters theta and
                # the corresponding value of the target function.
                return theta_opt, func_min
        Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
        is used. If None is passed, the kernel's parameters are kept fixed.
        Available internal optimizers are::
            'fmin_l_bfgs_b'
    n_restarts_optimizer : int, optional (default: 0)
        The number of restarts of the optimizer for finding the kernel's
        parameters which maximize the log-marginal likelihood. The first run
        of the optimizer is performed from the kernel's initial parameters,
        the remaining ones (if any) from thetas sampled log-uniform randomly
        from the space of allowed theta-values. If greater than 0, all bounds
        must be finite. Note that n_restarts_optimizer == 0 implies that one
        run is performed.
    normalize_y : boolean, optional (default: False)
        Whether the target values y are normalized, i.e., the mean of the
        observed target values become zero. This parameter should be set to
        True if the target values' mean is expected to differ considerable from
        zero. When enabled, the normalization effectively modifies the GP's
        prior based on the data, which contradicts the likelihood principle;
        normalization is thus disabled per default.
    copy_X_train : bool, optional (default: True)
        If True, a persistent copy of the training data is stored in the
        object. Otherwise, just a reference to the training data is stored,
        which might cause predictions to change if the data is modified
        externally.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    noise : string, "gaussian", optional
        If set to "gaussian", then it is assumed that `y` is a noisy
        estimate of `f(x)` where the noise is gaussian.
    Attributes
    ----------
    X_train_ : array-like, shape = (n_samples, n_features)
        Feature values in training data (also required for prediction)
    y_train_ : array-like, shape = (n_samples, [n_output_dims])
        Target values in training data (also required for prediction)
    kernel_ kernel object
        The kernel used for prediction. The structure of the kernel is the
        same as the one passed as parameter but with optimized hyperparameters
    L_ : array-like, shape = (n_samples, n_samples)
        Lower-triangular Cholesky decomposition of the kernel in ``X_train_``
    alpha_ : array-like, shape = (n_samples,)
        Dual coefficients of training data points in kernel space
    log_marginal_likelihood_value_ : float
        The log-marginal-likelihood of ``self.kernel_.theta``
    noise_ : float
        Estimate of the gaussian noise. Useful only when noise is set to
        "gaussian".
    """
    def __init__(
        self,
        kernel=None,
        alpha=1e-10,
        optimizer="fmin_l_bfgs_b",
        n_restarts_optimizer=0,
        normalize_y=False,
        copy_X_train=True,
        random_state=None,
        noise=None,
    ):
        # ``noise`` is the only parameter added over the sklearn base class.
        self.noise = noise
        super(GaussianProcessRegressor, self).__init__(
            kernel=kernel,
            alpha=alpha,
            optimizer=optimizer,
            n_restarts_optimizer=n_restarts_optimizer,
            normalize_y=normalize_y,
            copy_X_train=copy_X_train,
            random_state=random_state,
        )
    def fit(self, X, y):
        """Fit Gaussian process regression model.
        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Training data
        y : array-like, shape = (n_samples, [n_output_dims])
            Target values
        Returns
        -------
        self
            Returns an instance of self.
        """
        if isinstance(self.noise, str) and self.noise != "gaussian":
            raise ValueError("expected noise to be 'gaussian', got %s" % self.noise)
        if self.kernel is None:
            self.kernel = ConstantKernel(1.0, constant_value_bounds="fixed") * RBF(
                1.0, length_scale_bounds="fixed"
            )
        # NOTE(review): the lines below rebind ``self.kernel`` (a constructor
        # parameter), so refitting with ``noise`` set stacks one additional
        # WhiteKernel term per ``fit`` call — confirm estimators are only
        # ever fit once, or guard against the repeated augmentation.
        if self.noise == "gaussian":
            self.kernel = self.kernel + WhiteKernel()
        elif self.noise:
            self.kernel = self.kernel + WhiteKernel(
                noise_level=self.noise, noise_level_bounds="fixed"
            )
        super(GaussianProcessRegressor, self).fit(X, y)
        self.noise_ = None
        if self.noise:
            # The noise component of this kernel should be set to zero
            # while estimating K(X_test, X_test)
            # Note that the term K(X, X) should include the noise but
            # this (K(X, X))^-1y is precomputed as the attribute `alpha_`.
            # (Notice the underscore).
            # This has been described in Eq 2.24 of
            # http://www.gaussianprocess.org/gpml/chapters/RW2.pdf
            # Hence this hack
            if isinstance(self.kernel_, WhiteKernel):
                self.kernel_.set_params(noise_level=0.0)
            else:
                white_present, white_param = _param_for_white_kernel_in_Sum(
                    self.kernel_
                )
                # This should always be true. Just in case.
                if white_present:
                    noise_kernel = self.kernel_.get_params()[white_param]
                    self.noise_ = noise_kernel.noise_level
                    self.kernel_.set_params(
                        **{white_param: WhiteKernel(noise_level=0.0)}
                    )
        # Precompute arrays needed at prediction
        L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0]))
        self.K_inv_ = L_inv.dot(L_inv.T)
        # Fix deprecation warning #462
        # The private normalization attributes changed names/semantics across
        # sklearn releases; expose them under stable public aliases.
        sklearn_version = version.parse(sklearn.__version__)
        if sklearn_version >= version.parse("0.23.0"):
            self.y_train_std_ = self._y_train_std
            self.y_train_mean_ = self._y_train_mean
        elif sklearn_version >= version.parse("0.19.0"):
            self.y_train_mean_ = self._y_train_mean
            self.y_train_std_ = 1
        else:
            self.y_train_mean_ = self.y_train_mean
            self.y_train_std_ = 1
        return self
    def predict(
        self,
        X,
        return_std=False,
        return_cov=False,
        return_mean_grad=False,
        return_std_grad=False,
    ):
        """
        Predict output for X.
        In addition to the mean of the predictive distribution, also its
        standard deviation (return_std=True) or covariance (return_cov=True),
        the gradient of the mean and the standard-deviation with respect to X
        can be optionally provided.
        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Query points where the GP is evaluated.
        return_std : bool, default: False
            If True, the standard-deviation of the predictive distribution at
            the query points is returned along with the mean.
        return_cov : bool, default: False
            If True, the covariance of the joint predictive distribution at
            the query points is returned along with the mean.
        return_mean_grad : bool, default: False
            Whether or not to return the gradient of the mean.
            Only valid when X is a single point.
        return_std_grad : bool, default: False
            Whether or not to return the gradient of the std.
            Only valid when X is a single point.
        Returns
        -------
        y_mean : array, shape = (n_samples, [n_output_dims])
            Mean of predictive distribution a query points
        y_std : array, shape = (n_samples,), optional
            Standard deviation of predictive distribution at query points.
            Only returned when return_std is True.
        y_cov : array, shape = (n_samples, n_samples), optional
            Covariance of joint predictive distribution a query points.
            Only returned when return_cov is True.
        y_mean_grad : shape = (n_samples, n_features)
            The gradient of the predicted mean
        y_std_grad : shape = (n_samples, n_features)
            The gradient of the predicted std.
        """
        if return_std and return_cov:
            raise RuntimeError(
                "Not returning standard deviation of predictions when "
                "returning full covariance."
            )
        if return_std_grad and not return_std:
            raise ValueError("Not returning std_gradient without returning " "the std.")
        X = check_array(X)
        if X.shape[0] != 1 and (return_mean_grad or return_std_grad):
            raise ValueError("Not implemented for n_samples > 1")
        if not hasattr(self, "X_train_"):  # Not fit; predict based on GP prior
            y_mean = np.zeros(X.shape[0])
            if return_cov:
                y_cov = self.kernel(X)
                return y_mean, y_cov
            elif return_std:
                y_var = self.kernel.diag(X)
                return y_mean, np.sqrt(y_var)
            else:
                return y_mean
        else:  # Predict based on GP posterior
            K_trans = self.kernel_(X, self.X_train_)
            y_mean = K_trans.dot(self.alpha_)  # Line 4 (y_mean = f_star)
            # undo normalisation
            y_mean = self.y_train_std_ * y_mean + self.y_train_mean_
            if return_cov:
                v = cho_solve((self.L_, True), K_trans.T)  # Line 5
                y_cov = self.kernel_(X) - K_trans.dot(v)  # Line 6
                # undo normalisation
                y_cov = y_cov * self.y_train_std_**2
                return y_mean, y_cov
            elif return_std:
                K_inv = self.K_inv_
                # Compute variance of predictive distribution
                y_var = self.kernel_.diag(X)
                y_var -= np.einsum("ki,kj,ij->k", K_trans, K_trans, K_inv)
                # Check if any of the variances is negative because of
                # numerical issues. If yes: set the variance to 0.
                y_var_negative = y_var < 0
                if np.any(y_var_negative):
                    warnings.warn(
                        "Predicted variances smaller than 0. "
                        "Setting those variances to 0."
                    )
                    y_var[y_var_negative] = 0.0
                # undo normalisation
                y_var = y_var * self.y_train_std_**2
                y_std = np.sqrt(y_var)
            if return_mean_grad:
                grad = self.kernel_.gradient_x(X[0], self.X_train_)
                grad_mean = np.dot(grad.T, self.alpha_)
                # undo normalisation
                grad_mean = grad_mean * self.y_train_std_
                if return_std_grad:
                    grad_std = np.zeros(X.shape[1])
                    # Guard: when y_std is (numerically) zero, the quotient
                    # below would divide by zero, so the gradient stays zero.
                    if not np.allclose(y_std, grad_std):
                        grad_std = -np.dot(K_trans, np.dot(K_inv, grad))[0] / y_std
                    # undo normalisation
                    grad_std = grad_std * self.y_train_std_**2
                    return y_mean, y_std, grad_mean, grad_std
                if return_std:
                    return y_mean, y_std, grad_mean
                else:
                    return y_mean, grad_mean
            else:
                if return_std:
                    return y_mean, y_std
                else:
                    return y_mean
| 15,391 | 38.568123 | 88 | py |
deephyper | deephyper-master/deephyper/skopt/learning/gaussian_process/__init__.py | from .gpr import GaussianProcessRegressor # noqa: F401
# BUGFIX: ``__all__`` must be a sequence of names. A bare string is iterated
# character-by-character by ``from ... import *``, which would raise
# ``ImportError: cannot import name 'G'``.
__all__ = ["GaussianProcessRegressor"]
| 94 | 22.75 | 55 | py |
deephyper | deephyper-master/deephyper/skopt/learning/gaussian_process/tests/test_gpr.py | import numpy as np
import pytest
from scipy import optimize
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from deephyper.skopt.learning import GaussianProcessRegressor
from deephyper.skopt.learning.gaussian_process.kernels import RBF
from deephyper.skopt.learning.gaussian_process.kernels import Matern
from deephyper.skopt.learning.gaussian_process.kernels import WhiteKernel
from deephyper.skopt.learning.gaussian_process.gpr import _param_for_white_kernel_in_Sum
# Shared fixtures: a fixed seed keeps the data deterministic across runs.
rng = np.random.RandomState(0)
X = rng.randn(5, 5)
y = rng.randn(5)
rbf = RBF()
wk = WhiteKernel()
mat = Matern()
# Kernel combinations exercised by the parametrized tests below.
# ``kernel5`` nests the WhiteKernel inside a Product, so it is *not*
# a direct summand and should not be found by the Sum search.
kernel1 = rbf
kernel2 = mat + rbf
kernel3 = mat * rbf
kernel4 = wk * rbf
kernel5 = mat + rbf * wk
def predict_wrapper(X, gpr):
    """Call ``gpr.predict`` on a single 1-D point.

    The point is promoted to a (1, n_features) array because the regressor
    only accepts 2-D input; the (mean, std) pair is returned unchanged.
    """
    batched = np.expand_dims(X, axis=0)
    return gpr.predict(batched, return_std=True)
@pytest.mark.hps
@pytest.mark.parametrize("kernel", [kernel1, kernel2, kernel3, kernel4])
def test_param_for_white_kernel_in_Sum(kernel):
    # Adding a WhiteKernel as a summand must make it discoverable, and the
    # returned key must address it so it can be zeroed out via set_params.
    noisy_kernel = kernel + wk
    wk_present, wk_param = _param_for_white_kernel_in_Sum(noisy_kernel)
    assert wk_present
    noisy_kernel.set_params(**{wk_param: WhiteKernel(noise_level=0.0)})
    assert_array_equal(noisy_kernel(X), kernel(X))
    # kernel5 hides the WhiteKernel inside a Product: it must not be found.
    assert not _param_for_white_kernel_in_Sum(kernel5)[0]
@pytest.mark.hps
def test_noise_equals_gaussian():
    # An explicit WhiteKernel summand and noise="gaussian" should learn the
    # same noise level, but only the latter zeroes the noise component at
    # prediction time (hence different predictive stds).
    gpr_explicit = GaussianProcessRegressor(rbf + wk).fit(X, y)
    gpr_shortcut = GaussianProcessRegressor(rbf, noise="gaussian").fit(X, y)
    assert not gpr_explicit.noise_
    assert gpr_shortcut.noise_
    assert_almost_equal(gpr_explicit.kernel_.k2.noise_level, gpr_shortcut.noise_, 4)
    mean_explicit, std_explicit = gpr_explicit.predict(X, return_std=True)
    mean_shortcut, std_shortcut = gpr_shortcut.predict(X, return_std=True)
    assert_array_almost_equal(mean_explicit, mean_shortcut, 4)
    assert not np.any(std_explicit == std_shortcut)
@pytest.mark.hps
def test_mean_gradient():
    # Analytic gradient of the predictive mean vs. finite differences.
    length_scale = np.arange(1, 6)
    X_train = rng.randn(10, 5)
    y_train = rng.randn(10)
    query = rng.randn(5)
    kernel = RBF(length_scale=length_scale, length_scale_bounds="fixed")
    gpr = GaussianProcessRegressor(kernel, random_state=0).fit(X_train, y_train)
    _, _, analytic_grad = gpr.predict(
        np.expand_dims(query, axis=0),
        return_std=True,
        return_cov=False,
        return_mean_grad=True,
    )
    numeric_grad = optimize.approx_fprime(
        query, lambda q: predict_wrapper(q, gpr)[0], 1e-4
    )
    assert_array_almost_equal(analytic_grad, numeric_grad, decimal=3)
@pytest.mark.hps
def test_std_gradient():
    # Analytic gradient of the predictive std vs. finite differences.
    length_scale = np.arange(1, 6)
    X_train = rng.randn(10, 5)
    y_train = rng.randn(10)
    query = rng.randn(5)
    kernel = RBF(length_scale=length_scale, length_scale_bounds="fixed")
    gpr = GaussianProcessRegressor(kernel, random_state=0).fit(X_train, y_train)
    _, _, _, analytic_grad = gpr.predict(
        np.expand_dims(query, axis=0),
        return_std=True,
        return_cov=False,
        return_mean_grad=True,
        return_std_grad=True,
    )
    numeric_grad = optimize.approx_fprime(
        query, lambda q: predict_wrapper(q, gpr)[1], 1e-4
    )
    assert_array_almost_equal(analytic_grad, numeric_grad, decimal=3)
def test_gpr_handles_similar_points():
    """Fitting must not crash when training points are duplicated.

    Duplicate (or nearly identical) points make the covariance matrix
    singular — a situation that often arises when the optimizer revisits
    the same region. The default non-zero ``alpha`` regularizes the matrix;
    this test fails if that jitter is ever removed.
    """
    features = np.random.rand(8, 3)
    targets = np.random.rand(8)
    # Force the first three rows to be exact duplicates.
    features[:3, :] = 0.0
    targets[:3] = 1.0
    regressor = GaussianProcessRegressor()
    # This raises if the singular covariance matrix is not handled.
    regressor.fit(features, targets)
| 3,818 | 29.552 | 88 | py |
deephyper | deephyper-master/deephyper/skopt/learning/gaussian_process/tests/test_kernels.py | import numpy as np
from scipy import optimize
from scipy.spatial.distance import pdist, squareform
# ``OrdinalEncoder`` only exists in newer scikit-learn releases; record its
# availability so code paths depending on it can be guarded (see usage of
# ``UseOrdinalEncoder`` below).
try:
    from sklearn.preprocessing import OrdinalEncoder
    UseOrdinalEncoder = True
except ImportError:
    UseOrdinalEncoder = False
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
import pytest
from deephyper.skopt.learning.gaussian_process import GaussianProcessRegressor
from deephyper.skopt.learning.gaussian_process.kernels import ConstantKernel
from deephyper.skopt.learning.gaussian_process.kernels import DotProduct
from deephyper.skopt.learning.gaussian_process.kernels import ExpSineSquared
from deephyper.skopt.learning.gaussian_process.kernels import HammingKernel
from deephyper.skopt.learning.gaussian_process.kernels import Matern
from deephyper.skopt.learning.gaussian_process.kernels import RationalQuadratic
from deephyper.skopt.learning.gaussian_process.kernels import RBF
from deephyper.skopt.learning.gaussian_process.kernels import WhiteKernel
# Grid of kernels exercised by the gradient tests: each combination is built
# twice, once with an integer anisotropic length scale and once with floats.
KERNELS = []
for length_scale in [np.arange(1, 6), [0.2, 0.3, 0.5, 0.6, 0.1]]:
    KERNELS.extend(
        [
            RBF(length_scale=length_scale),
            Matern(length_scale=length_scale, nu=0.5),
            Matern(length_scale=length_scale, nu=1.5),
            Matern(length_scale=length_scale, nu=2.5),
            RationalQuadratic(alpha=2.0, length_scale=2.0),
            ExpSineSquared(length_scale=2.0, periodicity=3.0),
            ConstantKernel(constant_value=1.0),
            WhiteKernel(noise_level=2.0),
            Matern(length_scale=length_scale, nu=2.5) ** 3.0,
            RBF(length_scale=length_scale) + Matern(length_scale=length_scale, nu=1.5),
            RBF(length_scale=length_scale) * Matern(length_scale=length_scale, nu=1.5),
            DotProduct(sigma_0=2.0),
        ]
    )
# Copied (shamelessly) from sklearn.gaussian_process.kernels
def _approx_fprime(xk, f, epsilon, args=()):
    """Forward-difference Jacobian of a matrix-valued function ``f``.

    Returns an array of shape ``(f0.shape[0], f0.shape[1], len(xk))`` where
    entry ``[:, :, k]`` approximates the partial derivative of ``f`` with
    respect to ``xk[k]``.
    """
    f0 = f(*((xk,) + args))
    n_params = len(xk)
    grad = np.zeros((f0.shape[0], f0.shape[1], n_params), float)
    for k in range(n_params):
        # Perturb a single coordinate by epsilon.
        step = np.zeros((n_params,), float)
        step[k] = epsilon
        grad[:, :, k] = (f(*((xk + step,) + args)) - f0) / step[k]
    return grad
def kernel_X_Y(x, y, kernel):
    """Evaluate ``kernel`` on the single pair of 1-D points ``(x, y)``.

    Both points are promoted to 2-D one-row matrices and the scalar
    ``k(x, y)`` is extracted from the resulting 1x1 kernel matrix.
    """
    x_row = np.expand_dims(x, axis=0)
    y_row = np.expand_dims(y, axis=0)
    return kernel(x_row, y_row)[0][0]
def numerical_gradient(X, Y, kernel, step_size=1e-4):
    """Finite-difference gradient of ``kernel(X, y)`` w.r.t. X, per row y of Y."""
    grads = [
        optimize.approx_fprime(X, kernel_X_Y, step_size, y, kernel) for y in Y
    ]
    return np.asarray(grads)
def check_gradient_correctness(kernel, X, Y, step_size=1e-4):
    """Assert the analytic ``gradient_x`` matches the finite-difference one."""
    analytic = kernel.gradient_x(X, Y)
    numeric = numerical_gradient(X, Y, kernel, step_size)
    assert_array_almost_equal(analytic, numeric, decimal=3)
@pytest.mark.hps
@pytest.mark.parametrize("kernel", KERNELS)
def test_gradient_correctness(kernel):
    # Each kernel's analytic gradient must agree with finite differences
    # at a random query point against random training data.
    local_rng = np.random.RandomState(0)
    query = local_rng.randn(5)
    train = local_rng.randn(10, 5)
    check_gradient_correctness(kernel, query, train)
@pytest.mark.hps
@pytest.mark.parametrize("random_state", [0, 1])
@pytest.mark.parametrize("kernel", KERNELS)
def test_gradient_finiteness(random_state, kernel):
    """
    Gradients might become undefined when x equals a training point because
    they are divided by d(x, X_train). Check that at exactly such a point
    the analytic gradients stay finite and match numerical gradients.
    """
    local_rng = np.random.RandomState(random_state)
    point = local_rng.randn(5).tolist()
    train = [point]
    check_gradient_correctness(kernel, point, train, 1e-6)
@pytest.mark.hps
def test_distance_string():
    # Inspired by test_hamming_string_array in scipy.tests.test_distance.
    # The two 20-element sequences differ in exactly 9 positions, so the
    # mean Hamming distance is 0.45.
    seq_a = (
        "eggs spam spam eggs spam spam spam spam spam spam "
        "spam eggs eggs spam eggs eggs eggs eggs eggs spam"
    ).split()
    seq_b = (
        "eggs spam spam eggs eggs spam spam spam spam eggs "
        "spam eggs spam eggs spam spam eggs spam spam eggs"
    ).split()
    a = np.array(seq_a, dtype="|S4")
    b = np.array(seq_b, dtype="|S4")
    true_values = np.array([[0, 0.45], [0.45, 0]])
    X = np.vstack((a, b))
    hm = HammingKernel()
    # K = exp(-n_dims * mean_hamming), so -log(K) / n_dims recovers it.
    assert_array_almost_equal(-np.log(hm(X)) / 20.0, true_values)
@pytest.mark.hps
def test_isotropic_kernel():
    # Scipy's pdist returns the *mean* Hamming distance per pair; the kernel
    # is exp(-sum) = exp(-mean * n_dims).
    local_rng = np.random.RandomState(0)
    data = local_rng.randint(0, 4, (5, 3))
    hm = HammingKernel()
    mean_hamming = squareform(pdist(data, metric="hamming"))
    expected = np.exp(-mean_hamming * data.shape[1])
    assert_array_almost_equal(expected, hm(data))
@pytest.mark.hps
def test_anisotropic_kernel():
    # An anisotropic length_scale whose entries are all equal must behave
    # exactly like the corresponding isotropic scalar.
    local_rng = np.random.RandomState(0)
    data = local_rng.randint(0, 4, (5, 3))
    iso_default = HammingKernel()(data)
    aniso_default = HammingKernel(length_scale=[1.0, 1.0, 1.0])(data)
    assert_array_almost_equal(iso_default, aniso_default)
    iso_scaled = HammingKernel(length_scale=2.0)(data)
    aniso_scaled = HammingKernel(length_scale=[2.0, 2.0, 2.0])(data)
    assert_array_almost_equal(iso_scaled, aniso_scaled)
@pytest.mark.hps
def test_kernel_gradient():
    # Checks the analytic gradient of the Hamming kernel w.r.t. its
    # hyperparameters (theta) against a finite-difference approximation,
    # for isotropic, uniform anisotropic and non-uniform length scales.
    rng = np.random.RandomState(0)
    hm = HammingKernel(length_scale=2.0)
    X = rng.randint(0, 4, (5, 3))
    K, K_gradient = hm(X, eval_gradient=True)
    # Isotropic kernel: a single length-scale hyperparameter.
    assert_array_equal(K_gradient.shape, (5, 5, 1))
    def eval_kernel_for_theta(theta, kernel):
        # NOTE: closes over ``X`` from the enclosing scope, so it picks up
        # the re-assignment of ``X`` further below — do not hoist or inline.
        kernel_clone = kernel.clone_with_theta(theta)
        K = kernel_clone(X, eval_gradient=False)
        return K
    K_gradient_approx = _approx_fprime(hm.theta, eval_kernel_for_theta, 1e-10, (hm,))
    assert_array_almost_equal(K_gradient_approx, K_gradient, 4)
    # Anisotropic kernel: one hyperparameter per input dimension.
    hm = HammingKernel(length_scale=[1.0, 1.0, 1.0])
    K_gradient_approx = _approx_fprime(hm.theta, eval_kernel_for_theta, 1e-10, (hm,))
    K, K_gradient = hm(X, eval_gradient=True)
    assert_array_equal(K_gradient.shape, (5, 5, 3))
    assert_array_almost_equal(K_gradient_approx, K_gradient, 4)
    # Non-uniform length scales on fresh, smaller data.
    X = rng.randint(0, 4, (3, 2))
    hm = HammingKernel(length_scale=[0.1, 2.0])
    K_gradient_approx = _approx_fprime(hm.theta, eval_kernel_for_theta, 1e-10, (hm,))
    K, K_gradient = hm(X, eval_gradient=True)
    assert_array_equal(K_gradient.shape, (3, 3, 2))
    assert_array_almost_equal(K_gradient_approx, K_gradient, 4)
@pytest.mark.hps
def test_Y_is_not_None():
    """``hm(X)`` and ``hm(X, X)`` must agree when Y is the same data.

    Fix: removed a dead assignment — the original created a default
    ``HammingKernel()`` that was immediately overwritten and never used.
    """
    rng = np.random.RandomState(0)
    X = rng.randint(0, 4, (5, 3))
    hm = HammingKernel(length_scale=[1.0, 1.0, 1.0])
    assert_array_equal(hm(X), hm(X, X))
@pytest.mark.hps
def test_gp_regressor():
    """A GP with a Hamming kernel interpolates string-valued training data."""
    rng = np.random.RandomState(0)
    X = np.asarray(
        [["ham", "spam", "ted"], ["ham", "ted", "ted"], ["ham", "spam", "spam"]]
    )
    y = rng.randn(3)
    gpr = GaussianProcessRegressor(HammingKernel(length_scale=[1.0, 1.0, 1.0]))
    if UseOrdinalEncoder:
        # Newer sklearn requires numeric inputs; encode the strings first.
        enc = OrdinalEncoder()
        enc.fit(X)
        X_fit = enc.transform(X)
    else:
        X_fit = X
    gpr.fit(X_fit, y)
    assert_array_almost_equal(gpr.predict(X_fit), y)
    assert_array_almost_equal(gpr.predict(X_fit[:2]), y[:2])
| 7,824 | 29.928854 | 87 | py |
deephyper | deephyper-master/deephyper/skopt/learning/gaussian_process/tests/__init__.py | 0 | 0 | 0 | py | |
deephyper | deephyper-master/deephyper/skopt/learning/tests/test_gbrt.py | import numpy as np
import pytest
from scipy import stats
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from numpy.testing import assert_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from deephyper.skopt.learning import GradientBoostingQuantileRegressor
def truth(X):
    """Noise-free target: ``0.5 * sin(1.75 * x0)`` of each row's first column."""
    first_col = np.asarray(X)[:, 0]
    return np.sin(1.75 * first_col) / 2.0
@pytest.mark.hps
def test_gbrt_gaussian():
    # Predicted quantiles of N(0, 1) samples must match the theoretical ones.
    rng = np.random.RandomState(1)
    n_samples = 10000
    X = np.ones((n_samples, 1))
    y = rng.normal(size=n_samples)
    model = GradientBoostingQuantileRegressor()
    model.fit(X, y)
    predicted = model.predict(X, return_quantiles=True)
    assert_almost_equal(
        stats.norm.ppf(model.quantiles), np.mean(predicted, axis=0), decimal=2
    )
@pytest.mark.hps
def test_gbrt_base_estimator():
    """Only GradientBoostingRegressor with quantile loss is accepted."""
    rng = np.random.RandomState(1)
    n_samples = 10000
    X = np.ones((n_samples, 1))
    y = rng.normal(size=n_samples)
    # Wrong estimator type must be rejected.
    rgr = GradientBoostingQuantileRegressor(base_estimator=RandomForestRegressor())
    with pytest.raises(ValueError):
        rgr.fit(X, y)
    # Right type but wrong loss must be rejected too.
    rgr = GradientBoostingQuantileRegressor(base_estimator=GradientBoostingRegressor())
    with pytest.raises(ValueError):
        rgr.fit(X, y)
    # A quantile-loss GBRT is accepted and recovers the normal quantiles.
    quantile_base = GradientBoostingRegressor(loss="quantile", n_estimators=20)
    rgr = GradientBoostingQuantileRegressor(base_estimator=quantile_base)
    rgr.fit(X, y)
    predicted = rgr.predict(X, return_quantiles=True)
    assert_almost_equal(
        stats.norm.ppf(rgr.quantiles), np.mean(predicted, axis=0), decimal=2
    )
@pytest.mark.hps
def test_gbrt_with_std():
    # simple test of the interface
    rng = np.random.RandomState(1)
    X = rng.uniform(0, 5, 500)[:, np.newaxis]
    noise_level = 0.5
    y = truth(X) + rng.normal(0, noise_level, len(X))
    X_grid = np.linspace(0, 5, 1000)[:, np.newaxis]
    model = GradientBoostingQuantileRegressor()
    model.fit(X, y)
    quantiles = model.predict(X_grid, return_quantiles=True)
    # three quantiles, so three numbers per sample
    assert_array_equal(quantiles.shape, (len(X_grid), 3))
    # the "traditional" API returns a single number (the median) per sample
    assert_array_equal(model.predict(X_grid).shape, (len(X_grid)))
    low, center, high = quantiles.T
    assert_equal(low.shape, center.shape)
    assert_equal(center.shape, high.shape)
    assert_equal(low.shape[0], X_grid.shape[0])
    # return_std reports the median and half the inter-quantile spread
    mean, std = model.predict(X_grid, return_std=True)
    assert_array_equal(mean, center)
    assert_array_equal(std, (high - low) / 2.0)
@pytest.mark.hps
def test_gbrt_in_parallel():
    """Parallel fitting must reproduce the serial predictions exactly."""
    rng = np.random.RandomState(1)
    n_samples = 10000
    X = np.ones((n_samples, 1))
    y = rng.normal(size=n_samples)
    model = GradientBoostingQuantileRegressor(
        n_jobs=1, random_state=np.random.RandomState(1)
    )
    model.fit(X, y)
    serial_estimates = model.predict(X)
    # Refit with two workers but the same seed.
    model.set_params(n_jobs=2, random_state=np.random.RandomState(1))
    model.fit(X, y)
    assert_array_equal(serial_estimates, model.predict(X))
| 3,191 | 27 | 84 | py |
deephyper | deephyper-master/deephyper/skopt/learning/tests/test_forest.py | import numpy as np
import pytest
from numpy.testing import assert_array_equal
from deephyper.skopt.learning import ExtraTreesRegressor, RandomForestRegressor
def truth(X):
    """Noise-free target: ``0.5 * sin(1.75 * x0)`` of each row's first column."""
    return np.sin(X[:, 0] * 1.75) * 0.5
@pytest.mark.hps
def test_random_forest():
    # toy sample: two well-separated clusters
    X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
    y = [-1, -1, -1, 1, 1, 1]
    T = [[-1, -1], [2, 2], [3, 2]]
    true_result = [-1, 1, 1]

    def fit_and_check(regressor):
        # Every configuration must classify the toy sample perfectly
        # and report 10 fitted trees.
        regressor.fit(X, y)
        assert_array_equal(regressor.predict(T), true_result)
        assert len(regressor) == 10

    fit_and_check(RandomForestRegressor(n_estimators=10, random_state=1))
    fit_and_check(
        RandomForestRegressor(
            n_estimators=10, min_impurity_decrease=0.1, random_state=1
        )
    )
    fit_and_check(
        RandomForestRegressor(
            n_estimators=10,
            criterion="mse",
            max_depth=None,
            min_samples_split=2,
            min_samples_leaf=1,
            min_weight_fraction_leaf=0.0,
            max_features="auto",
            max_leaf_nodes=None,
            min_impurity_decrease=0.0,
            bootstrap=True,
            oob_score=False,
            n_jobs=1,
            random_state=1,
            verbose=0,
            warm_start=False,
        )
    )
    clf = RandomForestRegressor(n_estimators=10, max_features=1, random_state=1)
    fit_and_check(clf)
    # also test apply
    leaf_indices = clf.apply(X)
    assert leaf_indices.shape == (len(X), clf.n_estimators)
@pytest.mark.hps
def test_extra_forest():
    # toy sample: two well-separated clusters
    X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
    y = [-1, -1, -1, 1, 1, 1]
    T = [[-1, -1], [2, 2], [3, 2]]
    true_result = [-1, 1, 1]

    def fit_and_check(regressor):
        # Every configuration must classify the toy sample perfectly
        # and report 10 fitted trees.
        regressor.fit(X, y)
        assert_array_equal(regressor.predict(T), true_result)
        assert len(regressor) == 10

    fit_and_check(ExtraTreesRegressor(n_estimators=10, random_state=1))
    fit_and_check(
        ExtraTreesRegressor(n_estimators=10, min_impurity_decrease=0.1, random_state=1)
    )
    fit_and_check(
        ExtraTreesRegressor(
            n_estimators=10,
            criterion="mse",
            max_depth=None,
            min_samples_split=2,
            min_samples_leaf=1,
            min_weight_fraction_leaf=0.0,
            max_features="auto",
            max_leaf_nodes=None,
            min_impurity_decrease=0.0,
            bootstrap=False,
            oob_score=False,
            n_jobs=1,
            random_state=1,
            verbose=0,
            warm_start=False,
        )
    )
    clf = ExtraTreesRegressor(n_estimators=10, max_features=1, random_state=1)
    fit_and_check(clf)
    # also test apply
    leaf_indices = clf.apply(X)
    assert leaf_indices.shape == (len(X), clf.n_estimators)
| 3,034 | 24.940171 | 80 | py |
deephyper | deephyper-master/deephyper/skopt/learning/tests/__init__.py | 0 | 0 | 0 | py | |
deephyper | deephyper-master/deephyper/skopt/moo/_pf.py | import numpy as np
def is_pareto_efficient(new_obj, objvals):
    """Check if the new objective vector is pareto efficient with respect to previously computed values.

    Args:
        new_obj (array or list): Array or list of size (n_objectives, )
        objvals (array or list): Array or list of size (n_points, n_objectives)

    Returns:
        bool: True if the vector is pareto efficient and false otherwise.
    """
    candidate = np.asarray(new_obj)
    # Efficient iff, for every existing point, the candidate is strictly
    # better in at least one objective.
    improves_somewhere = np.any(candidate < objvals, axis=1)
    return np.all(improves_somewhere)
def pareto_front(y):
    """Extract the pareto front (actual objective values of the non-dominated set).

    Args:
        y (array or list): Array or list of size (n_points, n_objectives)

    Returns:
        array: Subarray of y representing the pareto front.
    """
    # Index form (not a mask) preserves the internal selection order.
    return y[non_dominated_set(y, return_mask=False)]
def non_dominated_set_ranked(y, fraction, return_mask=True):
    """Find the set of top-``fraction x 100%`` of non-dominated points by
    peeling successive non-dominated fronts. The number of points returned is
    ``min(n_points, ceil(fraction * n_points))``. Assumes minimization.

    Args:
        y (array or list): Array or list of size (n_points, n_objectives)
        fraction (float or int): Fraction of points to return.
        return_mask (bool, optional): Whether to return a mask or the actual indices of the selected set. Defaults to True.

    Raises:
        ValueError: Raised if ``fraction`` is not a non-negative number.

    Returns:
        array: If return_mask is True, an (n_points, ) boolean array; else the indices of the selected points.
    """
    if not isinstance(fraction, (float, int)) or fraction < 0:
        raise ValueError("Expected 'fraction' to be a non-negative scalar")
    if np.ndim(y) == 0:
        # Scalar input: a single point, selected iff any fraction is requested.
        return np.asarray([fraction > 0.0])
    total = np.shape(y)[0]
    target = min(np.ceil(fraction * total).astype(int), total)
    # NOTE(review): these two shortcuts return boolean masks even when
    # return_mask=False — presumably callers only rely on truthiness; confirm.
    if target <= 0:
        return np.zeros(total, dtype=bool)
    if target >= total:
        return np.ones(total, dtype=bool)
    picked = []
    remaining_ids = np.arange(total)
    remaining_y = y
    while len(picked) < target:
        # Peel off the current non-dominated front and record its indices.
        front = non_dominated_set(remaining_y, return_mask=True)
        picked.extend(remaining_ids[front])
        if len(picked) > target:
            # The last front overshot the quota; truncate it.
            del picked[target:]
            break
        remaining_y = remaining_y[~front]
        remaining_ids = remaining_ids[~front]
    if not return_mask:
        return picked
    mask = np.zeros(total, dtype=bool)
    mask[picked] = True
    return mask
def non_dominated_set(y, return_mask=True):
    """Find the set of non-dominated points. If there are multiple duplicate
    non-dominated points, then only one will be included. Assumes minimization;
    adapted from https://stackoverflow.com/a/40239615 with a pre-sorting step
    for efficiency.

    Args:
        y (array or list): Array or list of size (n_points, n_objectives)
        return_mask (bool, optional): Whether to return a mask or the actual indices of the non-dominated set. Defaults to True.

    Returns:
        array: If return_mask is True, an (n_points, ) boolean array; else a 1-d integer array of non-dominated indices.
    """
    ndim = np.ndim(y)
    if ndim not in (1, 2):
        raise ValueError("Expected y to be an array of size (n_points, n_objectives)")
    y = np.asarray_chkfinite(y)
    if ndim == 1:
        y = y[:, np.newaxis]
    # Process points in increasing order of objective sum: early points are
    # more likely to dominate later ones, shrinking the working set fast.
    perm = np.argsort(y.sum(axis=1))
    costs = y[perm]
    n = y.shape[0]
    survivors = np.arange(n)
    i = 0
    while i < len(costs):
        # Keep every point that beats the pivot in at least one objective
        # (plus the pivot itself); the rest are dominated by the pivot.
        keep = np.any(costs < costs[i], axis=1)
        keep[i] = True
        survivors = survivors[keep]
        costs = costs[keep]
        # Advance past the pivot, accounting for removals before it.
        i = int(np.sum(keep[:i])) + 1
    if not return_mask:
        # Map sorted-space indices back to the caller's ordering.
        return perm[survivors]
    sorted_mask = np.zeros(n, dtype=bool)
    sorted_mask[survivors] = True
    mask = np.empty(n, dtype=bool)
    mask[perm] = sorted_mask
    return mask
def non_dominated_set_dumb(y):
    """For debugging purposes: O(n^2) brute-force non-dominated mask."""
    n = y.shape[0]
    flags = np.empty(n, dtype=bool)
    for i in range(n):
        # Point i is efficient iff every other point is strictly worse
        # in at least one objective.
        others = np.delete(y, i, axis=0)
        flags[i] = np.all(np.any(others > y[i], axis=1))
    return flags
if __name__ == "__main__":
    # Randomized self-test: cross-check the fast non-dominated-set
    # implementation against the brute-force reference on random data.
    rng = np.random.RandomState(42)
    npoints = 1000
    nobj = 2
    for it in range(100):
        y = rng.rand(npoints, nobj)
        pf = non_dominated_set(y, return_mask=True)
        # Fast and brute-force masks must agree exactly...
        assert np.array_equal(non_dominated_set_dumb(y), pf)
        # ...and the index form must match the mask's nonzero positions.
        assert np.array_equal(
            pf.nonzero()[0], np.sort(non_dominated_set(y, return_mask=False))
        )
| 5,137 | 37.924242 | 269 | py |
deephyper | deephyper-master/deephyper/skopt/moo/_multiobjective.py | import abc
import numpy as np
from deephyper.skopt.utils import is_listlike
class MoScalarFunction(abc.ABC):
    """Abstract class representing a scalarizing function.

    A scalarizing function maps a vector of ``n_objectives`` objective values
    to a single scalar so that multi-objective problems can be handled by
    single-objective optimizers. Subclasses implement :meth:`_scalarize`.

    Args:
        n_objectives (int, optional): Number of objective functions. Defaults to 1.
        weight (float or 1-D array, optional): Array of weights for each objective function. Defaults to None (random weights summing to 1).
        utopia_point (float or 1-D array, optional): Array of reference values for each objective function. Defaults to None.
        random_state (int, optional): Random seed. Defaults to None.
    """

    def __init__(
        self,
        n_objectives: int = 1,
        weight=None,
        utopia_point=None,
        random_state=None,
    ):
        self._seed = None
        if type(random_state) is int:
            self._seed = random_state
            self._rng = np.random.RandomState(random_state)
        elif isinstance(random_state, np.random.RandomState):
            self._rng = random_state
        else:
            self._rng = np.random.RandomState()
        # Only a plain ``int`` is accepted (``type is`` also rejects bools).
        if not (type(n_objectives) is int):
            # Fix: corrected typo in the error message ("shoud" -> "should").
            raise ValueError("Parameter 'n_objectives' should be an integer value!")
        self._n_objectives = n_objectives
        self._utopia_point = None
        if utopia_point is not None:
            self._check_shape(utopia_point)
            self._utopia_point = np.asarray(utopia_point)
        if weight is not None:
            self._check_shape(weight)
            self._weight = np.asarray(weight)
        else:
            # Draw random positive weights and normalize them to sum to 1.
            self._weight = self._rng.rand(self._n_objectives)
            self._weight /= np.sum(self._weight)
        # Per-objective scaling factors; updated by :meth:`normalize`.
        self._scaling = np.ones(self._n_objectives)

    def _check_shape(self, y):
        """Check if the shape of y is consistent with the object.

        Raises:
            ValueError: If ``y`` is neither a scalar (when ``n_objectives == 1``)
                nor a 1-D array of length ``n_objectives``.
        """
        if not (
            (np.ndim(y) == 0 and self._n_objectives == 1)
            or (np.ndim(y) == 1 and np.shape(y)[0] == self._n_objectives)
        ):
            raise ValueError(
                f"expected y to be a scalar or 1-D array of length {self._n_objectives}"
            )

    def scalarize(self, y):
        """Convert the input array (or scalar) into a scalar value.

        Args:
            y (scalar or 1-D array): The input array or scalar to be scalarized.

        Returns:
            float: The converted scalar value.
        """
        self._check_shape(y)
        # A scalar input is already scalar; return it unchanged.
        if np.ndim(y) == 0:
            return y
        return self._scalarize(y)

    def normalize(self, yi):
        """Compute normalization constants based on the history of evaluated objective values.

        Sets the utopia point to the per-objective minimum and the scaling to
        the inverse of the per-objective range (clipped away from zero).

        Args:
            yi (array): Array of evaluated objective values.

        Raises:
            ValueError: Raised if yi is not a list of scalars each of length _n_objectives.
        """
        if not is_listlike(yi):
            raise ValueError("expected yi to be a list")
        for y in yi:
            self._check_shape(y)
        y_max = np.max(yi, axis=0)
        y_min = np.min(yi, axis=0)
        self._utopia_point = y_min
        # Guard against zero range to avoid division blow-ups.
        self._scaling = 1.0 / np.maximum(y_max - y_min, 1e-6)

    @abc.abstractmethod
    def _scalarize(self, y):
        """Scalarization to be implemented.

        Args:
            y: Array of length _n_objectives.

        Returns:
            float: Converted scalar value.
        """
class MoLinearFunction(MoScalarFunction):
    """This scalarizing function linearly combines the individual objective values.

    .. note:: Unlike the other scalarizing functions in this module,
        ``_scalarize`` here does **not** apply the ``_scaling`` /
        ``_utopia_point`` normalization constants maintained by
        :meth:`MoScalarFunction.normalize`; it is a plain weighted sum of the
        raw objective values.

    Args:
        n_objectives (int, optional): Number of objective functions. Defaults to 1.
        weight (float or 1-D array, optional): Array of weights for each objective function. Defaults to None.
        utopia_point (float or 1-D array, optional): Array of reference values for each objective function. Defaults to None.
        random_state (int, optional): Random seed. Defaults to None.
    """

    def __init__(
        self,
        n_objectives: int = 1,
        weight=None,
        utopia_point=None,
        random_state=None,
    ):
        super().__init__(n_objectives, weight, utopia_point, random_state)

    def _scalarize(self, y):
        # Weighted sum of the raw (unscaled) objective values.
        return np.dot(self._weight, np.asarray(y))
class MoChebyshevFunction(MoScalarFunction):
    """Weighted Chebyshev (infinity-norm) scalarization.

    The objective vector is shifted by the utopia point and rescaled to
    [0, 1] (constants maintained by ``normalize``); the scalar value is the
    largest weighted absolute component.

    Args:
        n_objectives (int, optional): Number of objective functions. Defaults to 1.
        weight (float or 1-D array, optional): Array of weights for each objective function. Defaults to None.
        utopia_point (float or 1-D array, optional): Array of reference values for each objective function. Defaults to None.
        random_state (int, optional): Random seed. Defaults to None.
    """

    def __init__(
        self,
        n_objectives: int = 1,
        weight=None,
        utopia_point=None,
        random_state=None,
    ):
        super().__init__(n_objectives, weight, utopia_point, random_state)

    def _scalarize(self, y):
        # Shift by the utopia point and rescale each objective.
        deviation = (np.asarray(y) - self._utopia_point) * self._scaling
        # Weighted infinity-norm of the scaled deviations.
        return np.max(self._weight * np.abs(deviation))
class MoPBIFunction(MoScalarFunction):
    """Penalty-based boundary intersection (PBI) scalarization.

    Projects the (scaled) objective vector along the weight direction and
    adds a penalty proportional to the L1 deviation from that projection.
    See https://doi.org/10.1109/TEVC.2007.892759

    Args:
        n_objectives (int, optional): Number of objective functions. Defaults to 1.
        weight (float or 1-D array, optional): Array of weights for each objective function. Defaults to None.
        utopia_point (float or 1-D array, optional): Array of reference values for each objective function. Defaults to None.
        random_state (int, optional): Random seed. Defaults to None.
        penalty (float, optional): Value of penalty parameter. Defaults to 100.0.
    """

    def __init__(
        self,
        n_objectives: int = 1,
        weight=None,
        utopia_point=None,
        random_state=None,
        penalty: float = 100.0,
    ):
        super().__init__(n_objectives, weight, utopia_point, random_state)
        # Squared norm of the weight vector, reused on every _scalarize call.
        self._weightnorm = np.linalg.norm(self._weight) ** 2
        # Non-real penalties fall back to 100.0; negatives are made positive.
        self._penalty = np.abs(penalty) if np.isreal(penalty) else 100.0

    def _scalarize(self, y):
        scaled = (np.asarray(y) - self._utopia_point) * self._scaling
        # Distance along the weight direction (convergence term).
        d1 = np.dot(self._weight, scaled) / self._weightnorm
        # L1 distance from the projected point (diversity penalty term).
        d2 = np.linalg.norm(scaled - (d1 * self._weight), 1)
        return d1 + (self._penalty * d2)
class MoAugmentedChebyshevFunction(MoScalarFunction):
    """Augmented Chebyshev scalarization: a weighted infinity-norm of the
    scaled objective values plus ``alpha`` times their weighted 1-norm.

    Args:
        n_objectives (int, optional): Number of objective functions. Defaults to 1.
        weight (float or 1-D array, optional): Array of weights for each objective function. Defaults to None.
        utopia_point (float or 1-D array, optional): Array of reference values for each objective function. Defaults to None.
        random_state (int, optional): Random seed. Defaults to None.
        alpha (float, optional): Weight given to the 1-norm term. Defaults to 0.001.
    """

    def __init__(
        self,
        n_objectives: int = 1,
        weight=None,
        utopia_point=None,
        random_state=None,
        alpha: float = 0.001,
    ):
        super().__init__(n_objectives, weight, utopia_point, random_state)
        # Non-real alphas fall back to 0.001; negatives are made positive.
        self._alpha = np.abs(alpha) if np.isreal(alpha) else 0.001

    def _scalarize(self, y):
        scaled = (np.asarray(y) - self._utopia_point) * self._scaling
        weighted = self._weight * np.abs(scaled)
        # Infinity-norm plus a small 1-norm correction.
        return np.max(weighted) + (self._alpha * np.linalg.norm(weighted, 1))
class MoQuadraticFunction(MoScalarFunction):
    """Quadratic scalarization of the scaled objective values; a smoother
    version of :class:`MoChebyshevFunction`.

    Args:
        n_objectives (int, optional): Number of objective functions. Defaults to 1.
        weight (float or 1-D array, optional): Array of weights for each objective function. Defaults to None.
        utopia_point (float or 1-D array, optional): Array of reference values for each objective function. Defaults to None.
        random_state (int, optional): Random seed. Defaults to None.
        alpha (float, optional): Smoothness parameter; larger values make the function less smooth. Defaults to 10.0.
    """

    def __init__(
        self,
        n_objectives: int = 1,
        weight=None,
        utopia_point=None,
        random_state=None,
        alpha: float = 10.0,
    ):
        super().__init__(n_objectives, weight, utopia_point, random_state)
        # Orthonormal basis whose first column is aligned with the weights.
        U, _, _ = np.linalg.svd(self._weight.reshape(-1, 1), full_matrices=True)
        # Non-real alphas fall back to 10.0; negatives are made positive.
        self._alpha = np.abs(alpha) if np.isreal(alpha) else 10.0
        # Quadratic form with eigenvalue 1 along the weight direction and
        # alpha along every orthogonal direction.
        diag_entries = [
            self._alpha if j > 0 else 1.0 for j in range(self._n_objectives)
        ]
        self._Q = U @ np.diag(diag_entries) @ U.T

    def _scalarize(self, y):
        deviation = (np.asarray(y) - self._utopia_point) * self._scaling
        return deviation @ self._Q @ deviation
| 9,331 | 38.210084 | 255 | py |
deephyper | deephyper-master/deephyper/skopt/moo/_hv.py | # Copyright (C) 2010 Simon Wessing
# TU Dortmund University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__ = "Simon Wessing"
from ._pf import non_dominated_set
def hypervolume(pointset, ref):
    """Compute the absolute hypervolume of a *pointset* according to the
    reference point *ref*.
    """
    # Restrict the computation to the non-dominated subset of the points.
    front = pointset[non_dominated_set(pointset, return_mask=True)]
    return _HyperVolume(ref).compute(front)
class _HyperVolume:
    """
    Hypervolume computation based on variant 3 of the algorithm in the paper:
    C. M. Fonseca, L. Paquete, and M. Lopez-Ibanez. An improved dimension-sweep
    algorithm for the hypervolume indicator. In IEEE Congress on Evolutionary
    Computation, pages 1157-1163, Vancouver, Canada, July 2006.
    Minimization is implicitly assumed here!
    """

    def __init__(self, referencePoint):
        """Constructor."""
        self.referencePoint = referencePoint
        # Placeholder; replaced by the _MultiList built in preProcess().
        self.list = []

    def compute(self, front):
        """Returns the hypervolume that is dominated by a non-dominated front.
        Before the HV computation, front and reference point are translated, so
        that the reference point is [0, ..., 0].
        """

        def weaklyDominates(point, other):
            # True iff ``point`` is no worse than ``other`` in every dimension.
            # NOTE(review): retained for reference, but unused since the
            # "fmder" modification below assumes all points dominate the
            # reference point.
            for i in range(len(point)):
                if point[i] > other[i]:
                    return False
            return True

        relevantPoints = []
        referencePoint = self.referencePoint
        dimensions = len(referencePoint)
        #######
        # fmder: Here it is assumed that every point dominates the reference point
        # for point in front:
        #     # only consider points that dominate the reference point
        #     if weaklyDominates(point, referencePoint):
        #         relevantPoints.append(point)
        relevantPoints = front
        # fmder
        #######
        if any(referencePoint):
            # shift points so that referencePoint == [0, ..., 0]
            # this way the reference point doesn't have to be explicitly used
            # in the HV computation
            #######
            # fmder: Assume relevantPoints are numpy array
            # for j in range(len(relevantPoints)):
            #     relevantPoints[j] = [relevantPoints[j][i] - referencePoint[i] for i in range(dimensions)]
            # NOTE(review): in-place subtraction mutates the caller's array.
            relevantPoints -= referencePoint
            # fmder
            #######
        self.preProcess(relevantPoints)
        bounds = [-1.0e308] * dimensions
        # Sweep over the last dimension; recursion handles the others.
        hyperVolume = self.hvRecursive(dimensions - 1, len(relevantPoints), bounds)
        return hyperVolume

    def hvRecursive(self, dimIndex, length, bounds):
        """Recursive call to hypervolume calculation.
        In contrast to the paper, the code assumes that the reference point
        is [0, ..., 0]. This allows the avoidance of a few operations.
        """
        hvol = 0.0
        sentinel = self.list.sentinel
        if length == 0:
            return hvol
        elif dimIndex == 0:
            # special case: only one dimension
            # why using hypervolume at all?
            return -sentinel.next[0].cargo[0]
        elif dimIndex == 1:
            # special case: two dimensions, end recursion
            q = sentinel.next[1]
            h = q.cargo[0]
            p = q.next[1]
            while p is not sentinel:
                pCargo = p.cargo
                hvol += h * (q.cargo[1] - pCargo[1])
                if pCargo[0] < h:
                    h = pCargo[0]
                q = p
                p = q.next[1]
            hvol += h * q.cargo[1]
            return hvol
        else:
            # General case: sweep nodes in dimension ``dimIndex`` while
            # recursing into the lower-dimensional sub-problems.
            remove = self.list.remove
            reinsert = self.list.reinsert
            hvRecursive = self.hvRecursive
            p = sentinel
            q = p.prev[dimIndex]
            # Reset stale ignore flags from previous sweeps.
            while q.cargo is not None:
                if q.ignore < dimIndex:
                    q.ignore = 0
                q = q.prev[dimIndex]
            q = p.prev[dimIndex]
            # Temporarily remove points outside the current bounds.
            while length > 1 and (
                q.cargo[dimIndex] > bounds[dimIndex]
                or q.prev[dimIndex].cargo[dimIndex] >= bounds[dimIndex]
            ):
                p = q
                remove(p, dimIndex, bounds)
                q = p.prev[dimIndex]
                length -= 1
            qArea = q.area
            qCargo = q.cargo
            qPrevDimIndex = q.prev[dimIndex]
            if length > 1:
                # Start from the predecessor's cached partial volume.
                hvol = qPrevDimIndex.volume[dimIndex] + qPrevDimIndex.area[dimIndex] * (
                    qCargo[dimIndex] - qPrevDimIndex.cargo[dimIndex]
                )
            else:
                # Single point: its area is the product of its coordinates.
                qArea[0] = 1
                qArea[1 : dimIndex + 1] = [
                    qArea[i] * -qCargo[i] for i in range(dimIndex)
                ]
            q.volume[dimIndex] = hvol
            if q.ignore >= dimIndex:
                qArea[dimIndex] = qPrevDimIndex.area[dimIndex]
            else:
                qArea[dimIndex] = hvRecursive(dimIndex - 1, length, bounds)
                if qArea[dimIndex] <= qPrevDimIndex.area[dimIndex]:
                    q.ignore = dimIndex
            # Re-insert the removed points one by one, accumulating volume.
            while p is not sentinel:
                pCargoDimIndex = p.cargo[dimIndex]
                hvol += q.area[dimIndex] * (pCargoDimIndex - q.cargo[dimIndex])
                bounds[dimIndex] = pCargoDimIndex
                reinsert(p, dimIndex, bounds)
                length += 1
                q = p
                p = p.next[dimIndex]
            q.volume[dimIndex] = hvol
            if q.ignore >= dimIndex:
                q.area[dimIndex] = q.prev[dimIndex].area[dimIndex]
            else:
                q.area[dimIndex] = hvRecursive(dimIndex - 1, length, bounds)
                if q.area[dimIndex] <= q.prev[dimIndex].area[dimIndex]:
                    q.ignore = dimIndex
            hvol -= q.area[dimIndex] * q.cargo[dimIndex]
            return hvol

    def preProcess(self, front):
        """Sets up the list data structure needed for calculation."""
        dimensions = len(self.referencePoint)
        nodeList = _MultiList(dimensions)
        nodes = [_MultiList.Node(dimensions, point) for point in front]
        # Link each node into one doubly linked list per dimension, each
        # sorted by that dimension's coordinate.
        for i in range(dimensions):
            self.sortByDimension(nodes, i)
            nodeList.extend(nodes, i)
        self.list = nodeList

    def sortByDimension(self, nodes, i):
        """Sorts the list of nodes by the i-th value of the contained points."""
        # build a list of tuples of (point[i], node)
        decorated = [(node.cargo[i], node) for node in nodes]
        # sort by this value
        decorated.sort()
        # write back to original list
        nodes[:] = [node for (_, node) in decorated]
class _MultiList:
    """A special data structure needed by FonsecaHyperVolume.
    It consists of several doubly linked lists that share common nodes. So,
    every node has multiple predecessors and successors, one in every list.
    """

    class Node:
        def __init__(self, numberLists, cargo=None):
            # cargo holds the point's coordinates; None marks the sentinel.
            self.cargo = cargo
            # One next/prev pointer per dimension (i.e. per linked list).
            self.next = [None] * numberLists
            self.prev = [None] * numberLists
            # Highest dimension index for which this node may be skipped.
            self.ignore = 0
            # Per-dimension partial areas/volumes cached by hvRecursive.
            self.area = [0.0] * numberLists
            self.volume = [0.0] * numberLists

        def __str__(self):
            return str(self.cargo)

        def __lt__(self, other):
            return all(self.cargo < other.cargo)

    def __init__(self, numberLists):
        """Constructor.
        Builds 'numberLists' doubly linked lists.
        """
        self.numberLists = numberLists
        # A single sentinel node terminates every list; initially each list
        # is empty, so the sentinel points to itself in both directions.
        self.sentinel = _MultiList.Node(numberLists)
        self.sentinel.next = [self.sentinel] * numberLists
        self.sentinel.prev = [self.sentinel] * numberLists

    def __str__(self):
        strings = []
        for i in range(self.numberLists):
            currentList = []
            node = self.sentinel.next[i]
            while node != self.sentinel:
                currentList.append(str(node))
                node = node.next[i]
            strings.append(str(currentList))
        stringRepr = ""
        for string in strings:
            stringRepr += string + "\n"
        return stringRepr

    def __len__(self):
        """Returns the number of lists that are included in this _MultiList."""
        return self.numberLists

    def getLength(self, i):
        """Returns the length of the i-th list."""
        length = 0
        sentinel = self.sentinel
        node = sentinel.next[i]
        while node != sentinel:
            length += 1
            node = node.next[i]
        return length

    def append(self, node, index):
        """Appends a node to the end of the list at the given index."""
        lastButOne = self.sentinel.prev[index]
        node.next[index] = self.sentinel
        node.prev[index] = lastButOne
        # set the last element as the new one
        self.sentinel.prev[index] = node
        lastButOne.next[index] = node

    def extend(self, nodes, index):
        """Extends the list at the given index with the nodes."""
        sentinel = self.sentinel
        for node in nodes:
            lastButOne = sentinel.prev[index]
            node.next[index] = sentinel
            node.prev[index] = lastButOne
            # set the last element as the new one
            sentinel.prev[index] = node
            lastButOne.next[index] = node

    def remove(self, node, index, bounds):
        """Removes and returns 'node' from all lists in [0, 'index'[."""
        # The node keeps its own pointers so reinsert() can splice it back.
        for i in range(index):
            predecessor = node.prev[i]
            successor = node.next[i]
            predecessor.next[i] = successor
            successor.prev[i] = predecessor
            # Shrink the bounds to cover the removed point.
            if bounds[i] > node.cargo[i]:
                bounds[i] = node.cargo[i]
        return node

    def reinsert(self, node, index, bounds):
        """
        Inserts 'node' at the position it had in all lists in [0, 'index'[
        before it was removed. This method assumes that the next and previous
        nodes of the node that is reinserted are in the list.
        """
        for i in range(index):
            node.prev[i].next[i] = node
            node.next[i].prev[i] = node
            if bounds[i] > node.cargo[i]:
                bounds[i] = node.cargo[i]
| 10,912 | 35.376667 | 107 | py |
deephyper | deephyper-master/deephyper/skopt/moo/__init__.py | from ._hv import hypervolume
from ._multiobjective import (
MoAugmentedChebyshevFunction,
MoChebyshevFunction,
MoLinearFunction,
MoPBIFunction,
MoQuadraticFunction,
)
from ._pf import (
is_pareto_efficient,
non_dominated_set,
non_dominated_set_ranked,
pareto_front,
)
# Public names re-exported by the multi-objective optimization (moo) package.
__all__ = [
    "hypervolume",
    "MoLinearFunction",
    "MoAugmentedChebyshevFunction",
    "MoChebyshevFunction",
    "MoPBIFunction",
    "MoQuadraticFunction",
    "is_pareto_efficient",
    "non_dominated_set",
    "non_dominated_set_ranked",
    "pareto_front",
]
| 577 | 19.642857 | 35 | py |
deephyper | deephyper-master/deephyper/skopt/optimizer/base.py | """
Abstraction for optimizers.
It is sufficient that one re-implements the base estimator.
"""
import warnings
import numbers
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
from ..callbacks import check_callback
from ..callbacks import VerboseCallback
from .optimizer import Optimizer
from ..utils import eval_callbacks
def base_minimize(
    func,
    dimensions,
    base_estimator,
    n_calls=100,
    n_random_starts=None,
    n_initial_points=10,
    initial_point_generator="random",
    acq_func="EI",
    acq_optimizer="lbfgs",
    x0=None,
    y0=None,
    random_state=None,
    verbose=False,
    callback=None,
    n_points=10000,
    n_restarts_optimizer=5,
    xi=0.01,
    kappa=1.96,
    n_jobs=1,
    model_queue_size=None,
):
    """Base optimizer class
    Parameters
    ----------
    func : callable
        Function to minimize. Should take a single list of parameters
        and return the objective value.
        If you have a search-space where all dimensions have names,
        then you can use :func:`deephyper.skopt.utils.use_named_args` as a decorator
        on your objective function, in order to call it directly
        with the named arguments. See `use_named_args` for an example.
    dimensions : list, shape (n_dims,)
        List of search space dimensions.
        Each search dimension can be defined either as
        - a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
          dimensions),
        - a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
          dimensions),
        - as a list of categories (for `Categorical` dimensions), or
        - an instance of a `Dimension` object (`Real`, `Integer` or
          `Categorical`).
        .. note:: The upper and lower bounds are inclusive for `Integer`
           dimensions.
    base_estimator : sklearn regressor
        Should inherit from `sklearn.base.RegressorMixin`.
        In addition, should have an optional `return_std` argument,
        which returns `std(Y | x)` along with `E[Y | x]`.
    n_calls : int, default: 100
        Maximum number of calls to `func`. An objective function will
        always be evaluated this number of times; Various options to
        supply initialization points do not affect this value.
    n_random_starts : int, default: None
        Number of evaluations of `func` with random points before
        approximating it with `base_estimator`.
        .. deprecated:: 0.8
            use `n_initial_points` instead.
    n_initial_points : int, default: 10
        Number of evaluations of `func` with initialization points
        before approximating it with `base_estimator`. Initial point
        generator can be changed by setting `initial_point_generator`.
    initial_point_generator : str, InitialPointGenerator instance, \
            default: `"random"`
        Sets an initial point generator. Can be either
        - `"random"` for uniform random numbers,
        - `"sobol"` for a Sobol' sequence,
        - `"halton"` for a Halton sequence,
        - `"hammersly"` for a Hammersly sequence,
        - `"lhs"` for a latin hypercube sequence,
        - `"grid"` for a uniform grid sequence
    acq_func : string, default: `"EI"`
        Function to minimize over the posterior distribution. Can be either
        - `"LCB"` for lower confidence bound,
        - `"EI"` for negative expected improvement,
        - `"PI"` for negative probability of improvement.
        - `"EIps"` for negated expected improvement per second to take into
          account the function compute time. Then, the objective function is
          assumed to return two values, the first being the objective value and
          the second being the time taken in seconds.
        - `"PIps"` for negated probability of improvement per second. The
          return type of the objective function is assumed to be similar to
          that of `"EIps"`
    acq_optimizer : string, `"sampling"` or `"lbfgs"`, default: `"lbfgs"`
        Method to minimize the acquisition function. The fit model
        is updated with the optimal value obtained by optimizing `acq_func`
        with `acq_optimizer`.
        - If set to `"sampling"`, then `acq_func` is optimized by computing
          `acq_func` at `n_points` randomly sampled points and the smallest
          value found is used.
        - If set to `"lbfgs"`, then
          - The `n_restarts_optimizer` no. of points which the acquisition
            function is least are taken as start points.
          - `"lbfgs"` is run for 20 iterations with these points as initial
            points to find local minima.
          - The optimal of these local minima is used to update the prior.
    x0 : list, list of lists or `None`
        Initial input points.
        - If it is a list of lists, use it as a list of input points. If no
          corresponding outputs `y0` are supplied, then len(x0) of total
          calls to the objective function will be spent evaluating the points
          in `x0`. If the corresponding outputs are provided, then they will
          be used together with evaluated points during a run of the algorithm
          to construct a surrogate.
        - If it is a list, use it as a single initial input point. The
          algorithm will spend 1 call to evaluate the initial point, if the
          outputs are not provided.
        - If it is `None`, no initial input points are used.
    y0 : list, scalar or `None`
        Objective values at initial input points.
        - If it is a list, then it corresponds to evaluations of the function
          at each element of `x0` : the i-th element of `y0` corresponds
          to the function evaluated at the i-th element of `x0`.
        - If it is a scalar, then it corresponds to the evaluation of the
          function at `x0`.
        - If it is None and `x0` is provided, then the function is evaluated
          at each element of `x0`.
    random_state : int, RandomState instance, or None (default)
        Set random state to something other than None for reproducible
        results.
    verbose : boolean, default: False
        Control the verbosity. It is advised to set the verbosity to True
        for long optimization runs.
    callback : callable, list of callables, optional
        If callable then `callback(res)` is called after each call to `func`.
        If list of callables, then each callable in the list is called.
    n_points : int, default: 10000
        If `acq_optimizer` is set to `"sampling"`, then `acq_func` is
        optimized by computing `acq_func` at `n_points` randomly sampled
        points.
    n_restarts_optimizer : int, default: 5
        The number of restarts of the optimizer when `acq_optimizer`
        is `"lbfgs"`.
    xi : float, default: 0.01
        Controls how much improvement one wants over the previous best
        values. Used when the acquisition is either `"EI"` or `"PI"`.
    kappa : float, default: 1.96
        Controls how much of the variance in the predicted values should be
        taken into account. If set to be very high, then we are favouring
        exploration over exploitation and vice versa.
        Used when the acquisition is `"LCB"`.
    n_jobs : int, default: 1
        Number of cores to run in parallel while running the lbfgs
        optimizations over the acquisition function and given to
        the base_estimator. Valid only when
        `acq_optimizer` is set to "lbfgs". or when the base_estimator
        supports n_jobs as parameter and was given as string.
        Defaults to 1 core. If `n_jobs=-1`, then number of jobs is set
        to number of cores.
    model_queue_size : int or None, default: None
        Keeps list of models only as long as the argument given. In the
        case of None, the list has no capped length.
    Returns
    -------
    res : `OptimizeResult`, scipy object
        The optimization result returned as a OptimizeResult object.
        Important attributes are:
        - `x` [list]: location of the minimum.
        - `fun` [float]: function value at the minimum.
        - `models`: surrogate models used for each iteration.
        - `x_iters` [list of lists]: location of function evaluation for each
           iteration.
        - `func_vals` [array]: function value for each iteration.
        - `space` [Space]: the optimization space.
        - `specs` [dict]`: the call specifications.
        - `rng` [RandomState instance]: State of the random state
           at the end of minimization.
        For more details related to the OptimizeResult object, refer
        http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html
    """
    # NOTE: `locals()` is captured before any other local variable is bound,
    # so `specs` records exactly the arguments this function was called with.
    specs = {"args": locals(), "function": "base_minimize"}
    acq_optimizer_kwargs = {
        "n_points": n_points,
        "n_restarts_optimizer": n_restarts_optimizer,
        "n_jobs": n_jobs,
    }
    acq_func_kwargs = {"xi": xi, "kappa": kappa}
    # Initialize optimization
    # Suppose there are points provided (x0 and y0), record them
    # check x0: list-like, requirement of minimal points
    if x0 is None:
        x0 = []
    # Wrap a single point (e.g. `x0=[1.2, "a"]`) into a list of points.
    # The `x0 and` guard fixes an IndexError that the previous code raised
    # for an empty sequence: an empty list is now treated like `x0=None`,
    # and an empty non-list falls through to the clear ValueError below.
    elif x0 and not isinstance(x0[0], (list, tuple)):
        x0 = [x0]
    if not isinstance(x0, list):
        raise ValueError("`x0` should be a list, but got %s" % type(x0))
    # Check `n_random_starts` deprecation first
    if n_random_starts is not None:
        warnings.warn(
            (
                "n_random_starts will be removed in favour of "
                "n_initial_points. It overwrites n_initial_points."
            ),
            DeprecationWarning,
        )
        n_initial_points = n_random_starts
    if n_initial_points <= 0 and not x0:
        raise ValueError("Either set `n_initial_points` > 0," " or provide `x0`")
    # check y0: list-like, requirement of maximal calls
    if isinstance(y0, Iterable):
        y0 = list(y0)
    elif isinstance(y0, numbers.Number):
        y0 = [y0]
    # If y0 is missing, evaluating x0 consumes part of the n_calls budget.
    required_calls = n_initial_points + (len(x0) if not y0 else 0)
    if n_calls < required_calls:
        raise ValueError("Expected `n_calls` >= %d, got %d" % (required_calls, n_calls))
    # calculate the total number of initial points
    n_initial_points = n_initial_points + len(x0)
    # Build optimizer
    # create optimizer class
    optimizer = Optimizer(
        dimensions,
        base_estimator,
        n_initial_points=n_initial_points,
        initial_point_generator=initial_point_generator,
        n_jobs=n_jobs,
        acq_func=acq_func,
        acq_optimizer=acq_optimizer,
        random_state=random_state,
        model_queue_size=model_queue_size,
        acq_optimizer_kwargs=acq_optimizer_kwargs,
        acq_func_kwargs=acq_func_kwargs,
    )
    # check x0: element-wise data type, dimensionality
    assert all(isinstance(p, Iterable) for p in x0)
    if not all(len(p) == optimizer.space.n_dims for p in x0):
        raise RuntimeError(
            "Optimization space (%s) and initial points in x0 "
            "use inconsistent dimensions." % optimizer.space
        )
    # check callback
    callbacks = check_callback(callback)
    if verbose:
        callbacks.append(
            VerboseCallback(
                n_init=len(x0) if not y0 else 0,
                n_random=n_initial_points,
                n_total=n_calls,
            )
        )
    # Record provided points
    # create return object
    result = None
    # evaluate y0 if only x0 is provided
    if x0 and y0 is None:
        y0 = list(map(func, x0))
        # These evaluations count against the total budget.
        n_calls -= len(y0)
    # record through tell function
    if x0:
        if not (isinstance(y0, Iterable) or isinstance(y0, numbers.Number)):
            raise ValueError(
                "`y0` should be an iterable or a scalar, got %s" % type(y0)
            )
        if len(x0) != len(y0):
            raise ValueError("`x0` and `y0` should have the same length")
        result = optimizer.tell(x0, y0)
        result.specs = specs
        # A callback returning True requests early termination.
        if eval_callbacks(callbacks, result):
            return result
    # Optimize: ask/evaluate/tell loop guided by the surrogate model.
    for n in range(n_calls):
        next_x = optimizer.ask()
        next_y = func(next_x)
        result = optimizer.tell(next_x, next_y)
        result.specs = specs
        if eval_callbacks(callbacks, result):
            break
    return result
| 12,427 | 36.433735 | 94 | py |
deephyper | deephyper-master/deephyper/skopt/optimizer/gp.py | """Gaussian process-based minimization algorithms."""
import numpy as np
from sklearn.utils import check_random_state
from .base import base_minimize
from ..utils import cook_estimator
from ..utils import normalize_dimensions
def gp_minimize(
    func,
    dimensions,
    base_estimator=None,
    n_calls=100,
    n_random_starts=None,
    n_initial_points=10,
    initial_point_generator="random",
    acq_func="gp_hedge",
    acq_optimizer="auto",
    x0=None,
    y0=None,
    random_state=None,
    verbose=False,
    callback=None,
    n_points=10000,
    n_restarts_optimizer=5,
    xi=0.01,
    kappa=1.96,
    noise="gaussian",
    n_jobs=1,
    model_queue_size=None,
):
    """Bayesian optimization of an expensive function using Gaussian Processes.

    The objective `func` is approximated by a Gaussian process surrogate;
    at every iteration the next point to evaluate is chosen by minimizing
    an acquisition function over the surrogate, which is cheap to evaluate.
    Out of the `n_calls` total evaluations, the first points come from `x0`
    (if given without `y0`) plus `n_initial_points` draws from
    `initial_point_generator`; the remaining calls are guided by the model.

    Parameters
    ----------
    func : callable
        Function to minimize; takes a single list of parameter values and
        returns the objective value. With named dimensions,
        :func:`deephyper.skopt.utils.use_named_args` can adapt it.
    dimensions : list, shape (n_dims,)
        Search space: each entry is a `(low, high)` tuple, a
        `(low, high, "prior")` tuple, a list of categories, or a
        `Dimension` instance. Integer bounds are inclusive.
    base_estimator : a Gaussian process estimator, optional
        Surrogate model. When `None`, a default GP with a tuned Matern
        kernel (length scales, covariance amplitude, iid gaussian noise)
        is cooked from the normalized space.
    n_calls : int, default: 100
        Total number of calls to `func`.
    n_random_starts : int, default: None
        Deprecated since 0.8; use `n_initial_points` instead.
    n_initial_points : int, default: 10
        Number of initialization evaluations before the surrogate is used.
    initial_point_generator : str or InitialPointGenerator, default: "random"
        One of "random", "sobol", "halton", "hammersly", "lhs".
    acq_func : string, default: "gp_hedge"
        "LCB", "EI", "PI", "gp_hedge" (probabilistic mix of the former
        three with softmax-updated gains), "EIps" or "PIps" (per-second
        variants; `func` must then also return the evaluation time).
    acq_optimizer : string, default: "auto"
        "sampling", "lbfgs", or "auto" (chosen from the space; categorical
        spaces use "sampling").
    x0 : list, list of lists or None
        Initial input point(s), or None for no provided points.
    y0 : list, scalar or None
        Objective value(s) matching `x0`; when None, `func` is evaluated
        at each element of `x0`.
    random_state : int, RandomState instance, or None (default)
        Seed/state for reproducible results.
    verbose : boolean, default: False
        Print progress information during long runs.
    callback : callable or list of callables, optional
        Called with the intermediate result after each evaluation.
    n_points : int, default: 10000
        Number of sampled candidates (unused when `acq_optimizer="lbfgs"`).
    n_restarts_optimizer : int, default: 5
        Restarts for the "lbfgs" acquisition optimizer.
    kappa : float, default: 1.96
        Exploration/exploitation trade-off for "LCB".
    xi : float, default: 0.01
        Desired improvement over the best value for "EI"/"PI".
    noise : float or "gaussian", default: "gaussian"
        Observation noise: "gaussian" for unknown iid noise, a variance if
        known, or a value near zero (never exactly zero) for noise-free.
    n_jobs : int, default: 1
        Parallel cores for the "lbfgs" acquisition optimization; -1 uses
        all cores.
    model_queue_size : int or None, default: None
        Cap on the number of kept surrogate models; None keeps all.

    Returns
    -------
    res : `OptimizeResult`, scipy object
        With attributes `x`, `fun`, `models`, `x_iters`, `func_vals`,
        `space`, `specs` and `rng`; see
        http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html

    .. seealso:: functions :class:`deephyper.skopt.forest_minimize`,
        :class:`deephyper.skopt.dummy_minimize`, :class:`deephyper.skopt.gbrt_minimize`
    """
    # Validate the random state and bring the search space into the
    # normalized representation expected by the GP kernel.
    random_gen = check_random_state(random_state)
    normalized_space = normalize_dimensions(dimensions)
    if base_estimator is None:
        # Cook a default GP surrogate, seeded from the validated generator.
        seed = random_gen.randint(0, np.iinfo(np.int32).max)
        base_estimator = cook_estimator(
            "GP",
            space=normalized_space,
            random_state=seed,
            noise=noise,
        )
    # Delegate the actual optimization loop to the shared base routine.
    minimize_kwargs = dict(
        base_estimator=base_estimator,
        acq_func=acq_func,
        xi=xi,
        kappa=kappa,
        acq_optimizer=acq_optimizer,
        n_calls=n_calls,
        n_points=n_points,
        n_random_starts=n_random_starts,
        n_initial_points=n_initial_points,
        initial_point_generator=initial_point_generator,
        n_restarts_optimizer=n_restarts_optimizer,
        x0=x0,
        y0=y0,
        random_state=random_gen,
        verbose=verbose,
        callback=callback,
        n_jobs=n_jobs,
        model_queue_size=model_queue_size,
    )
    return base_minimize(func, normalized_space, **minimize_kwargs)
| 11,926 | 38.889632 | 94 | py |
deephyper | deephyper-master/deephyper/skopt/optimizer/gbrt.py | from sklearn.utils import check_random_state
from .base import base_minimize
from ..utils import cook_estimator
def gbrt_minimize(
    func,
    dimensions,
    base_estimator=None,
    n_calls=100,
    n_random_starts=None,
    n_initial_points=10,
    initial_point_generator="random",
    acq_func="EI",
    acq_optimizer="auto",
    x0=None,
    y0=None,
    random_state=None,
    verbose=False,
    callback=None,
    n_points=10000,
    xi=0.01,
    kappa=1.96,
    n_jobs=1,
    model_queue_size=None,
):
    """Sequential optimization using gradient boosted trees.

    A gradient boosted regression tree model is used as the surrogate for
    the expensive objective `func`; the surrogate is refined by evaluating
    `func` at the sequentially proposed best points. Out of the `n_calls`
    total evaluations, the first come from `x0` (if given without `y0`)
    plus `n_initial_points` draws from `initial_point_generator`; the
    remaining calls are guided by the surrogate model.

    Parameters
    ----------
    func : callable
        Function to minimize; takes a single list of parameter values and
        returns the objective value. With named dimensions,
        `deephyper.skopt.utils.use_named_args` can adapt it.
    dimensions : list, shape (n_dims,)
        Search space: each entry is a `(low, high)` tuple, a
        `(low, high, "prior")` tuple, a list of categories, or a
        `Dimension` instance.
    base_estimator : `GradientBoostingQuantileRegressor`, optional
        Surrogate model; when `None`, a default GBRT regressor is cooked.
    n_calls : int, default: 100
        Total number of calls to `func`.
    n_random_starts : int, default: None
        Deprecated since 0.8; use `n_initial_points` instead.
    n_initial_points : int, default: 10
        Number of initialization evaluations before the surrogate is used.
    initial_point_generator : str or InitialPointGenerator, default: "random"
        One of "random", "sobol", "halton", "hammersly", "lhs", "grid".
    acq_func : string, default: "EI"
        "LCB", "EI", "PI", "EIps" or "PIps" (per-second variants; `func`
        must then also return the evaluation time).
    x0 : list, list of lists or None
        Initial input point(s), or None for no provided points.
    y0 : list, scalar or None
        Objective value(s) matching `x0`; when None, `func` is evaluated
        at each element of `x0`.
    random_state : int, RandomState instance, or None (default)
        Seed/state for reproducible results.
    verbose : boolean, default: False
        Print progress information during long runs.
    callback : callable, optional
        Called with the intermediate result after each evaluation.
    n_points : int, default: 10000
        Number of sampled candidates for the acquisition minimization.
    xi : float, default: 0.01
        Desired improvement over the best value for "EI"/"PI".
    kappa : float, default: 1.96
        Exploration/exploitation trade-off for "LCB".
    n_jobs : int, default: 1
        Parallel jobs for `fit` and `predict`; -1 uses all cores.
    model_queue_size : int or None, default: None
        Cap on the number of kept surrogate models; None keeps all.

    Returns
    -------
    res : `OptimizeResult`, scipy object
        With attributes `x`, `fun`, `models`, `x_iters`, `func_vals`,
        `space`, `specs` and `rng`; see
        http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html

    .. seealso:: functions :class:`deephyper.skopt.gp_minimize`,
        :class:`deephyper.skopt.dummy_minimize`, :class:`deephyper.skopt.forest_minimize`
    """
    # Validate the random state; it seeds the default surrogate below.
    generator = check_random_state(random_state)
    estimator = base_estimator
    if estimator is None:
        estimator = cook_estimator("GBRT", random_state=generator, n_jobs=n_jobs)
    # Delegate to the shared base routine. Tree-based surrogates expose no
    # gradients, so the acquisition is always minimized by sampling.
    minimize_kwargs = dict(
        n_calls=n_calls,
        n_points=n_points,
        n_random_starts=n_random_starts,
        n_initial_points=n_initial_points,
        initial_point_generator=initial_point_generator,
        x0=x0,
        y0=y0,
        random_state=random_state,
        xi=xi,
        kappa=kappa,
        acq_func=acq_func,
        verbose=verbose,
        callback=callback,
        acq_optimizer="sampling",
        model_queue_size=model_queue_size,
    )
    return base_minimize(func, dimensions, estimator, **minimize_kwargs)
| 7,968 | 36.947619 | 94 | py |
deephyper | deephyper-master/deephyper/skopt/optimizer/__init__.py | from .base import base_minimize
from .dummy import dummy_minimize
from .forest import forest_minimize
from .gbrt import gbrt_minimize
from .gp import gp_minimize
from .optimizer import Optimizer, OBJECTIVE_VALUE_FAILURE
__all__ = [
"base_minimize",
"dummy_minimize",
"forest_minimize",
"gbrt_minimize",
"gp_minimize",
"Optimizer",
"OBJECTIVE_VALUE_FAILURE",
]
| 390 | 20.722222 | 57 | py |
deephyper | deephyper-master/deephyper/skopt/optimizer/forest.py | """Forest based minimization algorithms."""
from .base import base_minimize
def forest_minimize(
func,
dimensions,
base_estimator="ET",
n_calls=100,
n_random_starts=None,
n_initial_points=10,
acq_func="EI",
initial_point_generator="random",
x0=None,
y0=None,
random_state=None,
verbose=False,
callback=None,
n_points=10000,
xi=0.01,
kappa=1.96,
n_jobs=1,
model_queue_size=None,
):
"""Sequential optimisation using decision trees.
A tree based regression model is used to model the expensive to evaluate
function `func`. The model is improved by sequentially evaluating
the expensive function at the next best point. Thereby finding the
minimum of `func` with as few evaluations as possible.
The total number of evaluations, `n_calls`, are performed like the
following. If `x0` is provided but not `y0`, then the elements of `x0`
are first evaluated, followed by `n_initial_points` evaluations.
Finally, `n_calls - len(x0) - n_initial_points` evaluations are
made guided by the surrogate model. If `x0` and `y0` are both
provided then `n_initial_points` evaluations are first made then
`n_calls - n_initial_points` subsequent evaluations are made
guided by the surrogate model.
The first `n_initial_points` are generated by the
`initial_point_generator`.
Parameters
----------
func : callable
Function to minimize. Should take a single list of parameters
and return the objective value.
If you have a search-space where all dimensions have names,
then you can use :func:`deephyper.skopt.utils.use_named_args` as a decorator
on your objective function, in order to call it directly
with the named arguments. See :func:`deephyper.skopt.utils.use_named_args`
for an example.
dimensions : list, shape (n_dims,)
List of search space dimensions.
Each search dimension can be defined either as
- a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
dimensions),
- a `(lower_bound, upper_bound, prior)` tuple (for `Real`
dimensions),
- as a list of categories (for `Categorical` dimensions), or
- an instance of a `Dimension` object (`Real`, `Integer` or
`Categorical`).
.. note:: The upper and lower bounds are inclusive for `Integer`
dimensions.
base_estimator : string or `Regressor`, default: `"ET"`
The regressor to use as surrogate model. Can be either
- `"RF"` for random forest regressor
- `"ET"` for extra trees regressor
- instance of regressor with support for `return_std` in its predict
method
The predefined models are initialized with good defaults. If you
want to adjust the model parameters pass your own instance of
a regressor which returns the mean and standard deviation when
making predictions.
n_calls : int, default: 100
Number of calls to `func`.
n_random_starts : int, default: None
Number of evaluations of `func` with random points before
approximating it with `base_estimator`.
.. deprecated:: 0.8
use `n_initial_points` instead.
n_initial_points : int, default: 10
Number of evaluations of `func` with initialization points
before approximating it with `base_estimator`. Initial point
generator can be changed by setting `initial_point_generator`.
initial_point_generator : str, InitialPointGenerator instance, \
default: `"random"`
Sets a initial points generator. Can be either
- `"random"` for uniform random numbers,
- `"sobol"` for a Sobol' sequence,
- `"halton"` for a Halton sequence,
- `"hammersly"` for a Hammersly sequence,
- `"lhs"` for a latin hypercube sequence,
- `"grid"` for a uniform grid sequence
acq_func : string, default: `"LCB"`
Function to minimize over the forest posterior. Can be either
- `"LCB"` for lower confidence bound.
- `"EI"` for negative expected improvement.
- `"PI"` for negative probability of improvement.
- `"EIps"` for negated expected improvement per second to take into
account the function compute time. Then, the objective function is
assumed to return two values, the first being the objective value and
the second being the time taken in seconds.
- `"PIps"` for negated probability of improvement per second. The
return type of the objective function is assumed to be similar to
that of `"EIps"`
x0 : list, list of lists or `None`
Initial input points.
- If it is a list of lists, use it as a list of input points.
- If it is a list, use it as a single initial input point.
- If it is `None`, no initial input points are used.
y0 : list, scalar or `None`
Evaluation of initial input points.
- If it is a list, then it corresponds to evaluations of the function
at each element of `x0` : the i-th element of `y0` corresponds
to the function evaluated at the i-th element of `x0`.
- If it is a scalar, then it corresponds to the evaluation of the
function at `x0`.
- If it is None and `x0` is provided, then the function is evaluated
at each element of `x0`.
random_state : int, RandomState instance, or None (default)
Set random state to something other than None for reproducible
results.
verbose : boolean, default: False
Control the verbosity. It is advised to set the verbosity to True
for long optimization runs.
callback : callable, optional
If provided, then `callback(res)` is called after call to func.
n_points : int, default: 10000
Number of points to sample when minimizing the acquisition function.
xi : float, default: 0.01
Controls how much improvement one wants over the previous best
values. Used when the acquisition is either `"EI"` or `"PI"`.
kappa : float, default: 1.96
Controls how much of the variance in the predicted values should be
taken into account. If set to be very high, then we are favouring
exploration over exploitation and vice versa.
Used when the acquisition is `"LCB"`.
n_jobs : int, default: 1
The number of jobs to run in parallel for `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
model_queue_size : int or None, default: None
Keeps list of models only as long as the argument given. In the
case of None, the list has no capped length.
Returns
-------
res : `OptimizeResult`, scipy object
The optimization result returned as a OptimizeResult object.
Important attributes are:
- `x` [list]: location of the minimum.
- `fun` [float]: function value at the minimum.
- `models`: surrogate models used for each iteration.
- `x_iters` [list of lists]: location of function evaluation for each
iteration.
- `func_vals` [array]: function value for each iteration.
- `space` [Space]: the optimization space.
- `specs` [dict]`: the call specifications.
For more details related to the OptimizeResult object, refer
http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html
.. seealso:: functions :class:`deephyper.skopt.gp_minimize`,
:class:`deephyper.skopt.dummy_minimize`, :class:`deephyper.skopt.gbrt_minimize`
"""
return base_minimize(
func,
dimensions,
base_estimator,
n_calls=n_calls,
n_points=n_points,
n_random_starts=n_random_starts,
n_initial_points=n_initial_points,
initial_point_generator=initial_point_generator,
x0=x0,
y0=y0,
random_state=random_state,
n_jobs=n_jobs,
acq_func=acq_func,
xi=xi,
kappa=kappa,
verbose=verbose,
callback=callback,
acq_optimizer="sampling",
model_queue_size=model_queue_size,
)
| 8,325 | 37.368664 | 94 | py |
deephyper | deephyper-master/deephyper/skopt/optimizer/optimizer.py | import sys
import warnings
from math import log
from numbers import Number
import ConfigSpace as CS
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from scipy.optimize import fmin_l_bfgs_b
from sklearn.base import clone, is_regressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.utils import check_random_state
from ..acquisition import _gaussian_acquisition, gaussian_acquisition_1D
from ..learning import GaussianProcessRegressor
from ..moo import (
MoAugmentedChebyshevFunction,
MoChebyshevFunction,
MoLinearFunction,
MoPBIFunction,
MoQuadraticFunction,
)
from ..space import Categorical, Space
from ..utils import (
check_x_in_space,
cook_estimator,
cook_initial_point_generator,
create_result,
has_gradients,
is_2Dlistlike,
is_listlike,
normalize_dimensions,
cook_objective_scaler,
)
class ExhaustedSearchSpace(RuntimeError):
    """Raised when the search cannot sample new points from the ConfigSpace."""

    def __str__(self):
        return "The search space is exhausted and cannot sample new unique points!"
class ExhaustedFailures(RuntimeError):
    """Raised once ``max_failures`` evaluations failed without any valid objective value."""

    def __str__(self):
        return "The search has reached its quota of failures! Check if the type of failure is expected or the value of ``max_failures`` in the search algorithm."
def boltzman_distribution(x, beta=1):
    """Compute a Boltzmann (softmax) distribution over the values in ``x``.

    Parameters
    ----------
    x : array-like
        Scores to convert into probabilities.
    beta : float, default: 1
        Inverse-temperature; larger values concentrate the mass on the
        maximum of ``x``, ``beta=0`` yields the uniform distribution.

    Returns
    -------
    ndarray
        Probabilities of the same shape as ``x`` summing to 1.
    """
    # Coerce to an array first: ``beta * x`` on a plain Python list would
    # perform list repetition instead of element-wise scaling.
    x = np.asarray(x, dtype=float)
    # Subtract the max before exponentiating for numerical stability
    # (avoids overflow for large ``beta * x``; result is unchanged).
    z = np.exp(beta * (x - np.max(x)))
    return z / np.sum(z)
# Sentinel value recorded in ``yi`` when an objective evaluation failed.
OBJECTIVE_VALUE_FAILURE = "F"
class Optimizer(object):
    """Run bayesian optimisation loop.

    An `Optimizer` represents the steps of a bayesian optimisation loop. To
    use it you need to provide your own loop mechanism. The various
    optimisers provided by `skopt` use this class under the hood.

    Use this class directly if you want to control the iterations of your
    bayesian optimisation loop.

    Parameters
    ----------
    dimensions : list, shape (n_dims,)
        List of search space dimensions.
        Each search dimension can be defined either as

        - a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
          dimensions),
        - a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
          dimensions),
        - as a list of categories (for `Categorical` dimensions), or
        - an instance of a `Dimension` object (`Real`, `Integer` or
          `Categorical`).

    base_estimator : `"GP"`, `"RF"`, `"ET"`, `"GBRT"` or sklearn regressor, \
            default: `"GP"`
        Should inherit from :obj:`sklearn.base.RegressorMixin`.
        In addition the `predict` method, should have an optional `return_std`
        argument, which returns `std(Y | x)` along with `E[Y | x]`.
        If base_estimator is one of ["GP", "RF", "ET", "GBRT"], a default
        surrogate model of the corresponding type is used corresponding to what
        is used in the minimize functions.

    n_random_starts : int, default: 10
        .. deprecated:: 0.6
            use `n_initial_points` instead.

    n_initial_points : int, default: 10
        Number of evaluations of `func` with initialization points
        before approximating it with `base_estimator`. Initial point
        generator can be changed by setting `initial_point_generator`.

    initial_points : list, default: None
        Initial input points evaluated first: they are consumed before any
        point produced by `initial_point_generator` and count toward
        `n_initial_points`.

    initial_point_generator : str, InitialPointGenerator instance, \
            default: `"random"`
        Sets an initial points generator. Can be either

        - `"random"` for uniform random numbers,
        - `"sobol"` for a Sobol' sequence,
        - `"halton"` for a Halton sequence,
        - `"hammersly"` for a Hammersly sequence,
        - `"lhs"` for a latin hypercube sequence,
        - `"grid"` for a uniform grid sequence

    acq_func : string, default: `"gp_hedge"`
        Function to minimize over the posterior distribution. Can be either

        - `"LCB"` for lower confidence bound.
        - `"EI"` for negative expected improvement.
        - `"PI"` for negative probability of improvement.
        - `"gp_hedge"` Probabilistically choose one of the above three
          acquisition functions at every iteration.

          - The gains `g_i` are initialized to zero.
          - At every iteration,

            - Each acquisition function is optimised independently to
              propose a candidate point `X_i`.
            - Out of all these candidate points, the next point `X_best` is
              chosen by :math:`softmax(\\eta g_i)`
            - After fitting the surrogate model with `(X_best, y_best)`,
              the gains are updated such that :math:`g_i -= \\mu(X_i)`

        - `"EIps"` for negated expected improvement per second to take into
          account the function compute time. Then, the objective function is
          assumed to return two values, the first being the objective value and
          the second being the time taken in seconds.
        - `"PIps"` for negated probability of improvement per second. The
          return type of the objective function is assumed to be similar to
          that of `"EIps"`

    acq_optimizer : string, `"sampling"` or `"lbfgs"`, default: `"auto"`
        Method to minimize the acquisition function. The fit model
        is updated with the optimal value obtained by optimizing `acq_func`
        with `acq_optimizer`.

        - If set to `"auto"`, then `acq_optimizer` is configured on the
          basis of the base_estimator and the space searched over.
          If the space is Categorical or if the estimator provided based on
          tree-models then this is set to be `"sampling"`.
        - If set to `"sampling"`, then `acq_func` is optimized by computing
          `acq_func` at `n_points` randomly sampled points.
        - If set to `"lbfgs"`, then `acq_func` is optimized by

          - Sampling `n_restarts_optimizer` points randomly.
          - `"lbfgs"` is run for 20 iterations with these points as initial
            points to find local minima.
          - The optimal of these local minima is used to update the prior.

    random_state : int, RandomState instance, or None (default)
        Set random state to something other than None for reproducible
        results.

    n_jobs : int, default: 1
        The number of jobs to run in parallel in the base_estimator,
        if the base_estimator supports n_jobs as parameter and
        base_estimator was given as string.
        If -1, then the number of jobs is set to the number of cores.

    acq_func_kwargs : dict
        Additional arguments to be passed to the acquisition function.

    acq_optimizer_kwargs : dict
        Additional arguments to be passed to the acquisition optimizer.

    model_queue_size : int or None, default: None
        Keeps list of models only as long as the argument given. In the
        case of None, the list has no capped length.

    model_sdv : Model or None, default None
        A Model from Synthetic-Data-Vault.

    moo_scalarization_strategy : string, default: `"Chebyshev"`
        Function to convert multiple objectives into a single scalar value. Can be either

        - `"Linear"` for linear/convex combination.
        - `"Chebyshev"` for Chebyshev or weighted infinity norm.
        - `"AugChebyshev"` for Chebyshev norm augmented with a weighted 1-norm.
        - `"PBI"` for penalized boundary intersection.
        - `"Quadratic"` for quadratic combination (2-norm).
        - `"rLinear"`, `"rChebyshev"`, `"rAugChebyshev"`, `"rPBI"`, `"rQuadratic"` where the corresponding weights are randomly perturbed in every iteration.

    moo_scalarization_weight: array, default: `None`
        Scalarization weights to be used in multiobjective optimization with length equal to the number of objective functions.

    Attributes
    ----------
    Xi : list
        Points at which objective has been evaluated.
    yi : list
        Values of objective at corresponding points in `Xi`.
    models : list
        Regression models used to fit observations and compute acquisition
        function.
    space : Space
        An instance of :class:`deephyper.skopt.space.Space`. Stores parameter search
        space used to sample points, bounds, and type of parameters.
    """
def __init__(
self,
dimensions,
base_estimator="gp",
n_random_starts=None,
n_initial_points=10,
initial_points=None,
initial_point_generator="random",
n_jobs=1,
acq_func="gp_hedge",
acq_optimizer="auto",
random_state=None,
model_queue_size=None,
acq_func_kwargs=None,
acq_optimizer_kwargs=None,
model_sdv=None,
sample_max_size=-1,
sample_strategy="quantile",
moo_scalarization_strategy="Chebyshev",
moo_scalarization_weight=None,
objective_scaler="auto",
):
args = locals().copy()
del args["self"]
self.specs = {"args": args, "function": "Optimizer"}
self.rng = check_random_state(random_state)
# Configure acquisition function
# Store and creat acquisition function set
self.acq_func = acq_func
self.acq_func_kwargs = acq_func_kwargs
allowed_acq_funcs = ["gp_hedge", "EI", "LCB", "qLCB", "PI", "EIps", "PIps"]
if self.acq_func not in allowed_acq_funcs:
raise ValueError(
"expected acq_func to be in %s, got %s"
% (",".join(allowed_acq_funcs), self.acq_func)
)
# treat hedging method separately
if self.acq_func == "gp_hedge":
self.cand_acq_funcs_ = ["EI", "LCB", "PI"]
self.gains_ = np.zeros(3)
else:
self.cand_acq_funcs_ = [self.acq_func]
if acq_func_kwargs is None:
acq_func_kwargs = dict()
self.eta = acq_func_kwargs.get("eta", 1.0)
# Configure counters of points
# Check `n_random_starts` deprecation first
if n_random_starts is not None:
warnings.warn(
("n_random_starts will be removed in favour of " "n_initial_points."),
DeprecationWarning,
)
n_initial_points = n_random_starts
if n_initial_points < 0:
raise ValueError(
"Expected `n_initial_points` >= 0, got %d" % n_initial_points
)
self._n_initial_points = n_initial_points
self.n_initial_points_ = n_initial_points
# Configure estimator
# build base_estimator if doesn't exist
if isinstance(base_estimator, str):
base_estimator = cook_estimator(
base_estimator,
space=dimensions,
random_state=self.rng.randint(0, np.iinfo(np.int32).max),
n_jobs=n_jobs,
)
# check if regressor
if not is_regressor(base_estimator) and base_estimator is not None:
raise ValueError("%s has to be a regressor." % base_estimator)
# treat per second acqusition function specially
is_multi_regressor = isinstance(base_estimator, MultiOutputRegressor)
if "ps" in self.acq_func and not is_multi_regressor:
self.base_estimator_ = MultiOutputRegressor(base_estimator)
else:
self.base_estimator_ = base_estimator
# preprocessing of target variable
self.objective_scaler_ = objective_scaler
self.objective_scaler = cook_objective_scaler(
objective_scaler, self.base_estimator_
)
# Configure optimizer
# decide optimizer based on gradient information
if acq_optimizer == "auto":
if has_gradients(self.base_estimator_):
acq_optimizer = "lbfgs"
else:
acq_optimizer = "sampling"
if acq_optimizer not in ["lbfgs", "sampling", "boltzmann_sampling"]:
raise ValueError(
"Expected acq_optimizer to be 'lbfgs' or "
"'sampling' or 'softmax_sampling', got {0}".format(acq_optimizer)
)
if not has_gradients(self.base_estimator_) and not (
"sampling" in acq_optimizer
):
raise ValueError(
"The regressor {0} should run with a 'sampling' "
"acq_optimizer such as "
"'sampling' or 'softmax_sampling'.".format(type(base_estimator))
)
self.acq_optimizer = acq_optimizer
# record other arguments
if acq_optimizer_kwargs is None:
acq_optimizer_kwargs = dict()
self.n_points = acq_optimizer_kwargs.get("n_points", 10000)
self.n_restarts_optimizer = acq_optimizer_kwargs.get("n_restarts_optimizer", 5)
self.n_jobs = acq_optimizer_kwargs.get("n_jobs", 1)
self.filter_duplicated = acq_optimizer_kwargs.get("filter_duplicated", True)
self.boltzmann_gamma = acq_optimizer_kwargs.get("boltzmann_gamma", 1)
self.boltzmann_psucc = acq_optimizer_kwargs.get("boltzmann_psucc", 0)
self.filter_failures = acq_optimizer_kwargs.get("filter_failures", "mean")
self.max_failures = acq_optimizer_kwargs.get("max_failures", 100)
self.acq_optimizer_kwargs = acq_optimizer_kwargs
# Configure search space
if type(dimensions) is CS.ConfigurationSpace:
# Save the config space to do a real copy of the Optimizer
self.config_space = dimensions
self.config_space.seed(self.rng.get_state()[1][0])
if isinstance(self.base_estimator_, GaussianProcessRegressor):
raise RuntimeError("GP estimator is not available with ConfigSpace!")
else:
# normalize space if GP regressor
if isinstance(self.base_estimator_, GaussianProcessRegressor):
dimensions = normalize_dimensions(dimensions)
# keep track of the generative model from sdv
self.model_sdv = model_sdv
self.space = Space(dimensions, model_sdv=self.model_sdv)
self._initial_samples = [] if initial_points is None else initial_points[:]
self._initial_point_generator = cook_initial_point_generator(
initial_point_generator
)
if self._initial_point_generator is not None:
transformer = self.space.get_transformer()
self._initial_samples = (
self._initial_samples
+ self._initial_point_generator.generate(
self.space.dimensions,
n_initial_points - len(self._initial_samples),
random_state=self.rng.randint(0, np.iinfo(np.int32).max),
)
)
self.space.set_transformer(transformer)
# record categorical and non-categorical indices
self._cat_inds = []
self._non_cat_inds = []
for ind, dim in enumerate(self.space.dimensions):
if isinstance(dim, Categorical):
self._cat_inds.append(ind)
else:
self._non_cat_inds.append(ind)
# Initialize storage for optimization
if not isinstance(model_queue_size, (int, type(None))):
raise TypeError(
"model_queue_size should be an int or None, "
"got {}".format(type(model_queue_size))
)
# For multiobjective optimization
moo_scalarization_strategy_allowed = [
"Linear",
"Chebyshev",
"AugChebyshev",
"PBI",
"Quadratic",
]
moo_scalarization_strategy_allowed = moo_scalarization_strategy_allowed + [
f"r{s}" for s in moo_scalarization_strategy_allowed
]
if not (moo_scalarization_strategy in moo_scalarization_strategy_allowed):
raise ValueError(
f"Parameter 'moo_scalarization_strategy={acq_func}' should have a value in {moo_scalarization_strategy_allowed}!"
)
self._moo_scalarization_strategy = moo_scalarization_strategy
self._moo_scalarization_weight = moo_scalarization_weight
self._moo_scalar_function = None
self.max_model_queue_size = model_queue_size
self.models = []
self.Xi = []
self.yi = []
# Initialize cache for `ask` method responses
# This ensures that multiple calls to `ask` with n_points set
# return same sets of points. Reset to {} at every call to `tell`.
self.cache_ = {}
# to avoid duplicated samples
self.sampled = []
# for botlzmann strategy
self._min_value = 0
self._max_value = 0
# parameters to stabilize the size of the dataset used to fit the surrogate model
self._sample_max_size = sample_max_size
self._sample_strategy = sample_strategy
# parameters for multifidelity
self._use_multifidelity = False
self.bi = []
self.bi_count = {}
    def copy(self, random_state=None):
        """Create a shallow copy of an instance of the optimizer.

        Parameters
        ----------
        random_state : int, RandomState instance, or None (default)
            Set the random state of the copy.

        Returns
        -------
        optimizer : Optimizer
            A new optimizer with the same configuration, told the same
            observations (``Xi``, ``yi`` and budgets) as this one.
        """
        # Strip the leading "r" of randomly-perturbed scalarization strategies
        # so the copy is built with the base strategy name.
        # NOTE(review): `moo_scalarization_weight` is not forwarded here; the
        # scalar function carrying the weights is copied below — confirm the
        # omission is intentional.
        idx = 1 if self._moo_scalarization_strategy.startswith("r") else 0
        optimizer = Optimizer(
            dimensions=self.config_space
            if hasattr(self, "config_space")
            else self.space.dimensions,
            base_estimator=self.base_estimator_,
            n_initial_points=self.n_initial_points_,
            initial_point_generator=self._initial_point_generator,
            acq_func=self.acq_func,
            acq_optimizer=self.acq_optimizer,
            acq_func_kwargs=self.acq_func_kwargs,
            acq_optimizer_kwargs=self.acq_optimizer_kwargs,
            random_state=random_state,
            model_sdv=self.model_sdv,
            sample_max_size=self._sample_max_size,
            sample_strategy=self._sample_strategy,
            moo_scalarization_strategy=self._moo_scalarization_strategy[idx:],
            objective_scaler=self.objective_scaler_,
        )
        optimizer._initial_samples = self._initial_samples
        optimizer.sampled = self.sampled[:]
        optimizer._moo_scalar_function = self._moo_scalar_function

        if hasattr(self, "gains_"):
            optimizer.gains_ = np.copy(self.gains_)

        if self.Xi:
            # replay past observations so the copy's surrogate state matches
            budget = None if len(self.bi) == 0 else self.bi
            optimizer._tell(self.Xi, self.yi, budget=budget)

        return optimizer
def ask(self, n_points=None, strategy="cl_min"):
"""Query point or multiple points at which objective should be evaluated.
n_points : int or None, default: None
Number of points returned by the ask method.
If the value is None, a single point to evaluate is returned.
Otherwise a list of points to evaluate is returned of size
n_points. This is useful if you can evaluate your objective in
parallel, and thus obtain more objective function evaluations per
unit of time.
strategy : string, default: "cl_min"
Method to use to sample multiple points (see also `n_points`
description). This parameter is ignored if n_points = None.
Supported options are `"cl_min"`, `"cl_mean"` or `"cl_max"`.
- If set to `"cl_min"`, then constant liar strategy is used
with lie objective value being minimum of observed objective
values. `"cl_mean"` and `"cl_max"` means mean and max of values
respectively. For details on this strategy see:
https://hal.archives-ouvertes.fr/hal-00732512/document
With this strategy a copy of optimizer is created, which is
then asked for a point, and the point is told to the copy of
optimizer with some fake objective (lie), the next point is
asked from copy, it is also told to the copy with fake
objective and so on. The type of lie defines different
flavours of `cl_x` strategies.
"""
if n_points is None or n_points == 1:
x = self._ask()
self.sampled.append(x)
if n_points is None:
return x
else:
return [x]
if n_points > 0 and (
self._n_initial_points > 0 or self.base_estimator_ is None
):
if len(self._initial_samples) == 0:
X = self._ask_random_points(size=n_points)
else:
n = min(len(self._initial_samples), n_points)
X = self._initial_samples[:n]
self._initial_samples = self._initial_samples[n:]
X = X + self._ask_random_points(size=(n_points - n))
self.sampled.extend(X)
return X
if self.acq_func == "qLCB":
strategy = "qLCB"
supported_strategies = [
"cl_min",
"cl_mean",
"cl_max",
"topk",
"boltzmann",
"qLCB",
]
if not (isinstance(n_points, int) and n_points > 0):
raise ValueError("n_points should be int > 0, got " + str(n_points))
if strategy not in supported_strategies:
raise ValueError(
"Expected parallel_strategy to be one of "
+ str(supported_strategies)
+ ", "
+ "got %s" % strategy
)
# handle one-shot strategies (topk, softmax)
if hasattr(self, "_last_X") and strategy in ["topk", "boltzmann"]:
if strategy == "topk":
idx = np.argsort(self._last_values)[:n_points]
next_samples = self._last_X[idx].tolist()
# to track sampled values and avoid duplicates
self.sampled.extend(next_samples)
return next_samples
elif strategy == "boltmann":
values = -self._last_values
self._min_value = (
self._min_value
if self._min_value is None
else min(values.min(), self._min_value)
)
self._max_value = (
self._max_value
if self._max_value is None
else max(values.max(), self._max_value)
)
idx = [np.argmax(values)]
max_trials = 100
trials = 0
while len(idx) < n_points:
t = len(self.sampled)
if t == 0:
beta = 0
else:
beta = (
self.boltzmann_gamma
* np.log(t)
/ np.abs(self._max_value - self._min_value)
)
probs = boltzman_distribution(values, beta)
new_idx = np.argmax(self.rng.multinomial(1, probs))
if (
self.filter_duplicated
and new_idx in idx
and trials < max_trials
):
trials += 1
else:
idx.append(new_idx)
self.sampled.append(self._last_X[new_idx].tolist())
return self._last_X[idx].tolist()
else:
raise ValueError(
f"'{strategy}' is not a valid multi-point acquisition strategy!"
)
# q-ACQ multi point acquisition for centralized setting
if hasattr(self, "_est") and self.acq_func == "qLCB":
X_s = self.space.rvs(n_samples=self.n_points, random_state=self.rng)
X_s = self._filter_duplicated(X_s)
X_c = self.space.imp_const.fit_transform(
self.space.transform(X_s)
) # candidates
# add budget to input space if used
if len(self.bi) > 0:
max_budget = np.full((len(X_c), 1), fill_value=np.max(self.bi))
X_c = np.hstack((X_c, max_budget))
mu, std = self._est.predict(X_c, return_std=True)
kappa = self.acq_func_kwargs.get("kappa", 1.96)
kappas = self.rng.exponential(kappa, size=n_points)
X = []
for kappa in kappas:
values = mu - kappa * std
idx = np.argmin(values)
X.append(X_s[idx])
return X
# Caching the result with n_points not None. If some new parameters
# are provided to the ask, the cache_ is not used.
if (n_points, strategy) in self.cache_:
return self.cache_[(n_points, strategy)]
# Copy of the optimizer is made in order to manage the
# deletion of points with "lie" objective (the copy of
# optimizer is simply discarded)
opt = self.copy(random_state=self.rng.randint(0, np.iinfo(np.int32).max))
X = []
max_budget = None if len(self.bi) == 0 else np.max(self.bi)
for i in range(n_points):
x = opt.ask()
self.sampled.append(x)
X.append(x)
# the optimizer copy `opt` is discarded anyway
if i == n_points - 1:
break
ti_available = "ps" in self.acq_func and len(opt.yi) > 0
ti = [t for (_, t) in opt.yi] if ti_available else None
opt_yi = self._filter_failures(opt.yi)
if strategy == "cl_min":
y_lie = np.min(opt_yi, axis=0) if opt_yi else 0.0 # CL-min lie
t_lie = np.min(ti) if ti is not None else log(sys.float_info.max)
elif strategy == "cl_mean":
y_lie = np.mean(opt_yi, axis=0) if opt_yi else 0.0 # CL-mean lie
t_lie = np.mean(ti) if ti is not None else log(sys.float_info.max)
else:
y_lie = np.max(opt_yi, axis=0) if opt_yi else 0.0 # CL-max lie
t_lie = np.max(ti) if ti is not None else log(sys.float_info.max)
# Lie to the optimizer.
if "ps" in self.acq_func:
# Use `_tell()` instead of `tell()` to prevent repeated
# log transformations of the computation times.
opt._tell(x, (y_lie, t_lie), budget=max_budget)
else:
opt._tell(x, y_lie, budget=max_budget)
self.cache_ = {(n_points, strategy): X} # cache_ the result
return X
def _filter_duplicated(self, samples):
"""Filter out duplicated values in ``samples``.
Args:
samples (list): the list of samples to filter.
Returns:
list: the filtered list of samples
"""
if self.filter_duplicated:
# check duplicated values
if hasattr(self, "config_space"):
hps_names = self.config_space.get_hyperparameter_names()
else:
hps_names = self.space.dimension_names
df_samples = pd.DataFrame(data=samples, columns=hps_names, dtype="O")
df_samples = df_samples[~df_samples.duplicated(keep="first")]
if len(self.sampled) > 0:
df_history = pd.DataFrame(data=self.sampled, columns=hps_names)
df_merge = pd.merge(df_samples, df_history, on=None, how="inner")
df_samples = pd.concat([df_samples, df_merge])
df_samples = df_samples[~df_samples.duplicated(keep=False)]
if len(df_samples) > 0:
samples = df_samples.values.tolist()
return samples
def _filter_failures(self, yi):
"""Filter or replace failed objectives.
Args:
yi (list): a list of objectives.
Returns:
list: the filtered list.
"""
if self.filter_failures in ["mean", "max"]:
yi_no_failure = [
v for v in yi if np.ndim(v) > 0 or v != OBJECTIVE_VALUE_FAILURE
]
# when yi_no_failure is empty all configurations are failures
if len(yi_no_failure) == 0:
if len(yi) >= self.max_failures:
raise ExhaustedFailures
# constant value for the acq. func. to return anything
yi_failed_value = 0
elif self.filter_failures == "mean":
yi_failed_value = np.mean(yi_no_failure, axis=0).tolist()
else:
yi_failed_value = np.max(yi_no_failure, axis=0).tolist()
yi = [
v if np.ndim(v) > 0 or v != OBJECTIVE_VALUE_FAILURE else yi_failed_value
for v in yi
]
return yi
def _sample(self, X, y):
X = np.asarray(X, dtype="O")
y = np.asarray(y)
size = y.shape[0]
if self._sample_max_size > 0 and size > self._sample_max_size:
if self._sample_strategy == "quantile":
quantiles = np.quantile(y, [0.10, 0.25, 0.50, 0.75, 0.90])
int_size = self._sample_max_size // (len(quantiles) + 1)
Xs, ys = [], []
for i in range(len(quantiles) + 1):
if i == 0:
s = y < quantiles[i]
elif i == len(quantiles):
s = quantiles[i - 1] <= y
else:
s = (quantiles[i - 1] <= y) & (y < quantiles[i])
idx = np.where(s)[0]
idx = np.random.choice(idx, size=int_size, replace=True)
Xi = X[idx]
yi = y[idx]
Xs.append(Xi)
ys.append(yi)
X = np.concatenate(Xs, axis=0)
y = np.concatenate(ys, axis=0)
X = X.tolist()
y = y.tolist()
return X, y
def _ask_random_points(self, size=None):
samples = self.space.rvs(n_samples=self.n_points, random_state=self.rng)
samples = self._filter_duplicated(samples)
if size is None:
return samples[0]
else:
return samples[:size]
def _ask(self):
"""Suggest next point at which to evaluate the objective.
Return a random point while not at least `n_initial_points`
observations have been `tell`ed, after that `base_estimator` is used
to determine the next point.
"""
if self._n_initial_points > 0 or self.base_estimator_ is None:
# this will not make a copy of `self.rng` and hence keep advancing
# our random state.
if len(self._initial_samples) == 0:
return self._ask_random_points()
else:
# The samples are evaluated starting form initial_samples[0]
x = self._initial_samples[0]
self._initial_samples = self._initial_samples[1:]
return x
else:
if not self.models:
raise RuntimeError(
"Random evaluations exhausted and no " "model has been fit."
)
next_x = self._next_x
if next_x is not None:
if not self.space.is_config_space:
min_delta_x = min(
[self.space.distance(next_x, xi) for xi in self.Xi]
)
if abs(min_delta_x) <= 1e-8:
warnings.warn(
"The objective has been evaluated " "at this point before."
)
# return point computed from last call to tell()
return next_x
def tell(self, x, y, fit=True, budget=None):
"""Record an observation (or several) of the objective function.
Provide values of the objective function at points suggested by
`ask()` or other points. By default a new model will be fit to all
observations. The new model is used to suggest the next point at
which to evaluate the objective. This point can be retrieved by calling
`ask()`.
To add observations without fitting a new model set `fit` to False.
To add multiple observations in a batch pass a list-of-lists for `x`
and a list of scalars for `y`.
Parameters
----------
x : list or list-of-lists
Point at which objective was evaluated.
y : scalar or list
Value of objective at `x`.
fit : bool, default: True
Fit a model to observed evaluations of the objective. A model will
only be fitted after `n_initial_points` points have been told to
the optimizer irrespective of the value of `fit`.
budget : scalar or list, default: None
Value of the budget used to observe the corresponding objective.
"""
if self.space.is_config_space:
pass
else:
check_x_in_space(x, self.space)
self._check_y_is_valid(x, y)
# budget and y are checked in similar ways
if budget is not None:
self._check_y_is_valid(x, budget)
# take the logarithm of the computation times
if "ps" in self.acq_func:
if is_2Dlistlike(x):
y = [[val, log(t)] for (val, t) in y]
elif is_listlike(x):
y = list(y)
y[1] = log(y[1])
return self._tell(x, y, fit=fit, budget=budget)
def _get_predict_budget(self):
"""Compute budget to use to maximise the acquisition function."""
bi_count = sorted([(b, c) for b, c in self.bi_count.items()], reverse=True)
budget_list, count_list = list(zip(*bi_count))
enough_observed = np.asarray(count_list) >= self.n_initial_points_
if any(enough_observed):
idx = np.argmax(enough_observed)
else:
idx = np.argmax(count_list)
pred_budget = budget_list[idx]
return pred_budget
def _compute_sample_weigths(self, bi):
"""Compute weights with respect to budgets (imbalance)."""
# n_samples = len(bi)
# n_budgets = len(self.bi_count)
# max_budget = max(self.bi_count.keys())
# print(f"{max_budget}")
# v2
# budget_weight = {
# k: (n_samples / (n_budgets * b)) * (np.exp(k) / np.exp(max_budget))
# for k, b in self.bi_count.items()
# }
# v1
# budget_weight = {
# b: (np.exp(b) / np.exp(max_budget)) for b, c in self.bi_count.items()
# }
# sample_weight = [budget_weight[b] for b in bi]
# print(budget_weight)
# print(f"c={self.bi_count}")
# print(f"b={bi[:10]}")
# print(f"w={sample_weight[:10]}")
# v3
sample_weight = None
return sample_weight
    def _resample_with_budget(self, Xi, yi, bi):
        # Budget-based oversampling is currently disabled: the early return
        # below makes the remainder of the body unreachable.
        # NOTE(review): the dead code (oversampling lower-budget groups to
        # match the largest-budget group size) is kept for reference — confirm
        # whether it should be re-enabled or deleted.
        return Xi, yi, bi
        groups = {}
        for i, b in enumerate(bi):
            g = groups.get(b, [])
            if len(g) == 0:
                groups[b] = g
            g.append(i)

        max_b = max(groups.keys())
        num_with_max_b = len(groups[max_b])

        nXi, nyi, nbi = [], [], []
        for b, g in groups.items():
            if b == max_b:
                indexes = np.arange(len(g))
            else:
                indexes = self.rng.randint(low=0, high=len(g), size=num_with_max_b)
            for idx in indexes:
                nXi.append(Xi[g[idx]])
                nyi.append(yi[g[idx]])
                nbi.append(b)

        return nXi, nyi, nbi
    def _tell(self, x, y, fit=True, budget=None):
        """Perform the actual work of incorporating one or more new points.

        See `tell()` for the full description.

        This method exists to give access to the internals of adding points
        by side stepping all input validation and transformation."""
        # --- 1) Record the new observation(s) in self.Xi / self.yi ---
        if "ps" in self.acq_func:
            if is_2Dlistlike(x):
                self.Xi.extend(x)
                self.yi.extend(y)
                self._n_initial_points -= len(y)
            elif is_listlike(x):
                self.Xi.append(x)
                self.yi.append(y)
                self._n_initial_points -= 1
        # if y isn't a scalar it means we have been handed a batch of points
        elif is_listlike(y) and is_2Dlistlike(x):
            self.Xi.extend(x)
            self.yi.extend(y)
            self._n_initial_points -= len(y)
        elif is_listlike(x):
            self.Xi.append(x)
            self.yi.append(y)
            self._n_initial_points -= 1
        else:
            raise ValueError(
                "Type of arguments `x` (%s) and `y` (%s) "
                "not compatible." % (type(x), type(y))
            )

        # --- 2) Record the budget(s) for multi-fidelity observations ---
        if budget is not None:
            if type(budget) is list:
                pass
            elif type(budget) is float or type(budget) is int:
                budget = [budget]
            else:
                raise ValueError(
                    "The 'budget' should be composed of int or float values matching the shape of 'y'."
                )
            self.bi.extend(budget)
            # Per-budget observation counts are used by _get_predict_budget().
            for budget_i in budget:
                self.bi_count[budget_i] = self.bi_count.get(budget_i, 0) + 1
            assert len(self.bi) == len(self.yi)

        # optimizer learned something new - discard cache
        self.cache_ = {}

        # after being "told" n_initial_points we switch from sampling
        # random points to using a surrogate model
        if fit and self._n_initial_points <= 0 and self.base_estimator_ is not None:
            transformed_bounds = self.space.transformed_bounds
            est = clone(self.base_estimator_)

            # handle failures
            yi = self._filter_failures(self.yi)

            # convert multiple objectives to single scalar
            if np.ndim(yi) > 1 and np.shape(yi)[1] > 1:
                yi = self._moo_scalarize(yi)

            # handle size of the sample fit to the estimator
            Xi, yi = self._sample(self.Xi, yi)

            with warnings.catch_warnings():
                warnings.simplefilter("ignore")

                # preprocessing of input space
                Xtt = self.space.imp_const.fit_transform(self.space.transform(Xi))
                Xtt = np.asarray(Xtt)

                # add budget as input: the surrogate is trained on
                # (hyperparameters + budget) and later queried at a single,
                # fixed budget value.
                if len(self.bi) > 0:
                    bi = self.bi
                    # print(len(Xtt))
                    # Xtt, yi, bi = self._resample_with_budget(Xtt, yi, self.bi)
                    # print(len(Xtt))
                    # print()
                    pred_budget = self._get_predict_budget()
                    transformed_bounds.append((pred_budget, pred_budget))
                    # sample_weight = self._compute_sample_weigths(bi)
                    sample_weight = None
                    bi = np.asarray(bi).reshape(-1, 1)
                    Xtt = np.hstack((Xtt, bi))
                else:
                    sample_weight = None

                # preprocessing of output space
                yi = self.objective_scaler.fit_transform(
                    np.reshape(yi, (-1, 1))
                ).reshape(-1)

                est.fit(Xtt, yi, sample_weight=sample_weight)

            # for qLCB save the fitted estimator and skip the selection
            if self.acq_func == "qLCB":
                self._est = est
            else:
                if hasattr(self, "next_xs_") and self.acq_func == "gp_hedge":
                    self.gains_ -= est.predict(np.vstack(self.next_xs_))

                if self.max_model_queue_size is None:
                    self.models.append(est)
                elif len(self.models) < self.max_model_queue_size:
                    self.models.append(est)
                else:
                    # Maximum list size obtained, remove oldest model.
                    self.models.pop(0)
                    self.models.append(est)

                # even with BFGS as optimizer we want to sample a large number
                # of points and then pick the best ones as starting points
                X_s = self.space.rvs(n_samples=self.n_points, random_state=self.rng)
                X_s = self._filter_duplicated(X_s)
                X = self.space.imp_const.fit_transform(self.space.transform(X_s))

                # add max budget as input for all samples
                if len(self.bi) > 0:
                    pred_budget = np.full((len(X), 1), fill_value=pred_budget)
                    X = np.hstack((X, pred_budget))

                self.next_xs_ = []
                for cand_acq_func in self.cand_acq_funcs_:
                    values = _gaussian_acquisition(
                        X=X,
                        model=est,
                        y_opt=np.min(yi),
                        acq_func=cand_acq_func,
                        acq_func_kwargs=self.acq_func_kwargs,
                    )

                    # cache these values in case the strategy of ask is one-shot
                    # if budget is used we need to remove it from input space
                    self._last_X = X[:, :-1] if len(self.bi) > 0 else X
                    self._last_values = values

                    # Find the minimum of the acquisition function by randomly
                    # sampling points from the space
                    if self.acq_optimizer == "sampling":
                        next_x = X[np.argmin(values)]
                    elif self.acq_optimizer == "boltzmann_sampling":
                        p = self.rng.uniform()
                        if p <= self.boltzmann_psucc:
                            next_x = X[np.argmin(values)]
                        else:
                            values = -values
                            # NOTE(review): both updates below keep the
                            # previous value when it is None, so _min_value /
                            # _max_value are never initialized from data here
                            # — confirm this is intended and that they are
                            # initialized elsewhere before beta is computed.
                            self._min_value = (
                                self._min_value
                                if self._min_value is None
                                else min(values.min(), self._min_value)
                            )
                            self._max_value = (
                                self._max_value
                                if self._max_value is None
                                else max(values.max(), self._max_value)
                            )
                            t = len(self.Xi)
                            if t == 0:
                                beta = 0
                            else:
                                beta = (
                                    self.boltzmann_gamma
                                    * np.log(t)
                                    / np.abs(self._max_value - self._min_value)
                                )
                            probs = boltzman_distribution(values, beta)

                            idx = np.argmax(self.rng.multinomial(1, probs))

                            next_x = X[idx]

                    # Use BFGS to find the minimum of the acquisition function, the
                    # minimization starts from `n_restarts_optimizer` different
                    # points and the best minimum is used
                    elif self.acq_optimizer == "lbfgs":
                        x0 = X[np.argsort(values)[: self.n_restarts_optimizer]]

                        with warnings.catch_warnings():
                            warnings.simplefilter("ignore")
                            results = Parallel(n_jobs=self.n_jobs)(
                                delayed(fmin_l_bfgs_b)(
                                    gaussian_acquisition_1D,
                                    x,
                                    args=(
                                        est,
                                        np.min(yi),
                                        cand_acq_func,
                                        self.acq_func_kwargs,
                                    ),
                                    bounds=transformed_bounds,
                                    approx_grad=False,
                                    maxiter=20,
                                )
                                for x in x0
                            )

                        cand_xs = np.array([r[0] for r in results])
                        cand_acqs = np.array([r[1] for r in results])
                        next_x = cand_xs[np.argmin(cand_acqs)]

                    # lbfgs should handle this but just in case there are
                    # precision errors.
                    if not self.space.is_categorical:
                        if not self.space.is_config_space:
                            transformed_bounds = np.asarray(transformed_bounds)
                            next_x = np.clip(
                                next_x,
                                transformed_bounds[:, 0],
                                transformed_bounds[:, 1],
                            )

                    # if budget is used we need to remove the feature from the input space
                    if len(self.bi) > 0:
                        self.next_xs_.append(next_x[:-1])
                    else:
                        self.next_xs_.append(next_x)

                if self.acq_func == "gp_hedge":
                    # Softmax over the accumulated gains selects which
                    # candidate acquisition function's proposal is used.
                    logits = np.array(self.gains_)
                    logits -= np.max(logits)
                    exp_logits = np.exp(self.eta * logits)
                    probs = exp_logits / np.sum(exp_logits)
                    next_x = self.next_xs_[np.argmax(self.rng.multinomial(1, probs))]
                else:
                    next_x = self.next_xs_[0]

                # note the need for [0] at the end
                self._next_x = self.space.inverse_transform(next_x.reshape((1, -1)))[0]

        # Pack results
        result = create_result(
            self.Xi, self.yi, self.space, self.rng, models=self.models
        )

        result.specs = self.specs
        return result
def _check_y_is_valid(self, x, y):
"""Check if the shape and types of x and y are consistent."""
if "ps" in self.acq_func:
if is_2Dlistlike(x):
if not (np.ndim(y) == 2 and np.shape(y)[1] == 2):
raise TypeError("expected y to be a list of (func_val, t)")
elif is_listlike(x):
if not (np.ndim(y) == 1 and len(y) == 2):
raise TypeError("expected y to be (func_val, t)")
# if y isn't a scalar it means we have been handed a batch of points
elif is_listlike(y) and is_2Dlistlike(x):
for y_value in y:
if (
not isinstance(y_value, Number)
and not is_listlike(y_value)
and y_value != OBJECTIVE_VALUE_FAILURE
):
raise ValueError("expected y to be a 1-D or 2-D list of scalars")
elif is_listlike(x):
if not isinstance(y, Number) and not is_listlike(y):
raise ValueError("`func` should return a scalar or tuple of scalars")
else:
raise ValueError(
"Type of arguments `x` (%s) and `y` (%s) "
"not compatible." % (type(x), type(y))
)
def run(self, func, n_iter=1):
"""Execute ask() + tell() `n_iter` times"""
for _ in range(n_iter):
x = self.ask()
self.tell(x, func(x))
result = create_result(
self.Xi, self.yi, self.space, self.rng, models=self.models
)
result.specs = self.specs
return result
def update_next(self):
"""Updates the value returned by opt.ask(). Useful if a parameter
was updated after ask was called."""
self.cache_ = {}
# Ask for a new next_x.
# We only need to overwrite _next_x if it exists.
if hasattr(self, "_next_x"):
opt = self.copy(random_state=self.rng)
self._next_x = opt._next_x
def get_result(self):
"""Returns the same result that would be returned by opt.tell()
but without calling tell
Returns
-------
res : `OptimizeResult`, scipy object
OptimizeResult instance with the required information.
"""
result = create_result(
self.Xi, self.yi, self.space, self.rng, models=self.models
)
result.specs = self.specs
return result
    def _moo_scalarize(self, yi):
        """Scalarize multi-objective values into a list of scalars.

        The scalarization function is (re)created when it does not exist yet
        or when the strategy is randomized (name starting with ``"r"``), in
        which case new random weights are drawn at every call.
        """
        if (
            self._moo_scalar_function is None
            or self._moo_scalarization_strategy.startswith("r")
        ):
            # Strategy name -> scalarization class; "r"-prefixed variants map
            # to the same classes but are built with weight=None (randomized).
            moo_function = {
                "Linear": MoLinearFunction,
                "Chebyshev": MoChebyshevFunction,
                "AugChebyshev": MoAugmentedChebyshevFunction,
                "PBI": MoPBIFunction,
                "Quadratic": MoQuadraticFunction,
                "rLinear": MoLinearFunction,
                "rChebyshev": MoChebyshevFunction,
                "rAugChebyshev": MoAugmentedChebyshevFunction,
                "rPBI": MoPBIFunction,
                "rQuadratic": MoQuadraticFunction,
            }

            # Infer the number of objectives from the first observation.
            n_objectives = 1 if np.ndim(yi[0]) == 0 else len(yi[0])

            if self._moo_scalarization_weight is not None:
                # User-provided weights must match the number of objectives.
                if (
                    not is_listlike(self._moo_scalarization_weight)
                    or len(self._moo_scalarization_weight) != n_objectives
                ):
                    raise ValueError(
                        "expected moo_scalarization_weight to be a list of length equal to the number of objectives"
                    )
                weight = np.asarray_chkfinite(self._moo_scalarization_weight)
            elif self._moo_scalarization_strategy.startswith("r"):
                # None lets the scalarization function draw random weights.
                weight = None
            else:
                # Uniform weights by default.
                weight = np.ones(n_objectives) / n_objectives

            self._moo_scalar_function = moo_function[self._moo_scalarization_strategy](
                n_objectives=n_objectives,
                weight=weight,
                random_state=self.rng,
            )

        # compute normalization constants
        self._moo_scalar_function.normalize(yi)

        return [self._moo_scalar_function.scalarize(yv) for yv in yi]
| 50,659 | 37.612805 | 161 | py |
deephyper | deephyper-master/deephyper/skopt/optimizer/dummy.py | """Random search."""
from .base import base_minimize
def dummy_minimize(
    func,
    dimensions,
    n_calls=100,
    initial_point_generator="random",
    x0=None,
    y0=None,
    random_state=None,
    verbose=False,
    callback=None,
    model_queue_size=None,
    init_point_gen_kwargs=None,
):
    """Random search by uniform sampling within the given bounds.

    Parameters
    ----------
    func : callable
        Function to minimize. Should take a single list of parameters
        and return the objective value.

        If you have a search-space where all dimensions have names,
        then you can use :func:`deephyper.skopt.utils.use_named_args` as a decorator
        on your objective function, in order to call it directly
        with the named arguments. See `use_named_args` for an example.

    dimensions : list, shape (n_dims,)
        List of search space dimensions.
        Each search dimension can be defined either as

        - a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
          dimensions),
        - a `(lower_bound, upper_bound, prior)` tuple (for `Real`
          dimensions),
        - as a list of categories (for `Categorical` dimensions), or
        - an instance of a `Dimension` object (`Real`, `Integer` or
          `Categorical`).

    n_calls : int, default: 100
        Number of calls to `func` to find the minimum.

    initial_point_generator : str, InitialPointGenerator instance, \
            default: `"random"`
        Sets a initial points generator. Can be either

        - `"random"` for uniform random numbers,
        - `"sobol"` for a Sobol' sequence,
        - `"halton"` for a Halton sequence,
        - `"hammersly"` for a Hammersly sequence,
        - `"lhs"` for a latin hypercube sequence,
        - `"grid"` for a uniform grid sequence

    x0 : list, list of lists or `None`
        Initial input points.

        - If it is a list of lists, use it as a list of input points.
        - If it is a list, use it as a single initial input point.
        - If it is `None`, no initial input points are used.

    y0 : list, scalar or `None`
        Evaluation of initial input points.

        - If it is a list, then it corresponds to evaluations of the function
          at each element of `x0` : the i-th element of `y0` corresponds
          to the function evaluated at the i-th element of `x0`.
        - If it is a scalar, then it corresponds to the evaluation of the
          function at `x0`.
        - If it is None and `x0` is provided, then the function is evaluated
          at each element of `x0`.

    random_state : int, RandomState instance, or None (default)
        Set random state to something other than None for reproducible
        results.

    verbose : boolean, default: False
        Control the verbosity. It is advised to set the verbosity to True
        for long optimization runs.

    callback : callable, list of callables, optional
        If callable then `callback(res)` is called after each call to `func`.
        If list of callables, then each callable in the list is called.

    model_queue_size : int or None, default: None
        Keeps list of models only as long as the argument given. In the
        case of None, the list has no capped length.

    Returns
    -------
    res : `OptimizeResult`, scipy object
        The optimization result returned as a OptimizeResult object.
        Important attributes are:

        - `x` [list]: location of the minimum.
        - `fun` [float]: function value at the minimum.
        - `x_iters` [list of lists]: location of function evaluation for each
          iteration.
        - `func_vals` [array]: function value for each iteration.
        - `space` [Space]: the optimisation space.
        - `specs` [dict]: the call specifications.
        - `rng` [RandomState instance]: State of the random state
          at the end of minimization.

        For more details related to the OptimizeResult object, refer
        http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html

    .. seealso:: functions :class:`deephyper.skopt.gp_minimize`,
        :class:`deephyper.skopt.forest_minimize`, :class:`deephyper.skopt.gbrt_minimize`
    """
    # NOTE(review): `init_point_gen_kwargs` is accepted but never used in this
    # function — confirm whether it should be forwarded to `base_minimize`.
    # all our calls want random suggestions, except if we need to evaluate
    # some initial points
    if x0 is not None and y0 is None:
        n_initial_points = n_calls - len(x0)
    else:
        n_initial_points = n_calls
    return base_minimize(
        func,
        dimensions,
        base_estimator="dummy",
        # explicitly set optimizer to sampling as "dummy"
        # minimizer does not provide gradients.
        acq_optimizer="sampling",
        n_calls=n_calls,
        n_initial_points=n_initial_points,
        initial_point_generator=initial_point_generator,
        x0=x0,
        y0=y0,
        random_state=random_state,
        verbose=verbose,
        callback=callback,
        model_queue_size=model_queue_size,
    )
| 5,020 | 35.384058 | 94 | py |
deephyper | deephyper-master/deephyper/evaluator/_encoder.py | import json
import re
import types
import uuid
from inspect import isclass
import ConfigSpace as cs
import ConfigSpace.hyperparameters as csh
import deephyper.skopt
import numpy as np
from ConfigSpace.read_and_write import json as cs_json
class Encoder(json.JSONEncoder):
"""
Enables JSON dump of numpy data, python functions.
"""
def default(self, obj):
if isinstance(obj, uuid.UUID):
return str(obj)
elif isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.bool_):
return bool(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, types.FunctionType) or isclass(obj):
return f"{obj.__module__}.{obj.__name__}"
elif isinstance(obj, deephyper.skopt.space.Dimension):
return str(obj)
elif isinstance(obj, csh.Hyperparameter):
return str(obj)
elif isinstance(obj, cs.ConfigurationSpace):
return json.loads(cs_json.write(obj))
else:
return super(Encoder, self).default(obj)
def to_json(d: dict) -> str:
    """Serialize ``d`` to a JSON string using the extended :class:`Encoder`."""
    return json.dumps(d, cls=Encoder)
def parse_subprocess_result(result):
    """Utility to parse a result from a subprocess of the format `"DH-OUTPUT:..."`.

    Args:
        result: object returned by a subprocess with ``stdout`` and ``stderr``
            attributes (bytes).

    Return:
        The parsed value or raise an exception if an error happened.
    """
    match = re.search(b"DH-OUTPUT:(.+)\n", result.stdout)
    if match is None:
        # No marker found: surface the subprocess error output.
        error = result.stderr.decode("utf-8")
        raise RuntimeError(
            f"{error}\n\n Could not collect any result from the run_function in the main process because an error happened in the subprocess."
        )

    # Single quotes come from Python dict reprs; JSON requires double quotes.
    payload = match.group(1).replace(b"'", b'"')
    return json.loads(payload)
| 2,225 | 30.8 | 142 | py |
deephyper | deephyper-master/deephyper/evaluator/_process_pool.py | import asyncio
import functools
import logging
from concurrent.futures import ProcessPoolExecutor
from typing import Callable, Hashable
from deephyper.evaluator._evaluator import Evaluator
from deephyper.evaluator._job import Job
from deephyper.evaluator.storage import Storage
logger = logging.getLogger(__name__)
class ProcessPoolEvaluator(Evaluator):
"""This evaluator uses the ``ProcessPoolExecutor`` as backend.
Args:
run_function (callable): functions to be executed by the ``Evaluator``.
num_workers (int, optional): Number of parallel processes used to compute the ``run_function``. Defaults to 1.
callbacks (list, optional): A list of callbacks to trigger custom actions at the creation or completion of jobs. Defaults to None.
"""
def __init__(
self,
run_function: Callable,
num_workers: int = 1,
callbacks: list = None,
run_function_kwargs: dict = None,
storage: Storage = None,
search_id: Hashable = None,
):
super().__init__(
run_function=run_function,
num_workers=num_workers,
callbacks=callbacks,
run_function_kwargs=run_function_kwargs,
storage=storage,
search_id=search_id,
)
self.sem = asyncio.Semaphore(num_workers)
# !creating the exector once here is crutial to avoid repetitive overheads
self.executor = ProcessPoolExecutor(max_workers=num_workers)
if hasattr(run_function, "__name__") and hasattr(run_function, "__module__"):
logger.info(
f"ProcessPool Evaluator will execute {self.run_function.__name__}() from module {self.run_function.__module__}"
)
else:
logger.info(f"ProcessPool Evaluator will execute {self.run_function}")
async def execute(self, job: Job) -> Job:
async with self.sem:
running_job = job.create_running_job(self._storage, self._stopper)
run_function = functools.partial(
job.run_function, running_job, **self.run_function_kwargs
)
output = await self.loop.run_in_executor(self.executor, run_function)
job.set_output(output)
return job
| 2,271 | 33.424242 | 138 | py |
deephyper | deephyper-master/deephyper/evaluator/_mpi_comm.py | import asyncio
import functools
import logging
import traceback
from typing import Callable, Hashable
from deephyper.core.exceptions import RunFunctionError
from deephyper.evaluator._evaluator import Evaluator
from deephyper.evaluator._job import Job
from deephyper.evaluator.storage import Storage
import mpi4py
# !To avoid initializing MPI when module is imported (MPI is optional)
mpi4py.rc.initialize = False
mpi4py.rc.finalize = True
from mpi4py import MPI # noqa: E402
from mpi4py.futures import MPICommExecutor # noqa: E402
logger = logging.getLogger(__name__)
def catch_exception(run_func):
    """A wrapper function to execute the ``run_func`` passed by the user.

    Catching the exception here lets it be transported back from a remote
    worker to the root process.

    Returns:
        tuple: ``(0, result)`` on success, ``(1, formatted_traceback)`` on
        failure.
    """
    try:
        return 0, run_func()
    except Exception:
        return 1, traceback.format_exc()
class MPICommEvaluator(Evaluator):
    """This evaluator uses the ``mpi4py`` library as backend.

    This evaluator consider an already existing MPI-context (with running processes), therefore it has less overhead than ``MPIPoolEvaluator`` which spawn processes dynamically.

    Args:
        run_function (callable): functions to be executed by the ``Evaluator``.
        num_workers (int, optional): Number of parallel Ray-workers used to compute the ``run_function``. Defaults to ``None`` which consider 1 rank as a worker (minus the master rank).
        callbacks (list, optional): A list of callbacks to trigger custom actions at the creation or completion of jobs. Defaults to ``None``.
        run_function_kwargs (dict, optional): Keyword-arguments to pass to the ``run_function``. Defaults to ``None``.
        comm (optional): A MPI communicator, if ``None`` it will use ``MPI.COMM_WORLD``. Defaults to ``None``.
        rank (int, optional): The rank of the master process. Defaults to ``0``.
        abort_on_exit (bool): If ``True`` then it will call ``comm.Abort()`` to force all MPI processes to finish when closing the ``Evaluator`` (i.e., exiting the current ``with`` block).
    """

    def __init__(
        self,
        run_function: Callable,
        num_workers: int = None,
        callbacks=None,
        run_function_kwargs=None,
        storage: Storage = None,
        search_id: Hashable = None,
        comm=None,
        root=0,
        abort_on_exit=False,
        wait_on_exit=True,
        cancel_jobs_on_exit=True,
    ):
        super().__init__(
            run_function=run_function,
            num_workers=num_workers,
            callbacks=callbacks,
            run_function_kwargs=run_function_kwargs,
            storage=storage,
            search_id=search_id,
        )
        # MPI is initialized lazily (mpi4py.rc.initialize is disabled at
        # import time to keep MPI optional).
        if not MPI.Is_initialized():
            MPI.Init_thread()

        self.comm = comm if comm else MPI.COMM_WORLD
        self.root = root
        self.abort_on_exit = abort_on_exit
        self.wait_on_exit = wait_on_exit
        self.cancel_jobs_on_exit = cancel_jobs_on_exit
        self.num_workers = self.comm.Get_size() - 1  # 1 rank is the master
        # Bound the number of concurrently submitted jobs to the worker count.
        self.sem = asyncio.Semaphore(self.num_workers)
        logging.info(f"Creating MPICommExecutor with {self.num_workers} max_workers...")
        self.executor = MPICommExecutor(comm=self.comm, root=self.root)
        self.master_executor = None
        logging.info("Creation of MPICommExecutor done")

    def __enter__(self):
        # Only the root rank receives a usable executor; worker ranks get
        # ``None`` back and should not enter the ``with`` body.
        self.master_executor = self.executor.__enter__()
        if self.master_executor is not None:
            return self
        else:
            return None

    def __exit__(self, type, value, traceback):
        if self.abort_on_exit:
            # Hard-stop every MPI process.
            self.comm.Abort(1)
        else:
            if (
                self.master_executor
                and hasattr(self.executor, "_executor")
                and self.executor._executor is not None
            ):
                # NOTE(review): reaches into MPICommExecutor private state to
                # control shutdown/cancellation behavior on exit.
                self.executor._executor.shutdown(
                    wait=self.wait_on_exit, cancel_futures=self.cancel_jobs_on_exit
                )
                self.executor._executor = None

    async def execute(self, job: Job) -> Job:
        """Run ``job`` on a remote MPI rank and attach its output."""
        async with self.sem:
            running_job = job.create_running_job(self._storage, self._stopper)

            run_function = functools.partial(
                job.run_function, running_job, **self.run_function_kwargs
            )

            code, sol = await self.loop.run_in_executor(
                self.master_executor, catch_exception, run_function
            )

            # check if exception happened in worker
            if code == 1:
                # Search termination is an expected, silent stop condition.
                if "SearchTerminationError" in sol:
                    pass
                else:
                    format_msg = "\n\n/**** START OF REMOTE ERROR ****/\n\n"
                    format_msg += sol
                    format_msg += "\nException happening in remote rank was propagated to root process.\n"
                    format_msg += "\n/**** END OF REMOTE ERROR ****/\n"
                    raise RunFunctionError(format_msg)

            job.set_output(sol)

        return job
| 5,080 | 36.637037 | 188 | py |
deephyper | deephyper-master/deephyper/evaluator/_serial.py | import logging
from typing import Callable, Hashable
from deephyper.evaluator._evaluator import Evaluator
from deephyper.evaluator._job import Job
from deephyper.evaluator.storage import Storage
logger = logging.getLogger(__name__)
class SerialEvaluator(Evaluator):
    """Evaluator running evaluations one after the other (not parallel).

    Args:
        run_function (callable): functions to be executed by the ``Evaluator``.
        num_workers (int, optional): Number of parallel Ray-workers used to compute the ``run_function``. Defaults to 1.
        callbacks (list, optional): A list of callbacks to trigger custom actions at the creation or completion of jobs. Defaults to None.
        run_function_kwargs (dict, optional): Static keyword arguments to pass to the ``run_function`` when executed.
        storage (Storage, optional): Storage used by the evaluator. Defaults to ``MemoryStorage``.
        search_id (Hashable, optional): The id of the search to use in the corresponding storage. If ``None`` it will create a new search identifier when initializing the search.
    """

    def __init__(
        self,
        run_function: Callable,
        num_workers: int = 1,
        callbacks: list = None,
        run_function_kwargs: dict = None,
        storage: Storage = None,
        search_id: Hashable = None,
    ):
        super().__init__(
            run_function=run_function,
            num_workers=num_workers,
            callbacks=callbacks,
            run_function_kwargs=run_function_kwargs,
            storage=storage,
            search_id=search_id,
        )
        self.num_workers = num_workers

        has_named_function = hasattr(run_function, "__name__") and hasattr(
            run_function, "__module__"
        )
        if has_named_function:
            logger.info(
                f"Serial Evaluator will execute {self.run_function.__name__}() from module {self.run_function.__module__}"
            )
        else:
            logger.info(f"Serial Evaluator will execute {self.run_function}")

    async def execute(self, job: Job) -> Job:
        """Run ``job`` synchronously in the current process."""
        running_job = job.create_running_job(self._storage, self._stopper)
        job.set_output(self.run_function(running_job, **self.run_function_kwargs))
        return job
| 2,238 | 36.316667 | 178 | py |
deephyper | deephyper-master/deephyper/evaluator/_evaluator.py | import asyncio
import copy
import csv
import functools
import importlib
import json
import logging
import os
import sys
import time
import warnings
from typing import Dict, List, Hashable
import numpy as np
from deephyper.evaluator._job import Job
from deephyper.skopt.optimizer import OBJECTIVE_VALUE_FAILURE
from deephyper.core.utils._timeout import terminate_on_timeout
from deephyper.evaluator.storage import Storage, MemoryStorage
# Mapping from evaluator method name to "module.ClassName" inside
# ``deephyper.evaluator`` (resolved lazily by ``Evaluator.create``).
EVALUATORS = {
    "mpicomm": "_mpi_comm.MPICommEvaluator",
    "process": "_process_pool.ProcessPoolEvaluator",
    "ray": "_ray.RayEvaluator",
    "serial": "_serial.SerialEvaluator",
    "thread": "_thread_pool.ThreadPoolEvaluator",
}
def _test_ipython_interpretor() -> bool:
"""Test if the current Python interpretor is IPython or not.
Suggested by: https://stackoverflow.com/questions/15411967/how-can-i-check-if-code-is-executed-in-the-ipython-notebook
"""
# names of shells/modules using jupyter
notebooks_shells = ["ZMQInteractiveShell"]
notebooks_modules = ["google.colab._shell"]
try:
shell_name = get_ipython().__class__.__name__ # type: ignore
shell_module = get_ipython().__class__.__module__ # type: ignore
if shell_name in notebooks_shells or shell_module in notebooks_modules:
return True # Jupyter notebook or qtconsole
elif shell_name == "TerminalInteractiveShell":
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
class Evaluator:
"""This ``Evaluator`` class asynchronously manages a series of Job objects to help execute given HPS or NAS tasks on various environments with differing system settings and properties.
Args:
run_function (callable): functions to be executed by the ``Evaluator``.
num_workers (int, optional): Number of parallel workers available for the ``Evaluator``. Defaults to 1.
callbacks (list, optional): A list of callbacks to trigger custom actions at the creation or completion of jobs. Defaults to None.
run_function_kwargs (dict, optional): Static keyword arguments to pass to the ``run_function`` when executed.
storage (Storage, optional): Storage used by the evaluator. Defaults to ``MemoryStorage``.
search_id (Hashable, optional): The id of the search to use in the corresponding storage. If ``None`` it will create a new search identifier when initializing the search.
"""
FAIL_RETURN_VALUE = OBJECTIVE_VALUE_FAILURE
NEST_ASYNCIO_PATCHED = False
PYTHON_EXE = os.environ.get("DEEPHYPER_PYTHON_BACKEND", sys.executable)
assert os.path.isfile(PYTHON_EXE)
    def __init__(
        self,
        run_function,
        num_workers: int = 1,
        callbacks: list = None,
        run_function_kwargs: dict = None,
        storage: Storage = None,
        search_id: Hashable = None,
    ):
        """Initialize the evaluator state, storage connection and asyncio patching."""
        self.run_function = run_function  # User-defined run function.
        self.run_function_kwargs = (
            {} if run_function_kwargs is None else run_function_kwargs
        )

        # Number of parallel workers available
        self.num_workers = num_workers
        self.jobs = []  # Job objects currently submitted.
        self._tasks_running = []  # List of AsyncIO Task objects currently running.
        self._tasks_done = []  # Temp list to hold completed tasks from asyncio.
        self._tasks_pending = []  # Temp list to hold pending tasks from asyncio.
        self.jobs_done = []  # List used to store all jobs completed by the evaluator.
        self.job_id_gathered = []  # List of jobs'id gathered by the evaluator.
        self.timestamp = (
            time.time()
        )  # Recorded time of when this evaluator interface was created.
        self.loop = None  # Event loop for asyncio.
        self._start_dumping = False
        self.num_objective = None  # record if multi-objective are recorded
        self._stopper = None  # stopper object

        self._callbacks = [] if callbacks is None else callbacks

        self._lock = asyncio.Lock()

        # manage timeout of the search
        self._time_timeout_set = None
        self._timeout = None

        # storage mechanism
        self._storage = MemoryStorage() if storage is None else storage
        if not (self._storage.connected):
            self._storage.connect()

        if search_id is None:
            self._search_id = self._storage.create_new_search()
        else:
            # Reuse an existing search only if it is known to the storage.
            if search_id in self._storage.load_all_search_ids():
                self._search_id = search_id
            else:
                raise ValueError(
                    f"The given search_id={search_id} does not exist in the linked storage."
                )

        # to avoid "RuntimeError: This event loop is already running"
        if not (Evaluator.NEST_ASYNCIO_PATCHED) and _test_ipython_interpretor():
            warnings.warn(
                "Applying nest-asyncio patch for IPython Shell!", category=UserWarning
            )
            import deephyper.evaluator._nest_asyncio as nest_asyncio

            nest_asyncio.apply()
            Evaluator.NEST_ASYNCIO_PATCHED = True
    def __enter__(self):
        # Support usage as a context manager: ``with Evaluator(...) as ev:``.
        return self
    def __exit__(self, type, value, traceback):
        # Forward the exit to the underlying executor when the concrete
        # evaluator defines one (e.g. process/thread pools).
        if hasattr(self, "executor"):
            self.executor.__exit__(type, value, traceback)
    def set_timeout(self, timeout):
        """Set a timeout for the Evaluator.

        Jobs are then created with a "time budget" and the task is killed
        when this budget is exhausted.
        """
        self._time_timeout_set = time.time()
        self._timeout = timeout
def to_json(self):
"""Returns a json version of the evaluator."""
out = {"type": type(self).__name__, "num_workers": self.num_workers}
return out
@staticmethod
def create(run_function, method="serial", method_kwargs={}):
"""Create evaluator with a specific backend and configuration.
Args:
run_function (function): the function to execute in parallel.
method (str, optional): the backend to use in ``["serial", "thread", "process", "ray", "mpicomm"]``. Defaults to ``"serial"``.
method_kwargs (dict, optional): configuration dictionnary of the corresponding backend. Keys corresponds to the keyword arguments of the corresponding implementation. Defaults to "{}".
Raises:
ValueError: if the ``method is`` not acceptable.
Returns:
Evaluator: the ``Evaluator`` with the corresponding backend and configuration.
"""
logging.info(
f"Creating Evaluator({run_function}, method={method}, method_kwargs={method_kwargs}..."
)
if method not in EVALUATORS.keys():
val = ", ".join(EVALUATORS)
raise ValueError(
f'The method "{method}" is not a valid method for an Evaluator!'
f" Choose among the following evalutor types: "
f"{val}."
)
# create the evaluator
mod_name, attr_name = EVALUATORS[method].split(".")
mod = importlib.import_module(f"deephyper.evaluator.{mod_name}")
eval_cls = getattr(mod, attr_name)
evaluator = eval_cls(run_function, **method_kwargs)
logging.info("Creation done")
return evaluator
    async def _get_at_least_n_tasks(self, n):
        """Await until at least ``n`` of the running tasks have completed."""
        # If a user requests a batch size larger than the number of currently-running tasks, set n to the number of tasks running.
        if n > len(self._tasks_running):
            warnings.warn(
                f"Requested a batch size ({n}) larger than currently running tasks ({len(self._tasks_running)}). Batch size has been set to the count of currently running tasks."
            )
            n = len(self._tasks_running)

        # wait for all running tasks (sync.)
        if n == len(self._tasks_running):
            try:
                self._tasks_done, self._tasks_pending = await asyncio.wait(
                    self._tasks_running, return_when="ALL_COMPLETED"
                )
            except ValueError:
                raise ValueError("No jobs pending, call Evaluator.submit(jobs)!")
        else:
            # Re-await until the completed set is large enough.
            while len(self._tasks_done) < n:
                self._tasks_done, self._tasks_pending = await asyncio.wait(
                    self._tasks_running, return_when="FIRST_COMPLETED"
                )
    async def _run_jobs(self, configs):
        """Create one job per configuration and schedule its execution."""
        for config in configs:
            # Create a Job object from the input configuration
            job_id = self._storage.create_new_job(self._search_id)
            self._storage.store_job_in(job_id, args=(config,))

            new_job = Job(job_id, config, self.run_function)

            if self._timeout:
                # Wrap the run-function so it is terminated when the
                # remaining global time budget is exhausted.
                time_consumed = time.time() - self._time_timeout_set
                time_left = self._timeout - time_consumed
                logging.info(f"Submitting job with {time_left} sec. time budget")
                new_job.run_function = functools.partial(
                    terminate_on_timeout, time_left, new_job.run_function
                )

            self.jobs.append(new_job)

            self._on_launch(new_job)
            task = self.loop.create_task(self._execute(new_job))
            self._tasks_running.append(task)
def _on_launch(self, job):
"""Called after a job is started."""
job.status = job.RUNNING
job.output["metadata"]["timestamp_submit"] = time.time() - self.timestamp
# call callbacks
for cb in self._callbacks:
cb.on_launch(job)
    def _on_done(self, job):
        """Called after a job has completed."""
        job.status = job.DONE
        # timestamps are stored relative to the creation time of the evaluator
        job.output["metadata"]["timestamp_gather"] = time.time() - self.timestamp
        # replace a non-finite scalar objective (e.g., nan/inf) by the failure value
        if np.isscalar(job.objective):
            if np.isreal(job.objective) and not (np.isfinite(job.objective)):
                job.output["objective"] = Evaluator.FAIL_RETURN_VALUE
        # store data in storage
        self._storage.store_job_out(job.id, job.objective)
        for k, v in job.metadata.items():
            self._storage.store_job_metadata(job.id, k, v)
        # call callbacks
        for cb in self._callbacks:
            cb.on_done(job)
async def _execute(self, job):
job = await self.execute(job)
if not (isinstance(job.output, dict)):
raise ValueError(
"The output of the job is not standard. Check if `job.set_output(output) was correctly used when defining the Evaluator class."
)
return job
    async def execute(self, job) -> Job:
        """Execute the received job. To be implemented with a specific backend.

        Args:
            job (Job): the ``Job`` to be executed.

        Returns:
            Job: the executed job, with its output set.

        Raises:
            NotImplementedError: always, in this abstract base implementation.
        """
        raise NotImplementedError
    def submit(self, configs: List[Dict]):
        """Send configurations to be evaluated by available workers.

        Args:
            configs (List[Dict]): A list of dict which will be passed to the run function to be executed.
        """
        logging.info(f"submit {len(configs)} job(s) starts...")
        # lazily acquire an event loop the first time jobs are submitted
        if self.loop is None:
            try:
                # works if `timeout` is not set and code is running in main thread
                self.loop = asyncio.get_event_loop()
            except RuntimeError:
                # required when `timeout` is set because code is not running in main thread
                self.loop = asyncio.new_event_loop()
        self.loop.run_until_complete(self._run_jobs(configs))
        logging.info("submit done")
    def gather(self, type, size=1):
        """Collect the completed tasks from the evaluator in batches of one or more.

        Args:
            type (str):
                Options:
                    ``"ALL"``
                        Block until all jobs submitted to the evaluator are completed.
                    ``"BATCH"``
                        Specify a minimum batch size of jobs to collect from the evaluator. The method will block until at least ``size`` evaluations are completed.
            size (int, optional): The minimum batch size that we want to collect from the evaluator. Defaults to 1.

        Raises:
            Exception: Raised when a gather operation other than "ALL" or "BATCH" is provided.

        Returns:
            List[Job]: A batch of locally completed jobs that is at minimum the given size. When jobs completed by other processes are found in the storage, a tuple ``(local_jobs, other_jobs)`` is returned instead.
        """
        logging.info(f"gather({type}, size={size}) starts...")
        assert type in ["ALL", "BATCH"], f"Unsupported gather operation: {type}."
        local_results = []
        if type == "ALL":
            size = len(self._tasks_running)  # Get all tasks.
        self.loop.run_until_complete(self._get_at_least_n_tasks(size))
        for task in self._tasks_done:
            job = task.result()
            self._on_done(job)
            local_results.append(job)
            self.jobs_done.append(job)
            self._tasks_running.remove(task)
            self.job_id_gathered.append(job.id)
        self._tasks_done = []
        self._tasks_pending = []
        # access storage to return results from other processes
        job_id_all = self._storage.load_all_job_ids(self._search_id)
        job_id_not_gathered = np.setdiff1d(job_id_all, self.job_id_gathered).tolist()
        other_results = []
        if len(job_id_not_gathered) > 0:
            jobs_data = self._storage.load_jobs(job_id_not_gathered)
            for job_id in job_id_not_gathered:
                job_data = jobs_data[job_id]
                # only collect remote jobs which already produced an output
                if job_data and job_data["out"]:
                    job = Job(
                        id=job_id, config=job_data["in"]["args"][0], run_function=None
                    )
                    job.status = Job.DONE
                    job.output["metadata"].update(job_data["metadata"])
                    job.output["objective"] = job_data["out"]
                    self.job_id_gathered.append(job_id)
                    self.jobs_done.append(job)
                    other_results.append(job)
                    for cb in self._callbacks:
                        cb.on_done_other(job)
        if len(other_results) == 0:
            logging.info(f"gather done - {len(local_results)} job(s)")
            return local_results
        else:
            logging.info(
                f"gather done - {len(local_results)} local(s) and {len(other_results)} other(s) job(s), "
            )
            return local_results, other_results
def decode(self, key):
"""Decode the key following a JSON format to return a dict."""
x = json.loads(key)
if not isinstance(x, dict):
raise ValueError(f"Expected dict, but got {type(x)}")
return x
def convert_for_csv(self, val):
"""Convert an input value to an accepted format to be saved as a value of a CSV file (e.g., a list becomes it's str representation).
Args:
val (Any): The input value to convert.
Returns:
Any: The converted value.
"""
if type(val) is list:
return str(val)
else:
return val
    def dump_evals(self, saved_keys=None, log_dir: str = ".", filename="results.csv"):
        """Dump evaluations to a CSV file.

        Args:
            saved_keys (list|callable): If ``None`` the whole ``job.config`` will be added as row of the CSV file. If a ``list`` filtered keys will be added as a row of the CSV file. If a ``callable`` the output dictionary will be added as a row of the CSV file.
            log_dir (str): directory where to dump the CSV file.
            filename (str): name of the file where to write the data.
        """
        logging.info("dump_evals starts...")
        resultsList = []
        for job in self.jobs_done:
            if saved_keys is None:
                result = copy.deepcopy(job.config)
            elif type(saved_keys) is list:
                decoded_key = copy.deepcopy(job.config)
                result = {k: self.convert_for_csv(decoded_key[k]) for k in saved_keys}
            elif callable(saved_keys):
                result = copy.deepcopy(saved_keys(job))
            # add prefix for all keys found in "config"
            result = {f"p:{k}": v for k, v in result.items()}
            # when the returned value of the run-function is a dict we flatten it to add in csv
            result["objective"] = job.objective
            # when the objective is a tuple (multi-objective) we create 1 column per tuple-element
            if isinstance(result["objective"], tuple):
                obj = result.pop("objective")
                if self.num_objective is None:
                    self.num_objective = len(obj)
                for i, objval in enumerate(obj):
                    result[f"objective_{i}"] = objval
            else:
                if self.num_objective is None:
                    self.num_objective = 1
                if self.num_objective > 1:
                    # a single value (e.g., failure string) is repeated to fill
                    # every multi-objective column
                    obj = result.pop("objective")
                    for i in range(self.num_objective):
                        result[f"objective_{i}"] = obj
            # job id and rank
            result["job_id"] = int(job.id.split(".")[1])
            if isinstance(job.rank, int):
                result["rank"] = job.rank
            # Profiling and other
            # metadata keys starting with "_" are not saved (considered as internal)
            metadata = {f"m:{k}": v for k, v in job.metadata.items() if k[0] != "_"}
            result.update(metadata)
            if hasattr(job, "dequed"):
                result["m:dequed"] = ",".join(job.dequed)
            resultsList.append(result)
        self.jobs_done = []
        if len(resultsList) != 0:
            # append on subsequent dumps; write the header only once
            mode = "a" if self._start_dumping else "w"
            with open(os.path.join(log_dir, filename), mode) as fp:
                columns = resultsList[0].keys()
                writer = csv.DictWriter(fp, columns)
                if not (self._start_dumping):
                    writer.writeheader()
                    self._start_dumping = True
                writer.writerows(resultsList)
        logging.info("dump_evals done")
| 18,360 | 38.401288 | 263 | py |
deephyper | deephyper-master/deephyper/evaluator/_mochi_process_pool.py | import logging
import asyncio
import functools
import collections
import pymargo
import pymargo.core
from concurrent.futures import ProcessPoolExecutor
from deephyper.evaluator._evaluator import Evaluator
import mpi4py
# !To avoid initializing MPI when module is imported (MPI is optional)
mpi4py.rc.initialize = False
mpi4py.rc.finalize = True
from mpi4py import MPI # noqa: E402
logger = logging.getLogger(__name__)
def margo_client(protocol, target_address, func, *args, **kwargs):
    """Issue a blocking RPC running ``func(*args, **kwargs)`` on a remote Margo server and return its result."""
    with pymargo.core.Engine(protocol, mode=pymargo.client) as client_engine:
        rpc = client_engine.register("execute_function")
        server = client_engine.lookup(target_address)
        return rpc.on(server)(func, *args, **kwargs)
def execute_function(handle: pymargo.core.Handle, func, *args, **kwargs):
    """RPC target: execute ``func`` locally and respond to the caller with its result."""
    handle.respond(func(*args, **kwargs))
def margo_server(comm, protocol):
    """Run a Margo server loop on a worker rank until it is remotely shut down.

    Args:
        comm: MPI communicator used to send the server address to the master.
        protocol (str): transport protocol for the Margo engine (e.g., ``"tcp"``).
    """
    with pymargo.core.Engine(protocol, mode=pymargo.server) as engine:
        # !temporary: the master (rank 0) collects worker addresses with
        # comm.recv(source=i), so the address must be sent to rank 0;
        # mpi4py's Comm.send requires an explicit destination.
        comm.send(engine.address, dest=0)
        engine.register("execute_function", execute_function)
        engine.enable_remote_shutdown()
        engine.wait_for_finalize()
class MochiEvaluator(Evaluator):
    """This evaluator executes the ``run_function`` on remote Margo (Mochi) servers, using a ``ProcessPoolExecutor`` on the master rank to issue the blocking RPCs asynchronously.

    Args:
        run_function (callable): functions to be executed by the ``Evaluator``.
        num_workers (int, optional): Number of parallel processes used to compute the ``run_function``. Defaults to 1.
        callbacks (list, optional): A list of callbacks to trigger custom actions at the creation or completion of jobs. Defaults to None.
        run_function_kwargs (dict, optional): Static keyword arguments to pass to the ``run_function`` when executed.
        protocol (str, optional): Transport protocol used by the Margo engines. Defaults to ``"tcp"``.
    """

    def __init__(
        self,
        run_function,
        num_workers: int = 1,
        callbacks: list = None,
        run_function_kwargs: dict = None,
        protocol="tcp",
    ):
        super().__init__(run_function, num_workers, callbacks, run_function_kwargs)
        self._protocol = protocol
        # !use of MPI is temporary to initialise addresses
        if not MPI.Is_initialized():
            MPI.Init_thread()
        self._comm = MPI.COMM_WORLD
        self._rank = self._comm.Get_rank()
        self._size = self._comm.Get_size()
        self.executor = None
        if self._rank == 0:  # master
            # bound the number of concurrently issued RPCs to the pool size
            self.sem = asyncio.Semaphore(num_workers)
            # !creating the exector once here is crutial to avoid repetitive overheads
            self.executor = ProcessPoolExecutor(max_workers=num_workers)
            if hasattr(run_function, "__name__") and hasattr(
                run_function, "__module__"
            ):
                logger.info(
                    f"Mochi Evaluator will execute {self.run_function.__name__}() from module {self.run_function.__module__}"
                )
            else:
                logger.info(f"Mochi Evaluator will execute {self.run_function}")
            # queue of worker addresses
            self._worker_addresses = []
            for i in range(1, self._size):
                address = self._comm.recv(source=i)
                self._worker_addresses.append(address)
            self._qworker_addresses = collections.deque(self._worker_addresses)
        else:  # workers
            # worker ranks block here serving RPCs until remotely shut down
            margo_server(self._comm, self._protocol)

    def __enter__(self):
        if self.executor:
            self.executor = self.executor.__enter__()
            return self
        else:
            # worker ranks have no executor and are not used as context managers
            return None

    def __exit__(self, type, value, traceback):
        if self.executor:
            self.executor.__exit__(type, value, traceback)
            # shutdown pymargo servers
            # !bug fix: only the master holds ``_worker_addresses``; the
            # shutdown loop must not run on worker ranks.
            with pymargo.core.Engine(self._protocol, mode=pymargo.client) as engine:
                for target_address in self._worker_addresses:
                    address = engine.lookup(target_address)
                    address.shutdown()

    async def execute(self, job):
        """Execute the received job on a remote Margo server.

        Args:
            job (Job): the ``Job`` to be executed.

        Returns:
            Job: the job with its output set.
        """
        async with self.sem:
            target_address = self._qworker_addresses.popleft()
            running_job = job.create_running_job(self._storage, self._stopper)
            # !bug fix: margo_client(protocol, target_address, func, *args, **kwargs)
            # expects the callable to execute remotely; previously the RunningJob
            # was mistakenly passed in place of the run-function.
            run_function = functools.partial(
                margo_client,
                self._protocol,
                target_address,
                job.run_function,
                running_job,
                **self.run_function_kwargs,
            )
            sol = await self.loop.run_in_executor(self.executor, run_function)
            # !bug fix: Job.result is a read-only property; use set_output()
            # as the other evaluator backends do.
            job.set_output(sol)
            self._qworker_addresses.append(target_address)
            return job
| 4,490 | 31.309353 | 138 | py |
deephyper | deephyper-master/deephyper/evaluator/_queued.py | import collections
def queued(evaluator_class):
    """Decorator transforming an Evaluator into a ``Queued{Evaluator}``.

    The ``run_function`` used with a ``Queued{Evaluator}`` needs to have a ``dequed`` keyword-argument where the dequed resources from the queue will be passed.

    Args:
        queue (list): A list of queued resources.
        queue_pop_per_task (int, optional): The number of resources popped out of the queue each time a task is submitted. Defaults to ``1``.
    """

    def __init__(
        self, *args, queue: list = None, queue_pop_per_task: int = 1, **kwargs
    ):
        evaluator_class.__init__(self, *args, **kwargs)
        # keep a private FIFO copy of the resources
        self.queue = collections.deque(queue[:])
        self.queue_pop_per_task = queue_pop_per_task

    async def execute(self, job):
        # reserve resources for this task...
        resources = []
        for _ in range(self.queue_pop_per_task):
            resources.append(self.queue.popleft())
        self.run_function_kwargs["dequed"] = resources
        job = await evaluator_class.execute(self, job)
        job.dequed = resources
        # ...and give them back to the queue once the job completed
        self.queue.extend(resources)
        return job

    return type(
        f"Queued{evaluator_class.__name__}",
        (evaluator_class,),
        {"__init__": __init__, "execute": execute},
    )
| 1,285 | 31.974359 | 229 | py |
deephyper | deephyper-master/deephyper/evaluator/_distributed.py | import logging
import time
import pickle
from typing import List, Tuple
from deephyper.evaluator import Job
import mpi4py
# !To avoid initializing MPI when module is imported (MPI is optional)
mpi4py.rc.initialize = False
mpi4py.rc.finalize = True
from mpi4py import MPI # noqa: E402
TAG_INIT = 20
TAG_DATA = 30
def distributed(backend: str):
    """Decorator transforming an Evaluator into a ``Distributed{Evaluator}``.

    For the decorator:

    Args:
        backend (str): Use ``"mpi"`` for pure MPI backend. Use ``"s4m"`` for Share4Me backend.

    For the returned evaluator:

    Args:
        comm: An MPI communicator. Defaults to ``None`` for ``MPI.COMM_WORLD``.
        share_freq (int): The frequency at which data should be shared between ranks of the distributed evaluator.
    """
    def wrapper(evaluator_class):
        if not (backend in ["mpi", "s4m"]):
            raise ValueError(f"Unknown backend={backend} for distributed Evaluator!")
        logging.info(
            f"Creating Distributed{evaluator_class.__name__} with backend='{backend}'."
        )
        if backend == "mpi":
            def __init__(self, *args, comm=None, share_freq=1, **kwargs):
                """Initialize the distributed evaluator on top of an MPI communicator."""
                evaluator_class.__init__(self, *args, **kwargs)
                if not MPI.Is_initialized():
                    MPI.Init_thread()
                self.comm = comm if comm else MPI.COMM_WORLD
                # number of local jobs to evaluate before sharing with other ranks
                self.share_freq = share_freq
                # number of local jobs done since last sharing with other ranks
                self.num_local_done = 0
                self.size = self.comm.Get_size()
                self.rank = self.comm.Get_rank()
                self.num_total_workers = self.num_workers * self.size
        elif backend == "s4m":
            def __init__(self, *args, comm=None, share_freq=1, **kwargs):
                """Initialize the distributed evaluator with the Share4Me (s4m) backend."""
                evaluator_class.__init__(self, *args, **kwargs)
                if not MPI.Is_initialized():
                    MPI.Init_thread()
                self.comm = comm if comm else MPI.COMM_WORLD
                # number of local jobs to evaluate before sharing with other ranks
                self.share_freq = share_freq
                # number of local jobs done since last sharing with other ranks
                self.num_local_done = 0
                self.size = self.comm.Get_size()
                self.rank = self.comm.Get_rank()
                self.num_total_workers = self.num_workers * self.size
                # The constructor is going to do some collective communication
                # across processes of the provided MPI communicator, so make
                # sure this call is done by all the processes at the same time.
                logging.info("Starting S4M service...")
                self._s4m_service = s4m.S4MService(self.comm, "verbs://")
                logging.info("S4M service running!")
                # Wait for all s4m services to be started
                logging.info("MPI Barrier...")
                self.comm.Barrier()
                logging.info("MPI Barrier done!")
        def _on_launch(self, job):
            """Called after a job is started."""
            job.rank = self.rank
            evaluator_class._on_launch(self, job)
        def _on_done(self, job):
            """Called after a job has completed."""
            evaluator_class._on_done(self, job)
            # drop the run-function so the job stays picklable for communication
            job.run_function = None
            self.num_local_done += 1
        def allgather(self, jobs: List[Job]) -> List[Job]:
            """Synchronously exchange done jobs with every rank; return the jobs received from others."""
            logging.info("Broadcasting to all...")
            t1 = time.time()
            all_data = self.comm.allgather(jobs)
            received_jobs = []
            for i, chunk in enumerate(all_data):
                if i != self.rank:
                    received_jobs.extend(chunk)
            n_received = len(received_jobs)
            self.jobs_done.extend(received_jobs)
            logging.info(
                f"Broadcast received {n_received} configurations in {time.time() - t1:.4f} sec."
            )
            return received_jobs
        if backend == "mpi":
            def broadcast(self, jobs: List[Job]):
                """Asynchronously send the given jobs to every other rank."""
                logging.info("Broadcasting jobs to all...")
                t1 = time.time()
                data = MPI.pickle.dumps(jobs)
                req_send = [
                    self.comm.Isend(data, dest=i, tag=TAG_DATA)
                    for i in range(self.size)
                    if i != self.rank
                ]
                MPI.Request.waitall(req_send)
                logging.info(f"Broadcasting to all done in {time.time() - t1:.4f} sec.")
            def receive(self) -> List[Job]:
                """Collect, without blocking, every job already sent by other ranks."""
                logging.info("Receiving jobs from any...")
                t1 = time.time()
                received_any = self.size > 1
                received_jobs = []
                while received_any:
                    received_any = False
                    req_recv = [
                        self.comm.irecv(source=i, tag=TAG_DATA)
                        for i in range(self.size)
                        if i != self.rank
                    ]
                    # asynchronous
                    for i, req in enumerate(req_recv):
                        try:
                            done, jobs = req.test()
                            if done:
                                received_any = True
                                received_jobs.extend(jobs)
                            else:
                                req.cancel()
                        except pickle.UnpicklingError:
                            logging.error(f"UnpicklingError for request {i}")
                self.jobs_done.extend(received_jobs)
                logging.info(
                    f"Received {len(received_jobs)} configurations in {time.time() - t1:.4f} sec."
                )
                return received_jobs
        elif backend == "s4m":
            import s4m
            def broadcast(self, jobs: List[Job]):
                """Send the given jobs to every other rank through the s4m service."""
                logging.info("Broadcasting jobs to all...")
                t1 = time.time()
                data = MPI.pickle.dumps(jobs)
                self._s4m_service.broadcast(data)
                logging.info(f"Sending to all done in {time.time() - t1:.4f} sec.")
            def receive(self):
                """Collect, without blocking, every job already sent by other ranks."""
                logging.info("Receiving jobs from any...")
                t1 = time.time()
                # The receive function is non-blocking and will check
                # for available data sent by other processes. If data
                # is available, the function will return a pair (source, data)
                # where source is the rank that sent the data, and data is a
                # bytes object. If no data is available, the function will
                # return None.
                received_any = True
                received_jobs = []
                while received_any:
                    data = self._s4m_service.receive()
                    if data is None:
                        received_any = False
                    else:
                        source_rank, data = data
                        try:
                            jobs = MPI.pickle.loads(data)
                        except pickle.UnpicklingError:
                            logging.error(
                                f"UnpicklingError for request source {source_rank}"
                            )
                            continue
                        received_jobs.extend(jobs)
                self.jobs_done.extend(received_jobs)
                logging.info(
                    f"Received {len(received_jobs)} configurations in {time.time() - t1:.4f} sec."
                )
                return received_jobs
        def share(
            self, jobs: List[Job], sync_communication=False
        ) -> Tuple[List[Job], List[Job]]:
            """Share locally done jobs with the other ranks every ``share_freq`` local completions."""
            if self.num_local_done % self.share_freq == 0:
                if sync_communication:
                    other_jobs = self.allgather(jobs)
                else:
                    self.broadcast(jobs)
                    other_jobs = self.receive()
            # NOTE(review): when the modulo condition above is False,
            # ``other_jobs`` is unbound here (possible UnboundLocalError) —
            # confirm intended usage of share_freq.
            return jobs, other_jobs
        def gather(self, *args, sync_communication=False, **kwargs):
            """Gather local results, then share them with the other ranks."""
            jobs = evaluator_class.gather(self, *args, **kwargs)
            jobs, other_jobs = self.share(jobs, sync_communication)
            return jobs, other_jobs
        def dump_evals(self, *args, **kwargs):
            """Dump results to disk from rank 0 only."""
            if self.rank == 0:
                evaluator_class.dump_evals(self, *args, **kwargs)
        cls_attrs = {
            "__init__": __init__,
            "_on_launch": _on_launch,
            "_on_done": _on_done,
            "allgather": allgather,
            "broadcast": broadcast,
            "receive": receive,
            "share": share,
            "gather": gather,
            "dump_evals": dump_evals,
        }
        distributed_evaluator_class = type(
            f"Distributed{evaluator_class.__name__}", (evaluator_class,), cls_attrs
        )
        return distributed_evaluator_class
    return wrapper
| 9,193 | 35.054902 | 114 | py |
deephyper | deephyper-master/deephyper/evaluator/_run_function_utils.py | from typing import Union
from numbers import Number
import numpy as np
def standardize_run_function_output(
    output: Union[str, float, tuple, list, dict]
) -> dict:
    """Transform the output of the run-function to its standard form.

    Possible return values of the run-function are:

    >>> 0
    >>> 0, 0
    >>> "F_something"
    >>> {"objective": 0 }
    >>> {"objective": (0, 0), "metadata": {...}}

    Args:
        output: the raw value returned by the run-function.

    Returns:
        dict: standardized output of the function.
    """
    if np.isscalar(output):
        # a single objective was returned as a plain scalar
        if isinstance(output, str):
            output = {"objective": output}
        elif isinstance(output, Number):
            output = {"objective": float(output)}
        else:
            raise TypeError(
                f"The output of the run-function cannot be of type {type(output)} it should be either a string or a number."
            )
    elif isinstance(output, (tuple, list)):
        # only objective values were returned, without metadata
        output = {"objective": output}
    elif not isinstance(output, dict):
        raise TypeError(
            f"The output of the run-function cannot be of type {type(output)}"
        )
    output.setdefault("metadata", dict())
    # a 2D objective means multiple (budget, objective) observations were
    # returned: keep the last objective and store the full series
    objective_array = np.asarray(output["objective"])
    if objective_array.ndim == 2:
        output["objective"] = objective_array[1, -1].tolist()
        output["observations"] = objective_array.tolist()
    return output
| 1,592 | 26 | 124 | py |
deephyper | deephyper-master/deephyper/evaluator/_ray.py | import logging
import ray
from typing import Callable, Hashable
from deephyper.evaluator._evaluator import Evaluator
from deephyper.evaluator._job import Job
from deephyper.evaluator.storage import Storage
ray_initializer = None
logger = logging.getLogger(__name__)
class RayEvaluator(Evaluator):
    """This evaluator uses the ``ray`` library as backend.

    Args:
        run_function (callable): functions to be executed by the ``Evaluator``.
        callbacks (list, optional): A list of callbacks to trigger custom actions at the creation or completion of jobs. Defaults to None.
        run_function_kwargs (dict, optional): Static keyword arguments to pass to the ``run_function`` when executed.
        storage (Storage, optional): Storage used by the evaluator. Defaults to ``MemoryStorage``.
        search_id (Hashable, optional): The id of the search to use in the corresponding storage. If ``None`` it will create a new search identifier when initializing the search.
        address (str, optional): address of the Ray-head. Defaults to None, if no Ray-head was started.
        password (str, optional): password to connect ot the Ray-head. Defaults to None, if the default Ray-password is used.
        num_cpus (int, optional): number of CPUs available in the Ray-cluster. Defaults to None, if the Ray-cluster was already started it will be automatically computed.
        num_gpus (int, optional): number of GPUs available in the Ray-cluster. Defaults to None, if the Ray-cluster was already started it will be automatically computed.
        num_cpus_per_task (float, optional): number of CPUs used per remote task. Defaults to 1.
        num_gpus_per_task (float, optional): number of GPUs used per remote task. Defaults to None.
        ray_kwargs (dict, optional): other ray keyword arguments passed to ``ray.init(...)``. Defaults to {}.
        num_workers (int, optional): number of workers available to compute remote-tasks in parallel. Defaults to ``None``, or if it is ``-1`` it is automatically computed based with ``num_workers = int(num_cpus // num_cpus_per_task)``.
    """
    def __init__(
        self,
        run_function: Callable,
        callbacks: list = None,
        run_function_kwargs: dict = None,
        storage: Storage = None,
        search_id: Hashable = None,
        address: str = None,
        password: str = None,
        num_cpus: int = None,
        num_gpus: int = None,
        include_dashboard: bool = False,
        num_cpus_per_task: float = 1,
        num_gpus_per_task: float = None,
        ray_kwargs: dict = None,
        num_workers: int = None,
    ):
        super().__init__(
            run_function=run_function,
            num_workers=num_workers,
            callbacks=callbacks,
            run_function_kwargs=run_function_kwargs,
            storage=storage,
            search_id=search_id,
        )
        # get the __init__ parameters
        self._init_params = locals()
        # forward only explicitly provided settings to ray.init()
        ray_kwargs = {} if ray_kwargs is None else ray_kwargs
        if address is not None:
            ray_kwargs["address"] = address
        if password is not None:
            ray_kwargs["_redis_password"] = password
        if num_cpus is not None:
            ray_kwargs["num_cpus"] = num_cpus
        if num_gpus is not None:
            ray_kwargs["num_gpus"] = num_gpus
        if include_dashboard is not None:
            ray_kwargs["include_dashboard"] = include_dashboard
        if not (ray.is_initialized()):
            ray.init(**ray_kwargs)
        self.num_cpus_per_task = num_cpus_per_task
        self.num_gpus_per_task = num_gpus_per_task
        # total resources available across all nodes of the Ray cluster
        self.num_cpus = int(
            sum([node["Resources"].get("CPU", 0) for node in ray.nodes()])
        )
        self.num_gpus = int(
            sum([node["Resources"].get("GPU", 0) for node in ray.nodes()])
        )
        if self.num_workers is None or self.num_workers == -1:
            # infer the number of workers from the available CPUs
            self.num_workers = int(self.num_cpus // self.num_cpus_per_task)
        if hasattr(run_function, "__name__") and hasattr(run_function, "__module__"):
            logger.info(
                f"Ray Evaluator will execute {self.run_function.__name__}() from module {self.run_function.__module__}"
            )
        else:
            logger.info(f"Ray Evaluator will execute {self.run_function}")
        # wrap the run-function as a Ray remote task with per-task resources
        self._remote_run_function = ray.remote(
            num_cpus=self.num_cpus_per_task,
            num_gpus=self.num_gpus_per_task,
            # max_calls=1,
        )(self.run_function)
    async def execute(self, job: Job) -> Job:
        """Execute the received job as a Ray remote task.

        Args:
            job (Job): the ``Job`` to be executed.

        Returns:
            Job: the job with its output set.
        """
        running_job = job.create_running_job(self._storage, self._stopper)
        output = await self._remote_run_function.remote(
            running_job, **self.run_function_kwargs
        )
        job.set_output(output)
        return job
| 4,826 | 42.098214 | 236 | py |
deephyper | deephyper-master/deephyper/evaluator/_job.py | import copy
from collections.abc import MutableMapping
from typing import Hashable
from deephyper.evaluator.storage import Storage, MemoryStorage
from deephyper.evaluator._run_function_utils import standardize_run_function_output
from deephyper.stopper._stopper import Stopper
class Job:
    """Represents an evaluation executed by the ``Evaluator`` class.

    Args:
        id (Any): unique identifier of the job. Usually an integer.
        config (dict): argument dictionary of the ``run_function``.
        run_function (callable): function executed by the ``Evaluator``
    """
    # Job status states.
    READY = 0
    RUNNING = 1
    DONE = 2
    def __init__(self, id, config: dict, run_function):
        self.id = id
        # rank of the process that executed the job (set by distributed evaluators)
        self.rank = None
        self.config = copy.deepcopy(config)
        self.run_function = run_function
        self.status = self.READY
        # standardized output: an "objective" value and a "metadata" dict
        self.output = {
            "objective": None,
            "metadata": {"timestamp_submit": None, "timestamp_gather": None},
        }
        self.observations = None
    def __repr__(self) -> str:
        if self.rank is not None:
            return f"Job(id={self.id}, rank={self.rank}, status={self.status}, config={self.config})"
        else:
            return f"Job(id={self.id}, status={self.status}, config={self.config})"
    def __getitem__(self, index):
        # emulate indexing of the (config, objective) tuple
        cfg = copy.deepcopy(self.config)
        return (cfg, self.objective)[index]
    @property
    def result(self):
        """Alias of :attr:`objective`."""
        return self.objective
    @property
    def objective(self):
        """Objective returned by the run-function."""
        return self.output["objective"]
    @property
    def metadata(self):
        """Metadata of the job stored in the output of run-function."""
        return self.output["metadata"]
    def set_output(self, output):
        """Standardize and store the raw output of the run-function."""
        output = standardize_run_function_output(output)
        self.output["objective"] = output["objective"]
        self.output["metadata"].update(output["metadata"])
        self.observations = output.get("observations", None)
    def create_running_job(self, storage, stopper):
        """Create the ``RunningJob`` view of this job that is passed to the run-function."""
        stopper = copy.deepcopy(stopper)
        rjob = RunningJob(self.id, self.config, storage, stopper)
        if stopper is not None and hasattr(stopper, "job"):
            # give the stopper a handle on the running job it controls
            stopper.job = rjob
        return rjob
class RunningJob(MutableMapping):
    """Dict-like view of a job passed to the run-function while it executes.

    Exposes the job ``parameters`` through the mapping interface (plus the
    read-only virtual key ``"job_id"``) and, optionally, records intermediate
    observations through a ``Stopper``.

    Args:
        id (Hashable, optional): identifier of the job in the storage.
        parameters (dict, optional): parameters passed to the run-function.
        storage (Storage, optional): storage backend; an in-memory storage is created when ``None``.
        stopper (Stopper, optional): stopper used to record observations and decide early stopping.
    """
    def __init__(
        self,
        id: Hashable = None,
        parameters: dict = None,
        storage: Storage = None,
        stopper: Stopper = None,
    ) -> None:
        self.id = id
        self.parameters = parameters
        if storage is None:
            # fall back to an in-memory storage with fresh search/job identifiers
            self.storage = MemoryStorage()
            search_id = self.storage.create_new_search()
            self.id = self.storage.create_new_job(search_id)
        else:
            self.storage = storage
        self.stopper = stopper
        # last recorded objective, used when no stopper is attached
        self.obs = None
    def __getitem__(self, key):
        # "job_id" is a virtual key resolving to the numeric part of the job id
        if key == "job_id":
            return int(self.id.split(".")[-1])
        return self.parameters[key]
    def __setitem__(self, key, value):
        if key == "job_id":
            raise KeyError("Cannot change the 'job_id' of a running job.")
        self.parameters[key] = value
    def __delitem__(self, key):
        del self.parameters[key]
    def __iter__(self):
        return iter(self.parameters)
    def __len__(self):
        return len(self.parameters)
    # NOTE: MutableMapping derives get/keys/values/items/__contains__ from the
    # methods above, so the job behaves like a dict in the run-function.
    def record(self, budget: float, objective: float):
        """Record an observation of the objective at the given budget."""
        if self.stopper:
            self.stopper.observe(budget, objective)
        else:
            self.obs = objective
    def stopped(self):
        """Return whether the stopper decided to stop this job early."""
        if self.stopper:
            return self.stopper.stop()
        else:
            return False
    @property
    def objective(self):
        """If the RunningJob is using a Stopper then it will return observations from it. Otherwise it will simply return the last objective value recorded."""
        if self.stopper:
            return self.stopper.objective
        else:
            return self.obs
| 5,441 | 31.011765 | 163 | py |
deephyper | deephyper-master/deephyper/evaluator/_thread_pool.py | import asyncio
import functools
import logging
from concurrent.futures import ThreadPoolExecutor
from typing import Callable, Hashable
from deephyper.evaluator._evaluator import Evaluator
from deephyper.evaluator._job import Job
from deephyper.evaluator.storage import Storage
logger = logging.getLogger(__name__)
class ThreadPoolEvaluator(Evaluator):
    """This evaluator uses the ``ThreadPoolExecutor`` as backend.

    .. warning:: This evaluator is interesting with I/O intensive tasks, do not expect a speed-up with compute intensive tasks.

    Args:
        run_function (callable): functions to be executed by the ``Evaluator``.
        num_workers (int, optional): Number of concurrent threads used to compute the ``run_function``. Defaults to 1.
        callbacks (list, optional): A list of callbacks to trigger custom actions at the creation or completion of jobs. Defaults to None.
        run_function_kwargs (dict, optional): Static keyword arguments to pass to the ``run_function`` when executed.
        storage (Storage, optional): Storage used by the evaluator. Defaults to ``MemoryStorage``.
        search_id (Hashable, optional): The id of the search to use in the corresponding storage. If ``None`` it will create a new search identifier when initializing the search.
    """
    def __init__(
        self,
        run_function: Callable,
        num_workers: int = 1,
        callbacks: list = None,
        run_function_kwargs: dict = None,
        storage: Storage = None,
        search_id: Hashable = None,
    ):
        super().__init__(
            run_function=run_function,
            num_workers=num_workers,
            callbacks=callbacks,
            run_function_kwargs=run_function_kwargs,
            storage=storage,
            search_id=search_id,
        )
        # bound the number of concurrently executing jobs to the pool size
        self.sem = asyncio.Semaphore(num_workers)
        self.executor = ThreadPoolExecutor(max_workers=num_workers)
        if hasattr(run_function, "__name__") and hasattr(run_function, "__module__"):
            logger.info(
                f"ThreadPool Evaluator will execute {self.run_function.__name__}() from module {self.run_function.__module__}"
            )
        else:
            logger.info(f"Thread Evaluator will execute {self.run_function}")
    async def execute(self, job: Job) -> Job:
        """Run the job's function in the thread pool and set its output.

        Args:
            job (Job): the ``Job`` to be executed.

        Returns:
            Job: the job with its output set.
        """
        async with self.sem:
            running_job = job.create_running_job(self._storage, self._stopper)
            run_function = functools.partial(
                job.run_function, running_job, **self.run_function_kwargs
            )
            output = await self.loop.run_in_executor(self.executor, run_function)
            job.set_output(output)
        return job
| 2,702 | 38.173913 | 178 | py |
deephyper | deephyper-master/deephyper/evaluator/_decorator.py | import time
from functools import wraps
# !info [why is it important to use "wraps"]
# !http://gael-varoquaux.info/programming/decoration-in-python-done-right-decorating-and-pickling.html
from deephyper.evaluator._run_function_utils import standardize_run_function_output
def profile(run_function):
    """Decorator to use on a ``run_function`` to profile its execution-time. It is to be used such as:

    .. code-block::

        @profile
        def run(config):
            ...
            return y

    Args:
        run_function (function): the function to decorate.

    Returns:
        function: a decorated function.
    """

    @wraps(run_function)
    def wrapper(job, *args, **kwargs):
        # measure the wall-clock time spent in the run-function
        t_start = time.time()
        raw_output = run_function(job, *args, **kwargs)
        t_end = time.time()
        output = standardize_run_function_output(raw_output)
        # metadata returned by the run-function takes precedence on key collision
        timing_metadata = {"timestamp_start": t_start, "timestamp_end": t_end}
        timing_metadata.update(output["metadata"])
        output["metadata"] = timing_metadata
        return output

    return wrapper
| 1,101 | 25.878049 | 102 | py |
deephyper | deephyper-master/deephyper/evaluator/__init__.py | """
This evaluator sub-package provides a common interface to execute isolated tasks with different parallel backends and system properties. This interface is used by search algorithm to perform black-box optimization (the black-box being represented by the ``run``-function).
An ``Evaluator``, when instantiated, is bound to a ``run``-function which takes as first argument a dictionary and optionally has other keyword-arguments. The ``run``-function has to return a Python serializable value (under ``pickle`` protocol). In its most basic form the return value is a ``float``.
An example ``run``-function is:
.. code-block:: python
def run(config: dict) -> float:
y = config["x"]**2
return y
The return value of the ``run``-function respects the following standards (but each feature is not necessarily supported by all search algorithms, such as multi-objective optimization):
.. code-block:: python
# float for single objective optimization
return 42.0
# str with "F" prefix for failed evaluation
return "F_out_of_memory"
# dict
return {"objective": 42.0}
# dict with additional information
return {"objective": 42.0, "metadata": {"num_epochs_trained": 25, "num_parameters": 420000}}
# dict with reserved keywords (when @profile decorator is used)
return {"objective": 42.0, "metadata": {"timestamp_start": ..., "timestamp_end": ...}"
# tuple of float for multi-objective optimization (will appear as "objective_0" and "objective_1" in the resulting dataframe)
return 42.0, 0.42
"""
from deephyper.evaluator._evaluator import EVALUATORS, Evaluator
from deephyper.evaluator._job import Job, RunningJob
from deephyper.evaluator._process_pool import ProcessPoolEvaluator
from deephyper.evaluator._serial import SerialEvaluator
from deephyper.evaluator._thread_pool import ThreadPoolEvaluator
from deephyper.evaluator._queued import queued
from deephyper.evaluator._decorator import profile
from deephyper.evaluator._encoder import to_json, parse_subprocess_result
# Public names re-exported by ``deephyper.evaluator``.
__all__ = [
    "Evaluator",
    "EVALUATORS",
    "Job",
    "RunningJob",
    "ProcessPoolEvaluator",
    "profile",
    "queued",
    "SerialEvaluator",
    "ThreadPoolEvaluator",
    "to_json",
    "parse_subprocess_result",
]
# Optional backend: exported only when the ``ray`` package is installed.
try:
    from deephyper.evaluator._ray import RayEvaluator  # noqa: F401

    __all__.append("RayEvaluator")
except ImportError:
    pass
# Optional backends: exported only when MPI support (``mpi4py``) is installed.
try:
    from deephyper.evaluator._distributed import distributed  # noqa: F401
    from deephyper.evaluator._mpi_comm import MPICommEvaluator  # noqa: F401

    __all__.append("MPICommEvaluator")
    __all__.append("distributed")
except ImportError:
    pass
| 2,676 | 36.180556 | 304 | py |
deephyper | deephyper-master/deephyper/evaluator/_nest_asyncio.py | """From https://github.com/erdewit/nest_asyncio"""
import asyncio
import asyncio.events as events
import os
import sys
import threading
from contextlib import contextmanager, suppress
from heapq import heappop
def apply(loop=None):
    """Patch asyncio to make its event loop reentrant."""
    # Patch the module-level machinery first, then the concrete loop object.
    _patch_asyncio()
    _patch_task()
    _patch_tornado()
    target_loop = loop if loop else asyncio.get_event_loop()
    _patch_loop(target_loop)
def _patch_asyncio():
    """
    Patch asyncio module to use pure Python tasks and futures,
    use module level _current_tasks, all_tasks and patch run method.
    """

    def run(main, *, debug=False):
        # Reentrant replacement for asyncio.run(): reuses the current event
        # loop when one already exists instead of creating and owning a new
        # one, and patches it before use.
        try:
            loop = asyncio.get_event_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
        _patch_loop(loop)
        loop.set_debug(debug)
        task = asyncio.ensure_future(main)
        try:
            return loop.run_until_complete(task)
        finally:
            if not task.done():
                # Cancel the task and wait for the cancellation to finish.
                task.cancel()
                with suppress(asyncio.CancelledError):
                    loop.run_until_complete(task)

    def _get_event_loop(stacklevel=3):
        # Replacement for events.get_event_loop() on Python >= 3.9: returns
        # the running loop if any, otherwise the policy's loop (without the
        # deprecation warning path of the stock implementation).
        loop = events._get_running_loop()
        if loop is None:
            loop = events.get_event_loop_policy().get_event_loop()
        return loop

    # Idempotence guard: only patch the asyncio module once.
    if hasattr(asyncio, "_nest_patched"):
        return
    if sys.version_info >= (3, 6, 0):
        # Force the pure-Python Task/Future implementations; the
        # C-accelerated versions cannot be patched by _patch_task.
        asyncio.Task = asyncio.tasks._CTask = asyncio.tasks.Task = asyncio.tasks._PyTask
        asyncio.Future = (
            asyncio.futures._CFuture
        ) = asyncio.futures.Future = asyncio.futures._PyFuture
    if sys.version_info < (3, 7, 0):
        # Before 3.7 these lived on the Task class; expose them at module level.
        asyncio.tasks._current_tasks = asyncio.tasks.Task._current_tasks
        asyncio.all_tasks = asyncio.tasks.Task.all_tasks
    if sys.version_info >= (3, 9, 0):
        events._get_event_loop = (
            events.get_event_loop
        ) = asyncio.get_event_loop = _get_event_loop
        _get_event_loop  # NOTE(review): no-op expression — looks unintentional
    asyncio.run = run
    asyncio._nest_patched = True
def _patch_loop(loop):
    """Patch loop to make it reentrant."""

    def run_forever(self):
        with manage_run(self), manage_asyncgens(self):
            while True:
                self._run_once()
                if self._stopping:
                    break
        self._stopping = False

    def run_until_complete(self, future):
        with manage_run(self):
            f = asyncio.ensure_future(future, loop=self)
            if f is not future:
                f._log_destroy_pending = False
            while not f.done():
                self._run_once()
                if self._stopping:
                    break
            if not f.done():
                raise RuntimeError("Event loop stopped before Future completed.")
            return f.result()

    def _run_once(self):
        """
        Simplified re-implementation of asyncio's _run_once that
        runs handles as they become ready.
        """
        ready = self._ready
        scheduled = self._scheduled
        # Drop cancelled timers from the head of the heap.
        while scheduled and scheduled[0]._cancelled:
            heappop(scheduled)
        # Select timeout: 0 if work is already pending, otherwise the delay
        # until the next timer (capped at one day), or block if no timers.
        timeout = (
            0
            if ready or self._stopping
            else min(max(scheduled[0]._when - self.time(), 0), 86400)
            if scheduled
            else None
        )
        event_list = self._selector.select(timeout)
        self._process_events(event_list)
        # Move timers whose deadline has passed into the ready queue.
        end_time = self.time() + self._clock_resolution
        while scheduled and scheduled[0]._when < end_time:
            handle = heappop(scheduled)
            ready.append(handle)
        # Run only the handles that are ready now; callbacks scheduled while
        # running will be picked up on the next pass.
        for _ in range(len(ready)):
            if not ready:
                break
            handle = ready.popleft()
            if not handle._cancelled:
                handle._run()
        handle = None  # drop last reference (helps break reference cycles)

    @contextmanager
    def manage_run(self):
        """Set up the loop for running."""
        self._check_closed()
        # Save outer state so nested runs can restore it on exit.
        old_thread_id = self._thread_id
        old_running_loop = events._get_running_loop()
        try:
            self._thread_id = threading.get_ident()
            events._set_running_loop(self)
            self._num_runs_pending += 1
            if self._is_proactorloop:
                if self._self_reading_future is None:
                    self.call_soon(self._loop_self_reading)
            yield
        finally:
            self._thread_id = old_thread_id
            events._set_running_loop(old_running_loop)
            self._num_runs_pending -= 1
            if self._is_proactorloop:
                # Tear down the proactor's self-pipe read only when no run
                # remains pending.
                if (
                    self._num_runs_pending == 0
                    and self._self_reading_future is not None
                ):
                    ov = self._self_reading_future._ov
                    self._self_reading_future.cancel()
                    if ov is not None:
                        self._proactor._unregister(ov)
                    self._self_reading_future = None

    @contextmanager
    def manage_asyncgens(self):
        if not hasattr(sys, "get_asyncgen_hooks"):
            # Python version is too old.
            return
        old_agen_hooks = sys.get_asyncgen_hooks()
        try:
            self._set_coroutine_origin_tracking(self._debug)
            if self._asyncgens is not None:
                sys.set_asyncgen_hooks(
                    firstiter=self._asyncgen_firstiter_hook,
                    finalizer=self._asyncgen_finalizer_hook,
                )
            yield
        finally:
            self._set_coroutine_origin_tracking(False)
            if self._asyncgens is not None:
                sys.set_asyncgen_hooks(*old_agen_hooks)

    def _check_running(self):
        """Do not throw exception if loop is already running."""
        pass

    # Idempotence guard: only patch a given loop class once.
    if hasattr(loop, "_nest_patched"):
        return
    if not isinstance(loop, asyncio.BaseEventLoop):
        raise ValueError("Can't patch loop of type %s" % type(loop))
    cls = loop.__class__
    cls.run_forever = run_forever
    cls.run_until_complete = run_until_complete
    cls._run_once = _run_once
    cls._check_running = _check_running
    cls._check_runnung = _check_running  # typo in Python 3.7 source
    cls._num_runs_pending = 0
    cls._is_proactorloop = os.name == "nt" and issubclass(
        cls, asyncio.ProactorEventLoop
    )
    if sys.version_info < (3, 7, 0):
        cls._set_coroutine_origin_tracking = cls._set_coroutine_wrapper
    cls._nest_patched = True
def _patch_task():
    """Patch the Task's step and enter/leave methods to make it reentrant."""

    def step(task, exc=None):
        # Save/restore the loop's "current task" around the original step so
        # nested loop runs do not clobber the outer task.
        # NOTE: curr_tasks/step_orig are late-bound below, before any task runs.
        curr_task = curr_tasks.get(task._loop)
        try:
            step_orig(task, exc)
        finally:
            if curr_task is None:
                curr_tasks.pop(task._loop, None)
            else:
                curr_tasks[task._loop] = curr_task

    Task = asyncio.Task
    # Idempotence guard: only patch the Task class once.
    if hasattr(Task, "_nest_patched"):
        return
    if sys.version_info >= (3, 7, 0):

        def enter_task(loop, task):
            # Unconditionally record the task; the stock implementation
            # raises when another task is already running on the loop.
            curr_tasks[loop] = task

        def leave_task(loop, task):
            curr_tasks.pop(loop, None)

        asyncio.tasks._enter_task = enter_task
        asyncio.tasks._leave_task = leave_task
        curr_tasks = asyncio.tasks._current_tasks
        step_orig = Task._Task__step
        Task._Task__step = step
    else:
        curr_tasks = Task._current_tasks
        step_orig = Task._step
        Task._step = step
    Task._nest_patched = True
def _patch_tornado():
"""
If tornado is imported before nest_asyncio, make tornado aware of
the pure-Python asyncio Future.
"""
if "tornado" in sys.modules:
import tornado.concurrent as tc
tc.Future = asyncio.Future
if asyncio.Future not in tc.FUTURES:
tc.FUTURES += (asyncio.Future,)
| 7,828 | 31.086066 | 88 | py |
deephyper | deephyper-master/deephyper/evaluator/callback.py | """The callback module contains sub-classes of the ``Callback`` class used to trigger custom actions on the start and completion of jobs by the ``Evaluator``. Callbacks can be used with any Evaluator implementation.
"""
import deephyper.core.exceptions
import numpy as np
import pandas as pd
from deephyper.evaluator._evaluator import _test_ipython_interpretor
if _test_ipython_interpretor():
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
class Callback:
    """Base class for ``Evaluator`` callbacks; subclasses override the hooks below."""

    def on_launch(self, job):
        """Called each time a ``Job`` is created by the ``Evaluator``.

        Args:
            job (Job): The created job.
        """

    def on_done(self, job):
        """Called each time a Job is completed by the Evaluator.

        Args:
            job (Job): The completed job.
        """

    def on_done_other(self, job):
        """Called each time a Job is collected from an other process.

        Args:
            job (Job): The completed Job.
        """
class ProfilingCallback(Callback):
    """Collect profiling data. Each time a ``Job`` is completed by the ``Evaluator`` the different timestamps corresponding to the submit and gather (and run function start and end if the ``profile`` decorator is used on the run function) are collected.

    An example usage can be:

    >>> profiler = ProfilingCallback()
    >>> evaluator.create(method="ray", method_kwargs={..., "callbacks": [profiler]})
    ...
    >>> profiler.profile
    """

    def __init__(self):
        # Each entry is a (timestamp, +1/-1) event marking a job start/stop.
        self.history = []

    def on_launch(self, job):
        ...

    def on_done(self, job):
        # Prefer the run-function timestamps (collected by the @profile
        # decorator) when available; otherwise fall back to submit/gather.
        start, end = job.timestamp_submit, job.timestamp_gather
        if job.timestamp_start is not None and job.timestamp_end is not None:
            start, end = job.timestamp_start, job.timestamp_end
        self.history.append((start, 1))
        self.history.append((end, -1))

    @property
    def profile(self):
        # Sweep the events in chronological order to rebuild the number of
        # concurrently running jobs over time.
        num_running = 0
        records = []
        for timestamp, delta in sorted(self.history):
            num_running += delta
            records.append([timestamp, num_running])
        return pd.DataFrame(records, columns=["timestamp", "n_jobs_running"])
class LoggerCallback(Callback):
    """Print information when jobs are completed by the ``Evaluator``.

    An example usage can be:

    >>> evaluator.create(method="ray", method_kwargs={..., "callbacks": [LoggerCallback()]})
    """

    def __init__(self):
        # Best (maximized) objective observed so far; None until the first
        # successful job is collected.
        self._best_objective = None
        # Number of jobs seen by this callback.
        self._n_done = 0

    def on_done_other(self, job):
        self.on_done(job)

    def on_done(self, job):
        self._n_done += 1
        # Test if multi objectives are received (sequence of values).
        if np.ndim(job.objective) > 0:
            if np.isreal(job.objective).all():
                if self._best_objective is None:
                    self._best_objective = np.sum(job.objective)
                else:
                    self._best_objective = max(
                        np.sum(job.objective), self._best_objective
                    )
                print(
                    f"[{self._n_done:05d}] -- best sum(objective): {self._best_objective:.5f} -- received sum(objective): {np.sum(job.objective):.5f}"
                )
            # BUGFIX: use the builtin ``any`` here. ``np.any`` applied to a
            # generator expression wraps the generator in a 0-d object array,
            # which is always truthy, so the original condition was always True.
            elif any(type(res) is str and "F" == res[0] for res in job.objective):
                print(f"[{self._n_done:05d}] -- received failure: {job.objective}")
        elif np.isreal(job.objective):
            if self._best_objective is None:
                self._best_objective = job.objective
            else:
                self._best_objective = max(job.objective, self._best_objective)
            print(
                f"[{self._n_done:05d}] -- best objective: {self._best_objective:.5f} -- received objective: {job.objective:.5f}"
            )
        elif type(job.objective) is str and "F" == job.objective[0]:
            print(f"[{self._n_done:05d}] -- received failure: {job.objective}")
class TqdmCallback(Callback):
    """Print information when jobs are completed by the ``Evaluator``.

    An example usage can be:

    >>> evaluator.create(method="ray", method_kwargs={..., "callbacks": [TqdmCallback()]})
    """

    def __init__(self):
        self._best_objective = None
        self._n_done = 0
        self._n_failures = 0
        self._max_evals = None
        self._tqdm = None

    def set_max_evals(self, max_evals):
        # Reset the bar so it is re-created with the new total on next update.
        self._max_evals = max_evals
        self._tqdm = None

    def on_done_other(self, job):
        self.on_done(job)

    def on_done(self, job):
        # Lazily create the progress bar on the first completed job.
        if self._tqdm is None:
            bar_kwargs = {"total": self._max_evals} if self._max_evals else {}
            self._tqdm = tqdm(**bar_kwargs)

        self._n_done += 1
        self._tqdm.update(1)

        # Multi-objective results arrive as a sequence of values.
        if np.ndim(job.objective) > 0:
            if np.isreal(job.objective).all():
                candidate = np.sum(job.objective)
                if self._best_objective is None:
                    self._best_objective = candidate
                else:
                    self._best_objective = max(candidate, self._best_objective)
            else:
                self._n_failures += 1
            self._tqdm.set_postfix(
                {"failures": self._n_failures, "sum(objective)": self._best_objective}
            )
        else:
            if np.isreal(job.objective):
                if self._best_objective is None:
                    self._best_objective = job.objective
                else:
                    self._best_objective = max(job.objective, self._best_objective)
            else:
                self._n_failures += 1
            self._tqdm.set_postfix(
                objective=self._best_objective, failures=self._n_failures
            )
class SearchEarlyStopping(Callback):
    """Stop the search gracefully when it does not improve for a given number of evaluations.

    Args:
        patience (int, optional): The number of not improving evaluations to wait for before stopping the search. Defaults to 10.
        objective_func (callable, optional): A function that takes a ``Job`` has input and returns the maximized scalar value monitored by this callback. Defaults to ``lambda j: j.result``.
    """

    def __init__(self, patience: int = 10, objective_func=lambda j: j.result):
        self._best_objective = None
        self._n_lower = 0
        self._patience = patience
        self._objective_func = objective_func

    def on_done_other(self, job):
        self.on_done(job)

    def on_done(self, job):
        objective = self._objective_func(job)
        # Multi-objective results are reduced to their sum before comparison.
        if np.ndim(objective) > 0:
            objective = np.sum(objective)

        if self._best_objective is None or objective > self._best_objective:
            if self._best_objective is not None:
                print(
                    f"Objective has improved from {self._best_objective:.5f} -> {objective:.5f}"
                )
            self._best_objective = objective
            self._n_lower = 0
            return

        self._n_lower += 1
        if self._n_lower >= self._patience:
            print(
                f"Stopping the search because it did not improve for the last {self._patience} evaluations!"
            )
            raise deephyper.core.exceptions.SearchTerminationError
| 7,498 | 33.399083 | 255 | py |
deephyper | deephyper-master/deephyper/evaluator/storage/_memory_storage.py | import copy
from typing import Any, Dict, Hashable, List, Tuple
from deephyper.evaluator.storage._storage import Storage
class MemoryStorage(Storage):
    """Storage client for local in-memory storage.

    This backend does not allow to share the data between evaluators running in different processes.
    """

    def __init__(self) -> None:
        super().__init__()
        # Counter used to generate unique search identifiers.
        self._search_id_counter = 0
        # Layout: {search_id: {"job_id_counter": int, "data": {partial_id: job_data}}}
        self._data = {}

    def _connect(self):
        # Nothing to connect to: the data lives in this process' memory.
        self.connected = True

    def __getstate__(self):
        # The stored data is deliberately dropped when pickling: an in-memory
        # storage cannot be shared between processes, so a fresh empty state
        # is serialized instead.
        state = {"_search_id_counter": 0, "_data": {}, "connected": False}
        return state

    def __setstate__(self, newstate):
        self.__dict__.update(newstate)
        self.connect()

    def create_new_search(self) -> Hashable:
        """Create a new search in the store and returns its identifier.

        Returns:
            Hashable: The identifier of the search.
        """
        search_id = f"{self._search_id_counter}"  # converting to str
        self._search_id_counter += 1
        self._data[search_id] = {"job_id_counter": 0, "data": {}}
        return search_id

    def create_new_job(self, search_id: Hashable) -> Hashable:
        """Creates a new job in the store and returns its identifier.

        Args:
            search_id (Hashable): The identifier of the search in which a new job
                is created.

        Returns:
            Hashable: The created identifier of the job, of the form
            ``"{search_id}.{job_counter}"``.
        """
        partial_id = self._data[search_id]["job_id_counter"]
        partial_id = f"{partial_id}"  # converting to str
        job_id = f"{search_id}.{partial_id}"
        self._data[search_id]["job_id_counter"] += 1
        self._data[search_id]["data"][partial_id] = {
            "in": None,
            "out": None,
            "metadata": {},
            "intermediate": {"budget": [], "objective": []},
        }
        return job_id

    def store_job(self, job_id: Hashable, key: Hashable, value: Any) -> None:
        """Stores the value corresponding to key for job_id.

        Args:
            job_id (Hashable): The identifier of the job.
            key (Hashable): A key to use to store the value.
            value (Any): The value to store.
        """
        search_id, partial_id = job_id.split(".")
        self._data[search_id]["data"][partial_id][key] = value

    def store_job_in(
        self, job_id: Hashable, args: Tuple = None, kwargs: Dict = None
    ) -> None:
        """Stores the input arguments of the executed job.

        Args:
            job_id (Hashable): The identifier of the job.
            args (Optional[Tuple], optional): The positional arguments. Defaults to None.
            kwargs (Optional[Dict], optional): The keyword arguments. Defaults to None.
        """
        self.store_job(job_id, key="in", value={"args": args, "kwargs": kwargs})

    def store_job_out(self, job_id: Hashable, value: Any) -> None:
        """Stores the output value of the executed job.

        Args:
            job_id (Hashable): The identifier of the job.
            value (Any): The value to store.
        """
        self.store_job(job_id, key="out", value=value)

    def store_job_metadata(self, job_id: Hashable, key: Hashable, value: Any) -> None:
        """Stores other metadata related to the execution of the job.

        Args:
            job_id (Hashable): The identifier of the job.
            key (Hashable): A key to use to store the metadata of the given job.
            value (Any): The value to store.
        """
        search_id, partial_id = job_id.split(".")
        self._data[search_id]["data"][partial_id]["metadata"][key] = value

    def load_all_search_ids(self) -> List[Hashable]:
        """Loads the identifiers of all recorded searches.

        Returns:
            List[Hashable]: A list of identifiers of all the recorded searches.
        """
        return list(self._data.keys())

    def load_all_job_ids(self, search_id: Hashable) -> List[Hashable]:
        """Loads the identifiers of all recorded jobs in the search.

        Args:
            search_id (Hashable): The identifier of the search.

        Returns:
            List[Hashable]: A list of identifiers of all the jobs.
        """
        partial_ids = self._data[search_id]["data"].keys()
        job_ids = [f"{search_id}.{p_id}" for p_id in partial_ids]
        return job_ids

    def load_search(self, search_id: Hashable) -> dict:
        """Loads the data of a search.

        Args:
            search_id (Hashable): The identifier of the search.

        Returns:
            dict: The corresponding data of the search (deep-copied so callers
            cannot mutate the store).
        """
        data = self._data[search_id]["data"]
        return copy.deepcopy(data)

    def load_job(self, job_id: Hashable) -> dict:
        """Loads the data of a job.

        Args:
            job_id (Hashable): The identifier of the job.

        Returns:
            dict: The corresponding data of the job (deep-copied so callers
            cannot mutate the store).
        """
        search_id, partial_id = job_id.split(".")
        data = self._data[search_id]["data"][partial_id]
        return copy.deepcopy(data)

    def store_search_value(
        self, search_id: Hashable, key: Hashable, value: Any
    ) -> None:
        """Stores the value corresponding to key for search_id.

        Args:
            search_id (Hashable): The identifier of the job.
            key (Hashable): A key to use to store the value.
            value (Any): The value to store.
        """
        self._data[search_id][key] = value

    def load_search_value(self, search_id: Hashable, key: Hashable) -> Any:
        """Loads the value corresponding to key for search_id.

        Args:
            search_id (Hashable): The identifier of the job.
            key (Hashable): A key to use to access the value.
        """
        return self._data[search_id][key]

    def load_metadata_from_all_jobs(
        self, search_id: Hashable, key: Hashable
    ) -> List[Any]:
        """Loads a given metadata value from all jobs.

        Args:
            search_id (Hashable): The identifier of the search.
            key (Hashable): The identifier of the value.

        Returns:
            List[Any]: A list of all the retrieved metadata values
            (``None`` values are skipped).
        """
        # NOTE: a stray no-op ``search_id`` expression statement was removed here.
        values = []
        for job_data_i in self._data[search_id]["data"].values():
            value_i = job_data_i["metadata"].get(key, None)
            if value_i is not None:
                values.append(value_i)
        return values

    def load_out_from_all_jobs(self, search_id: Hashable) -> List[Any]:
        """Loads the output value from all jobs.

        Args:
            search_id (Hashable): The identifier of the search.

        Returns:
            List[Any]: A list of all the retrieved output values
            (``None`` values are skipped).
        """
        values = []
        for job_data_i in self._data[search_id]["data"].values():
            value_i = job_data_i["out"]
            if value_i is not None:
                values.append(value_i)
        return values

    def load_jobs(self, job_ids: List[Hashable]) -> dict:
        """Load all data from a given list of jobs' identifiers.

        Args:
            job_ids (list): The list of job identifiers.

        Returns:
            dict: A dictionary of the retrieved values where the keys are the identifier of jobs.
        """
        data = {}
        for job_id in job_ids:
            search_id, partial_id = job_id.split(".")
            job_data = self._data[search_id]["data"][partial_id]
            data[job_id] = job_data
        return data
| 7,576 | 32.675556 | 100 | py |
deephyper | deephyper-master/deephyper/evaluator/storage/_redis_storage.py | import pickle
from typing import Any, Dict, Hashable, List, Tuple
import redis
from deephyper.evaluator.storage._storage import Storage
class RedisStorage(Storage):
    """Storage client for Redis.

    The Redis server should be started with the Redis-JSON module loaded.

    Args:
        host (str, optional): The host of the Redis server. Defaults to "localhost".
        port (int, optional): The port of the Redis server. Defaults to 6379.
        db (int, optional): The database of the Redis server. Defaults to 0.
    """

    def __init__(self, host="localhost", port=6379, db=0) -> None:
        super().__init__()
        self._host = host
        self._port = port
        self._db = db
        self._redis = None

    def _connect(self):
        # NOTE(review): ``charset`` is the redis-py 3.x parameter name; newer
        # redis-py versions use ``encoding`` — confirm the pinned client version.
        self._redis = redis.Redis(
            host=self._host,
            port=self._port,
            db=self._db,
            charset="utf-8",
            decode_responses=True,
        )
        self.connected = True
        # Initialize the global search counter only if it does not exist yet.
        self._redis.setnx("search_id_counter", 0)

    def __getstate__(self):
        # The live client connection is not picklable: serialize only the
        # connection parameters and reconnect on unpickling.
        state = {
            "_host": self._host,
            "_port": self._port,
            "_db": self._db,
            "_redis": None,
            "connected": False,
        }
        return state

    def __setstate__(self, newstate):
        self.__dict__.update(newstate)
        self.connect()

    def create_new_search(self) -> Hashable:
        """Create a new search in the store and returns its identifier.

        Returns:
            Hashable: The identifier of the search.
        """
        # INCR is atomic so concurrent clients get distinct identifiers.
        search_id_counter = self._redis.incr("search_id_counter", amount=1) - 1
        search_id = f"{search_id_counter}"  # converting to str
        self._redis.rpush("search_id_list", search_id)
        return search_id

    def create_new_job(self, search_id: Hashable) -> Hashable:
        """Creates a new job in the store and returns its identifier.

        Args:
            search_id (Hashable): The identifier of the search in which a new job
                is created.

        Returns:
            Hashable: The created identifier of the job, of the form
            ``"{search_id}.{job_counter}"``.
        """
        partial_id = (
            self._redis.incr(f"search:{search_id}.job_id_counter", amount=1) - 1
        )
        partial_id = f"{partial_id}"  # converting to str
        job_id = f"{search_id}.{partial_id}"
        self._redis.rpush(f"search:{search_id}.job_id_list", job_id)
        self._redis.json().set(
            f"job:{job_id}", ".", {"in": None, "metadata": {}, "out": None}
        )
        return job_id

    def store_job(self, job_id: Hashable, key: Hashable, value: Any) -> None:
        """Stores the value corresponding to key for job_id.

        Args:
            job_id (Hashable): The identifier of the job.
            key (Hashable): A key to use to store the value.
            value (Any): The value to store.
        """
        self._redis.json().set(f"job:{job_id}", f".{key}", value)

    def store_job_in(
        self, job_id: Hashable, args: Tuple = None, kwargs: Dict = None
    ) -> None:
        """Stores the input arguments of the executed job.

        Args:
            job_id (Hashable): The identifier of the job.
            args (Optional[Tuple], optional): The positional arguments. Defaults to None.
            kwargs (Optional[Dict], optional): The keyword arguments. Defaults to None.
        """
        self.store_job(job_id, key="in", value={"args": args, "kwargs": kwargs})

    def store_job_out(self, job_id: Hashable, value: Any) -> None:
        """Stores the output value of the executed job.

        Args:
            job_id (Hashable): The identifier of the job.
            value (Any): The value to store.
        """
        self.store_job(job_id, key="out", value=value)

    def store_job_metadata(self, job_id: Hashable, key: Hashable, value: Any) -> None:
        """Stores other metadata related to the execution of the job.

        Args:
            job_id (Hashable): The identifier of the job.
            key (Hashable): A key to use to store the metadata of the given job.
            value (Any): The value to store.
        """
        self._redis.json().set(f"job:{job_id}", f".metadata.{key}", value)

    def load_all_search_ids(self) -> List[Hashable]:
        """Loads the identifiers of all recorded searches.

        Returns:
            List[Hashable]: A list of identifiers of all the recorded searches.
        """
        search_ids = self._redis.lrange("search_id_list", 0, -1)
        return search_ids

    def load_all_job_ids(self, search_id: Hashable) -> List[Hashable]:
        """Loads the identifiers of all recorded jobs in the search.

        Args:
            search_id (Hashable): The identifier of the search.

        Returns:
            List[Hashable]: A list of identifiers of all the jobs.
        """
        job_ids = self._redis.lrange(f"search:{search_id}.job_id_list", 0, -1)
        return job_ids

    def load_search(self, search_id: Hashable) -> dict:
        """Loads the data of a search.

        Args:
            search_id (Hashable): The identifier of the search.

        Returns:
            dict: The corresponding data of the search.
        """
        job_ids = self.load_all_job_ids(search_id)
        # Pipeline the reads to avoid one round-trip per job.
        with self._redis.pipeline() as pipe:
            for job_id in job_ids:
                pipe.json().get(f"job:{job_id}", ".")
            data = pipe.execute()
        for i, job_id in enumerate(job_ids):
            data[i]["job_id"] = job_id
        return data

    def load_job(self, job_id: Hashable) -> dict:
        """Loads the data of a job.

        Args:
            job_id (Hashable): The identifier of the job.

        Returns:
            dict: The corresponding data of the job.
        """
        data = self._redis.json().get(f"job:{job_id}", ".")
        return data

    def store_search_value(
        self, search_id: Hashable, key: Hashable, value: Any
    ) -> None:
        """Stores the value corresponding to key for search_id.

        Args:
            search_id (Hashable): The identifier of the job.
            key (Hashable): A key to use to store the value.
            value (Any): The value to store.
        """
        key = f"{search_id}.{key}"
        # Arbitrary Python values are pickled before being stored.
        value = pickle.dumps(value)
        self._redis.set(key, value)

    def load_search_value(self, search_id: Hashable, key: Hashable) -> Any:
        """Loads the value corresponding to key for search_id.

        Args:
            search_id (Hashable): The identifier of the job.
            key (Hashable): A key to use to access the value.
        """
        key = f"{search_id}.{key}"
        value = self._redis.get(key)
        value = pickle.loads(value)
        return value

    def load_metadata_from_all_jobs(
        self, search_id: Hashable, key: Hashable
    ) -> List[Any]:
        """Loads a given metadata value from all jobs.

        Args:
            search_id (Hashable): The identifier of the search.
            key (Hashable): The identifier of the value.

        Returns:
            List[Any]: A list of all the retrieved metadata values
            (jobs missing the key are skipped).
        """
        # NOTE: a stray no-op ``search_id`` expression statement was removed here.
        jobs_ids = self.load_all_job_ids(search_id)
        values = []
        for job_id in jobs_ids:
            try:
                value = self._redis.json().get(f"job:{job_id}", f".metadata.{key}")
            except redis.exceptions.ResponseError:
                # The JSON path does not exist for this job.
                value = None
            if value is not None:
                values.append(value)
        return values

    def load_out_from_all_jobs(self, search_id: Hashable) -> List[Any]:
        """Loads the output value from all jobs.

        Args:
            search_id (Hashable): The identifier of the search.

        Returns:
            List[Any]: A list of all the retrieved output values
            (jobs without an output are skipped).
        """
        jobs_ids = self.load_all_job_ids(search_id)
        values = []
        for job_id in jobs_ids:
            try:
                value = self._redis.json().get(f"job:{job_id}", ".out")
            except redis.exceptions.ResponseError:
                value = None
            if value is not None:
                values.append(value)
        return values

    def load_jobs(self, job_ids: List[Hashable]) -> dict:
        """Load all data from a given list of jobs' identifiers.

        Args:
            job_ids (list): The list of job identifiers.

        Returns:
            dict: A dictionary of the retrieved values where the keys are the identifier of jobs.
        """
        redis_job_ids = map(lambda jid: f"job:{jid}", job_ids)
        data = self._redis.json().mget(redis_job_ids, ".")
        data = {k: v for k, v in zip(job_ids, data)}
        return data
| 8,727 | 32.060606 | 98 | py |
deephyper | deephyper-master/deephyper/evaluator/storage/__init__.py | from deephyper.evaluator.storage._storage import Storage
from deephyper.evaluator.storage._memory_storage import MemoryStorage
__all__ = ["Storage", "MemoryStorage"]
# optional import for RedisStorage
try:
from deephyper.evaluator.storage._redis_storage import RedisStorage # noqa: F401
__all__.append("RedisStorage")
except ImportError:
pass
| 360 | 24.785714 | 85 | py |
deephyper | deephyper-master/deephyper/evaluator/storage/_storage.py | import abc
import importlib
import logging
from typing import Any, Dict, Hashable, List, Tuple, TypeVar
StorageType = TypeVar("StorageType", bound="Storage")
STORAGES = {
"memory": "_memory_storage.MemoryStorage",
"redis": "_redis_storage.RedisStorage",
}
class Storage(abc.ABC):
"""An abstract interface representing a storage client."""
def __init__(self) -> None:
self.connected = False
def connect(self) -> StorageType:
"""Connect the storage client to the storage service."""
self._connect()
return self
@staticmethod
def create(method: str = "memory", method_kwargs: Dict = None) -> StorageType:
"""Static method allowing the creation of a storage client.
Args:
method (str, optional): the type of storage client in ``["memory", "redis"]``. Defaults to "memory".
method_kwargs (Dict, optional): the client keyword-arguments parameters. Defaults to None.
Raises:
ValueError: if the type of requested storage client is not valid.
Returns:
Storage: the created storage client.
"""
method_kwargs = method_kwargs if method_kwargs else {}
logging.info(
f"Creating Storage(method={method}, method_kwargs={method_kwargs}..."
)
if method not in STORAGES.keys():
val = ", ".join(STORAGES)
raise ValueError(
f'The method "{method}" is not a valid method for an Evaluator!'
f" Choose among the following evalutor types: "
f"{val}."
)
# create the evaluator
mod_name, attr_name = STORAGES[method].split(".")
mod = importlib.import_module(f"deephyper.evaluator.storage.{mod_name}")
storage_cls = getattr(mod, attr_name)
storage = storage_cls(**method_kwargs)
logging.info("Creation done")
return storage
@abc.abstractmethod
def _connect(self):
"""Connect the storage client to the storage service."""
@abc.abstractmethod
def create_new_search(self) -> Hashable:
"""Create a new search in the store and returns its identifier.
Returns:
Hashable: The identifier of the search.
"""
@abc.abstractmethod
def create_new_job(self, search_id: Hashable) -> Hashable:
"""Creates a new job in the store and returns its identifier.
Args:
search_id (Hashable): The identifier of the search in which a new job
is created.
Returns:
Hashable: The created identifier of the job.
"""
@abc.abstractmethod
def store_search_value(
self, search_id: Hashable, key: Hashable, value: Any
) -> None:
"""Stores the value corresponding to key for search_id.
Args:
search_id (Hashable): The identifier of the job.
key (Hashable): A key to use to store the value.
value (Any): The value to store.
"""
@abc.abstractmethod
def load_search_value(self, search_id: Hashable, key: Hashable) -> Any:
"""Loads the value corresponding to key for search_id.
Args:
search_id (Hashable): The identifier of the job.
key (Hashable): A key to use to access the value.
"""
@abc.abstractmethod
def store_job(self, job_id: Hashable, key: Hashable, value: Any) -> None:
"""Stores the value corresponding to key for job_id.
Args:
job_id (Hashable): The identifier of the job.
key (Hashable): A key to use to store the value.
value (Any): The value to store.
"""
@abc.abstractmethod
def store_job_in(
self, job_id: Hashable, args: Tuple = None, kwargs: Dict = None
) -> None:
"""Stores the input arguments of the executed job.
Args:
job_id (Hashable): The identifier of the job.
args (Optional[Tuple], optional): The positional arguments. Defaults to None.
kwargs (Optional[Dict], optional): The keyword arguments. Defaults to None.
"""
@abc.abstractmethod
def store_job_out(self, job_id: Hashable, value: Any) -> None:
"""Stores the output value of the executed job.
Args:
job_id (Hashable): The identifier of the job.
value (Any): The value to store.
"""
@abc.abstractmethod
def store_job_metadata(self, job_id: Hashable, key: Hashable, value: Any) -> None:
"""Stores other metadata related to the execution of the job.
Args:
job_id (Hashable): The identifier of the job.
key (Hashable): A key to use to store the metadata of the given job.
value (Any): The value to store.
"""
@abc.abstractmethod
def load_all_search_ids(self) -> List[Hashable]:
"""Loads the identifiers of all recorded searches.
Returns:
List[Hashable]: A list of identifiers of all the recorded searches.
"""
@abc.abstractmethod
def load_all_job_ids(self, search_id: Hashable) -> List[Hashable]:
"""Loads the identifiers of all recorded jobs in the search.
Args:
search_id (Hashable): The identifier of the search.
Returns:
List[Hashable]: A list of identifiers of all the jobs.
"""
@abc.abstractmethod
def load_search(self, search_id: Hashable) -> dict:
"""Loads the data of a search.
Args:
search_id (Hashable): The identifier of the search.
Returns:
dict: The corresponding data of the search.
"""
@abc.abstractmethod
def load_job(self, job_id: Hashable) -> dict:
"""Loads the data of a job.
Args:
job_id (Hashable): The identifier of the job.
Returns:
dict: The corresponding data of the job.
"""
@abc.abstractmethod
def load_metadata_from_all_jobs(
self, search_id: Hashable, key: Hashable
) -> List[Any]:
"""Loads a given metadata value from all jobs.
Args:
search_id (Hashable): The identifier of the search.
key (Hashable): The identifier of the value.
Returns:
List[Any]: A list of all the retrieved metadata values.
"""
@abc.abstractmethod
def load_out_from_all_jobs(self, search_id: Hashable) -> List[Any]:
"""Loads the output value from all jobs.
Args:
search_id (Hashable): The identifier of the search.
Returns:
List[Any]: A list of all the retrieved output values.
"""
@abc.abstractmethod
def load_jobs(self, job_ids: List[Hashable]) -> dict:
    """Load all data from a given list of jobs' identifiers.

    Args:
        job_ids (list): The list of job identifiers.

    Returns:
        dict: A dictionary of the retrieved values where the keys are the identifier of jobs.
    """
| 7,061 | 30.247788 | 112 | py |
deephyper | deephyper-master/deephyper/stopper/_idle_stopper.py | from deephyper.stopper._stopper import Stopper
class IdleStopper(Stopper):
    """Idle stopper which never stops the evaluation.

    NOTE(review): this class overrides nothing, so it inherits the base
    ``Stopper.stop``, which returns ``True`` once the last observed step
    reaches ``max_steps`` — the evaluation is therefore only "never stopped"
    before the maximum number of steps; confirm this matches the intent.
    """
| 135 | 21.666667 | 57 | py |
deephyper | deephyper-master/deephyper/stopper/_const_stopper.py | from deephyper.stopper._stopper import Stopper
class ConstantStopper(Stopper):
    """Constant stopping policy which will stop the evaluation of a configuration at a fixed step.

    Args:
        max_steps (int): the maximum number of steps which should be performed to evaluate the configuration fully.
        stop_step (int): the step at which to stop the evaluation.
    """

    def __init__(self, max_steps: int, stop_step: int) -> None:
        super().__init__(max_steps)
        # Step index from which ``stop`` starts returning True.
        self.stop_step = stop_step

    def stop(self) -> bool:
        # Stop either when the base-class budget is exhausted or when the
        # fixed stopping step has been reached.
        # NOTE(review): ``_count_steps`` is initialized to 0 in ``Stopper``
        # but is never incremented in the visible base-class code — confirm
        # a subclass/caller updates it, otherwise this compares against 0.
        return super().stop() or self.stop_step <= self._count_steps
| 616 | 33.277778 | 115 | py |
deephyper | deephyper-master/deephyper/stopper/_asha_stopper.py | import numpy as np
from deephyper.stopper._stopper import Stopper
class SuccessiveHalvingStopper(Stopper):
    """Stopper based on the asynchronous successive halving algorithm.

    Evaluations publish their objective at geometrically spaced checkpoints
    ("rungs"); at each rung an evaluation continues only if its objective
    ranks in the top ``1/reduction_factor`` fraction of the objectives
    recorded by competing evaluations of the same search at that rung.

    Args:
        max_steps (int): maximum number of observations before stopping
            unconditionally (enforced by the base class).
        min_steps (float): budget of the first rung. Defaults to ``1``.
        reduction_factor (float): geometric growth factor between rungs, and
            the inverse of the promoted fraction. Defaults to ``3``.
            NOTE(review): a non-integer value makes ``k`` below a float and
            would break the ``competing_objectives[-k]`` indexing — confirm
            only integers are passed.
        min_early_stopping_rate (float): shifts the rung schedule so halting
            decisions start at larger budgets. Defaults to ``0``.
        min_competing (int): minimum number of competing objectives required
            at a rung to apply the halving test. Defaults to ``0``.
        min_fully_completed: minimum number of fully completed evaluations
            required before halving is applied. Defaults to ``0``.
    """

    def __init__(
        self,
        max_steps: int,
        min_steps: float = 1,
        reduction_factor: float = 3,
        min_early_stopping_rate: float = 0,
        min_competing: int = 0,
        min_fully_completed=0,
    ) -> None:
        super().__init__(max_steps=max_steps)
        self.min_steps = min_steps
        self._reduction_factor = reduction_factor
        self._min_early_stopping_rate = min_early_stopping_rate
        self._min_competing = min_competing
        self._min_fully_completed = min_fully_completed
        # Index of the current rung (0-based); incremented on each promotion.
        self._rung = 0

    def _compute_halting_budget(self):
        # Budget of the current rung:
        # (min_steps - 1) + reduction_factor ** (min_early_stopping_rate + rung)
        return (self.min_steps - 1) + self._reduction_factor ** (
            self._min_early_stopping_rate + self._rung
        )

    def _get_competiting_objectives(self) -> list:
        # Objectives published by all jobs of the same search at the current rung.
        search_id, _ = self.job.id.split(".")
        values = self.job.storage.load_metadata_from_all_jobs(
            search_id, f"_completed_rung_{self._rung}"
        )
        # Objectives are stored as strings (see ``observe``); convert back.
        values = [float(v) for v in values]
        return values

    def _num_fully_completed(self) -> int:
        # Number of jobs of the same search that ran to completion
        # (i.e., whose "stopped" metadata is falsy).
        search_id, _ = self.job.id.split(".")
        stopped = self.job.storage.load_metadata_from_all_jobs(search_id, "stopped")
        num = sum(int(not (s)) for s in stopped)
        return num

    def observe(self, budget: float, objective: float):
        """Record an observation and publish it once a rung budget is reached."""
        super().observe(budget, objective)
        self._budget = budget
        self._objective = objective

        halting_budget = self._compute_halting_budget()

        if self._budget >= halting_budget:
            # casting float to str to avoid numerical rounding of database
            # e.g. for Redis: The precision of the output is fixed at 17 digits
            # after the decimal point regardless of the actual internal precision
            # of the computation.
            self.job.storage.store_job_metadata(
                self.job.id, f"_completed_rung_{self._rung}", str(self._objective)
            )

    def stop(self) -> bool:
        """Decide whether the evaluation should be early-stopped at this rung."""
        # Enforce Pre-conditions Before Applying Successive Halving
        if super().stop():
            return True

        # Compute when is the next Halting Budget
        halting_budget = self._compute_halting_budget()
        if self._budget < halting_budget:
            return False

        # Check if the minimum number of fully completed budgets has been done
        if self._num_fully_completed() < self._min_fully_completed:
            self._rung += 1
            return False

        # Check if the minimum number of competitors is verified
        competing_objectives = np.sort(self._get_competiting_objectives())
        num_competing = len(competing_objectives)

        if num_competing < self._min_competing:
            return True

        # Perform Successive Halving
        k = num_competing // self._reduction_factor

        # Promote if best when there is less than reduction_factor competing values
        if k == 0:
            k = 1
        top_k_worst_objective = competing_objectives[-k]
        promotable = self._objective >= top_k_worst_objective
        if promotable:
            self._rung += 1
        return not (promotable)
| 3,345 | 32.79798 | 84 | py |
deephyper | deephyper-master/deephyper/stopper/__init__.py | """The ``stopper`` module provides features to observe intermediate performances of iterative algorithm and decide dynamically if its evaluation should be stopped or continued.
This module was inspired from the Pruner interface and implementation of `Optuna <https://optuna.readthedocs.io/en/stable/reference/pruners.html>`_.
The ``Stopper`` class is the base class for all stoppers. It provides the interface for the ``observe`` and ``stop`` methods that should be implemented by all stoppers. The ``observe`` method is called at each iteration of the iterative algorithm and the ``stop`` method is called at the end of each iteration to decide if the evaluation should be stopped or continued. The stopper object is not used directly but through the ``RunningJob`` received by the ``run``-function. In the following example we demonstrate with a simulation how it can be used:
.. code-block:: python
import time
from deephyper.problem import HpProblem
from deephyper.search.hps import CBO
from deephyper.stopper import SuccessiveHalvingStopper
def run(job):
x = job.parameters["x"]
# Simulation of iteration
cum = 0
for i in range(100):
cum += x
time.sleep(0.01) # each iteration costs 0.01 seconds
# Record the intermediate performance
# Calling stopper.observe(budget, objective) under the hood
job.record(budget=i + 1, objective=cum)
# Check if the evaluation should be stopped
# Calling stopper.stop() under the hood
if job.stopped():
break
# Return objective and metadata to save what is the maximum step reached
return {"objective": cum, "metadata": {"i_stopped": i}}
problem = HpProblem()
problem.add_hyperparameter((0.0, 100.0), "x")
stopper = SuccessiveHalvingStopper(min_steps=1, max_steps=100)
search = CBO(problem, run, stopper=stopper, log_dir="multi-fidelity-exp")
results = search.search(timeout=10)
As it can be observed in the following results many evaluation stopped after the first iteration which saved
a lot of computation time. If evaluated fully, each configuration would take about 1 seconds and we would be able
to compute only a maximum of 10 configurations (because we set a timeout of 10). However, with the stopper we managed
to perform 15 evaluations instead.
.. code-block:: verbatim
p:x objective job_id m:timestamp_submit m:timestamp_gather m:i_stopped
0 79.654299 7965.429869 0 0.016269 1.234227 99
1 74.266072 74.266072 1 1.256349 1.269175 0
2 74.491125 74.491125 2 1.281712 1.294496 0
3 10.245385 10.245385 3 1.305979 1.317513 0
4 4.229917 4.229917 4 1.417226 1.430005 0
5 53.690895 53.690895 5 1.437582 1.450419 0
6 54.902216 54.902216 6 1.458042 1.470806 0
7 22.945529 22.945529 7 1.478365 1.491140 0
8 94.051310 9405.130978 8 1.498538 2.733619 99
9 23.024237 23.024237 9 2.753319 2.766194 0
10 97.121528 9712.152792 10 2.884685 4.114600 99
11 97.192445 9719.244491 11 4.241939 5.467425 99
12 98.844525 9884.452486 12 5.598530 6.833938 99
13 99.722437 9972.243688 13 6.946300 8.172941 99
14 99.988566 9998.856623 14 8.376363 9.615355 99
"""
from deephyper.stopper._stopper import Stopper
from deephyper.stopper._asha_stopper import SuccessiveHalvingStopper
from deephyper.stopper._median_stopper import MedianStopper
from deephyper.stopper._idle_stopper import IdleStopper
from deephyper.stopper._const_stopper import ConstantStopper
__all__ = [
"IdleStopper",
"Stopper",
"SuccessiveHalvingStopper",
"MedianStopper",
"ConstantStopper",
]
try:
from deephyper.stopper._lcmodel_stopper import LCModelStopper # noqa: F401
__all__.append("LCModelStopper")
except ImportError:
pass
| 4,488 | 46.252632 | 552 | py |
deephyper | deephyper-master/deephyper/stopper/_lcmodel_stopper.py | import sys
from functools import partial
import jax
import jax.numpy as jnp
import numpy as np
import numpyro
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS
from scipy.optimize import least_squares
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_array, check_is_fitted, check_X_y
from deephyper.stopper._stopper import Stopper
# Budget allocation models
def b_lin2(z, nu=(1, 1)):
    """Linear budget-allocation model: ``b(z) = nu[1] * (z - 1) + nu[0]``.

    Args:
        z: step index (scalar or array); the first step (z=1) maps to ``nu[0]``.
        nu: pair ``(offset, slope)``. A tuple replaces the original mutable
            list default (mutable default arguments are shared across calls).

    Returns:
        The allocated budget, same shape as ``z``.
    """
    return nu[1] * (z - 1) + nu[0]
def b_exp2(z, nu=(1, 2)):
    """Geometric budget-allocation model: ``b(z) = nu[0] * nu[1] ** (z - 1)``.

    Args:
        z: step index (scalar or array); the first step (z=1) maps to ``nu[0]``.
        nu: pair ``(scale, base)``. A tuple replaces the original mutable
            list default (mutable default arguments are shared across calls).

    Returns:
        The allocated budget, same shape as ``z``.
    """
    return nu[0] * jnp.power(nu[1], z - 1)
# Learning curves models
def f_lin2(z, b, rho):
return rho[1] * b(z) + rho[0]
def f_loglin2(z, b, rho):
Z = jnp.log(b(z))
Y = rho[1] * Z + rho[0]
y = jnp.exp(Y)
return -y
def f_loglin3(z, b, rho):
Z = jnp.log(b(z))
Y = rho[2] * jnp.power(Z, 2) + rho[1] * Z + rho[0]
y = jnp.exp(Y)
return -y
def f_loglin4(z, b, rho):
Z = jnp.log(b(z))
Y = rho[3] * jnp.power(Z, 3) + rho[2] * jnp.power(Z, 2) + rho[1] * Z + rho[0]
y = jnp.exp(Y)
return -y
def f_pow3(z, b, rho):
return rho[0] - rho[1] * b(z) ** -rho[2]
def f_mmf4(z, b, rho):
return (rho[0] * rho[1] + rho[2] * jnp.power(b(z), rho[3])) / (
rho[1] + jnp.power(b(z), rho[3])
)
def f_vapor3(z, b, rho):
    """Vapor-pressure learning-curve model: ``rho0 + rho1 / b(z) + rho2 * log(b(z))``.

    Uses ``jnp.log`` for consistency with the sibling models; the original
    called ``np.log``, which fails on traced arrays when this model is used
    inside jit-compiled functions (e.g. ``residual_least_square``).
    """
    return rho[0] + rho[1] / b(z) + rho[2] * jnp.log(b(z))
def f_logloglin2(z, b, rho):
return jnp.log(rho[0] * jnp.log(b(z)) + rho[1])
def f_hill3(z, b, rho):
ymax, eta, kappa = rho
return ymax * (b(z) ** eta) / (kappa * eta + b(z) ** eta)
def f_logpow3(z, b, rho):
return rho[0] / (1 + (b(z) / jnp.exp(rho[1])) ** rho[2])
def f_pow4(z, b, rho):
return rho[2] - (rho[0] * b(z) + rho[1]) ** (-rho[3])
def f_exp4(z, b, rho):
return rho[2] - jnp.exp(-rho[0] * (b(z) ** rho[3]) + rho[1])
def f_janoschek4(z, b, rho):
return rho[0] - (rho[0] - rho[1]) * jnp.exp(-rho[2] * (b(z) ** rho[3]))
def f_weibull4(z, b, rho):
return rho[0] - (rho[0] - rho[1]) * jnp.exp(-((rho[2] * b(z)) ** rho[3]))
def f_ilog2(z, b, rho):
return rho[1] - (rho[0] / jnp.log(b(z) + 1))
# Utility to estimate parameters of learning curve model
# The combination of "partial" and "static_argnums" is necessary
# with the "f" lambda function passed as argument
@partial(jax.jit, static_argnums=(1,))
def residual_least_square(rho, f, z, y):
    """Residual ``f(z, rho) - y`` consumed by ``scipy.optimize.least_squares``.

    Args:
        rho: parameter vector of the learning-curve model.
        f: learning-curve model ``f(z, rho)``; marked static for jit because
            it is a Python callable, not an array argument.
        z: observed budgets.
        y: observed objectives.

    Returns:
        Elementwise residuals, same shape as ``y``.
    """
    return f(z, rho) - y
def prob_model(z=None, y=None, f=None, rho_mu_prior=None, num_obs=None):
    """NumPyro generative model of a learning curve.

    Priors: ``rho ~ Normal(rho_mu_prior, 1.0)`` and observation noise
    ``sigma ~ Exponential(1.0)``. Likelihood:
    ``y[:num_obs] ~ Normal(f(z[:num_obs], rho), sigma)``.

    Args:
        z: budgets (fixed-size buffer; only the first ``num_obs`` entries are used).
        y: objectives (same buffering convention as ``z``).
        f: parametric learning-curve model ``f(z, rho)``.
        rho_mu_prior: mean of the Gaussian prior over ``rho``.
        num_obs: number of valid observations in ``z``/``y``.
    """
    rho_mu_prior = jnp.array(rho_mu_prior)
    rho_sigma_prior = 1.0
    rho = numpyro.sample("rho", dist.Normal(rho_mu_prior, rho_sigma_prior))
    sigma = numpyro.sample("sigma", dist.Exponential(1.0))  # introducing noise
    # sigma = 0.1
    mu = f(z[:num_obs], rho)
    numpyro.sample("obs", dist.Normal(mu, sigma), obs=y[:num_obs])
@partial(jax.jit, static_argnums=(0,))
def predict_moments_from_posterior(f, X, posterior_samples):
vf_model = jax.vmap(f, in_axes=(None, 0))
posterior_mu = vf_model(X, posterior_samples)
mean_mu = jnp.mean(posterior_mu, axis=0)
std_mu = jnp.std(posterior_mu, axis=0)
return mean_mu, std_mu
class BayesianLearningCurveRegressor(BaseEstimator, RegressorMixin):
def __init__(
self,
f_model=f_loglin3,
f_model_num_params=3,
b_model=b_lin2,
max_trials_ls_fit=10,
mcmc_num_warmup=200,
mcmc_num_samples=1_000,
n_jobs=-1,
random_state=None,
verbose=0,
batch_size=100,
):
self.b_model = b_model
self.f_model = lambda z, rho: f_model(z, self.b_model, rho)
self.f_nparams = f_model_num_params
self.mcmc_num_warmup = mcmc_num_warmup
self.mcmc_num_samples = mcmc_num_samples
self.max_trials_ls_fit = max_trials_ls_fit
self.n_jobs = n_jobs
self.random_state = check_random_state(random_state)
self.verbose = verbose
self.rho_mu_prior_ = np.zeros((self.f_nparams,))
self.batch_size = batch_size
self.X_ = np.zeros((self.batch_size,))
self.y_ = np.zeros((self.batch_size,))
def fit(self, X, y, update_prior=True):
check_X_y(X, y, ensure_2d=False)
# !Trick for performance to avoid performign JIT again and again
# !This will fix the shape of inputs of the model for numpyro
# !see https://github.com/pyro-ppl/numpyro/issues/441
num_samples = len(X)
assert num_samples <= self.batch_size
self.X_[:num_samples] = X[:]
self.y_[:num_samples] = y[:]
if update_prior:
self.rho_mu_prior_[:] = self._fit_learning_curve_model_least_square(X, y)[:]
if not (hasattr(self, "kernel_")):
self.kernel_ = NUTS(
model=lambda z, y, rho_mu_prior: prob_model(
z, y, self.f_model, rho_mu_prior, num_obs=num_samples
),
)
self.mcmc_ = MCMC(
self.kernel_,
num_warmup=self.mcmc_num_warmup,
num_samples=self.mcmc_num_samples,
progress_bar=self.verbose,
jit_model_args=True,
)
seed = self.random_state.randint(low=0, high=2**32)
rng_key = jax.random.PRNGKey(seed)
self.mcmc_.run(rng_key, z=self.X_, y=self.y_, rho_mu_prior=self.rho_mu_prior_)
if self.verbose:
self.mcmc_.print_summary()
return self
def predict(self, X, return_std=True):
posterior_samples = self.predict_posterior_samples(X)
mean_mu = jnp.mean(posterior_samples, axis=0)
if return_std:
std_mu = jnp.std(posterior_samples, axis=0)
return mean_mu, std_mu
return mean_mu
def predict_posterior_samples(self, X):
# Check if fit has been called
check_is_fitted(self)
# Input validation
X = check_array(X, ensure_2d=False)
posterior_samples = self.mcmc_.get_samples()
vf_model = jax.vmap(self.f_model, in_axes=(None, 0))
posterior_mu = vf_model(X, posterior_samples["rho"])
return posterior_mu
def prob(self, X, condition):
"""Compute the approximate probability of P(cond(m(X_i), y_i))
where m is the current fitted model and cond a condition.
Args:
X (np.array): An array of inputs.
condition (callable): A function defining the condition to test.
Returns:
array: an array of shape X.
"""
# Check if fit has been called
check_is_fitted(self)
# Input validation
X = check_array(X, ensure_2d=False)
posterior_samples = self.mcmc_.get_samples()
vf_model = jax.vmap(self.f_model, in_axes=(None, 0))
posterior_mu = vf_model(X, posterior_samples["rho"])
prob = jnp.mean(condition(posterior_mu), axis=0)
return prob
def _fit_learning_curve_model_least_square(
self,
z_train,
y_train,
):
"""The learning curve model is assumed to be modeled by 'f' with
interface f(z, rho).
"""
seed = self.random_state.randint(low=0, high=2**32)
random_state = check_random_state(seed)
z_train = np.asarray(z_train)
y_train = np.asarray(y_train)
# compute the jacobian
# using the true jacobian is important to avoid problems
# with numerical errors and approximations! indeed the scale matters
# a lot when approximating with finite differences
def fun_wrapper(rho, f, z, y):
return np.array(residual_least_square(rho, f, z, y))
if not (hasattr(self, "jac_residual_ls_")):
self.jac_residual_ls_ = partial(jax.jit, static_argnums=(1,))(
jax.jacfwd(residual_least_square, argnums=0)
)
def jac_wrapper(rho, f, z, y):
return np.array(self.jac_residual_ls_(rho, f, z, y))
results = []
mse_hist = []
for _ in range(self.max_trials_ls_fit):
rho_init = random_state.randn(self.f_nparams)
try:
res_lsq = least_squares(
fun_wrapper,
rho_init,
args=(self.f_model, z_train, y_train),
method="lm",
jac=jac_wrapper,
)
except ValueError:
continue
mse_res_lsq = np.mean(res_lsq.fun**2)
mse_hist.append(mse_res_lsq)
results.append(res_lsq.x)
i_best = np.nanargmin(mse_hist)
res = results[i_best]
return res
def area_learning_curve(z, f, z_max) -> float:
    """Left-rectangle approximation of the area under a learning curve.

    Each observed segment ``[z[i-1], z[i]]`` contributes its width times the
    score at its left edge; if the curve stops before ``z_max``, the last
    observed score is extended up to ``z_max``.

    Args:
        z: observed budgets, monotonically increasing (``f`` need not be).
        f: observed scores, same length as ``z``.
        z_max: right boundary of the integration interval (``>= z[-1]``).

    Returns:
        float: the approximated area.
    """
    assert len(z) == len(f)
    assert z[-1] <= z_max
    # Sum width * left-edge height over all consecutive budget pairs.
    area = sum(
        (right - left) * height
        for (left, right), height in zip(zip(z, z[1:]), f)
    )
    # Extend the final observed score when the curve ends before z_max.
    if z[-1] < z_max:
        area += (z_max - z[-1]) * f[-1]
    return area
class LCModelStopper(Stopper):
"""Stopper based on learning curve extrapolation (LCE) to evaluate if the iterations of the learning algorithm
should be stopped. The LCE is based on a parametric learning curve model (LCM) which is modeling the score as a function of the number of training steps. Training steps can correspond to the number of training epochs, the number of training batches, the number of observed samples or any other quantity that is iterated through during the training process. The LCE is based on the following steps:
1. An early stopping condition is always checked first. If the early stopping condition is met, the LCE is not applied.
2. Then, some safeguard conditions are checked to ensure that the LCE can be applied (number of observed steps must be greater or equal to the number of parameters of the LCM).
3. If the LCM cannot be fitted (number of observed steps is less than number of parameters of the model), then the last observed step is compared to hitorical performance of others at the same step to check if it is a low-performing outlier (outlier in the direction of performing worse!) using the IQR criterion.
4. If the LCM can be fitted, a least square fit is performed to estimate the parameters of the LCM.
5. The probability of the current LC to perform worse than the best observed score at the maximum iteration is computed using Monte-Carlo Markov Chain (MCMC).
To use this stopper, you need to install the following dependencies:
.. code-block:: bash
$ jax>=0.3.25
$ numpyro
Args:
max_steps (int): The maximum number of training steps which can be performed.
min_steps (int, optional): The minimum number of training steps which can be performed. Defaults to ``1``.
lc_model (str, optional): The parameteric learning model to use. It should be a string in the following list: ``["lin2", "loglin2", "loglin3", "loglin4", "pow3","mmf4", "vapor3", "logloglin2", "hill3", "logpow3", "pow4", "exp4", "janoschek4", "weibull4", "ilog2"]``. The number in the name corresponds to the number of parameters of the parametric model. Defaults to ``"mmf4"``.
min_done_for_outlier_detection (int, optional): The minimum number of observed scores at the same step to check for if it is a lower-bound outlier. Defaults to ``10``.
iqr_factor_for_outlier_detection (float, optional): The IQR factor for outlier detection. The higher it is the more inclusive the condition will be (i.e. if set very large it is likely not going to detect any outliers). Defaults to ``1.5``.
prob_promotion (float, optional): The threshold probabily to stop the iterations. If the current learning curve has a probability greater than ``prob_promotion`` to be worse that the best observed score accross all evaluations then the current iterations are stopped. Defaults to ``0.9`` (i.e. probability of 0.9 of being worse).
early_stopping_patience (float, optional): The patience of the early stopping condition. If it is an ``int`` it is directly corresponding to a number of iterations. If it is a ``float`` then it is corresponding to a proportion between [0,1] w.r.t. ``max_steps``. Defaults to ``0.25`` (i.e. 25% of ``max_steps``).
objective_returned (str, optional): The returned objective. It can be a value in ``["last", "max", "alc"]`` where ``"last"`` corresponds to the last observed score, ``"max"`` corresponds to the maximum observed score and ``"alc"`` corresponds to the area under the learning curve. Defaults to "last".
random_state (int or np.RandomState, optional): The random state of estimation process. Defaults to ``None``.
Raises:
ValueError: parameters are not valid.
"""
def __init__(
self,
max_steps: int,
min_steps: int = 1,
lc_model="mmf4",
min_done_for_outlier_detection=10,
iqr_factor_for_outlier_detection=1.5,
prob_promotion=0.9,
early_stopping_patience=0.25,
objective_returned="last",
random_state=None,
) -> None:
super().__init__(max_steps=max_steps)
self.min_steps = min_steps
lc_model = "f_" + lc_model
lc_model_num_params = int(lc_model[-1])
lc_model = getattr(sys.modules[__name__], lc_model)
self.min_obs_to_fit = lc_model_num_params
self.min_done_for_outlier_detection = min_done_for_outlier_detection
self.iqr_factor_for_outlier_detection = iqr_factor_for_outlier_detection
self.prob_promotion = prob_promotion
if type(early_stopping_patience) is int:
self.early_stopping_patience = early_stopping_patience
elif type(early_stopping_patience) is float:
self.early_stopping_patience = int(early_stopping_patience * self.max_steps)
else:
raise ValueError("early_stopping_patience must be int or float")
self.objective_returned = objective_returned
self._rung = 0
# compute the step at which to stop based on steps allocation policy
max_rung = np.floor(
np.log(self.max_steps / self.min_steps) / np.log(self.min_obs_to_fit)
)
self.max_steps_ = int(self.min_steps * self.min_obs_to_fit**max_rung)
self.lc_model = BayesianLearningCurveRegressor(
f_model=lc_model,
f_model_num_params=lc_model_num_params,
random_state=random_state,
batch_size=self.max_steps_,
)
self._lc_objectives = []
def _compute_halting_step(self):
return self.min_steps * self.min_obs_to_fit**self._rung
def _retrieve_best_objective(self) -> float:
search_id, _ = self.job.id.split(".")
objectives = []
for obj in self.job.storage.load_out_from_all_jobs(search_id):
try:
objectives.append(float(obj))
except ValueError:
pass
if len(objectives) > 0:
return np.max(objectives)
else:
return np.max(self.observations[1])
def _get_competiting_objectives(self, rung) -> list:
search_id, _ = self.job.id.split(".")
values = self.job.storage.load_metadata_from_all_jobs(
search_id, f"_completed_rung_{rung}"
)
values = [float(v) for v in values]
return values
def observe(self, budget: float, objective: float):
super().observe(budget, objective)
self._budget = self.observed_budgets[-1]
self._lc_objectives.append(self.objective)
self._objective = self._lc_objectives[-1]
# For Early-Stopping based on Patience
if (
not (hasattr(self, "_local_best_objective"))
or self._objective > self._local_best_objective
):
self._local_best_objective = self._objective
self._local_best_step = self.step
halting_step = self._compute_halting_step()
if self._budget >= halting_step:
self.job.storage.store_job_metadata(
self.job.id, f"_completed_rung_{self._rung}", str(self._objective)
)
def stop(self) -> bool:
# Enforce Pre-conditions Before Learning-Curve based Early Discarding
if super().stop():
print("Stopped after reaching the maximum number of steps.")
self.infos_stopped = "max steps reached"
return True
if self.step - self._local_best_step >= self.early_stopping_patience:
print(
f"Stopped after reaching {self.early_stopping_patience} steps without improvement."
)
self.infos_stopped = "early stopping"
return True
# This condition will enforce the stopper to stop the evaluation at the first step
# for the first evaluation (The FABOLAS method does the same, bias the first samples with
# small budgets)
self.best_objective = self._retrieve_best_objective()
halting_step = self._compute_halting_step()
if self.step < max(self.min_steps, self.min_obs_to_fit):
if self.step >= halting_step:
competing_objectives = self._get_competiting_objectives(self._rung)
if len(competing_objectives) > self.min_done_for_outlier_detection:
q1 = np.quantile(
competing_objectives,
q=0.25,
)
q3 = np.quantile(
competing_objectives,
q=0.75,
)
iqr = q3 - q1
# lower than the minimum of a box plot
if (
self._objective
< q1 - self.iqr_factor_for_outlier_detection * iqr
):
print(
f"Stopped early because of abnormally low objective: {self._objective}"
)
self.infos_stopped = "outlier"
return True
self._rung += 1
return False
# Check if the halting budget condition is met
if self.step < halting_step:
return False
# Check if the evaluation should be stopped based on LC-Model
# Fit and predict the performance of the learning curve model
z_train = self.observed_budgets
y_train = self._lc_objectives
z_train, y_train = np.asarray(z_train), np.asarray(y_train)
self.lc_model.fit(z_train, y_train, update_prior=True)
# Check if the configuration is promotable based on its predicted objective value
p = self.lc_model.prob(
X=[self.max_steps], condition=lambda y_hat: y_hat <= self.best_objective
)[0]
# Return whether the configuration should be stopped
if p <= self.prob_promotion:
self._rung += 1
else:
print(
f"Stopped because the probability of performing worse is {p} > {self.prob_promotion}"
)
self.infos_stopped = f"prob={p:.3f}"
return True
@property
def objective(self):
if self.objective_returned == "last":
return self.observations[-1][-1]
elif self.objective_returned == "max":
return max(self.observations[-1])
elif self.objective_returned == "alc":
z, y = self.observations
return area_learning_curve(z, y, z_max=self.max_steps)
else:
raise ValueError("objective_returned must be one of 'last', 'best', 'alc'")
| 19,786 | 36.263653 | 401 | py |
deephyper | deephyper-master/deephyper/stopper/_stopper.py | import abc
import copy
class Stopper(abc.ABC):
    """An abstract class describing the interface of a Stopper.

    Args:
        max_steps (int): the maximum number of calls to ``observe(budget, objective)``.
    """

    def __init__(self, max_steps: int) -> None:
        assert max_steps > 0
        self.max_steps = max_steps
        self._count_steps = 0
        self.job = None
        # Parallel lists collecting every (budget, objective) observation.
        self.observed_budgets = []
        self.observed_objectives = []

    def to_json(self):
        """Returns a dict version of the stopper which can be saved as JSON."""
        return type(self).__name__

    def transform_objective(self, objective: float):
        """Hook applied to each observed objective before it is recorded.

        The base implementation is the identity transformation.
        """
        return objective

    @property
    def step(self):
        """Last observed step."""
        return self.observed_budgets[-1]

    def observe(self, budget: float, objective: float) -> None:
        """Observe a new objective value.

        Args:
            budget (float): the budget used to obtain the objective (e.g., the number of epochs).
            objective (float): the objective value to observe (e.g, the accuracy).
        """
        recorded = self.transform_objective(objective)
        self.observed_budgets.append(budget)
        self.observed_objectives.append(recorded)

    def stop(self) -> bool:
        """Returns ``True`` if the evaluation should be stopped and ``False`` otherwise.

        Returns:
            bool: ``(step >= max_steps)``.
        """
        return self.step >= self.max_steps

    @property
    def observations(self) -> list:
        """Returns a copy of the list of observations with 0-index the budgets and 1-index the objectives."""
        return copy.deepcopy([self.observed_budgets, self.observed_objectives])

    @property
    def objective(self):
        """Last observed objective."""
        return self.observations[-1][-1]
| 2,333 | 31.416667 | 109 | py |
deephyper | deephyper-master/deephyper/stopper/_median_stopper.py | import numpy as np
from deephyper.stopper._stopper import Stopper
class MedianStopper(Stopper):
    """Stopper based on the median of observed objectives at similar budgets.

    At each halting budget the evaluation is stopped if its objective is
    strictly below the median of the objectives published by competing jobs
    of the same search at the same rung.

    Args:
        max_steps (int): maximum number of observations (enforced by the base class).
        min_steps (int): first step at which a halting decision can occur. Defaults to ``1``.
        min_competing (int): minimum number of competing objectives required
            to apply the median test. Defaults to ``0``.
        min_fully_completed (int): minimum number of fully completed
            evaluations required before stopping is allowed. Defaults to ``0``.
        interval_steps (int): number of steps between two halting decisions.
            Defaults to ``1``.
    """

    def __init__(
        self,
        max_steps: int,
        min_steps: int = 1,
        min_competing: int = 0,
        min_fully_completed: int = 0,
        interval_steps: int = 1,
    ) -> None:
        super().__init__(max_steps=max_steps)
        self.min_steps = min_steps
        self._min_competing = min_competing
        self._min_fully_completed = min_fully_completed
        self._interval_steps = interval_steps
        # Index of the current rung (0-based); incremented on each promotion.
        self._rung = 0

    def _is_halting_budget(self):
        # A halting decision happens every ``interval_steps`` steps,
        # starting at ``min_steps``.
        if self.step < self.min_steps:
            return False
        else:
            return (self.step - self.min_steps) % self._interval_steps == 0

    def _get_competiting_objectives(self) -> list:
        # Objectives published by all jobs of the same search at the current rung.
        search_id, _ = self.job.id.split(".")
        values = self.job.storage.load_metadata_from_all_jobs(
            search_id, f"_completed_rung_{self._rung}"
        )
        # Objectives are stored as strings (see ``observe``); convert back.
        values = [float(v) for v in values]
        return values

    def _num_fully_completed(self) -> int:
        # Number of jobs of the same search that ran to completion
        # (i.e., whose "stopped" metadata is falsy).
        search_id, _ = self.job.id.split(".")
        stopped = self.job.storage.load_metadata_from_all_jobs(search_id, "stopped")
        num = sum(int(not (s)) for s in stopped)
        return num

    def observe(self, budget: float, objective: float):
        """Record an observation and publish it when a halting budget is reached."""
        super().observe(budget, objective)
        self._budget = self.observed_budgets[-1]
        self._objective = self.observed_objectives[-1]

        if self._is_halting_budget():
            # casting float to str to avoid numerical rounding of database
            # e.g. for Redis: The precision of the output is fixed at 17 digits
            # after the decimal point regardless of the actual internal precision
            # of the computation.
            self.job.storage.store_job_metadata(
                self.job.id, f"_completed_rung_{self._rung}", str(self._objective)
            )

    def stop(self) -> bool:
        """Stop if the objective falls below the competitors' median at this rung."""
        # Enforce Pre-conditions
        if super().stop():
            return True

        if not (self._is_halting_budget()):
            return False

        if self._num_fully_completed() < self._min_fully_completed:
            return False

        # Apply Median Pruning
        competing_objectives = np.sort(self._get_competiting_objectives())
        num_competing = len(competing_objectives)

        if num_competing < self._min_competing:
            return False

        median_objective = np.median(competing_objectives)
        promotable = self._objective >= median_objective
        if promotable:
            self._rung += 1
        return not (promotable)
| 2,729 | 31.117647 | 84 | py |
deephyper | deephyper-master/deephyper/test/_command.py | import subprocess
import sys
def run(command, live_output=False):
    """Run a command line for testing purposes and return its completed process.

    Args:
        command (str): the command line as a str. It is tokenized with
            ``shlex.split`` so quoted arguments containing spaces survive
            intact (the previous naive ``str.split`` broke them).
        live_output (bool): if ``True``, forward the subprocess output to the
            current ``sys.stdout``/``sys.stderr`` instead of capturing it.

    Returns:
        subprocess.CompletedProcess: the completed process; when
        ``live_output`` is ``False``, ``stdout``/``stderr`` hold the captured text.

    Raises:
        subprocess.CalledProcessError: if the command exits with a non-zero
            status; the captured output is printed before re-raising.
    """
    import shlex  # local import keeps the module's top-level imports unchanged

    args = shlex.split(command)
    try:
        if live_output:
            result = subprocess.run(
                args,
                check=True,
                capture_output=False,
                text=True,
                stdout=sys.stdout,
                stderr=sys.stderr,
            )
        else:
            result = subprocess.run(args, check=True, capture_output=True, text=True)
        return result
    except subprocess.CalledProcessError as e:
        # Surface the failing command's output before propagating the error.
        print(e.stdout)
        print(e.stderr)
        raise e
| 711 | 23.551724 | 88 | py |
deephyper | deephyper-master/deephyper/test/_parse_result.py | import parse
def parse_result(stream: str) -> float:
    """Parse the output of a DeepHyper test. The format of the parsed output should be as follows:

    .. code-block::

        DEEPHYPER-OUTPUT: <float>

    Args:
        stream (str): The output of a DeepHyper test.

    Returns:
        float: The parsed output.

    Raises:
        ValueError: if no ``DEEPHYPER-OUTPUT`` marker is found in ``stream``
            (the previous implementation raised an opaque ``TypeError`` via
            ``None[0]`` in that case).
    """
    import re  # stdlib replacement for the third-party ``parse`` dependency

    # Optionally signed decimal number with optional exponent (a superset of
    # the fixed-point numbers matched by parse's "{:f}" format).
    match = re.search(r"DEEPHYPER-OUTPUT:\s*([-+]?\d*\.?\d+(?:[eE][-+]?\d+)?)", stream)
    if match is None:
        raise ValueError("no 'DEEPHYPER-OUTPUT: <float>' line found in stream")
    return float(match.group(1))
| 405 | 21.555556 | 98 | py |
deephyper | deephyper-master/deephyper/test/__init__.py | """Sub-package dedicated to reusable testing tools for DeepHyper"""
from ._command import run
from ._parse_result import parse_result
__all__ = ["run", "parse_result"]
| 170 | 23.428571 | 67 | py |
deephyper | deephyper-master/deephyper/test/nas/__init__.py | 0 | 0 | 0 | py | |
deephyper | deephyper-master/deephyper/test/nas/linearRegHybrid/problem.py | from deephyper.nas.spacelib.tabular import OneLayerSpace
from deephyper.problem import NaProblem
from deephyper.test.nas.linearReg.load_data import load_data
Problem = NaProblem()
Problem.load_data(load_data)
Problem.search_space(OneLayerSpace)
Problem.hyperparameters(
batch_size=Problem.add_hyperparameter((1, 100), "batch_size"),
learning_rate=Problem.add_hyperparameter(
(1e-4, 1e-1, "log-uniform"), "learning_rate"
),
optimizer=Problem.add_hyperparameter(["adam", "nadam", "rmsprop"], "optimizer"),
num_epochs=1,
)
Problem.loss("mse")
Problem.metrics(["r2"])
Problem.objective("val_r2")
# Just to print your problem, to test its definition and imports in the current python environment.
if __name__ == "__main__":
print(Problem)
model = Problem.get_keras_model([1])
| 817 | 24.5625 | 99 | py |
deephyper | deephyper-master/deephyper/test/nas/linearRegHybrid/load_data.py | import numpy as np
def load_data(dim=10, verbose=0):
    """Generate a synthetic regression dataset for the linear target ``sum(x_i)``.

    Draws 10000 samples of ``dim`` features uniformly in ``[0, 100)`` with a
    fixed seed (42) and splits them 80/20 into train and validation sets.

    Args:
        dim (int): number of input features per sample.
        verbose (int): if truthy, print the shapes of the four arrays.

    Returns:
        Tuple of Numpy arrays: ``(train_X, train_y), (valid_X, valid_y)``.
    """
    rng = np.random.RandomState(42)
    size = 10000
    prop = 0.80
    lo, hi = 0, 100
    span = hi - lo
    # One draw of ``dim`` uniforms per sample, scaled to [lo, hi).
    x = np.array([lo + rng.random(dim) * span for _ in range(size)])
    # Target: sum of the features, stored as a column vector.
    y = np.array([[np.sum(row)] for row in x])

    sep_index = int(prop * size)
    train_X, valid_X = x[:sep_index], x[sep_index:]
    train_y, valid_y = y[:sep_index], y[sep_index:]

    if verbose:
        for name, arr in (
            ("train_X", train_X),
            ("train_y", train_y),
            ("valid_X", valid_X),
            ("valid_y", valid_y),
        ):
            print(f"{name} shape: {np.shape(arr)}")
    return (train_X, train_y), (valid_X, valid_y)
if __name__ == "__main__":
load_data(verbose=1)
| 897 | 23.944444 | 74 | py |
deephyper | deephyper-master/deephyper/test/nas/linearRegHybrid/__init__.py | from .problem import Problem # noqa: F401
| 43 | 21 | 42 | py |
deephyper | deephyper-master/deephyper/test/nas/linearRegMultiInputsGen/problem.py | from deephyper.problem import NaProblem
from deephyper.test.nas.linearRegMultiInputsGen.load_data import load_data
from deephyper.nas.preprocessing import minmaxstdscaler
from deephyper.nas.spacelib.tabular import OneLayerSpace
# NAS test problem with generator-based multi-input data and fixed
# training hyperparameters.
Problem = NaProblem()
Problem.load_data(load_data)
# scale inputs with min-max + standard scaling before training
Problem.preprocessing(minmaxstdscaler)
Problem.search_space(OneLayerSpace)
Problem.hyperparameters(
    batch_size=100, learning_rate=0.1, optimizer="adam", num_epochs=10
)
Problem.loss("mse")
Problem.metrics(["r2"])
Problem.objective("val_r2")
# Just to print your problem, to test its definition and imports in the current python environment.
if __name__ == "__main__":
    print(Problem)
| 680 | 23.321429 | 99 | py |
deephyper | deephyper-master/deephyper/test/nas/linearRegMultiInputsGen/load_data.py | from pprint import pformat
import numpy as np
import tensorflow as tf
def load_data(dim=10, size=100):
    """Generate generator-based data for the linear function ``y = sum(x_i)``.

    Return:
        dict: generator-style data config with keys ``train_gen``,
        ``train_size``, ``valid_gen``, ``valid_size``, ``types`` and
        ``shapes`` as expected by the generator data pipeline.
    """
    rng = np.random.RandomState(42)
    # NOTE(review): the `size` parameter is immediately overwritten here,
    # so callers cannot control the dataset size — confirm intent.
    size = 1000
    prop = 0.80
    a, b = 0, 100
    d = b - a
    x = np.array([a + rng.random(dim) * d for i in range(size)], dtype=np.float64)
    y = np.array([[np.sum(v)] for v in x], dtype=np.float64)
    sep_index = int(prop * size)
    sep_inputs = dim // 2  # we want two different inputs
    tX0, tX1 = x[:sep_index, :sep_inputs], x[:sep_index, sep_inputs:]
    vX0, vX1 = x[sep_index:, :sep_inputs], x[sep_index:, sep_inputs:]
    ty = y[:sep_index]
    vy = y[sep_index:]
    def train_gen():
        # yields one ({input_name: array}, target) pair per training sample
        for x0, x1, y in zip(tX0, tX1, ty):
            yield ({"input_0": x0, "input_1": x1}, y)
    def valid_gen():
        for x0, x1, y in zip(vX0, vX1, vy):
            yield ({"input_0": x0, "input_1": x1}, y)
    res = {
        "train_gen": train_gen,
        "train_size": len(ty),
        "valid_gen": valid_gen,
        "valid_size": len(vy),
        "types": ({"input_0": tf.float64, "input_1": tf.float64}, tf.float64),
        # NOTE(review): shapes are hard-coded to 5 = dim // 2 for dim == 10
        # and will be wrong for other `dim` values — confirm.
        "shapes": ({"input_0": (5,), "input_1": (5,)}, (1,)),
    }
    print("load_data:\n", pformat(res))
    return res
if __name__ == "__main__":
    load_data()
| 1,408 | 26.096154 | 82 | py |
deephyper | deephyper-master/deephyper/test/nas/linearRegMultiInputsGen/__init__.py | from .problem import Problem # noqa: F401
| 43 | 21 | 42 | py |
deephyper | deephyper-master/deephyper/test/nas/linearReg/problem.py | from deephyper.nas.spacelib.tabular import OneLayerSpace
from deephyper.problem import NaProblem
from deephyper.test.nas.linearReg.load_data import load_data
# Minimal NAS test problem: one-layer tabular space, fixed hyperparameters,
# single epoch of training.
Problem = NaProblem()
Problem.load_data(load_data)
Problem.search_space(OneLayerSpace)
Problem.hyperparameters(
    batch_size=100, learning_rate=0.1, optimizer="adam", num_epochs=1
)
Problem.loss("mse")
Problem.metrics(["r2"])
Problem.objective("val_r2")
# Just to print your problem, to test its definition and imports in the current python environment.
if __name__ == "__main__":
    print(Problem)
    model = Problem.get_keras_model([1])
| 611 | 21.666667 | 99 | py |
deephyper | deephyper-master/deephyper/test/nas/linearReg/load_data.py | import numpy as np
def load_data(dim=10, verbose=0):
    """Build a synthetic regression dataset whose target is the sum of the inputs.

    Args:
        dim (int): number of features per sample.
        verbose (int): print array shapes when truthy.

    Return:
        Tuple of Numpy arrays: ``(train_X, train_y), (valid_X, valid_y)``.
    """
    rng = np.random.RandomState(42)
    n_samples = 10000
    train_fraction = 0.80
    low, high = 0, 100
    span = high - low
    rows = []
    targets = []
    for _ in range(n_samples):
        sample = low + rng.random(dim) * span
        rows.append(sample)
        targets.append([np.sum(sample)])
    x = np.array(rows)
    y = np.array(targets)
    split = int(train_fraction * n_samples)
    train_X, train_y = x[:split], y[:split]
    valid_X, valid_y = x[split:], y[split:]
    if verbose:
        print(f"train_X shape: {np.shape(train_X)}")
        print(f"train_y shape: {np.shape(train_y)}")
        print(f"valid_X shape: {np.shape(valid_X)}")
        print(f"valid_y shape: {np.shape(valid_y)}")
    return (train_X, train_y), (valid_X, valid_y)
if __name__ == "__main__":
    load_data(verbose=1)
| 897 | 23.944444 | 74 | py |
deephyper | deephyper-master/deephyper/test/nas/linearReg/__init__.py | from .problem import Problem # noqa: F401
| 43 | 21 | 42 | py |
deephyper | deephyper-master/deephyper/test/nas/linearRegMultiInputs/problem.py | from deephyper.problem import NaProblem
from deephyper.test.nas.linearRegMultiInputs.load_data import load_data
from deephyper.nas.preprocessing import minmaxstdscaler
from deephyper.nas.spacelib.tabular import OneLayerSpace
# NAS test problem with two input tensors (the feature matrix is split
# column-wise by the load_data function) and fixed hyperparameters.
Problem = NaProblem()
Problem.load_data(load_data)
# scale inputs with min-max + standard scaling before training
Problem.preprocessing(minmaxstdscaler)
Problem.search_space(OneLayerSpace)
Problem.hyperparameters(
    batch_size=100, learning_rate=0.1, optimizer="adam", num_epochs=10
)
Problem.loss("mse")
Problem.metrics(["r2"])
Problem.objective("val_r2")
# Just to print your problem, to test its definition and imports in the current python environment.
if __name__ == "__main__":
    print(Problem)
| 678 | 22.413793 | 99 | py |
deephyper | deephyper-master/deephyper/test/nas/linearRegMultiInputs/load_data.py | import numpy as np
def load_data(dim=10, verbose=0):
    """Generate a two-input split of the linear dataset ``y = sum(x_i)``.

    The feature matrix is split column-wise into two halves so that models
    with two input tensors can be trained on it.

    Args:
        dim (int): total number of features, split into two inputs of ``dim // 2``.
        verbose (int): print array shapes when truthy.

    Return:
        Tuple: ``([tX0, tX1], ty), ([vX0, vX1], vy)``.
    """
    rng = np.random.RandomState(42)
    n_samples = 1000
    train_fraction = 0.80
    low, high = 0, 100
    span = high - low
    x = np.array([low + rng.random(dim) * span for _ in range(n_samples)])
    y = np.array([[np.sum(row)] for row in x])
    split = int(train_fraction * n_samples)
    half = dim // 2  # we want two different inputs
    tX0, tX1 = x[:split, :half], x[:split, half:]
    vX0, vX1 = x[split:, :half], x[split:, half:]
    ty, vy = y[:split], y[split:]
    if verbose:
        print(f"tX0 shape: {np.shape(tX0)} | tX1 shape: {np.shape(tX1)}")
        print(f"ty shape: {np.shape(ty)}")
        print(f"vX0 shape: {np.shape(vX0)} | vX1 shape: {np.shape(vX1)}")
        print(f"vy shape: {np.shape(vy)}")
    return ([tX0, tX1], ty), ([vX0, vX1], vy)
if __name__ == "__main__":
    load_data(verbose=1)
| 1,047 | 26.578947 | 74 | py |
deephyper | deephyper-master/deephyper/test/nas/linearRegMultiInputs/__init__.py | from .problem import Problem # noqa: F401
| 43 | 21 | 42 | py |
deephyper | deephyper-master/deephyper/nas/_nx_search_space.py | import abc
import traceback
from collections.abc import Iterable
import networkx as nx
from deephyper.core.exceptions.nas.space import (
NodeAlreadyAdded,
StructureHasACycle,
WrongSequenceToSetOperations,
)
from deephyper.nas.node import MimeNode, Node, VariableNode
class NxSearchSpace(abc.ABC):
    """A NxSearchSpace is an search_space based on a networkx graph."""
    def __init__(self, seed=None, **kwargs):
        # directed graph whose vertices are Node objects and whose edges are
        # tensor connections between them
        self.graph = nx.DiGraph()
        self.seed = seed
        self.output_node = None
    def plot(self, path):
        # dump the graph in graphviz DOT format; failures are printed, not raised
        with open(path, "w") as f:
            try:
                nx.nx_agraph.write_dot(self.graph, f)
            except Exception:
                print("Error: can't create graphviz file...")
                traceback.print_exc()
    def __len__(self):
        """Number of VariableNodes in the current search_space.
        Returns:
            int: number of variable nodes in the current search_space.
        """
        # NOTE(review): this counts *all* nodes (self.nodes), not only
        # VariableNodes as the docstring states — confirm intent.
        return len(self.nodes)
    @property
    def nodes(self):
        """Nodes of the current KSearchSpace.
        Returns:
            iterator: nodes of the current KSearchSpace.
        """
        return list(self.graph.nodes)
    def add_node(self, node):
        """Add a new node to the search_space.
        Args:
            node (Node): node to add to the search_space.
        Raises:
            TypeError: if 'node' is not an instance of Node.
            NodeAlreadyAdded: if 'node' has already been added to the search_space.
        """
        if not isinstance(node, Node):
            raise TypeError("'node' argument should be an instance of Node!")
        if node in self.nodes:
            raise NodeAlreadyAdded(node)
        self.graph.add_node(node)
    def connect(self, node1, node2):
        """Create a new connection in the KSearchSpace graph.
        The edge created corresponds to : node1 -> node2.
        Args:
            node1 (Node)
            node2 (Node)
        Raise:
            StructureHasACycle: if the new edge is creating a cycle.
        """
        assert isinstance(node1, Node)
        assert isinstance(node2, Node)
        self.graph.add_edge(node1, node2)
        # the search space must remain a DAG to be buildable into a model
        if not (nx.is_directed_acyclic_graph(self.graph)):
            raise StructureHasACycle(
                f"the connection between {node1} -> {node2} is creating a cycle in the search_space's graph."
            )
    @property
    def size(self):
        """Size of the search space define by the search_space"""
        # product of the number of candidate operations over all VariableNodes
        # (0 if there is no VariableNode with at least one operation)
        s = 0
        for n in filter(lambda n: isinstance(n, VariableNode), self.nodes):
            if n.num_ops != 0:
                if s == 0:
                    s = n.num_ops
                else:
                    s *= n.num_ops
        return s
    @property
    def max_num_ops(self):
        """Returns the maximum number of operations accross all VariableNodes of the struct.
        Returns:
            int: maximum number of Operations for a VariableNode in the current Structure.
        """
        return max(map(lambda n: n.num_ops, self.variable_nodes))
    @property
    def num_nodes(self):
        """Returns the number of VariableNodes in the current Structure.
        Returns:
            int: number of VariableNodes in the current Structure.
        """
        return len(list(self.variable_nodes))
    @property
    def variable_nodes(self):
        """Iterator of VariableNodes of the search_space.
        Returns:
            (Iterator(VariableNode)): generator of VariablesNodes of the search_space.
        """
        return filter(lambda n: isinstance(n, VariableNode), self.nodes)
    @property
    def mime_nodes(self):
        """Iterator of MimeNodes of the search_space.
        Returns:
            (Iterator(MimeNode)): iterator of MimeNodes of the search_space.
        """
        return filter(lambda n: isinstance(n, MimeNode), self.nodes)
    def denormalize(self, indexes):
        """Denormalize a sequence of normalized indexes to get a sequence of absolute indexes. Useful when you want to compare the number of different search_spaces.
        Args:
            indexes (Iterable): a sequence of normalized indexes.
        Returns:
            list: A list of absolute indexes corresponding to operations choosen with relative indexes of `indexes`.
        """
        assert isinstance(
            indexes, Iterable
        ), 'Wrong argument, "indexes" should be of Iterable.'
        if len(indexes) != self.num_nodes:
            raise WrongSequenceToSetOperations(indexes, list(self.variable_nodes))
        # delegate the per-node denormalization to each VariableNode
        return [
            vnode.denormalize(op_i) for op_i, vnode in zip(indexes, self.variable_nodes)
        ]
    def set_output_node(self):
        """Set the output node of the search_space.
        :meta private:
        """
        if self.output_node is None:
            nodes = list(self.graph.nodes())
            self.output_node = []
            # output nodes are the sinks of the DAG (nodes with no successor)
            for n in nodes:
                if len(list(self.graph.successors(n))) == 0:
                    self.output_node.append(n)
            # a single sink is stored directly rather than as a 1-element list
            if len(self.output_node) == 1:
                self.output_node = self.output_node[0]
    def create_tensor_aux(self, g, n, train=None):
        """Recursive function to create the tensors from the graph.
        :meta private:
        Args:
            g (nx.DiGraph): a graph
            n (nx.Node): a node
            train (bool): True if the network is built for training, False if the network is built for validation/testing (for example False will deactivate Dropout).
        Return:
            the tensor represented by n.
        """
        try:
            # nodes cache their tensor, so shared sub-graphs are built once
            if n._tensor is not None:
                output_tensor = n._tensor
            else:
                pred = list(g.predecessors(n))
                if len(pred) == 0:
                    output_tensor = n.create_tensor(train=train, seed=self.seed)
                else:
                    # build all predecessors first (post-order traversal),
                    # flattening any list-valued results
                    tensor_list = list()
                    for s_i in pred:
                        tmp = self.create_tensor_aux(g, s_i, train=train)
                        if type(tmp) is list:
                            tensor_list.extend(tmp)
                        else:
                            tensor_list.append(tmp)
                    output_tensor = n.create_tensor(
                        tensor_list, train=train, seed=self.seed
                    )
            return output_tensor
        except TypeError:
            raise RuntimeError(f"Failed to build tensors from :{n}")
    @abc.abstractmethod
    def choices(self):
        """Gives the possible choices for each decision variable of the search space.
        Returns:
            list: A list of tuple where each element corresponds to a discrete variable represented by ``(low, high)``.
        """
    @abc.abstractmethod
    def sample(self, choice=None):
        """Sample a ``tf.keras.Model`` from the search space.
        Args:
            choice (list, optional): A list of decision for the operations of this search space. Defaults to None, will generate a random sample.
        Returns:
            tf.keras.Model: A Tensorflow Keras model.
        """
    @abc.abstractmethod
    def build(self):
        """Build the current graph search space."""
| 7,301 | 30.747826 | 166 | py |
deephyper | deephyper-master/deephyper/nas/lr_scheduler.py | import tensorflow as tf
def exponential_decay(epoch, lr):
    """Hold the learning rate constant for the first 10 epochs, then decay it
    exponentially by a factor ``exp(-0.1)`` per epoch."""
    return lr if epoch < 10 else lr * tf.math.exp(-0.1)
| 262 | 20.916667 | 88 | py |
deephyper | deephyper-master/deephyper/nas/losses.py | """This module provides different loss functions. A loss can be defined by a keyword (str) or a callable following the ``tensorflow.keras`` interface. If it is a keyword it has to be available in ``tensorflow.keras`` or in ``deephyper.losses``. The loss functions availble in ``deephyper.losses`` are:
* Negative Log Likelihood (compatible with Tensorflow Probability): ``tfp_negloglik`` or ``tfp_nll``
"""
from collections import OrderedDict
import tensorflow as tf
from deephyper.core.utils import load_attr
def tfp_negloglik(y, rv_y):
    """Negative log likelihood of ``y`` under the distribution ``rv_y``
    (Tensorflow Probability loss)."""
    log_likelihood = rv_y.log_prob(y)
    return -log_likelihood
# registry of loss *functions* (called directly with (y_true, y_pred))
losses_func = OrderedDict()
# both keyword aliases resolve to the same callable
losses_func["tfp_negloglik"] = losses_func["tfp_nll"] = tfp_negloglik
# registry of loss *factories* (instantiated lazily by selectLoss)
losses_obj = OrderedDict()
def selectLoss(name: str):
    """Return the loss defined by name.
    Args:
        name (str): a string referenced in DeepHyper, one referenced in keras or an attribute name to import.
    Returns:
        str or callable: a string supposing it is referenced in the keras framework or a callable taking (y_true, y_pred) as inputs and returning a tensor.
    """
    # a callable is already a valid loss: pass it through unchanged
    if callable(name):
        return name
    if losses_func.get(name) is None and losses_obj.get(name) is None:
        try:
            # try to import the loss from a dotted attribute path
            loaded_obj = load_attr(name)
            return loaded_obj
        except Exception:
            return tf.keras.losses.get(
                name
            )  # supposing it is referenced in keras losses
    else:
        if name in losses_func:
            return losses_func[name]
        else:
            # loss objects are stored as factories and instantiated here
            return losses_obj[name]()
| 1,605 | 34.688889 | 301 | py |
deephyper | deephyper-master/deephyper/nas/node.py | """This module provides the available node types to build a ``KSearchSpace``.
"""
import tensorflow as tf
import deephyper.core.exceptions
from deephyper.nas.operation import Operation
class Node:
    """Base node of a ``KSearchSpace`` graph.

    Every instance receives a unique, monotonically increasing identifier
    taken from a class-level counter.

    Args:
        name (str): node name.
    """

    # global counter of 'Node' instances created so far
    num = 0

    def __init__(self, name="", *args, **kwargs):
        Node.num = Node.num + 1
        self._num = Node.num
        self._tensor = None
        self.name = name

    def __str__(self):
        return "{}[id={}]".format(self.name, self._num)

    @property
    def id(self):
        """Unique identifier of this node."""
        return self._num

    @property
    def op(self):
        raise NotImplementedError

    def create_tensor(self, *args, **kwargs):
        raise NotImplementedError

    @staticmethod
    def verify_operation(op):
        """Wrap ``op`` into an ``Operation`` if it is a keras layer."""
        if isinstance(op, Operation):
            return op
        if isinstance(op, tf.keras.layers.Layer):
            return Operation(op)
        raise RuntimeError(
            f"Can't add this operation '{op.__name__}'. An operation should be either of type Operation or tf.keras.layers.Layer when is of type: {type(op)}"
        )
class OperationNode(Node):
    """Node that evaluates its attached operation to produce (and cache) a tensor."""
    def __init__(self, name="", *args, **kwargs):
        super().__init__(name=name, *args, **kwargs)
    def create_tensor(self, inputs=None, train=True, seed=None, **kwargs):
        # the produced tensor is cached so a shared node is only built once
        if self._tensor is None:
            if inputs is None:
                try:
                    # NOTE(review): the `seed` parameter is ignored here
                    # (seed=None is passed to the op) — confirm intent.
                    self._tensor = self.op(train=train, seed=None)
                except TypeError:
                    raise RuntimeError(
                        f'Verify if node: "{self}" has incoming connexions!'
                    )
            else:
                self._tensor = self.op(inputs, train=train)
        return self._tensor
class VariableNode(OperationNode):
    """This class represents a node of a graph where you have a set of possible operations. It means the agent will have to act to choose one of these operations.
    >>> import tensorflow as tf
    >>> from deephyper.nas.space.node import VariableNode
    >>> vnode = VariableNode("VNode1")
    >>> from deephyper.nas.space.op.op1d import Dense
    >>> vnode.add_op(Dense(
    ...     units=10,
    ...     activation=tf.nn.relu))
    >>> vnode.num_ops
    1
    >>> vnode.add_op(Dense(
    ...     units=1000,
    ...     activation=tf.nn.tanh))
    >>> vnode.num_ops
    2
    >>> vnode.set_op(0)
    >>> vnode.op.units
    10
    Args:
        name (str): node name.
    """
    def __init__(self, name=""):
        super().__init__(name=name)
        self._ops = list()  # candidate operations
        self._index = None  # index of the chosen operation, None until set
    def __str__(self):
        if self._index is not None:
            return f"{super().__str__()}(Variable[{str(self.op)}])"
        else:
            return f"{super().__str__()}(Variable[?])"
    def add_op(self, op):
        self._ops.append(self.verify_operation(op))
    @property
    def num_ops(self):
        return len(self._ops)
    def set_op(self, index):
        self.get_op(index).init(self)
    def get_op(self, index):
        # a float index is interpreted as a normalized position in [0, 1];
        # an int index is used directly
        assert "float" in str(type(index)) or "int" in str(
            type(index)
        ), f"found type is : {type(index)}"
        if "float" in str(type(index)):
            self._index = self.denormalize(index)
        else:
            assert 0 <= index and index < len(
                self._ops
            ), f"Number of possible operations is: {len(self._ops)}, but index given is: {index} (index starts from 0)!"
            self._index = index
        return self.op
    def denormalize(self, index):
        """Denormalize a normalized index to get an absolute indexes. Useful when you want to compare the number of different search_spaces.
        Args:
            indexes (float|int): a normalized index.
        Returns:
            int: An absolute indexes corresponding to the operation choosen with the relative index of `index`.
        """
        if type(index) is int:
            return index
        else:
            assert 0.0 <= index and index <= 1.0
            res = int(index * len(self._ops))
            # 1.0 maps to the last valid index instead of len(self._ops)
            if index == 1.0:
                res -= 1
            return res
    @property
    def op(self):
        if len(self._ops) == 0:
            raise RuntimeError("This VariableNode doesn't have any operation yet.")
        elif self._index is None:
            raise RuntimeError(
                'This VariableNode doesn\'t have any set operation, please use "set_op(index)" if you want to set one'
            )
        else:
            return self._ops[self._index]
    @property
    def ops(self):
        return self._ops
class ConstantNode(OperationNode):
    """A ConstantNode represents a node with a fixed operation. It means the agent will not make any new decision for this node. The common use case for this node is to add a tensor in the graph.
    >>> import tensorflow as tf
    >>> from deephyper.nas.space.node import ConstantNode
    >>> from deephyper.nas.space.op.op1d import Dense
    >>> cnode = ConstantNode(op=Dense(units=100, activation=tf.nn.relu), name='CNode1')
    >>> cnode.op
    Dense_100_relu
    Args:
        op (Operation, optional): operation to fix for this node. Defaults to None.
        name (str, optional): node name. Defaults to ``''``.
    """
    def __init__(self, op=None, name="", *args, **kwargs):
        super().__init__(name=name)
        if op is not None:
            op = self.verify_operation(op)
            op.init(self)  # set operation
        self._op = op
    def set_op(self, op):
        # replace the fixed operation (and re-initialize it for this node)
        op = self.verify_operation(op)
        op.init(self)
        self._op = op
    def __str__(self):
        return f"{super().__str__()}(Constant[{str(self.op)}])"
    @property
    def op(self):
        return self._op
class MirrorNode(OperationNode):
    """A MirrorNode is a node which reuse an other, it enable the reuse of tf.keras layers. This node will not add operations to choose.
    Args:
        node (Node): The targeted node to mirror.
    >>> from deephyper.nas.space.node import VariableNode, MirrorNode
    >>> from deephyper.nas.space.op.op1d import Dense
    >>> vnode = VariableNode()
    >>> vnode.add_op(Dense(10))
    >>> vnode.add_op(Dense(20))
    >>> mnode = MirrorNode(vnode)
    >>> vnode.set_op(0)
    >>> vnode.op
    Dense_10
    >>> mnode.op
    Dense_10
    """
    def __init__(self, node):
        super().__init__(name=f"Mirror[{str(node)}]")
        self._node = node
    @property
    def op(self):
        # delegate to the mirrored node: the exact same operation (and thus
        # the same layer object) is reused
        return self._node.op
class MimeNode(OperationNode):
    """A MimeNode is a node which reuse an the choice made for an VariableNode, it enable the definition of a Cell based search_space. This node reuse the operation from the mimed VariableNode but only the choice made.
    Args:
        node (VariableNode): the VariableNode to mime.
    >>> from deephyper.nas.space.node import VariableNode, MimeNode
    >>> from deephyper.nas.space.op.op1d import Dense
    >>> vnode = VariableNode()
    >>> vnode.add_op(Dense(10))
    >>> vnode.add_op(Dense(20))
    >>> mnode = MimeNode(vnode)
    >>> mnode.add_op(Dense(30))
    >>> mnode.add_op(Dense(40))
    >>> vnode.set_op(0)
    >>> vnode.op
    Dense_10
    >>> mnode.op
    Dense_30
    """
    def __init__(self, node, name=""):
        super().__init__(name=f"Mime[{name}][src={str(node)}]")
        self.node = node  # the mimed VariableNode
        self._ops = list()  # this node's own candidate operations
    def add_op(self, op):
        self._ops.append(self.verify_operation(op))
    @property
    def num_ops(self):
        return len(self._ops)
    def set_op(self):
        # the chosen index comes from the mimed node, not from this node
        if self.node._index is None:
            raise deephyper.core.exceptions.DeephyperRuntimeError(
                f"{str(self)} cannot be initialized because its source {str(self.node)} is not initialized!"
            )
        self._ops[self.node._index].init(self)
    @property
    def op(self):
        if self.num_ops != self.node.num_ops:
            raise deephyper.core.exceptions.DeephyperRuntimeError(
                f"{str(self)} and {str(self.node)} should have the same number of opertions, when {str(self)} has {self.num_ops} and {str(self.node)} has {self.node.num_ops}!"
            )
        else:
            return self._ops[self.node._index]
    @property
    def ops(self):
        return self._ops
| 8,363 | 29.086331 | 218 | py |
deephyper | deephyper-master/deephyper/nas/_keras_search_space.py | import copy
import logging
import warnings
import networkx as nx
import numpy as np
import tensorflow as tf
from deephyper.core.exceptions.nas.space import (
InputShapeOfWrongType,
WrongSequenceToSetOperations,
)
from deephyper.nas._nx_search_space import NxSearchSpace
from deephyper.nas.node import ConstantNode
from deephyper.nas.operation import Tensor
from tensorflow import keras
from tensorflow.python.keras.utils.vis_utils import model_to_dot
logger = logging.getLogger(__name__)
class KSearchSpace(NxSearchSpace):
    """A KSearchSpace represents a search space of neural networks.
    >>> import tensorflow as tf
    >>> from deephyper.nas import KSearchSpace
    >>> from deephyper.nas.node import ConstantNode, VariableNode
    >>> from deephyper.nas.operation import operation, Identity
    >>> Dense = operation(tf.keras.layers.Dense)
    >>> Dropout = operation(tf.keras.layers.Dropout)
    >>> class ExampleSpace(KSearchSpace):
    ...     def build(self):
    ...         # input nodes are automatically built based on `input_shape`
    ...         input_node = self.input_nodes[0]
    ...         # we want 4 layers maximum (Identity corresponds to not adding a layer)
    ...         for i in range(4):
    ...             node = VariableNode()
    ...             self.connect(input_node, node)
    ...             # we add 3 possible operations for each node
    ...             node.add_op(Identity())
    ...             node.add_op(Dense(100, "relu"))
    ...             node.add_op(Dropout(0.2))
    ...             input_node = node
    ...         output = ConstantNode(op=Dense(self.output_shape[0]))
    ...         self.connect(input_node, output)
    ...         return self
    ...
    >>>
    >>> space = ExampleSpace(input_shape=(1,), output_shape=(1,)).build()
    >>> space.sample().summary()
    Args:
        input_shape (list(tuple(int))): list of shapes of all inputs.
        output_shape (tuple(int)): shape of output.
        batch_size (list(tuple(int))): batch size of the input layer. If ``input_shape`` is defining a list of inputs, ``batch_size`` should also define a list of inputs.
    Raises:
        InputShapeOfWrongType: [description]
    """
    def __init__(
        self, input_shape, output_shape, batch_size=None, seed=None, *args, **kwargs
    ):
        super().__init__()
        self._random = np.random.RandomState(seed)
        self.input_shape = input_shape
        if type(input_shape) is tuple:
            # we have only one input tensor here
            op = Tensor(
                keras.layers.Input(input_shape, name="input_0", batch_size=batch_size)
            )
            self.input_nodes = [ConstantNode(op=op, name="Input_0")]
        elif type(input_shape) is list and all(
            map(lambda x: type(x) is tuple, input_shape)
        ):
            # we have a list of input tensors here
            self.input_nodes = list()
            for i in range(len(input_shape)):
                # NOTE(review): this rebinds the `batch_size` parameter on the
                # first iteration, so when it is a list every input after the
                # first receives batch_size=None — confirm intent.
                batch_size = batch_size[i] if type(batch_size) is list else None
                op = Tensor(
                    keras.layers.Input(
                        input_shape[i], name=f"input_{i}", batch_size=batch_size
                    )
                )
                inode = ConstantNode(op=op, name=f"Input_{i}")
                self.input_nodes.append(inode)
        else:
            raise InputShapeOfWrongType(input_shape)
        for node in self.input_nodes:
            self.graph.add_node(node)
        self.output_shape = output_shape
        self.output_node = None
        self._model = None
    @property
    def input(self):
        return self.input_nodes
    @property
    def output(self):
        return self.output_node
    @property
    def depth(self):
        if self._model is None:
            raise RuntimeError("Can't compute depth of model without creating a model.")
        return len(self.longest_path)
    @property
    def longest_path(self):
        if self._model is None:
            raise RuntimeError(
                "Can't compute longest path of model without creating a model."
            )
        # convert the built keras model to a graph and take its longest DAG path
        nx_graph = nx.drawing.nx_pydot.from_pydot(model_to_dot(self._model))
        return nx.algorithms.dag.dag_longest_path(nx_graph)
    def set_ops(self, indexes):
        """Set the operations for each node of each cell of the search_space.
        :meta private:
        Args:
            indexes (list): element of list can be float in [0, 1] or int.
        Raises:
            WrongSequenceToSetOperations: raised when 'indexes' is of a wrong length.
        """
        if len(indexes) != len(list(self.variable_nodes)):
            raise WrongSequenceToSetOperations(indexes, list(self.variable_nodes))
        for op_i, node in zip(indexes, self.variable_nodes):
            node.set_op(op_i)
        # MimeNodes copy the decision made by their source VariableNode
        for node in self.mime_nodes:
            node.set_op()
        self.set_output_node()
    def create_model(self):
        """Create the tensors corresponding to the search_space.
        :meta private:
        Returns:
            A keras.Model for the current search_space with the corresponding set of operations.
        """
        # !the output layer does not have to be of the same shape as the data
        # !this depends on the loss
        if type(self.output_node) is list:
            # multiple outputs: build one tensor per output node
            output_tensors = [
                self.create_tensor_aux(self.graph, out) for out in self.output_node
            ]
            for out_T in output_tensors:
                # the output index is recovered from the tensor's name suffix
                output_n = int(out_T.name.split("/")[0].split("_")[-1])
                out_S = self.output_shape[output_n]
                if tf.keras.backend.is_keras_tensor(out_T):
                    out_T_shape = out_T.type_spec.shape
                    if out_T_shape[1:] != out_S:
                        warnings.warn(
                            f"The output tensor of shape {out_T_shape} doesn't match the expected shape {out_S}!",
                            RuntimeWarning,
                        )
            input_tensors = [inode._tensor for inode in self.input_nodes]
            self._model = keras.Model(inputs=input_tensors, outputs=output_tensors)
        else:
            output_tensors = self.create_tensor_aux(self.graph, self.output_node)
            if tf.keras.backend.is_keras_tensor(output_tensors):
                output_tensors_shape = output_tensors.type_spec.shape
                if output_tensors_shape[1:] != self.output_shape:
                    warnings.warn(
                        f"The output tensor of shape {output_tensors_shape} doesn't match the expected shape {self.output_shape}!",
                        RuntimeWarning,
                    )
            input_tensors = [inode._tensor for inode in self.input_nodes]
            self._model = keras.Model(inputs=input_tensors, outputs=[output_tensors])
        return self._model
    def choices(self):
        """Gives the possible choices for each decision variable of the search space.
        Returns:
            list: A list of tuple where each element corresponds to a discrete variable represented by ``(low, high)``.
        """
        return [(0, vnode.num_ops - 1) for vnode in self.variable_nodes]
    def sample(self, choice=None):
        """Sample a ``tf.keras.Model`` from the search space.
        Args:
            choice (list, optional): A list of decision for the operations of this search space. Defaults to None, will generate a random sample.
        Returns:
            tf.keras.Model: A Tensorflow Keras model.
        """
        if choice is None:
            choice = [self._random.randint(c[0], c[1] + 1) for c in self.choices()]
        # deep-copy so the template search space is never mutated by sampling
        self_copy = copy.deepcopy(self)
        self_copy.set_ops(choice)
        model = self_copy.create_model()
        return model
| 7,837 | 34.789954 | 170 | py |
deephyper | deephyper-master/deephyper/nas/metrics.py | """This module provides different metric functions. A metric can be defined by a keyword (str) or a callable. If it is a keyword it has to be available in ``tensorflow.keras`` or in ``deephyper.netrics``. The loss functions availble in ``deephyper.metrics`` are:
* Sparse Perplexity: ``sparse_perplexity``
* R2: ``r2``
* AUC ROC: ``auroc``
* AUC Precision-Recall: ``aucpr``
"""
import functools
from collections import OrderedDict
import tensorflow as tf
from deephyper.core.utils import load_attr
def r2(y_true, y_pred):
    """Coefficient of determination (R^2), averaged over output dimensions."""
    SS_res = tf.math.reduce_sum(tf.math.square(y_true - y_pred), axis=0)
    SS_tot = tf.math.reduce_sum(
        tf.math.square(y_true - tf.math.reduce_mean(y_true, axis=0)), axis=0
    )
    # epsilon guards against division by zero when y_true is constant
    output_scores = 1 - SS_res / (SS_tot + tf.keras.backend.epsilon())
    r2 = tf.math.reduce_mean(output_scores)
    return r2
def mae(y_true, y_pred):
    """Mean absolute error."""
    return tf.keras.metrics.mean_absolute_error(y_true, y_pred)
def mse(y_true, y_pred):
    """Mean squared error."""
    return tf.keras.metrics.mean_squared_error(y_true, y_pred)
def rmse(y_true, y_pred):
    """Root mean squared error."""
    return tf.math.sqrt(tf.math.reduce_mean(tf.math.square(y_pred - y_true)))
def acc(y_true, y_pred):
    """Categorical accuracy."""
    return tf.keras.metrics.categorical_accuracy(y_true, y_pred)
def sparse_perplexity(y_true, y_pred):
    """Perplexity derived from sparse categorical cross-entropy.

    NOTE(review): keras cross-entropy is in nats (natural log), so perplexity
    is conventionally ``exp(cross_entropy)``; ``2 ** cross_entropy`` mixes
    bases — confirm this is intentional.
    """
    cross_entropy = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
    perplexity = tf.pow(2.0, cross_entropy)
    return perplexity
def to_tfp(metric_func):
    """Adapt a regular tensorflow-keras metric to Tensorflow Probability
    models whose output is a distribution.

    Args:
        metric_func (func): A regular tensorflow-keras metric function.
    """

    @functools.wraps(metric_func)
    def tfp_metric(y_true, y_pred):
        # evaluate the base metric against the distribution's mean
        return metric_func(y_true, y_pred.mean())

    tfp_metric.__name__ = "tfp_" + metric_func.__name__
    return tfp_metric
# convert some metrics for Tensorflow Probability where the output of the model is
# a distribution
tfp_r2 = to_tfp(r2)
tfp_mae = to_tfp(mae)
tfp_mse = to_tfp(mse)
tfp_rmse = to_tfp(rmse)
# registry of metric *functions* (called directly with (y_true, y_pred))
metrics_func = OrderedDict()
metrics_func["mean_absolute_error"] = metrics_func["mae"] = mae
metrics_func["r2"] = r2
metrics_func["mean_squared_error"] = metrics_func["mse"] = mse
metrics_func["root_mean_squared_error"] = metrics_func["rmse"] = rmse
metrics_func["accuracy"] = metrics_func["acc"] = acc
metrics_func["sparse_perplexity"] = sparse_perplexity
metrics_func["tfp_r2"] = tfp_r2
metrics_func["tfp_mse"] = tfp_mse
metrics_func["tfp_mae"] = tfp_mae
metrics_func["tfp_rmse"] = tfp_rmse
# registry of metric *factories* (instantiated lazily by selectMetric)
metrics_obj = OrderedDict()
metrics_obj["auroc"] = lambda: tf.keras.metrics.AUC(name="auroc", curve="ROC")
metrics_obj["aucpr"] = lambda: tf.keras.metrics.AUC(name="aucpr", curve="PR")
def selectMetric(name: str):
    """Return the metric defined by name.
    Args:
        name (str): a string referenced in DeepHyper, one referenced in keras or an attribute name to import.
    Returns:
        str or callable: a string supposing it is referenced in the keras framework or a callable taking (y_true, y_pred) as inputs and returning a tensor.
    """
    # a callable is already a valid metric: pass it through unchanged
    if callable(name):
        return name
    if metrics_func.get(name) is None and metrics_obj.get(name) is None:
        try:
            # try to import the metric from a dotted attribute path
            return load_attr(name)
        except Exception:
            return name  # supposing it is referenced in keras metrics
    else:
        if name in metrics_func:
            return metrics_func[name]
        else:
            # metric objects are stored as factories and instantiated here
            return metrics_obj[name]()
| 3,474 | 31.175926 | 262 | py |
deephyper | deephyper-master/deephyper/nas/__init__.py | from ._nx_search_space import NxSearchSpace
from ._keras_search_space import KSearchSpace
__all__ = ["NxSearchSpace", "KSearchSpace"]
| 135 | 26.2 | 45 | py |
deephyper | deephyper-master/deephyper/nas/trainer/_utils.py | from collections import OrderedDict
import tensorflow as tf
# registry mapping lowercase optimizer names to their keras optimizer classes
optimizers_keras = OrderedDict()
optimizers_keras["sgd"] = tf.keras.optimizers.SGD
optimizers_keras["rmsprop"] = tf.keras.optimizers.RMSprop
optimizers_keras["adagrad"] = tf.keras.optimizers.Adagrad
optimizers_keras["adam"] = tf.keras.optimizers.Adam
optimizers_keras["adadelta"] = tf.keras.optimizers.Adadelta
optimizers_keras["adamax"] = tf.keras.optimizers.Adamax
optimizers_keras["nadam"] = tf.keras.optimizers.Nadam
def selectOptimizer_keras(name):
    """Return the optimizer defined by name.

    Args:
        name (str): key of the optimizer in ``optimizers_keras``.

    Raises:
        RuntimeError: if ``name`` is not a known keras optimizer.
    """
    if optimizers_keras.get(name) is None:
        raise RuntimeError('"{0}" is not a defined optimizer for keras.'.format(name))
    else:
        return optimizers_keras[name]
def check_data_config(data_dict):
    """Identify which data format a ``load_data`` result follows.

    Args:
        data_dict (dict): the data configuration returned by ``load_data``.

    Returns:
        str: ``"gen"`` for generator-based data, ``"ndarray"`` for
        array-based data.

    Raises:
        RuntimeError: if the dict matches neither format.
    """
    gen_keys = ("train_gen", "train_size", "valid_gen", "valid_size", "types", "shapes")
    ndarray_keys = ("train_X", "train_Y", "valid_X", "valid_Y")
    # direct `in data_dict` membership instead of list-comps over .keys()
    if all(k in data_dict for k in gen_keys):
        return "gen"
    if all(k in data_dict for k in ndarray_keys):
        return "ndarray"
    raise RuntimeError("Wrong data config...")
| 1,156 | 35.15625 | 88 | py |
deephyper | deephyper-master/deephyper/nas/trainer/_arch.py | # definition of a key
# String keys used to index configuration dictionaries across the NAS trainer.
layer_type = "layer_type"
features = "features"
input_shape = "input_shape"
output_shape = "output_shape"
num_outputs = "num_outputs"
num_steps = "num_steps"
max_layers = "max_layers"
min_layers = "min_layers"
hyperparameters = "hyperparameters"
summary = "summary"
logs = "logs"
data = "data"
regression = "regression"
num_features = "num_features"
state_space = "state_space"
model_path = "model_path"
# hyperparameters
optimizer = "optimizer"
batch_size = "batch_size"
learning_rate = "learning_rate"
num_epochs = "num_epochs"
patience = "patience"
eval_freq = "eval_freq"
loss_metric = "loss"
metrics = "metrics"
test_metric = "test_metric"
text_input = "text_input"
objective = "objective"
callbacks = "callbacks"
shuffle_data = "shuffle_data"
cache_data = "cache_data"
# data
train_X = "train_X"
train_Y = "train_Y"
valid_X = "valid_X"
valid_Y = "valid_Y"
train_set = "train_set"
test_X = "test_X"
test_Y = "test_Y"
vocabulary = "vocabulary"
| 973 | 20.644444 | 35 | py |
deephyper | deephyper-master/deephyper/nas/trainer/_horovod.py | import logging
import time
from inspect import signature
import deephyper.nas.trainer._arch as a
import deephyper.nas.trainer._utils as U
import horovod.tensorflow.keras as hvd
import numpy as np
import tensorflow as tf
from deephyper.core.exceptions import DeephyperRuntimeError
from deephyper.nas.losses import selectLoss
from deephyper.nas.metrics import selectMetric
logger = logging.getLogger(__name__)
AUTOTUNE = tf.data.experimental.AUTOTUNE
class HorovodTrainer:
    """Keras trainer performing data-parallel distributed training with Horovod.

    The trainer reads data and hyperparameters from ``config`` (key names in
    :mod:`deephyper.nas.trainer._arch`), builds ``tf.data`` pipelines sharded
    across Horovod ranks, wraps the optimizer with
    ``hvd.DistributedOptimizer`` and exposes :meth:`train`, :meth:`evaluate`
    and :meth:`predict`.
    """
    def __init__(self, config, model):
        self.cname = self.__class__.__name__
        self.config = config
        self.model = model
        self.callbacks = []
        self.data = self.config[a.data]
        # hyperparameters
        self.config_hp = self.config[a.hyperparameters]
        self.optimizer_name = self.config_hp.get(a.optimizer, "adam")
        self.optimizer_eps = self.config_hp.get("epsilon", None)
        self.batch_size = self.config_hp.get(a.batch_size, 32)
        self.clipvalue = self.config_hp.get("clipvalue", None)
        self.learning_rate = self.config_hp.get(a.learning_rate, 1e-3)
        # augmentation strategy
        if not self.config.get("augment", None) is None:
            if not self.config["augment"].get("kwargs", None) is None:
                # Bind the optional kwargs so the dataset ``map`` only sees
                # the (inputs, outputs) signature.
                self.augment_func = lambda inputs, outputs: self.config["augment"][
                    "func"
                ](inputs, outputs, **self.config["augment"]["kwargs"])
            else:
                self.augment_func = self.config["augment"]["func"]
        self.num_epochs = self.config_hp[a.num_epochs]
        # Only rank 0 is verbose so progress output is not duplicated per rank.
        self.verbose = (
            self.config_hp.get("verbose", 1)
            if self.config_hp.get("verbose", 1) and hvd.rank() == 0
            else 0
        )
        self.setup_losses_and_metrics()
        # DATA loading
        self.data_config_type = None
        self.train_size = None
        self.valid_size = None
        self.train_steps_per_epoch = None
        self.valid_steps_per_epoch = None
        self.load_data()
        # DATA preprocessing
        self.preprocessing_func = None
        if self.config.get("preprocessing"):
            self.preprocessing_func = self.config["preprocessing"]["func"]
        self.preprocessor = None
        self.preprocess_data()
        # Dataset
        self.dataset_train = None
        self.set_dataset_train()
        self.dataset_valid = None
        self.set_dataset_valid()
        self.model_compile()
        self.train_history = None
        self.init_history()
        # Test on validation after each epoch
        if self.verbose == 1:
            logger.info("KerasTrainer instantiated")
            model.summary(print_fn=logger.info)
    def init_history(self):
        """Reset ``train_history``, seeding it with the model's parameter count."""
        self.train_history = dict()
        self.train_history["n_parameters"] = self.model.count_params()
    def setup_losses_and_metrics(self):
        """Resolve loss/metric names from the config into Keras callables."""
        def selectL(loss):
            if type(loss) is dict:
                loss = {k: selectLoss(v) for k, v in loss.items()}
            else:
                loss = selectLoss(loss)
            return loss
        self.loss_metrics = selectL(self.config[a.loss_metric])
        self.loss_weights = self.config.get("loss_weights")
        self.class_weights = self.config.get("class_weights")
        if self.loss_weights is None and type(self.loss_metrics) is dict:
            # Default to equal weighting when several losses are used.
            self.loss_weights = [1.0 for _ in range(len(self.loss_metrics))]
        if type(self.config[a.metrics]) is list:
            self.metrics_name = [selectMetric(m) for m in self.config[a.metrics]]
        else:
            def selectM(metric):
                if type(metric) is list:
                    return [selectMetric(m_i) for m_i in metric]
                else:
                    return selectMetric(metric)
            self.metrics_name = {
                n: selectM(m) for n, m in self.config[a.metrics].items()
            }
    def load_data(self):
        """Load data per the detected config layout and compute per-rank steps."""
        logger.debug("load_data")
        self.data_config_type = U.check_data_config(self.data)
        logger.debug(f"data config type: {self.data_config_type}")
        if self.data_config_type == "gen":
            self.load_data_generator()
        elif self.data_config_type == "ndarray":
            self.load_data_ndarray()
        else:
            raise DeephyperRuntimeError(
                f"Data config is not supported by this Trainer: '{self.data_config_type}'!"
            )
        # prepare number of steps for training and validation
        self.train_steps_per_epoch = self.train_size // self.batch_size
        if self.train_steps_per_epoch * self.batch_size < self.train_size:
            self.train_steps_per_epoch += 1
        self.valid_steps_per_epoch = self.valid_size // self.batch_size
        if self.valid_steps_per_epoch * self.batch_size < self.valid_size:
            self.valid_steps_per_epoch += 1
        # Each Horovod rank only processes its shard of the data.
        self.train_steps_per_epoch //= hvd.size()
        self.valid_steps_per_epoch //= hvd.size()
    def load_data_generator(self):
        """Copy the generator-based data description from the config."""
        self.train_gen = self.data["train_gen"]
        self.valid_gen = self.data["valid_gen"]
        self.data_types = self.data["types"]
        self.data_shapes = self.data["shapes"]
        self.train_size = self.data["train_size"]
        self.valid_size = self.data["valid_size"]
    def load_data_ndarray(self):
        """Load ndarray-based data from the config, validating types and lengths."""
        def f(x):
            return type(x) is np.ndarray
        # check data type
        # Output data
        if (
            type(self.config[a.data][a.train_Y]) is np.ndarray
            and type(self.config[a.data][a.valid_Y]) is np.ndarray
        ):
            self.train_Y = self.config[a.data][a.train_Y]
            self.valid_Y = self.config[a.data][a.valid_Y]
        elif (
            type(self.config[a.data][a.train_Y]) is list
            and type(self.config[a.data][a.valid_Y]) is list
        ):
            if not all(map(f, self.config[a.data][a.train_Y])) or not all(
                map(f, self.config[a.data][a.valid_Y])
            ):
                raise DeephyperRuntimeError(
                    "all outputs data should be of type np.ndarray !"
                )
            if (
                len(self.config[a.data][a.train_Y]) > 1
                and len(self.config[a.data][a.valid_Y]) > 1
            ):
                self.train_Y = self.config[a.data][a.train_Y]
                self.valid_Y = self.config[a.data][a.valid_Y]
            else:
                # Single-output case: unwrap the one-element lists.
                self.train_Y = self.config[a.data][a.train_Y][0]
                self.valid_Y = self.config[a.data][a.valid_Y][0]
        else:
            # NOTE(review): the message below formats
            # ``self.config[a.valid_Y][a.valid_X]`` which looks like a typo for
            # ``self.config[a.data][a.valid_Y]`` and may itself raise KeyError
            # when this branch runs — confirm before relying on this error text.
            raise DeephyperRuntimeError(
                f"Data are of an unsupported type and should be of same type: type(self.config['data']['train_Y'])=={type(self.config[a.data][a.train_Y])} and type(self.config['data']['valid_Y'])=={type(self.config[a.valid_Y][a.valid_X])} !"
            )
        # Input data
        if (
            type(self.config[a.data][a.train_X]) is np.ndarray
            and type(self.config[a.data][a.valid_X]) is np.ndarray
        ):
            self.train_X = [self.config[a.data][a.train_X]]
            self.valid_X = [self.config[a.data][a.valid_X]]
        elif (
            type(self.config[a.data][a.train_X]) is list
            and type(self.config[a.data][a.valid_X]) is list
        ):
            if not all(map(f, self.config[a.data][a.train_X])) or not all(
                map(f, self.config[a.data][a.valid_X])
            ):
                raise DeephyperRuntimeError(
                    "all inputs data should be of type np.ndarray !"
                )
            if (
                len(self.config[a.data][a.train_X]) > 1
                and len(self.config[a.data][a.valid_X]) > 1
            ):
                self.train_X = self.config[a.data][a.train_X]
                self.valid_X = self.config[a.data][a.valid_X]
            else:
                self.train_X = self.config[a.data][a.train_X][0]
                self.valid_X = self.config[a.data][a.valid_X][0]
        else:
            raise DeephyperRuntimeError(
                f"Data are of an unsupported type and should be of same type: type(self.config['data']['train_X'])=={type(self.config[a.data][a.train_X])} and type(self.config['data']['valid_X'])=={type(self.config[a.data][a.valid_X])} !"
            )
        # check data length
        self.train_size = np.shape(self.train_X[0])[0]
        if not all(map(lambda x: np.shape(x)[0] == self.train_size, self.train_X)):
            raise DeephyperRuntimeError(
                "All training inputs data should have same length!"
            )
        self.valid_size = np.shape(self.valid_X[0])[0]
        if not all(map(lambda x: np.shape(x)[0] == self.valid_size, self.valid_X)):
            raise DeephyperRuntimeError(
                "All validation inputs data should have same length!"
            )
    def preprocess_data(self):
        """Fit ``preprocessing_func`` over train+valid data and transform both in place."""
        if self.data_config_type == "gen":
            return
        if self.preprocessor is not None:
            raise DeephyperRuntimeError("You can only preprocess data one time.")
        if self.preprocessing_func:
            logger.debug(f"preprocess_data with: {str(self.preprocessing_func)}")
            if len(np.shape(self.train_Y)) == 2:
                # Inputs and outputs are concatenated column-wise so a single
                # preprocessor is fitted over all columns at once.
                data_train = np.concatenate((*self.train_X, self.train_Y), axis=1)
                data_valid = np.concatenate((*self.valid_X, self.valid_Y), axis=1)
                data = np.concatenate((data_train, data_valid), axis=0)
                self.preprocessor = self.preprocessing_func()
                dt_shp = np.shape(data_train)
                tX_shp = [np.shape(x) for x in self.train_X]
                preproc_data = self.preprocessor.fit_transform(data)
                # Split the transformed matrix back into input slices / outputs.
                acc, self.train_X = 0, list()
                for shp in tX_shp:
                    self.train_X.append(preproc_data[: dt_shp[0], acc : acc + shp[1]])
                    acc += shp[1]
                self.train_Y = preproc_data[: dt_shp[0], acc:]
                acc, self.valid_X = 0, list()
                for shp in tX_shp:
                    self.valid_X.append(preproc_data[dt_shp[0] :, acc : acc + shp[1]])
                    acc += shp[1]
                self.valid_Y = preproc_data[dt_shp[0] :, acc:]
        else:
            logger.info("no preprocessing function")
    def set_dataset_train(self):
        """Build the sharded, shuffled, batched training ``tf.data`` pipeline."""
        if self.data_config_type == "ndarray":
            if type(self.train_Y) is list:
                output_mapping = {
                    f"output_{i}": tY for i, tY in enumerate(self.train_Y)
                }
            else:
                output_mapping = self.train_Y
            self.dataset_train = tf.data.Dataset.from_tensor_slices(
                (
                    {f"input_{i}": tX for i, tX in enumerate(self.train_X)},
                    output_mapping,
                )
            )
        else: # self.data_config_type == "gen"
            self.dataset_train = tf.data.Dataset.from_generator(
                self.train_gen,
                output_types=self.data_types,
                output_shapes=(
                    {
                        f"input_{i}": tf.TensorShape(
                            [*self.data_shapes[0][f"input_{i}"]]
                        )
                        for i in range(len(self.data_shapes[0]))
                    },
                    tf.TensorShape([*self.data_shapes[1]]),
                ),
            )
        # Each rank sees only its own shard of the training data.
        self.dataset_train = self.dataset_train.shard(
            num_shards=hvd.size(), index=hvd.rank()
        )
        self.dataset_train = self.dataset_train.shuffle(
            self.train_size // hvd.size(), reshuffle_each_iteration=True
        )
        self.dataset_train = self.dataset_train.repeat(self.num_epochs)
        if hasattr(self, "augment_func"):
            logger.info("Data augmentation set.")
            self.dataset_train = self.dataset_train.map(
                self.augment_func, num_parallel_calls=AUTOTUNE
            )
        self.dataset_train = self.dataset_train.batch(self.batch_size)
        self.dataset_train = self.dataset_train.prefetch(AUTOTUNE)
        # self.dataset_train = self.dataset_train.repeat()
    def set_dataset_valid(self):
        """Build the batched validation ``tf.data`` pipeline (not sharded)."""
        if self.data_config_type == "ndarray":
            if type(self.valid_Y) is list:
                output_mapping = {
                    f"output_{i}": vY for i, vY in enumerate(self.valid_Y)
                }
            else:
                output_mapping = self.valid_Y
            self.dataset_valid = tf.data.Dataset.from_tensor_slices(
                (
                    {f"input_{i}": vX for i, vX in enumerate(self.valid_X)},
                    output_mapping,
                )
            )
        else:
            self.dataset_valid = tf.data.Dataset.from_generator(
                self.valid_gen,
                output_types=self.data_types,
                output_shapes=(
                    {
                        f"input_{i}": tf.TensorShape(
                            [*self.data_shapes[0][f"input_{i}"]]
                        )
                        for i in range(len(self.data_shapes[0]))
                    },
                    tf.TensorShape([*self.data_shapes[1]]),
                ),
            )
        self.dataset_valid = self.dataset_valid.batch(self.batch_size).repeat()
    def model_compile(self):
        """Compile the model with a Horovod-distributed optimizer."""
        optimizer_fn = U.selectOptimizer_keras(self.optimizer_name)
        opti_parameters = signature(optimizer_fn).parameters
        params = {}
        # "lr" and "learning_rate" is checked depending if Keras or Tensorflow optimizer is used
        if "lr" in opti_parameters:
            params["lr"] = self.learning_rate
        elif "learning_rate" in opti_parameters:
            params["learning_rate"] = self.learning_rate
        else:
            raise DeephyperRuntimeError(
                f"The learning_rate parameter is not found amoung optimiser arguments: {opti_parameters}"
            )
        if "epsilon" in opti_parameters:
            params["epsilon"] = self.optimizer_eps
        if self.clipvalue is not None:
            params["clipvalue"] = self.clipvalue
        self.optimizer = hvd.DistributedOptimizer(optimizer_fn(**params))
        if type(self.loss_metrics) is dict:
            self.model.compile(
                optimizer=self.optimizer,
                loss=self.loss_metrics,
                loss_weights=self.loss_weights,
                metrics=self.metrics_name,
            )
        else:
            self.model.compile(
                optimizer=self.optimizer,
                loss=self.loss_metrics,
                metrics=self.metrics_name,
            )
    def predict(self, dataset: str = "valid", keep_normalize: bool = False) -> tuple:
        """Compute predictions on the training or validation dataset.
        Args:
            dataset (str, optional): 'valid' or 'train'. Defaults to 'valid'.
            keep_normalize (bool, optional): if False then the preprocessing will be reversed after prediction. if True nothing will be reversed. Defaults to False.
        Raises:
            DeephyperRuntimeError: if ``dataset`` is neither 'valid' nor 'train'.
        Returns:
            tuple: (y_true, y_pred)
        """
        if not (dataset == "valid" or dataset == "train"):
            raise DeephyperRuntimeError(
                "dataset parameter should be equal to: 'valid' or 'train'"
            )
        if dataset == "valid":
            valid_steps = self.valid_size // self.batch_size
            if valid_steps * self.batch_size < self.valid_size:
                valid_steps += 1
            y_pred = self.model.predict(self.dataset_valid, steps=valid_steps)
        else: # dataset == 'train'
            y_pred = self.model.predict(
                self.dataset_train, steps=self.train_steps_per_epoch
            )
        if (
            self.preprocessing_func
            and not keep_normalize
            and not self.data_config_type == "gen"
        ):
            # Undo the fitted preprocessing on both targets and predictions.
            if dataset == "valid":
                data_X, data_Y = self.valid_X, self.valid_Y
            else: # dataset == 'train'
                data_X, data_Y = self.train_X, self.train_Y
            val_pred = np.concatenate((*data_X, y_pred), axis=1)
            val_orig = np.concatenate((*data_X, data_Y), axis=1)
            val_pred_trans = self.preprocessor.inverse_transform(val_pred)
            val_orig_trans = self.preprocessor.inverse_transform(val_orig)
            y_orig = val_orig_trans[:, -np.shape(data_Y)[1] :]
            y_pred = val_pred_trans[:, -np.shape(data_Y)[1] :]
        else:
            if self.data_config_type == "ndarray":
                y_orig = self.valid_Y if dataset == "valid" else self.train_Y
            else:
                gen = self.valid_gen() if dataset == "valid" else self.train_gen()
                y_orig = np.array([e[-1] for e in gen])
        return y_orig, y_pred
    def evaluate(self, dataset="train"):
        """Evaluate the performance of your model for the same configuration.
        Args:
            dataset (str, optional): must be "train" or "valid". If "train" then metrics will be evaluated on the training dataset. If "valid" then metrics will be evaluated on the "validation" dataset. Defaults to 'train'.
        Returns:
            list: a list of scalar values corresponding do config loss & metrics.
        """
        if dataset == "train":
            return self.model.evaluate(
                self.dataset_train, steps=self.train_steps_per_epoch
            )
        else:
            return self.model.evaluate(
                self.dataset_valid, steps=self.valid_steps_per_epoch
            )
    def train(
        self, num_epochs: int = None, with_pred: bool = False, last_only: bool = False
    ):
        """Train the model.
        Args:
            num_epochs (int, optional): override the num_epochs passed to init the Trainer. Defaults to None, will use the num_epochs passed to init the Trainer.
            with_pred (bool, optional): will compute a prediction after the training and will add ('y_true', 'y_pred') to the output history. Defaults to False, will skip it (use it to save compute time).
            last_only (bool, optional): will compute metrics after the last epoch only. Defaults to False, will compute metrics after each training epoch (use it to save compute time).
        Raises:
            DeephyperRuntimeError: raised when the ``num_epochs < 0``.
        Returns:
            dict: a dictionnary corresponding to the training.
        """
        num_epochs = self.num_epochs if num_epochs is None else num_epochs
        self.init_history()
        if num_epochs > 0:
            time_start_training = time.time() # TIMING
            if not last_only:
                logger.info(
                    "Trainer is computing metrics on validation after each training epoch."
                )
                history = self.model.fit(
                    self.dataset_train,
                    verbose=self.verbose,
                    epochs=num_epochs,
                    steps_per_epoch=self.train_steps_per_epoch,
                    callbacks=self.callbacks,
                    validation_data=self.dataset_valid,
                    validation_steps=self.valid_steps_per_epoch,
                )
            else:
                logger.info(
                    "Trainer is computing metrics on validation after the last training epoch."
                )
                # Run all but the final epoch without validation, then one
                # final epoch with validation so metrics are computed once.
                if num_epochs > 1:
                    self.model.fit(
                        self.dataset_train,
                        verbose=self.verbose,
                        epochs=num_epochs - 1,
                        steps_per_epoch=self.train_steps_per_epoch,
                        callbacks=self.callbacks,
                    )
                history = self.model.fit(
                    self.dataset_train,
                    epochs=1,
                    verbose=self.verbose,
                    steps_per_epoch=self.train_steps_per_epoch,
                    callbacks=self.callbacks,
                    validation_data=self.dataset_valid,
                    validation_steps=self.valid_steps_per_epoch,
                )
            time_end_training = time.time() # TIMING
            self.train_history["training_time"] = (
                time_end_training - time_start_training
            )
            self.train_history.update(history.history)
        elif num_epochs < 0:
            raise DeephyperRuntimeError(
                f"Trainer: number of epochs should be >= 0: {num_epochs}"
            )
        if with_pred:
            time_start_predict = time.time()
            y_true, y_pred = self.predict(dataset="valid")
            time_end_predict = time.time()
            self.train_history["val_predict_time"] = (
                time_end_predict - time_start_predict
            )
            self.train_history["y_true"] = y_true
            self.train_history["y_pred"] = y_pred
        return self.train_history
| 21,070 | 37.733456 | 241 | py |
deephyper | deephyper-master/deephyper/nas/trainer/_base.py | import inspect
import logging
import time
from inspect import signature
import deephyper.nas.trainer._arch as a
import deephyper.nas.trainer._utils as U
import numpy as np
import tensorflow as tf
from deephyper.core.exceptions import DeephyperRuntimeError
from deephyper.nas.losses import selectLoss
from deephyper.nas.metrics import selectMetric
logger = logging.getLogger(__name__)
class BaseTrainer:
    """Single-process Keras trainer used by the NAS evaluation pipeline.

    Reads data and hyperparameters from ``config`` (key names in
    :mod:`deephyper.nas.trainer._arch`), builds cached/shuffled ``tf.data``
    pipelines, compiles the ``model`` and exposes :meth:`train`,
    :meth:`evaluate` and :meth:`predict`.
    """
    def __init__(self, config, model):
        self.cname = self.__class__.__name__
        self.config = config
        self.model = model
        self.callbacks = []
        self.data = self.config[a.data]
        self.config_hp = self.config[a.hyperparameters]
        self.optimizer_name = self.config_hp.get(a.optimizer, "adam")
        self.optimizer_eps = self.config_hp.get("epsilon", None)
        self.batch_size = self.config_hp.get(a.batch_size, 32)
        self.learning_rate = self.config_hp.get(a.learning_rate, 1e-3)
        self.num_epochs = self.config_hp.get(a.num_epochs, 1)
        self.shuffle_data = self.config_hp.get(a.shuffle_data, True)
        self.cache_data = self.config_hp.get(a.cache_data, True)
        self.batch = self.config_hp.get("batch", True)
        self.momentum = self.config_hp.get("momentum", 0.0)
        self.nesterov = self.config_hp.get("nesterov", False)
        self.label_smoothing = self.config_hp.get("label_smoothing", 0.0)
        self.verbose = self.config_hp.get("verbose", 1)
        # self.balanced = self.config_hp.get("balanced", False)
        self.setup_losses_and_metrics()
        # DATA loading
        self.data_config_type = None
        self.train_size = None
        self.valid_size = None
        self.train_steps_per_epoch = None
        self.valid_steps_per_epoch = None
        self.load_data()
        # DATA preprocessing
        self.preprocessing_func = None
        if self.config.get("preprocessing"):
            self.preprocessing_func = self.config["preprocessing"]["func"]
        self.preprocessor = None
        self.preprocess_data()
        # Dataset
        self.dataset_train = None
        self.set_dataset_train()
        self.dataset_valid = None
        self.set_dataset_valid()
        self.model_compile()
        self.train_history = None
        self.init_history()
        # Test on validation after each epoch
        if self.verbose == 1:
            logger.info("KerasTrainer instantiated")
            model.summary(print_fn=logger.info)
    def init_history(self):
        """Reset ``train_history``, seeding it with the model's parameter count."""
        self.train_history = dict()
        self.train_history["n_parameters"] = self.model.count_params()
    def _select_loss(self, loss):
        """Resolve a loss name (or dict of names) into Keras loss objects,
        instantiating classes with ``label_smoothing`` when they accept it."""
        if type(loss) is dict:
            loss = {k: selectLoss(v) for k, v in loss.items()}
        else:
            loss = selectLoss(loss)
        if inspect.isclass(loss):
            loss_parameters = signature(loss).parameters
            params = {}
            if "label_smoothing" in loss_parameters:
                params["label_smoothing"] = self.label_smoothing
            loss = loss(**params)
        return loss
    def setup_losses_and_metrics(self):
        """Resolve loss/metric names from the config into Keras callables."""
        self.loss_metrics = self._select_loss(self.config[a.loss_metric])
        self.loss_weights = self.config.get("loss_weights")
        self.class_weights = self.config.get("class_weights")
        if self.loss_weights is None and type(self.loss_metrics) is dict:
            # Default to equal weighting when several losses are used.
            self.loss_weights = [1.0 for _ in range(len(self.loss_metrics))]
        if type(self.config[a.metrics]) is list:
            self.metrics_name = [selectMetric(m) for m in self.config[a.metrics]]
        else:
            def selectM(metric):
                if type(metric) is list:
                    return [selectMetric(m_i) for m_i in metric]
                else:
                    return selectMetric(metric)
            self.metrics_name = {
                n: selectM(m) for n, m in self.config[a.metrics].items()
            }
    def load_data(self):
        """Load data per the detected config layout and compute steps per epoch."""
        logger.debug("load_data")
        self.data_config_type = U.check_data_config(self.data)
        logger.debug(f"data config type: {self.data_config_type}")
        if self.data_config_type == "gen":
            self.load_data_generator()
        elif self.data_config_type == "ndarray":
            self.load_data_ndarray()
        else:
            raise DeephyperRuntimeError(
                f"Data config is not supported by this Trainer: '{self.data_config_type}'!"
            )
        # prepare number of steps for training and validation
        self.train_steps_per_epoch = self.train_size // self.batch_size
        if self.train_steps_per_epoch * self.batch_size < self.train_size:
            self.train_steps_per_epoch += 1
        self.valid_steps_per_epoch = self.valid_size // self.batch_size
        if self.valid_steps_per_epoch * self.batch_size < self.valid_size:
            self.valid_steps_per_epoch += 1
    def load_data_generator(self):
        """Copy the generator-based data description from the config."""
        self.train_gen = self.data["train_gen"]
        self.valid_gen = self.data["valid_gen"]
        self.data_types = self.data["types"]
        self.data_shapes = self.data["shapes"]
        self.train_size = self.data["train_size"]
        self.valid_size = self.data["valid_size"]
    def load_data_ndarray(self):
        """Load ndarray-based data from the config, validating types and lengths."""
        def f(x):
            return type(x) is np.ndarray
        # check data type
        # Output data
        if (
            type(self.config[a.data][a.train_Y]) is np.ndarray
            and type(self.config[a.data][a.valid_Y]) is np.ndarray
        ):
            self.train_Y = self.config[a.data][a.train_Y]
            self.valid_Y = self.config[a.data][a.valid_Y]
        elif (
            type(self.config[a.data][a.train_Y]) is list
            and type(self.config[a.data][a.valid_Y]) is list
        ):
            if not all(map(f, self.config[a.data][a.train_Y])) or not all(
                map(f, self.config[a.data][a.valid_Y])
            ):
                raise DeephyperRuntimeError(
                    "all outputs data should be of type np.ndarray !"
                )
            if (
                len(self.config[a.data][a.train_Y]) > 1
                and len(self.config[a.data][a.valid_Y]) > 1
            ):
                self.train_Y = self.config[a.data][a.train_Y]
                self.valid_Y = self.config[a.data][a.valid_Y]
            else:
                # Single-output case: unwrap the one-element lists.
                self.train_Y = self.config[a.data][a.train_Y][0]
                self.valid_Y = self.config[a.data][a.valid_Y][0]
        else:
            # NOTE(review): the message below formats
            # ``self.config[a.valid_Y][a.valid_X]`` which looks like a typo for
            # ``self.config[a.data][a.valid_Y]`` and may itself raise KeyError
            # when this branch runs — confirm before relying on this error text.
            raise DeephyperRuntimeError(
                f"Data are of an unsupported type and should be of same type: type(self.config['data']['train_Y'])=={type(self.config[a.data][a.train_Y])} and type(self.config['data']['valid_Y'])=={type(self.config[a.valid_Y][a.valid_X])} !"
            )
        # Input data
        if (
            type(self.config[a.data][a.train_X]) is np.ndarray
            and type(self.config[a.data][a.valid_X]) is np.ndarray
        ):
            self.train_X = [self.config[a.data][a.train_X]]
            self.valid_X = [self.config[a.data][a.valid_X]]
        elif (
            type(self.config[a.data][a.train_X]) is list
            and type(self.config[a.data][a.valid_X]) is list
        ):
            if not all(map(f, self.config[a.data][a.train_X])) or not all(
                map(f, self.config[a.data][a.valid_X])
            ):
                raise DeephyperRuntimeError(
                    "all inputs data should be of type np.ndarray !"
                )
            if (
                len(self.config[a.data][a.train_X]) > 1
                and len(self.config[a.data][a.valid_X]) > 1
            ):
                self.train_X = self.config[a.data][a.train_X]
                self.valid_X = self.config[a.data][a.valid_X]
            else:
                self.train_X = self.config[a.data][a.train_X][0]
                self.valid_X = self.config[a.data][a.valid_X][0]
        else:
            raise DeephyperRuntimeError(
                f"Data are of an unsupported type and should be of same type: type(self.config['data']['train_X'])=={type(self.config[a.data][a.train_X])} and type(self.config['data']['valid_X'])=={type(self.config[a.data][a.valid_X])} !"
            )
        logger.debug(f"{self.cname}: {len(self.train_X)} inputs")
        # check data length
        self.train_size = np.shape(self.train_X[0])[0]
        if not all(map(lambda x: np.shape(x)[0] == self.train_size, self.train_X)):
            raise DeephyperRuntimeError(
                "All training inputs data should have same length!"
            )
        self.valid_size = np.shape(self.valid_X[0])[0]
        if not all(map(lambda x: np.shape(x)[0] == self.valid_size, self.valid_X)):
            raise DeephyperRuntimeError(
                "All validation inputs data should have same length!"
            )
    def preprocess_data(self):
        """Fit ``preprocessing_func`` on the training data and transform both splits."""
        logger.debug("Starting preprocess of data")
        if self.data_config_type == "gen":
            logger.warn("Cannot preprocess data with generator!")
            return
        if self.preprocessor is not None:
            raise DeephyperRuntimeError("You can only preprocess data one time.")
        if self.preprocessing_func:
            logger.debug(f"preprocess_data with: {str(self.preprocessing_func)}")
            if all(
                [
                    len(np.shape(tX)) == len(np.shape(self.train_Y))
                    for tX in self.train_X
                ]
            ):
                # Inputs and outputs are concatenated on the last axis; the
                # preprocessor is fitted on train only and applied to valid.
                data_train = np.concatenate((*self.train_X, self.train_Y), axis=-1)
                data_valid = np.concatenate((*self.valid_X, self.valid_Y), axis=-1)
                self.preprocessor = self.preprocessing_func()
                tX_shp = [np.shape(x) for x in self.train_X]
                preproc_data_train = self.preprocessor.fit_transform(data_train)
                preproc_data_valid = self.preprocessor.transform(data_valid)
                # Split the transformed matrices back into input slices / outputs.
                acc, self.train_X = 0, list()
                for shp in tX_shp:
                    self.train_X.append(preproc_data_train[..., acc : acc + shp[1]])
                    acc += shp[1]
                self.train_Y = preproc_data_train[..., acc:]
                acc, self.valid_X = 0, list()
                for shp in tX_shp:
                    self.valid_X.append(preproc_data_valid[..., acc : acc + shp[1]])
                    acc += shp[1]
                self.valid_Y = preproc_data_valid[..., acc:]
            else:
                logger.warn(
                    f"Skipped preprocess because shape {np.shape(self.train_Y)} is not handled!"
                )
        else:
            logger.info("Skipped preprocess of data because no function is defined!")
    def set_dataset_train(self):
        """Build the (optionally cached/shuffled/batched) training pipeline."""
        if self.data_config_type == "ndarray":
            if type(self.train_Y) is list:
                output_mapping = {
                    f"output_{i}": tY for i, tY in enumerate(self.train_Y)
                }
            else:
                output_mapping = self.train_Y
            self.dataset_train = tf.data.Dataset.from_tensor_slices(
                (
                    {f"input_{i}": tX for i, tX in enumerate(self.train_X)},
                    output_mapping,
                )
            )
        else: # self.data_config_type == "gen"
            self.dataset_train = tf.data.Dataset.from_generator(
                self.train_gen,
                output_signature=self._get_output_signatures(),
            )
        if self.cache_data:
            self.dataset_train = self.dataset_train.cache()
        if self.shuffle_data:
            self.dataset_train = self.dataset_train.shuffle(
                self.train_size, reshuffle_each_iteration=True
            )
        if self.batch:
            self.dataset_train = self.dataset_train.batch(self.batch_size)
        self.dataset_train = self.dataset_train.prefetch(tf.data.AUTOTUNE).repeat(
            self.num_epochs
        )
    def set_dataset_valid(self):
        """Build the cached, batched validation pipeline."""
        if self.data_config_type == "ndarray":
            if type(self.valid_Y) is list:
                output_mapping = {
                    f"output_{i}": vY for i, vY in enumerate(self.valid_Y)
                }
            else:
                output_mapping = self.valid_Y
            self.dataset_valid = tf.data.Dataset.from_tensor_slices(
                (
                    {f"input_{i}": vX for i, vX in enumerate(self.valid_X)},
                    output_mapping,
                )
            )
        else:
            self.dataset_valid = tf.data.Dataset.from_generator(
                self.valid_gen,
                output_signature=self._get_output_signatures(valid=True),
            )
        self.dataset_valid = self.dataset_valid.cache()
        self.dataset_valid = self.dataset_valid.batch(self.batch_size)
        self.dataset_valid = self.dataset_valid.prefetch(tf.data.AUTOTUNE).repeat(
            self.num_epochs
        )
    def _get_output_signatures(self, valid=False):
        """Return ``(inputs, output)`` ``tf.TensorSpec`` signatures for the
        generator datasets; unbatched specs unless ``self.batch`` or ``valid``."""
        if self.batch or valid:
            return (
                {
                    f"input_{i}": tf.TensorSpec(
                        shape=(*self.data_shapes[0][f"input_{i}"],),
                        dtype=self.data_types[0][f"input_{i}"],
                    )
                    for i in range(len(self.data_shapes[0]))
                },
                tf.TensorSpec(
                    shape=(*self.data_shapes[1],),
                    dtype=self.data_types[1],
                ),
            )
        else:
            # Pre-batched generator: leading dimension is the batch axis.
            return (
                {
                    f"input_{i}": tf.TensorSpec(
                        shape=(
                            None,
                            *self.data_shapes[0][f"input_{i}"],
                        ),
                        dtype=self.data_types[0][f"input_{i}"],
                    )
                    for i in range(len(self.data_shapes[0]))
                },
                tf.TensorSpec(
                    shape=(None, *self.data_shapes[1]),
                    dtype=self.data_types[1],
                ),
            )
    def _setup_optimizer(self):
        """Instantiate the configured optimizer with only the kwargs it accepts."""
        optimizer_fn = U.selectOptimizer_keras(self.optimizer_name)
        opti_parameters = signature(optimizer_fn).parameters
        params = {}
        if "lr" in opti_parameters:
            params["lr"] = self.learning_rate
        elif "learning_rate" in opti_parameters:
            params["learning_rate"] = self.learning_rate
        else:
            raise DeephyperRuntimeError(
                f"The learning_rate parameter is not found amoung optimiser arguments: {opti_parameters}"
            )
        if "epsilon" in opti_parameters:
            params["epsilon"] = self.optimizer_eps
        if "momentum" in opti_parameters:
            params["momentum"] = self.momentum
        if "nesterov" in opti_parameters:
            params["nesterov"] = self.nesterov
        self.optimizer = optimizer_fn(**params)
    def model_compile(self):
        """Compile the model with the configured optimizer, losses and metrics."""
        self._setup_optimizer()
        if type(self.loss_metrics) is dict:
            self.model.compile(
                optimizer=self.optimizer,
                loss=self.loss_metrics,
                loss_weights=self.loss_weights,
                metrics=self.metrics_name,
            )
        else:
            self.model.compile(
                optimizer=self.optimizer,
                loss=self.loss_metrics,
                metrics=self.metrics_name,
            )
    def predict(self, dataset: str = "valid", keep_normalize: bool = False) -> tuple:
        """Compute predictions on the training or validation dataset.
        Args:
            dataset (str, optional): 'valid' or 'train'. Defaults to 'valid'.
            keep_normalize (bool, optional): if False then the preprocessing will be reversed after prediction. if True nothing will be reversed. Defaults to False.
        Raises:
            DeephyperRuntimeError: if ``dataset`` is neither 'valid' nor 'train'.
        Returns:
            tuple: (y_true, y_pred)
        """
        if not (dataset == "valid" or dataset == "train"):
            raise DeephyperRuntimeError(
                "dataset parameter should be equal to: 'valid' or 'train'"
            )
        if dataset == "valid":
            y_pred = self.model.predict(
                self.dataset_valid, steps=self.valid_steps_per_epoch
            )
        else: # dataset == 'train'
            y_pred = self.model.predict(
                self.dataset_train, steps=self.train_steps_per_epoch
            )
        if (
            self.preprocessing_func
            and not keep_normalize
            and not self.data_config_type == "gen"
        ):
            # Undo the fitted preprocessing on both targets and predictions.
            if dataset == "valid":
                data_X, data_Y = self.valid_X, self.valid_Y
            else: # dataset == 'train'
                data_X, data_Y = self.train_X, self.train_Y
            val_pred = np.concatenate((*data_X, y_pred), axis=1)
            val_orig = np.concatenate((*data_X, data_Y), axis=1)
            val_pred_trans = self.preprocessor.inverse_transform(val_pred)
            val_orig_trans = self.preprocessor.inverse_transform(val_orig)
            y_orig = val_orig_trans[:, -np.shape(data_Y)[1] :]
            y_pred = val_pred_trans[:, -np.shape(data_Y)[1] :]
        else:
            if self.data_config_type == "ndarray":
                y_orig = self.valid_Y if dataset == "valid" else self.train_Y
            else:
                gen = self.valid_gen() if dataset == "valid" else self.train_gen()
                y_orig = np.array([e[-1] for e in gen])
        return y_orig, y_pred
    def evaluate(self, dataset="train"):
        """Evaluate the performance of your model for the same configuration.
        Args:
            dataset (str, optional): must be "train" or "valid". If "train" then metrics will be evaluated on the training dataset. If "valid" then metrics will be evaluated on the "validation" dataset. Defaults to 'train'.
        Returns:
            list: a list of scalar values corresponding do config loss & metrics.
        """
        if dataset == "train":
            return self.model.evaluate(
                self.dataset_train, steps=self.train_steps_per_epoch
            )
        else:
            return self.model.evaluate(
                self.dataset_valid, steps=self.valid_steps_per_epoch
            )
    def train(
        self, num_epochs: int = None, with_pred: bool = False, last_only: bool = False
    ):
        """Train the model.
        Args:
            num_epochs (int, optional): override the num_epochs passed to init the Trainer. Defaults to None, will use the num_epochs passed to init the Trainer.
            with_pred (bool, optional): will compute a prediction after the training and will add ('y_true', 'y_pred') to the output history. Defaults to False, will skip it (use it to save compute time).
            last_only (bool, optional): will compute metrics after the last epoch only. Defaults to False, will compute metrics after each training epoch (use it to save compute time).
        Raises:
            DeephyperRuntimeError: raised when the ``num_epochs < 0``.
        Returns:
            dict: a dictionnary corresponding to the training.
        """
        num_epochs = self.num_epochs if num_epochs is None else num_epochs
        self.init_history()
        if num_epochs > 0:
            time_start_training = time.time() # TIMING
            if not last_only:
                logger.info(
                    "Trainer is computing metrics on validation after each training epoch."
                )
                history = self.model.fit(
                    self.dataset_train,
                    verbose=self.verbose,
                    epochs=num_epochs,
                    steps_per_epoch=self.train_steps_per_epoch,
                    callbacks=self.callbacks,
                    validation_data=self.dataset_valid,
                    validation_steps=self.valid_steps_per_epoch,
                    class_weight=self.class_weights,
                )
            else:
                logger.info(
                    "Trainer is computing metrics on validation after the last training epoch."
                )
                # Run all but the final epoch without validation, then one
                # final epoch with validation so metrics are computed once.
                if num_epochs > 1:
                    self.model.fit(
                        self.dataset_train,
                        verbose=self.verbose,
                        epochs=num_epochs - 1,
                        steps_per_epoch=self.train_steps_per_epoch,
                        callbacks=self.callbacks,
                        class_weight=self.class_weights,
                    )
                history = self.model.fit(
                    self.dataset_train,
                    epochs=1,
                    verbose=self.verbose,
                    steps_per_epoch=self.train_steps_per_epoch,
                    callbacks=self.callbacks,
                    validation_data=self.dataset_valid,
                    validation_steps=self.valid_steps_per_epoch,
                    class_weight=self.class_weights,
                )
            time_end_training = time.time() # TIMING
            self.train_history["training_time"] = (
                time_end_training - time_start_training
            )
            self.train_history.update(history.history)
        elif num_epochs < 0:
            raise DeephyperRuntimeError(
                f"Trainer: number of epochs should be >= 0: {num_epochs}"
            )
        if with_pred:
            time_start_predict = time.time()
            y_true, y_pred = self.predict(dataset="valid")
            time_end_predict = time.time()
            self.train_history["val_predict_time"] = (
                time_end_predict - time_start_predict
            )
            self.train_history["y_true"] = y_true
            self.train_history["y_pred"] = y_pred
        return self.train_history
| 21,923 | 37.0625 | 241 | py |
deephyper | deephyper-master/deephyper/nas/trainer/__init__.py | from ._base import BaseTrainer
__all__ = ["BaseTrainer"]
try:
    # Horovod is an optional dependency: expose ``HorovodTrainer`` only when
    # ``horovod.tensorflow.keras`` imports successfully.
    from ._horovod import HorovodTrainer  # noqa: F401
    __all__.append("HorovodTrainer")
except Exception:
    pass
| 184 | 15.818182 | 54 | py |
deephyper | deephyper-master/deephyper/nas/spacelib/__init__.py | """Library of neural architecture search spaces."""
| 52 | 25.5 | 51 | py |
deephyper | deephyper-master/deephyper/nas/spacelib/tabular/one_layer.py | import tensorflow as tf
from deephyper.nas import KSearchSpace
from deephyper.nas.node import ConstantNode, VariableNode
from deephyper.nas.operation import operation, Concatenate
Dense = operation(tf.keras.layers.Dense)
Dropout = operation(tf.keras.layers.Dropout)
class OneLayerSpace(KSearchSpace):
    """Search space made of a single variable dense layer (1 to 999 units).

    Args:
        input_shape (tuple or list): shape of the inputs without the batch
            dimension. A list of tuples creates one input node per tuple;
            each input gets its own variable dense layer and the branches
            are concatenated before the output layer.
        output_shape (tuple): shape of the output without the batch dimension.
        batch_size (int, optional): fixed batch size. Defaults to ``None``.
        seed (int, optional): random seed. Defaults to ``None``.
        regression (bool, optional): if ``True`` the output layer has no
            activation, otherwise a softmax is used. Defaults to ``True``.
    """

    def __init__(
        self, input_shape, output_shape, batch_size=None, seed=None, regression=True
    ):
        super().__init__(input_shape, output_shape, batch_size=batch_size, seed=seed)
        self.regression = regression

    def build(self):
        """Create the nodes and edges of the search space and return ``self``."""
        if type(self.input_shape) is list:
            # Multi-input case: one variable dense branch per input, then
            # concatenate the branches. (A leftover debug ``print(i)`` was
            # removed from this loop.)
            vnodes = []
            for i in range(len(self.input_shape)):
                vn = self.gen_vnode()
                vnodes.append(vn)
                self.connect(self.input_nodes[i], vn)

            prev_node = ConstantNode(Concatenate(self, vnodes))
        else:
            prev_node = self.gen_vnode()
            self.connect(self.input_nodes[0], prev_node)

        output_node = ConstantNode(
            Dense(
                self.output_shape[0], activation=None if self.regression else "softmax"
            )
        )
        self.connect(prev_node, output_node)

        return self

    def gen_vnode(self) -> VariableNode:
        """Create a variable node choosing a ReLU dense layer of 1..999 units."""
        vnode = VariableNode()
        for units in range(1, 1000):
            vnode.add_op(Dense(units, tf.nn.relu))
        return vnode
if __name__ == "__main__":
    from tensorflow.keras.utils import plot_model

    # Smoke test: build a two-input space, sample one model and plot it.
    space = OneLayerSpace(input_shape=[(10,), (10,)], output_shape=(1,)).build()
    plot_model(space.sample())
| 1,655 | 27.551724 | 87 | py |
deephyper | deephyper-master/deephyper/nas/spacelib/tabular/supervised_reg_auto_encoder.py | import tensorflow as tf
from deephyper.nas import KSearchSpace
from deephyper.nas.node import ConstantNode, VariableNode
from deephyper.nas.operation import Identity, operation
Dense = operation(tf.keras.layers.Dense)
class SupervisedRegAutoEncoderSpace(KSearchSpace):
    """Auto-encoder search space with a supervised regression head.

    The encoder/decoder stack follows ``units``, a list of layer sizes that
    must decrease to a bottleneck and then increase exactly once. Each layer
    is a variable node choosing a width near the corresponding entry of
    ``units``. The decoder end produces ``output_0`` (reconstruction) and a
    stack of ``num_layers`` variable dense layers attached to the bottleneck
    produces ``output_1`` (regression target).

    Args:
        input_shape (tuple): shape of the inputs without the batch dimension.
        output_shape (list of tuple): ``[reconstruction_shape, target_shape]``.
        batch_size (int, optional): fixed batch size. Defaults to ``None``.
        seed (int, optional): random seed. Defaults to ``None``.
        units (list of int, optional): auto-encoder layer sizes, decreasing
            then increasing (the minimum is the bottleneck). The default list
            is never mutated, so the mutable default is safe here.
        num_layers (int, optional): number of dense layers in the regression
            head. Defaults to 5.
    """

    def __init__(
        self,
        input_shape,
        output_shape,
        batch_size=None,
        seed=None,
        units=[128, 64, 32, 16, 8, 16, 32, 64, 128],
        num_layers=5,
    ):
        super().__init__(input_shape, output_shape, batch_size=batch_size, seed=seed)
        self.units = units
        self.num_layers = num_layers

    def build(self):
        """Create the nodes and edges of the search space and return ``self``."""
        inp = self.input_nodes[0]

        # Auto-encoder stack. Bug fix: ``build`` previously re-hardcoded the
        # default size list here, silently ignoring the ``units`` argument;
        # it now honors ``self.units``.
        units = self.units
        prev_node = inp
        d = 1  # +1 while encoding (sizes decreasing), -1 after the bottleneck
        latent_node = None
        for i in range(len(units)):
            vnode = VariableNode()
            vnode.add_op(Identity())
            if d == 1 and units[i] < units[i + 1]:
                # Bottleneck layer: widths from 2 up to units[i] (step 2).
                d = -1
                for u in range(min(2, units[i]), max(2, units[i]) + 1, 2):
                    vnode.add_op(Dense(u, tf.nn.relu))
                latent_node = vnode
            else:
                # Widths between this layer's size and its neighbor's (step 2).
                for u in range(
                    min(units[i], units[i + d]), max(units[i], units[i + d]) + 1, 2
                ):
                    vnode.add_op(Dense(u, tf.nn.relu))

            self.connect(prev_node, vnode)
            prev_node = vnode

        out2 = ConstantNode(op=Dense(self.output_shape[0][0], name="output_0"))
        self.connect(prev_node, out2)

        # Regression head attached to the bottleneck representation.
        # NOTE(review): if ``units`` never increases, no bottleneck is found
        # and ``latent_node`` stays None — ``units`` must be unimodal.
        prev_node = latent_node
        for _ in range(self.num_layers):
            vnode = VariableNode()
            for width in range(16, 129, 16):
                vnode.add_op(Dense(width, tf.nn.relu))
            self.connect(prev_node, vnode)
            prev_node = vnode

        out1 = ConstantNode(op=Dense(self.output_shape[1][0], name="output_1"))
        self.connect(prev_node, out1)

        return self
if __name__ == "__main__":
    from tensorflow.keras.utils import plot_model

    # Smoke test: 100-feature reconstruction plus a 10-dim regression target.
    space = SupervisedRegAutoEncoderSpace(
        input_shape=(100,), output_shape=[(100,), (10,)]
    ).build()
    plot_model(space.sample())
| 2,268 | 28.855263 | 85 | py |
deephyper | deephyper-master/deephyper/nas/spacelib/tabular/feed_forward.py | import tensorflow as tf
from deephyper.nas import KSearchSpace
from deephyper.nas.node import ConstantNode, VariableNode
from deephyper.nas.operation import Identity, operation
Dense = operation(tf.keras.layers.Dense)
class FeedForwardSpace(KSearchSpace):
    """Plain feed-forward search space without skip-connections.

    For each of ``num_layers`` positions the search chooses either to skip
    the layer (identity) or a ReLU dense layer whose width is drawn from
    ``range(*num_units)``.

    Args:
        input_shape (tuple, optional): true shape of inputs (no batch size
            dimension). Defaults to (2,).
        output_shape (tuple, optional): true shape of outputs (no batch size
            dimension). Defaults to (1,).
        num_layers (int, optional): maximum number of layers to have.
            Defaults to 10.
        num_units (tuple, optional): range of number of units such as
            range(start, end, step_size). Defaults to (1, 11).
        regression (bool, optional): a boolean defining if the model is a
            regressor or a classifier. Defaults to True.
    """

    def __init__(
        self,
        input_shape,
        output_shape,
        batch_size=None,
        seed=None,
        regression=True,
        num_units=(1, 11),
        num_layers=10,
    ):
        super().__init__(input_shape, output_shape, batch_size=batch_size, seed=seed)
        self.regression = regression
        self.num_units = num_units
        self.num_layers = num_layers

    def build(self):
        """Create the nodes and edges of the search space and return ``self``."""
        current = self.input_nodes[0]

        for _ in range(self.num_layers):
            layer = VariableNode()
            # The identity op lets the search drop this layer entirely.
            layer.add_op(Identity())
            for width in range(*self.num_units):
                layer.add_op(Dense(width, tf.nn.relu))
            self.connect(current, layer)
            current = layer

        head_activation = None if self.regression else "softmax"
        head = ConstantNode(Dense(self.output_shape[0], activation=head_activation))
        self.connect(current, head)

        return self
if __name__ == "__main__":
    from tensorflow.keras.utils import plot_model

    # Smoke test: build the space, sample one architecture and plot it.
    space = FeedForwardSpace(input_shape=(10,), output_shape=(1,)).build()
    plot_model(space.sample())
| 2,179 | 32.030303 | 150 | py |
deephyper | deephyper-master/deephyper/nas/spacelib/tabular/dense_skipco.py | import collections
import tensorflow as tf
from deephyper.nas import KSearchSpace
from deephyper.nas.node import ConstantNode, VariableNode
from deephyper.nas.operation import operation, Zero, Connect, AddByProjecting, Identity
Dense = operation(tf.keras.layers.Dense)
Dropout = operation(tf.keras.layers.Dropout)
class DenseSkipCoSpace(KSearchSpace):
    """Dense search space with optional skip-connections.

    Stacks ``num_layers`` variable dense layers. After each layer, skip
    connections from up to the 3 most recent anchor points may be merged in
    (projected, summed, ReLU-activated). An optional dropout layer is applied
    before the output layer.

    Args:
        input_shape (tuple): true shape of inputs (no batch dimension).
        output_shape (tuple): true shape of outputs (no batch dimension).
        batch_size (int, optional): fixed batch size. Defaults to ``None``.
        seed (int, optional): random seed. Defaults to ``None``.
        regression (bool, optional): if ``True`` the output layer has no
            activation, otherwise a softmax is used. Defaults to ``True``.
        num_layers (int, optional): maximum number of layers. Defaults to 10.
        dropout (float, optional): dropout rate applied before the output
            layer; 0.0 disables dropout. Defaults to 0.0.
    """

    def __init__(
        self,
        input_shape,
        output_shape,
        batch_size=None,
        seed=None,
        regression=True,
        num_layers=10,
        dropout=0.0,
    ):
        super().__init__(input_shape, output_shape, batch_size=batch_size, seed=seed)
        self.regression = regression
        self.num_layers = num_layers
        self.dropout = dropout

    def build(self):
        """Create the nodes and edges of the search space and return ``self``."""
        source = prev_input = self.input_nodes[0]

        # Skip-connections may only reach back to the 3 previous anchor
        # points (deque with maxlen=3 drops older anchors automatically).
        anchor_points = collections.deque([source], maxlen=3)

        for _ in range(self.num_layers):
            vnode = VariableNode()
            self.add_dense_to_(vnode)
            self.connect(prev_input, vnode)

            # Cell output: merge the layer with optional skip-connections.
            cell_output = vnode
            cmerge = ConstantNode()
            cmerge.set_op(AddByProjecting(self, [cell_output], activation="relu"))

            for anchor in anchor_points:
                # Each candidate skip-connection is either absent (Zero) or
                # a link to the anchor (Connect).
                skipco = VariableNode()
                skipco.add_op(Zero())
                skipco.add_op(Connect(self, anchor))
                self.connect(skipco, cmerge)

            prev_input = cmerge
            # Register this cell output as an anchor for the next iterations.
            anchor_points.append(prev_input)

        # Bug fix: the guard was ``>= 0.0``, which is always true for the
        # default ``dropout=0.0`` and inserted a useless Dropout(0.0) layer;
        # dropout is now only added for strictly positive rates.
        if self.dropout > 0.0:
            dropout_node = ConstantNode(op=Dropout(rate=self.dropout))
            self.connect(prev_input, dropout_node)
            prev_input = dropout_node

        output_node = ConstantNode(
            Dense(
                self.output_shape[0], activation=None if self.regression else "softmax"
            )
        )
        self.connect(prev_input, output_node)

        return self

    def add_dense_to_(self, node):
        """Populate ``node`` with identity plus dense ops (16..96 units x 5 activations)."""
        node.add_op(Identity())  # we do not want to create a layer in this case
        activations = [None, tf.nn.swish, tf.nn.relu, tf.nn.tanh, tf.nn.sigmoid]
        for units in range(16, 97, 16):
            for activation in activations:
                node.add_op(Dense(units=units, activation=activation))
if __name__ == "__main__":
    from tensorflow.keras.utils import plot_model

    # Smoke test: build the space, sample one architecture and plot it.
    space = DenseSkipCoSpace(input_shape=(10,), output_shape=(1,)).build()
    plot_model(space.sample())
| 2,637 | 28.311111 | 87 | py |
deephyper | deephyper-master/deephyper/nas/spacelib/tabular/__init__.py | """Neural architecture search spaces for tabular data."""
from .dense_skipco import DenseSkipCoSpace
from .one_layer import OneLayerSpace
from .feed_forward import FeedForwardSpace
from .supervised_reg_auto_encoder import SupervisedRegAutoEncoderSpace
__all__ = [
"DenseSkipCoSpace",
"OneLayerSpace",
"FeedForwardSpace",
"SupervisedRegAutoEncoderSpace",
]
| 373 | 27.769231 | 70 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.